| # Copyright 2015 The Chromium Authors. All rights reserved. |
| # Use of this source code is governed by a BSD-style license that can be |
| # found in the LICENSE file. |
| |
| """Unit tests for start_try_job module.""" |
| |
| __author__ = 'sullivan@google.com (Annie Sullivan)' |
| |
| import base64 |
| import json |
| import unittest |
| |
| import httplib2 |
| import mock |
| import webapp2 |
| import webtest |
| |
| from google.appengine.ext import ndb |
| |
| from dashboard import rietveld_service |
| from dashboard import start_try_job |
| from dashboard import testing_common |
| from dashboard.models import bug_data |
| from dashboard.models import graph_data |
| from dashboard.models import try_job |
| |
| |
| # Below is a series of test strings which may contain long lines. |
| # pylint: disable=line-too-long |
# Expected diff uploaded to Rietveld for a plain bisect job: the checked-in
# placeholder config (single-quoted, empty values) is replaced with the
# JSON-style (double-quoted) config generated by start_try_job.
_EXPECTED_BISECT_CONFIG_DIFF = """config = {
-  'command': '',
-  'good_revision': '',
-  'bad_revision': '',
-  'metric': '',
-  'repeat_count':'',
-  'max_time_minutes': '',
-  'truncate_percent':'',
+  "bad_revision": "215828",
+  "bisect_mode": "mean",
+  "bug_id": "12345",
+  "builder_type": "",
+  "command": "tools/perf/run_benchmark -v --browser=release dromaeo.jslibstylejquery",
+  "good_revision": "215806",
+  "max_time_minutes": "20",
+  "metric": "jslib/jslib",
+  "repeat_count": "20",
+  "target_arch": "ia32",
+  "truncate_percent": "25"
 }
"""
| |
# Expected config diff for a bisect on an internal-only (android-chrome) test;
# note git hashes rather than SVN-style revision numbers.
# NOTE(review): not referenced by any test visible in this file — presumably
# used by a test elsewhere or left over from one; confirm before removing.
_EXPECTED_BISECT_CONFIG_DIFF_FOR_INTERNAL_TEST = """config = {
-  'command': '',
-  'good_revision': '',
-  'bad_revision': '',
-  'metric': '',
-  'repeat_count':'',
-  'max_time_minutes': '',
-  'truncate_percent':'',
+  "bad_revision": "f14a8f733cce874d5d66e8e6b86e75bbac240b0e",
+  "bisect_mode": "mean",
+  "bug_id": "12345",
+  "builder_type": "",
+  "command": "tools/perf/run_benchmark -v --browser=android-chrome start_with_url.cold.startup_pages",
+  "good_revision": "d82ccc77c8a86ce9893a8035fb55aca666f044c8",
+  "max_time_minutes": "20",
+  "metric": "foreground_tab_request_start/foreground_tab_request_start",
+  "repeat_count": "20",
+  "target_arch": "ia32",
+  "truncate_percent": "25"
 }
"""
| |
# Same as _EXPECTED_BISECT_CONFIG_DIFF but with archived builds enabled,
# which shows up as a non-empty "builder_type" ("perf") in the config.
_EXPECTED_BISECT_CONFIG_DIFF_WITH_ARCHIVE = """config = {
-  'command': '',
-  'good_revision': '',
-  'bad_revision': '',
-  'metric': '',
-  'repeat_count':'',
-  'max_time_minutes': '',
-  'truncate_percent':'',
+  "bad_revision": "215828",
+  "bisect_mode": "mean",
+  "bug_id": "12345",
+  "builder_type": "perf",
+  "command": "tools/perf/run_benchmark -v --browser=release dromaeo.jslibstylejquery",
+  "good_revision": "215806",
+  "max_time_minutes": "20",
+  "metric": "jslib/jslib",
+  "repeat_count": "20",
+  "target_arch": "ia32",
+  "truncate_percent": "25"
 }
"""
| |
# Expected diff for a perf try job (no metric/bug_id; fixed repeat/time/
# truncate defaults chosen by start_try_job for perf tries).
_EXPECTED_PERF_CONFIG_DIFF = """config = {
-  'command': '',
-  'metric': '',
-  'repeat_count': '',
-  'max_time_minutes': '',
-  'truncate_percent': '',
+  "bad_revision": "215828",
+  "command": "tools/perf/run_benchmark -v --browser=release dromaeo.jslibstylejquery",
+  "good_revision": "215806",
+  "max_time_minutes": "60",
+  "repeat_count": "1",
+  "truncate_percent": "0"
 }
"""
| |
# Canned XSRF token returned by the mocked Rietveld "xsrf_token" endpoint.
_FAKE_XSRF_TOKEN = '1234567890'

# Canned Rietveld "upload" response: issue URL, patchset id, and one
# "<file id> <filename>" line. Issue id 33001 is referenced by the other
# mocked endpoint paths below.
_ISSUE_CREATED_RESPONSE = """Issue created. https://test-rietveld.appspot.com/33001
1
1001 filename
"""
| |
# Canned contents of the checked-in bisect config file, returned (base64-
# encoded) by _MockFetch for the _BISECT_CONFIG_PATH URL. The placeholder
# config dict at the bottom is what the diffs above modify.
_BISECT_CONFIG_CONTENTS = """# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

\"\"\"Config file for Run Performance Test Bisect Tool

This script is intended for use by anyone that wants to run a remote bisection
on a range of revisions to look for a performance regression. Modify the config
below and add the revision range, performance command, and metric. You can then
run a git try <bot>.

Changes to this file should never be submitted.

Args:
  'command': This is the full command line to pass to the
    bisect-perf-regression.py script in order to execute the test.
  'good_revision': An svn or git revision where the metric hadn't regressed yet.
  'bad_revision': An svn or git revision sometime after the metric had
    regressed.
  'metric': The name of the metric to parse out from the results of the
    performance test. You can retrieve the metric by looking at the stdio of
    the performance test. Look for lines of the format:

    RESULT <graph>: <trace>= <value> <units>

    The metric name is "<graph>/<trace>".
  'repeat_count': The number of times to repeat the performance test.
  'max_time_minutes': The script will attempt to run the performance test
    "repeat_count" times, unless it exceeds "max_time_minutes".
  'truncate_percent': Discard the highest/lowest % values from performance test.

Sample config:

config = {
  'command': './out/Release/performance_ui_tests' +
      ' --gtest_filter=PageCyclerTest.Intl1File',
  'good_revision': '179755',
  'bad_revision': '179782',
  'metric': 'times/t',
  'repeat_count': '20',
  'max_time_minutes': '20',
  'truncate_percent': '25',
}

On Windows:
  - If you're calling a python script you will need to add "python" to
the command:

config = {
  'command': 'python tools/perf/run_benchmark -v --browser=release kraken',
  'good_revision': '185319',
  'bad_revision': '185364',
  'metric': 'Total/Total',
  'repeat_count': '20',
  'max_time_minutes': '20',
  'truncate_percent': '25',
}


On ChromeOS:
  - Script accepts either ChromeOS versions, or unix timestamps as revisions.
  - You don't need to specify --identity and --remote, they will be added to
    the command using the bot's BISECT_CROS_IP and BISECT_CROS_BOARD values.

config = {
  'command': './tools/perf/run_benchmark -v '\
      '--browser=cros-chrome-guest '\
      'dromaeo tools/perf/page_sets/dromaeo/jslibstylejquery.json',
  'good_revision': '4086.0.0',
  'bad_revision': '4087.0.0',
  'metric': 'jslib/jslib',
  'repeat_count': '20',
  'max_time_minutes': '20',
  'truncate_percent': '25',
}

\"\"\"

config = {
  'command': '',
  'good_revision': '',
  'bad_revision': '',
  'metric': '',
  'repeat_count':'',
  'max_time_minutes': '',
  'truncate_percent':'',
}

# Workaround git try issue, see crbug.com/257689"""
| |
# Canned contents of the checked-in perf-try config file, returned (base64-
# encoded) by _MockFetch for the _PERF_CONFIG_PATH URL.
_PERF_CONFIG_CONTENTS = """# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

\"\"\"Config file for Run Performance Test Bot

This script is intended for use by anyone that wants to run a remote performance
test. Modify the config below and add the command to run the performance test,
the metric you're interested in, and repeat/discard parameters. You can then
run a git try <bot>.

Changes to this file should never be submitted.

Args:
  'command': This is the full command line to pass to the
    bisect-perf-regression.py script in order to execute the test.
  'metric': The name of the metric to parse out from the results of the
    performance test. You can retrieve the metric by looking at the stdio of
    the performance test. Look for lines of the format:

    RESULT <graph>: <trace>= <value> <units>

    The metric name is "<graph>/<trace>".
  'repeat_count': The number of times to repeat the performance test.
  'max_time_minutes': The script will attempt to run the performance test
    "repeat_count" times, unless it exceeds "max_time_minutes".
  'truncate_percent': Discard the highest/lowest % values from performance test.

Sample config:

config = {
  'command': './tools/perf/run_benchmark --browser=release smoothness.key_mobile_sites',
  'metric': 'mean_frame_time/mean_frame_time',
  'repeat_count': '20',
  'max_time_minutes': '20',
  'truncate_percent': '25',
}

On Windows:
  - If you're calling a python script you will need to add "python" to
the command:

config = {
  'command': 'python tools/perf/run_benchmark -v --browser=release smoothness.key_mobile_sites',
  'metric': 'mean_frame_time/mean_frame_time',
  'repeat_count': '20',
  'max_time_minutes': '20',
  'truncate_percent': '25',
}


On ChromeOS:
  - Script accepts either ChromeOS versions, or unix timestamps as revisions.
  - You don't need to specify --identity and --remote, they will be added to
    the command using the bot's BISECT_CROS_IP and BISECT_CROS_BOARD values.

config = {
  'command': './tools/perf/run_benchmark -v '\
      '--browser=cros-chrome-guest '\
      'smoothness.key_mobile_sites',
  'metric': 'mean_frame_time/mean_frame_time',
  'repeat_count': '20',
  'max_time_minutes': '20',
  'truncate_percent': '25',
}

\"\"\"

config = {
  'command': '',
  'metric': '',
  'repeat_count': '',
  'max_time_minutes': '',
  'truncate_percent': '',
}

# Workaround git try issue, see crbug.com/257689"""
# pylint: enable=line-too-long

# These globals are set in tests and checked in _MockMakeRequest.
# _EXPECTED_CONFIG_DIFF: diff string expected in the Rietveld "upload" body.
# _TEST_EXPECTED_BOT: try-bot name expected in the "try" request body.
# _TEST_EXPECTED_CONFIG_CONTENTS: config file contents expected on upload.
_EXPECTED_CONFIG_DIFF = None
_TEST_EXPECTED_BOT = None
_TEST_EXPECTED_CONFIG_CONTENTS = None
| |
| |
def _MockFetch(url=None):
  """Mocks urlfetch.fetch, returning canned base64-encoded config contents.

  Args:
    url: The URL being fetched; must contain one of the known config paths.

  Returns:
    A FakeResponseObject with status 200 and the base64-encoded contents of
    the matching canned config file.
  """
  if start_try_job._BISECT_CONFIG_PATH in url:
    return testing_common.FakeResponseObject(
        200, base64.encodestring(_BISECT_CONFIG_CONTENTS))
  if start_try_job._PERF_CONFIG_PATH in url:
    return testing_common.FakeResponseObject(
        200, base64.encodestring(_PERF_CONFIG_CONTENTS))
  # Previously an unrecognized URL silently returned None, which made tests
  # fail later with a confusing AttributeError. Fail loudly here instead,
  # consistent with _MockMakeRequest's handling of invalid paths.
  assert False, 'Invalid url %s fetched!' % url
| |
| |
def _MockMakeRequest(path, *args, **kwargs):  # pylint: disable=unused-argument
  """Mocks out a request, returning a canned response."""
  def _Success(content):
    # All mocked endpoints reply with HTTP 200 plus canned content.
    return httplib2.Response({'status': '200'}), content

  if path.endswith('xsrf_token'):
    assert kwargs['headers']['X-Requesting-XSRF-Token'] == 1
    return _Success(_FAKE_XSRF_TOKEN)
  elif path == 'upload':
    assert kwargs['method'] == 'POST'
    assert _EXPECTED_CONFIG_DIFF in kwargs['body'], (
        '%s\nnot in\n%s\n' % (_EXPECTED_CONFIG_DIFF, kwargs['body']))
    return _Success(_ISSUE_CREATED_RESPONSE)
  elif path == '33001/upload_content/1/1001':
    assert kwargs['method'] == 'POST'
    assert _TEST_EXPECTED_CONFIG_CONTENTS in kwargs['body']
    return _Success('Dummy content')
  elif path == '33001/upload_complete/1':
    assert kwargs['method'] == 'POST'
    return _Success('Dummy content')
  elif path == '33001/try/1':
    assert _TEST_EXPECTED_BOT in kwargs['body']
    return _Success('Dummy content')
  assert False, 'Invalid url %s requested!' % path
| |
| |
class StartBisectTest(testing_common.TestCase):
  """Tests for the /start_try_job handler and its config helper functions."""

  def setUp(self):
    super(StartBisectTest, self).setUp()
    app = webapp2.WSGIApplication(
        [('/start_try_job', start_try_job.StartBisectHandler)])
    self.testapp = webtest.TestApp(app)

  def testPost_InvalidUser_ShowsErrorMessage(self):
    """Non-chromium.org/google.com users get an error, not a bisect job."""
    self.SetCurrentUser('foo@yahoo.com')
    response = self.testapp.post('/start_try_job', {
        'test_path': 'ChromiumPerf/win7/morejs/times/page_load_time',
        'step': 'prefill-info',
    })
    self.assertEqual(
        {'error': ('You must be logged in to either a chromium.org'
                   ' or google.com account to run a bisect job.')},
        json.loads(response.body))

  def testPost_PrefillInfoStep(self):
    """Tests the prefill-info step for several test paths and bots."""
    self.SetCurrentUser('foo@google.com')
    testing_common.AddDataToMockDataStore(
        ['ChromiumPerf'],
        ['win7',
         'android-nexus10',
         'chromium-rel-win8-dual',
         'chromium-rel-xp-single'], {
            'page_cycler.morejs': {
                'times': {
                    'page_load_time': {},
                    'page_load_time_ref': {},
                    'blog.chromium.org': {},
                    'dev.chromium.org': {},
                    'test.blogspot.com': {},
                    'http___test.com_': {}
                },
                'vm_final_size_renderer': {
                    'ref': {},
                    'vm_final_size_renderer_extcs1': {}
                },
            },
            'blink_perf': {
                'Animation_balls': {}
            }
        }
    )
    # Mark leaf tests (everything except suite/chart-level tests) as having
    # rows, so they are offered as bisectable metrics.
    tests = graph_data.Test.query().fetch()
    for test in tests:
      name = test.key.string_id()
      if name in ('times', 'page_cycler.morejs', 'blink_perf'):
        continue
      test.has_rows = True
    ndb.put_multi(tests)

    response = self.testapp.post('/start_try_job', {
        'test_path': ('ChromiumPerf/win7/page_cycler.morejs/'
                      'times/page_load_time'),
        'step': 'prefill-info',
    })
    info = json.loads(response.body)
    self.assertEqual('win_perf_bisect', info['bisect_bot'])
    self.assertEqual('foo@google.com', info['email'])
    self.assertEqual('page_cycler.morejs', info['suite'])
    self.assertEqual('times/page_load_time', info['default_metric'])
    self.assertEqual('ChromiumPerf', info['master'])
    self.assertFalse(info['internal_only'])
    self.assertTrue(info['use_archive'])
    self.assertEqual(start_try_job._BISECT_BOTS, info['all_bots'])

    # Reference ("_ref") traces are excluded from the metric list.
    self.assertEqual(
        ['times/blog.chromium.org',
         'times/dev.chromium.org',
         'times/http___test.com_',
         'times/page_load_time',
         'times/test.blogspot.com'],
        info['all_metrics'])

    response = self.testapp.post('/start_try_job', {
        'test_path': ('ChromiumPerf/win7/page_cycler.morejs/'
                      'vm_final_size_renderer'),
        'step': 'prefill-info',
    })
    info = json.loads(response.body)
    self.assertEqual(
        ['vm_final_size_renderer/vm_final_size_renderer',
         'vm_final_size_renderer/vm_final_size_renderer_extcs1'],
        info['all_metrics'])

    response = self.testapp.post('/start_try_job', {
        'test_path': 'ChromiumPerf/win7/blink_perf/Animation_balls',
        'step': 'prefill-info',
    })
    info = json.loads(response.body)
    self.assertEqual('Animation_balls/Animation_balls', info['default_metric'])

    response = self.testapp.post('/start_try_job', {
        'test_path': 'ChromiumPerf/android-nexus10/blink_perf/Animation_balls',
        'step': 'prefill-info',
    })
    info = json.loads(response.body)
    self.assertEqual('android_nexus10_perf_bisect', info['bisect_bot'])

    response = self.testapp.post('/start_try_job', {
        'test_path': ('ChromiumPerf/chromium-rel-win8-dual/'
                      'blink_perf/Animation_balls'),
        'step': 'prefill-info',
    })
    info = json.loads(response.body)
    self.assertEqual('win_8_perf_bisect', info['bisect_bot'])

    response = self.testapp.post('/start_try_job', {
        'test_path': ('ChromiumPerf/chromium-rel-xp-single/'
                      'blink_perf/Animation_balls'),
        'step': 'prefill-info',
    })
    info = json.loads(response.body)
    self.assertEqual('win_xp_perf_bisect', info['bisect_bot'])

  def _TestGetBisectConfig(self, parameters, expected_config_dict):
    """Helper method to test get-config requests."""
    response = start_try_job.GetBisectConfig(**parameters)
    self.assertEqual(expected_config_dict, response['config_dict'])

  def testGetConfig_EmptyUseArchiveParameter_GivesEmptyBuilderType(self):
    """An empty use_archive value means archives are NOT used."""
    self._TestGetBisectConfig(
        {
            'bisect_bot': 'linux_perf_bisect',
            'suite': 'page_cycler.moz',
            'metric': 'times/page_load_time',
            'good_revision': '265549',
            'bad_revision': '265556',
            'repeat_count': '15',
            'max_time_minutes': '8',
            'truncate_percent': '30',
            'bug_id': '-1',
            'use_archive': ''
        },
        {
            'command': ('tools/perf/run_benchmark -v '
                        '--browser=release page_cycler.moz'),
            'good_revision': '265549',
            'bad_revision': '265556',
            'metric': 'times/page_load_time',
            'repeat_count': '15',
            'max_time_minutes': '8',
            'truncate_percent': '30',
            'bug_id': '-1',
            'builder_type': '',
            'target_arch': 'ia32',
            'bisect_mode': 'mean',
        })

  def testGetConfig_NonEmptyUseArchiveParameter_GivesNonEmptyBuilderType(self):
    # Any non-empty value for use_archive means that archives should be used.
    # Even if value of use_archive is "false", archives will still be used!
    # BUG FIX: this test previously passed use_archive='' and expected
    # builder_type='', duplicating the empty-parameter test above and not
    # exercising the behavior its name describes.
    self._TestGetBisectConfig(
        {
            'bisect_bot': 'linux_perf_bisect',
            'suite': 'page_cycler.moz',
            'metric': 'times/page_load_time',
            'good_revision': '265549',
            'bad_revision': '265556',
            'repeat_count': '15',
            'max_time_minutes': '8',
            'truncate_percent': '30',
            'bug_id': '-1',
            'use_archive': 'false'
        },
        {
            'command': ('tools/perf/run_benchmark -v '
                        '--browser=release page_cycler.moz'),
            'good_revision': '265549',
            'bad_revision': '265556',
            'metric': 'times/page_load_time',
            'repeat_count': '15',
            'max_time_minutes': '8',
            'truncate_percent': '30',
            'bug_id': '-1',
            'builder_type': 'perf',
            'target_arch': 'ia32',
            'bisect_mode': 'mean',
        })

  def testGetConfig_Telemetry(self):
    """Tests that the right config is returned for a normal Telemetry test."""
    self._TestGetBisectConfig(
        {
            'bisect_bot': 'win_perf_bisect',
            'suite': 'page_cycler.morejs',
            'metric': 'times/page_load_time',
            'good_revision': '12345',
            'bad_revision': '23456',
            'repeat_count': '15',
            'max_time_minutes': '8',
            'truncate_percent': '30',
            'bug_id': '-1',
        },
        {
            'command': ('python tools/perf/run_benchmark -v '
                        '--browser=release page_cycler.morejs'),
            'good_revision': '12345',
            'bad_revision': '23456',
            'metric': 'times/page_load_time',
            'repeat_count': '15',
            'max_time_minutes': '8',
            'truncate_percent': '30',
            'bug_id': '-1',
            'builder_type': '',
            'target_arch': 'ia32',
            'bisect_mode': 'mean',
        })

  def testGetConfig_BisectModeAsReturnCode(self):
    """Tests that the right config is returned for return_code bisect mode."""
    self._TestGetBisectConfig(
        {
            'bisect_bot': 'linux_perf_bisect',
            'suite': 'page_cycler.moz',
            'metric': 'times/page_load_time',
            'good_revision': '265549',
            'bad_revision': '265556',
            'repeat_count': '15',
            'max_time_minutes': '8',
            'truncate_percent': '30',
            'bug_id': '-1',
            'use_archive': '',
            'bisect_mode': 'return_code'
        },
        {
            'command': ('tools/perf/run_benchmark -v '
                        '--browser=release page_cycler.moz'),
            'good_revision': '265549',
            'bad_revision': '265556',
            'metric': 'times/page_load_time',
            'repeat_count': '15',
            'max_time_minutes': '8',
            'truncate_percent': '30',
            'bug_id': '-1',
            'builder_type': '',
            'target_arch': 'ia32',
            'bisect_mode': 'return_code',
        })

  def _TestGetConfigCommand(self, expected_command, **params_to_override):
    """Helper method to test the command returned for a get-config request."""
    parameters = dict(
        {
            'bisect_bot': 'linux_perf_bisect',
            'suite': 'page_cycler.moz',
            'metric': 'times/page_load_time',
            'good_revision': '265549',
            'bad_revision': '265556',
            'repeat_count': '15',
            'max_time_minutes': '8',
            'truncate_percent': '10',
            'bug_id': '-1',
            'use_archive': ''
        }, **params_to_override)
    response = start_try_job.GetBisectConfig(**parameters)
    self.assertEqual(expected_command, response['config_dict'].get('command'))

  def testGetConfig_AndroidTelemetry(self):
    """Tests that the right config is returned for an android bot."""
    self._TestGetConfigCommand(
        ('tools/perf/run_benchmark -v '
         '--browser=android-chrome-shell '
         'page_cycler.morejs'),
        bisect_bot='android_motoe_perf_bisect',
        suite='page_cycler.morejs')

  def testGetConfig_CCPerftests(self):
    """Tests the config returned for a cc_perftests test on linux."""
    self._TestGetConfigCommand(
        ('./out/Release/cc_perftests '
         '--test-launcher-print-test-stdio=always'),
        bisect_bot='linux_perf_bisect',
        suite='cc_perftests')

  def testGetConfig_AndroidCCPerftests(self):
    """Tests the command returned for cc_perftests tests on android."""
    self._TestGetConfigCommand(
        'build/android/test_runner.py gtest --release -s cc_perftests',
        bisect_bot='android_motoe_perf_bisect',
        suite='cc_perftests')

  def testGetConfig_IdbPerf(self):
    """Tests the command returned for idb_perf tests on windows."""
    self._TestGetConfigCommand(
        (r'.\out\Release\performance_ui_tests.exe '
         '--gtest_filter=IndexedDBTest.Perf'),
        bisect_bot='win_perf_bisect',
        suite='idb_perf')

  def testGetConfig_Startup(self):
    """Tests that a custom flag is added for startup tests."""
    self._TestGetConfigCommand(
        ('python tools/perf/run_benchmark -v '
         '--browser=release '
         '--profile-dir=out/Release/generated_profile/small_profile '
         'startup.cold.blank_page'),
        bisect_bot='win_perf_bisect',
        suite='startup.cold.dirty.blank_page')

  def testGetConfig_SessionRestore(self):
    """Tests that a custom flag is added for session_restore tests."""
    self._TestGetConfigCommand(
        ('python tools/perf/run_benchmark -v '
         '--browser=release '
         '--profile-dir=out/Release/generated_profile/small_profile '
         'session_restore.warm.typical_25'),
        bisect_bot='win_perf_bisect',
        suite='session_restore.warm.typical_25')

  def testGetConfig_PerformanceBrowserTests(self):
    """Tests that the --enable-gpu flag is added for browser tests."""
    self._TestGetConfigCommand(
        ('./out/Release/performance_browser_tests '
         '--test-launcher-print-test-stdio=always '
         '--enable-gpu'),
        bisect_bot='linux_perf_bisect',
        suite='performance_browser_tests')

  @mock.patch.object(start_try_job.buildbucket_service, 'PutJob',
                     mock.MagicMock(return_value='1234567'))
  def testPerformBuildbucketBisect(self):
    """Tests that a recipe bisect goes through buildbucket, not Rietveld."""
    self.SetCurrentUser('foo@google.com')
    # Fake Rietveld auth info
    cfg = rietveld_service.RietveldConfig(
        id='default_rietveld_config',
        client_email='sullivan@google.com',
        service_account_key='Fake Account Key',
        server_url='https://test-rietveld.appspot.com')
    cfg.put()

    # Create bug.
    bug_data.Bug(id=12345).put()

    query_parameters = {
        'bisect_bot': 'linux_perf_bisect',
        'suite': 'dromaeo.jslibstylejquery',
        'metric': 'jslib/jslib',
        'good_revision': '215806',
        'bad_revision': '215828',
        'repeat_count': '20',
        'max_time_minutes': '20',
        'truncate_percent': '25',
        'bug_id': 12345,
        'use_archive': '',
        'use_recipe': 'true',
        'step': 'perform-bisect',
    }
    response = self.testapp.post('/start_try_job', query_parameters)
    response_dict = json.loads(response.body)
    self.assertEqual(response_dict['issue_id'], '1234567')
    self.assertIn('1234567', response_dict['issue_url'])
    job_entities = try_job.TryJob.query(
        try_job.TryJob.buildbucket_job_id == '1234567').fetch()
    self.assertEqual(1, len(job_entities))
    self.assertTrue(job_entities[0].use_buildbucket)

  @mock.patch(
      'google.appengine.api.urlfetch.fetch',
      mock.MagicMock(side_effect=_MockFetch))
  @mock.patch.object(
      start_try_job.rietveld_service.RietveldService, '_MakeRequest',
      mock.MagicMock(side_effect=_MockMakeRequest))
  def testPerformBisect(self):
    """Tests a full perform-bisect flow through the mocked Rietveld."""
    self.SetCurrentUser('foo@google.com')
    # Fake Rietveld auth info
    cfg = rietveld_service.RietveldConfig(
        id='default_rietveld_config',
        client_email='sullivan@google.com',
        service_account_key='Fake Account Key',
        server_url='https://test-rietveld.appspot.com')
    cfg.put()

    # Create bug.
    bug_data.Bug(id=12345).put()

    query_parameters = {
        'bisect_bot': 'linux_perf_bisect',
        'suite': 'dromaeo.jslibstylejquery',
        'metric': 'jslib/jslib',
        'good_revision': '215806',
        'bad_revision': '215828',
        'repeat_count': '20',
        'max_time_minutes': '20',
        'truncate_percent': '25',
        'bug_id': 12345,
        'use_archive': '',
        'step': 'perform-bisect',
    }
    global _EXPECTED_CONFIG_DIFF
    global _TEST_EXPECTED_BOT
    global _TEST_EXPECTED_CONFIG_CONTENTS
    _EXPECTED_CONFIG_DIFF = _EXPECTED_BISECT_CONFIG_DIFF
    _TEST_EXPECTED_BOT = 'linux_perf_bisect'
    _TEST_EXPECTED_CONFIG_CONTENTS = _BISECT_CONFIG_CONTENTS
    response = self.testapp.post('/start_try_job', query_parameters)
    self.assertEqual(
        json.dumps({'issue_id': '33001',
                    'issue_url': 'https://test-rietveld.appspot.com/33001'}),
        response.body)

  @mock.patch(
      'google.appengine.api.urlfetch.fetch',
      mock.MagicMock(side_effect=_MockFetch))
  @mock.patch.object(
      start_try_job.rietveld_service.RietveldService, '_MakeRequest',
      mock.MagicMock(side_effect=_MockMakeRequest))
  def testPerformPerfTry(self):
    """Tests a full perform-perf-try flow through the mocked Rietveld."""
    self.SetCurrentUser('foo@google.com')
    # Fake Rietveld auth info
    cfg = rietveld_service.RietveldConfig(
        id='default_rietveld_config',
        client_email='sullivan@google.com',
        service_account_key='Fake Account Key',
        server_url='https://test-rietveld.appspot.com/')
    cfg.put()

    query_parameters = {
        'bisect_bot': 'linux_perf_bisect',
        'suite': 'dromaeo.jslibstylejquery',
        'good_revision': '215806',
        'bad_revision': '215828',
        'step': 'perform-perf-try',
        'rerun_option': '',
    }
    global _EXPECTED_CONFIG_DIFF
    global _TEST_EXPECTED_CONFIG_CONTENTS
    _EXPECTED_CONFIG_DIFF = _EXPECTED_PERF_CONFIG_DIFF
    _TEST_EXPECTED_CONFIG_CONTENTS = _PERF_CONFIG_CONTENTS
    response = self.testapp.post('/start_try_job', query_parameters)
    self.assertEqual(json.dumps({'issue_id': '33001'}), response.body)

  @mock.patch(
      'google.appengine.api.urlfetch.fetch',
      mock.MagicMock(side_effect=_MockFetch))
  @mock.patch.object(
      start_try_job.rietveld_service.RietveldService, '_MakeRequest',
      mock.MagicMock(side_effect=_MockMakeRequest))
  def testPerformBisectWithArchive(self):
    """Tests perform-bisect with archived builds enabled."""
    self.SetCurrentUser('foo@google.com')
    # Fake Rietveld auth info
    cfg = rietveld_service.RietveldConfig(
        id='default_rietveld_config',
        client_email='sullivan@google.com',
        service_account_key='Fake Account Key',
        server_url='https://test-rietveld.appspot.com')
    cfg.put()

    # Create bug.
    bug_data.Bug(id=12345).put()

    query_parameters = {
        'bisect_bot': 'linux_perf_bisect',
        'suite': 'dromaeo.jslibstylejquery',
        'metric': 'jslib/jslib',
        'good_revision': '215806',
        'bad_revision': '215828',
        'repeat_count': '20',
        'max_time_minutes': '20',
        'truncate_percent': '25',
        'bug_id': 12345,
        'use_archive': 'true',
        'bisect_mode': 'mean',
        'step': 'perform-bisect',
    }
    global _EXPECTED_CONFIG_DIFF
    _EXPECTED_CONFIG_DIFF = _EXPECTED_BISECT_CONFIG_DIFF_WITH_ARCHIVE
    global _TEST_EXPECTED_BOT
    _TEST_EXPECTED_BOT = 'linux_perf_bisect'
    response = self.testapp.post('/start_try_job', query_parameters)
    self.assertEqual(
        json.dumps({'issue_id': '33001',
                    'issue_url': 'https://test-rietveld.appspot.com/33001'}),
        response.body)

  def testGetConfigWithArchive(self):
    """Tests GetConfig method with use_archive attribute set."""
    self._TestGetBisectConfig(
        {
            'bisect_bot': 'win_perf_bisect',
            'suite': 'page_cycler.morejs',
            'metric': 'times/page_load_time',
            'good_revision': '12345',
            'bad_revision': '23456',
            'repeat_count': '15',
            'max_time_minutes': '8',
            'truncate_percent': '30',
            'bug_id': '-1',
            'use_archive': 'true',
        },
        {
            'command': ('python tools/perf/run_benchmark -v '
                        '--browser=release page_cycler.morejs'),
            'good_revision': '12345',
            'bad_revision': '23456',
            'metric': 'times/page_load_time',
            'repeat_count': '15',
            'max_time_minutes': '8',
            'truncate_percent': '30',
            'bug_id': '-1',
            'builder_type': 'perf',
            'target_arch': 'ia32',
            'bisect_mode': 'mean',
        })

  def testGetConfigWithTargetArch(self):
    """Tests that x64 bisect bots produce an x64 target_arch."""
    self._TestGetBisectConfig(
        {
            'bisect_bot': 'win_x64_perf_bisect',
            'suite': 'page_cycler.moz',
            'metric': 'times/page_load_time',
            'good_revision': '265549',
            'bad_revision': '265556',
            'repeat_count': '15',
            'max_time_minutes': '8',
            'truncate_percent': '30',
            'bug_id': '-1',
            'use_archive': ''
        },
        {
            'command': ('python tools/perf/run_benchmark -v '
                        '--browser=release page_cycler.moz'),
            'good_revision': '265549',
            'bad_revision': '265556',
            'metric': 'times/page_load_time',
            'repeat_count': '15',
            'max_time_minutes': '8',
            'truncate_percent': '30',
            'bug_id': '-1',
            'builder_type': '',
            'target_arch': 'x64',
            'bisect_mode': 'mean',
        })

  def testCheckBisectability(self):
    """Tests CheckBisectability function."""
    bisectable_test_path = 'ChromiumPerf/linux-release/sunspider/Total'
    not_bisectable_test_path = 'ChromiumPerf/linux-release/sizes/chrome'
    bisectable_good_rev = 200000
    not_bisectable_good_rev = 1400000000
    bisectable_bad_rev = 200002
    self.assertIsNone(start_try_job.CheckBisectability(
        bisectable_good_rev, bisectable_bad_rev, bisectable_test_path))
    self.assertIsNotNone(start_try_job.CheckBisectability(
        not_bisectable_good_rev, bisectable_bad_rev, bisectable_test_path))
    self.assertIsNotNone(start_try_job.CheckBisectability(
        bisectable_good_rev, bisectable_bad_rev, not_bisectable_test_path))

  def testCheckBisectabilityForAndroid(self):
    """Tests that android builds prior to r265549 are not bisectable."""
    not_bisectable_good_rev = 265548
    bisectable_bad_rev = 265560
    bot = 'android_nexus7_perf_bisect'
    bisectable_test_path = 'ChromiumPerf/linux-release/sunspider/Total'
    self.assertIsNotNone(start_try_job.CheckBisectability(
        not_bisectable_good_rev, bisectable_bad_rev, bisectable_test_path, bot))
    bisectable_good_rev = 265549
    self.assertIsNone(start_try_job.CheckBisectability(
        bisectable_good_rev, bisectable_bad_rev, bisectable_test_path, bot))

  def testCheckBisectabilityForWindows(self):
    """Tests that windows builds in the range 289987-290716 can't bisect."""
    not_bisectable_good_rev = 289987
    not_bisectable_bad_rev = 290716
    bisectable_test_path = 'ChromiumPerf/linux-release/sunspider/Total'
    bot = 'win_xp_perf_bisect'
    self.assertIsNotNone(start_try_job.CheckBisectability(
        not_bisectable_good_rev, not_bisectable_bad_rev,
        bisectable_test_path, bot))
    bisectable_good_rev = 290716
    bisectable_bad_rev = 290800
    bot = 'win_xp_perf_bisect'
    self.assertIsNone(start_try_job.CheckBisectability(
        bisectable_good_rev, bisectable_bad_rev, bisectable_test_path, bot))
| |
| |
# Allow running this test module directly (outside the test runner).
if __name__ == '__main__':
  unittest.main()