Snap for 10447354 from 40214b48188358a80b7478bfff21d4814dd9177c to mainline-cellbroadcast-release
Change-Id: Iab10affc9aca7bb19286273919eafe394c92241d
diff --git a/.gitignore b/.gitignore
index 072bd9e..92ee4cb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,4 @@
.mypy_cache/
llvm-project-copy/
compiler_wrapper/compiler_wrapper
+/rust-analyzer-chromiumos-wrapper/target
diff --git a/.style.yapf b/.style.yapf
deleted file mode 100644
index c4472bd..0000000
--- a/.style.yapf
+++ /dev/null
@@ -1,4 +0,0 @@
-[style]
-based_on_style = pep8
-blank_line_before_module_docstring = true
-indent_width = 2
diff --git a/LICENSE b/LICENSE
index 50bac5d..73b03ad 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-// Copyright (c) 2011-2016 The Chromium OS Authors. All rights reserved.
+// Copyright 2011-2016 The ChromiumOS Authors
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
-// * Neither the name of Google Inc. nor the names of its
+// * Neither the name of Google LLC nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
diff --git a/METADATA b/METADATA
index d97975c..ab8b7fa 100644
--- a/METADATA
+++ b/METADATA
@@ -1,3 +1,19 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update toolchain-utils
+# For more info, check https://cs.android.com/android/platform/superproject/+/master:tools/external_updater/README.md
+
+name: "toolchain-utils"
+description: "Various utilities used by the ChromeOS toolchain team."
third_party {
+ url {
+ type: GIT
+ value: "https://chromium.googlesource.com/chromiumos/third_party/toolchain-utils/"
+ }
+ version: "2c474af4f370b143032144aff1ff1985f789e20f"
license_type: NOTICE
+ last_upgrade_date {
+ year: 2022
+ month: 11
+ day: 8
+ }
}
diff --git a/OWNERS.toolchain b/OWNERS.toolchain
index 67a4cd0..e8cdbf9 100644
--- a/OWNERS.toolchain
+++ b/OWNERS.toolchain
@@ -1,12 +1,9 @@
+adriandole@google.com
ajordanr@google.com
cjdb@google.com
denik@chromium.org
gbiv@chromium.org
inglorion@chromium.org
-llozano@chromium.org
manojgupta@chromium.org
mbenfield@google.com
ryanbeltran@chromium.org
-
-# Temporary; see comment #2 on crbug.com/982498
-llozano@google.com
diff --git a/README.chromium b/README.chromium
index acbbc5e..57aa603 100644
--- a/README.chromium
+++ b/README.chromium
@@ -7,11 +7,11 @@
Description:
This contains scripts used to help maintain the toolchain. These
-include tools for downloading and building Chromium OS; building
-custom versions of the toolchain inside Chromium OS; launching
+include tools for downloading and building ChromiumOS; building
+custom versions of the toolchain inside ChromiumOS; launching
performance tests, analyzing the results and generating reports;
running toolchain regression tests; and using binary search to isolate
toolchain issues.
NOTE: These tools are strictly for Chromium developers; none of them
-ship on the final product (devices that run Chromium OS).
+ship on the final product (devices that run ChromiumOS).
diff --git a/README.md b/README.md
index a318e38..4a82ec1 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
# `toolchain-utils`
-Various utilities used by the Chrome OS toolchain team.
+Various utilities used by the ChromeOS toolchain team.
## Note
diff --git a/afdo_metadata/README.md b/afdo_metadata/README.md
index 2338404..ad71c0b 100644
--- a/afdo_metadata/README.md
+++ b/afdo_metadata/README.md
@@ -1,6 +1,6 @@
# Overview
This directory contains JSON files describing metadata of AFDO profiles
-used to compile packages (Chrome and kernel) in Chrome OS.
+used to compile packages (Chrome and kernel) in ChromeOS.
# Description of each JSON Files
kernel_afdo.json contains the name of the latest AFDO profiles for each
@@ -15,7 +15,7 @@
production GS bucket, a bot submits to modify the corresponding JSON
file to reflect the updates.
-## Roll to Chrome OS
+## Roll to ChromeOS
There will be scheduler jobs listening to the changes made to these
JSON files. When changes detected, buildbot will roll these changes into
-corresponding Chrome OS packages.
+corresponding ChromeOS packages.
diff --git a/afdo_metadata/kernel_afdo.json b/afdo_metadata/kernel_afdo.json
index 49e1927..98dab12 100644
--- a/afdo_metadata/kernel_afdo.json
+++ b/afdo_metadata/kernel_afdo.json
@@ -1,14 +1,17 @@
{
"chromeos-kernel-4_4": {
- "name": "R100-14516.0-1645439511"
+ "name": "R108-15117.10-1664184941"
},
"chromeos-kernel-4_14": {
- "name": "R100-14516.0-1645439661"
+ "name": "R109-15183.8-1666603998"
},
"chromeos-kernel-4_19": {
- "name": "R100-14516.0-1645439606"
+ "name": "R109-15183.8-1666604011"
},
"chromeos-kernel-5_4": {
- "name": "R100-14516.0-1645439482"
+ "name": "R109-15183.8-1666603918"
+ },
+ "chromeos-kernel-5_10": {
+ "name": "R109-15183.8-1666604219"
}
}
diff --git a/afdo_metadata/kernel_arm_afdo.json b/afdo_metadata/kernel_arm_afdo.json
new file mode 100644
index 0000000..e73d2eb
--- /dev/null
+++ b/afdo_metadata/kernel_arm_afdo.json
@@ -0,0 +1,5 @@
+{
+ "chromeos-kernel-5_15": {
+ "name": "R109-15183.8-1666604194"
+ }
+}
diff --git a/afdo_redaction/redact_profile.py b/afdo_redaction/redact_profile.py
index 02bae92..0779d2a 100755
--- a/afdo_redaction/redact_profile.py
+++ b/afdo_redaction/redact_profile.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -24,7 +24,6 @@
to stdout. A summary of what the script actually did is printed to stderr.
"""
-from __future__ import division, print_function
import collections
import re
@@ -32,23 +31,23 @@
def _count_samples(samples):
- """Count the total number of samples in a function."""
- line_re = re.compile(r'^(\s*)\d+(?:\.\d+)?: (\d+)\s*$')
+ """Count the total number of samples in a function."""
+ line_re = re.compile(r"^(\s*)\d+(?:\.\d+)?: (\d+)\s*$")
- top_level_samples = 0
- all_samples = 0
- for line in samples:
- m = line_re.match(line)
- if not m:
- continue
+ top_level_samples = 0
+ all_samples = 0
+ for line in samples:
+ m = line_re.match(line)
+ if not m:
+ continue
- spaces, n = m.groups()
- n = int(n)
- all_samples += n
- if len(spaces) == 1:
- top_level_samples += n
+ spaces, n = m.groups()
+ n = int(n)
+ all_samples += n
+ if len(spaces) == 1:
+ top_level_samples += n
- return top_level_samples, all_samples
+ return top_level_samples, all_samples
# A ProfileRecord is a set of samples for a top-level symbol in a textual AFDO
@@ -80,70 +79,75 @@
# And samples look like one of:
# arbitrary_number: sample_count
# arbitrary_number: inlined_function_symbol:inlined_entry_count
-ProfileRecord = collections.namedtuple('ProfileRecord',
- ['function_line', 'samples'])
+ProfileRecord = collections.namedtuple(
+ "ProfileRecord", ["function_line", "samples"]
+)
def _normalize_samples(samples):
- """Normalizes the samples in the given function body.
+ """Normalizes the samples in the given function body.
- Normalization just means that we redact inlined function names. This is
- done so that a bit of templating doesn't make two function bodies look
- distinct. Namely:
+ Normalization just means that we redact inlined function names. This is
+ done so that a bit of templating doesn't make two function bodies look
+ distinct. Namely:
- template <typename T>
- __attribute__((noinline))
- int getNumber() { return 1; }
+ template <typename T>
+ __attribute__((noinline))
+ int getNumber() { return 1; }
- template <typename T>
- __attribute__((noinline))
- int getNumberIndirectly() { return getNumber<T>(); }
+ template <typename T>
+ __attribute__((noinline))
+ int getNumberIndirectly() { return getNumber<T>(); }
- int main() {
- return getNumber<int>() + getNumber<float>();
- }
+ int main() {
+ return getNumber<int>() + getNumber<float>();
+ }
- If the profile has the mangled name for getNumber<float> in
- getNumberIndirectly<float> (and similar for <int>), we'll consider them to
- be distinct when they're not.
- """
+ If the profile has the mangled name for getNumber<float> in
+ getNumberIndirectly<float> (and similar for <int>), we'll consider them to
+ be distinct when they're not.
+ """
- # I'm not actually sure if this ends up being an issue in practice, but it's
- # simple enough to guard against.
- inlined_re = re.compile(r'(^\s*\d+): [^:]+:(\s*\d+)\s*$')
- result = []
- for s in samples:
- m = inlined_re.match(s)
- if m:
- result.append('%s: __REDACTED__:%s' % m.groups())
- else:
- result.append(s)
- return tuple(result)
+ # I'm not actually sure if this ends up being an issue in practice, but it's
+ # simple enough to guard against.
+ inlined_re = re.compile(r"(^\s*\d+): [^:]+:(\s*\d+)\s*$")
+ result = []
+ for s in samples:
+ m = inlined_re.match(s)
+ if m:
+ result.append("%s: __REDACTED__:%s" % m.groups())
+ else:
+ result.append(s)
+ return tuple(result)
def _read_textual_afdo_profile(stream):
- """Parses an AFDO profile from a line stream into ProfileRecords."""
- # ProfileRecords are actually nested, due to inlining. For the purpose of
- # this script, that doesn't matter.
- lines = (line.rstrip() for line in stream)
- function_line = None
- samples = []
- for line in lines:
- if not line:
- continue
+ """Parses an AFDO profile from a line stream into ProfileRecords."""
+ # ProfileRecords are actually nested, due to inlining. For the purpose of
+ # this script, that doesn't matter.
+ lines = (line.rstrip() for line in stream)
+ function_line = None
+ samples = []
+ for line in lines:
+ if not line:
+ continue
- if line[0].isspace():
- assert function_line is not None, 'sample exists outside of a function?'
- samples.append(line)
- continue
+ if line[0].isspace():
+ assert (
+ function_line is not None
+ ), "sample exists outside of a function?"
+ samples.append(line)
+ continue
+
+ if function_line is not None:
+ yield ProfileRecord(
+ function_line=function_line, samples=tuple(samples)
+ )
+ function_line = line
+ samples = []
if function_line is not None:
- yield ProfileRecord(function_line=function_line, samples=tuple(samples))
- function_line = line
- samples = []
-
- if function_line is not None:
- yield ProfileRecord(function_line=function_line, samples=tuple(samples))
+ yield ProfileRecord(function_line=function_line, samples=tuple(samples))
# The default of 100 is arbitrarily selected, but it does make the overwhelming
@@ -157,86 +161,96 @@
# Non-nm based approaches are superior because they don't require any prior
# build artifacts; just an AFDO profile.
def dedup_records(profile_records, summary_file, max_repeats=100):
- """Removes heavily duplicated records from profile_records.
+ """Removes heavily duplicated records from profile_records.
- profile_records is expected to be an iterable of ProfileRecord.
- max_repeats ia how many functions must share identical bodies for us to
- consider it 'heavily duplicated' and remove the results.
- """
+ profile_records is expected to be an iterable of ProfileRecord.
+ max_repeats ia how many functions must share identical bodies for us to
+ consider it 'heavily duplicated' and remove the results.
+ """
- # Build a mapping of function structure -> list of functions with identical
- # structure and sample counts
- counts = collections.defaultdict(list)
- for record in profile_records:
- counts[_normalize_samples(record.samples)].append(record)
+ # Build a mapping of function structure -> list of functions with identical
+ # structure and sample counts
+ counts = collections.defaultdict(list)
+ for record in profile_records:
+ counts[_normalize_samples(record.samples)].append(record)
- # Be sure that we didn't see any duplicate functions, since that's bad...
- total_functions_recorded = sum(len(records) for records in counts.values())
+ # Be sure that we didn't see any duplicate functions, since that's bad...
+ total_functions_recorded = sum(len(records) for records in counts.values())
- unique_function_names = {
- record.function_line.split(':')[0]
- for records in counts.values()
- for record in records
- }
+ unique_function_names = {
+ record.function_line.split(":")[0]
+ for records in counts.values()
+ for record in records
+ }
- assert len(unique_function_names) == total_functions_recorded, \
- 'duplicate function names?'
+ assert (
+ len(unique_function_names) == total_functions_recorded
+ ), "duplicate function names?"
- num_kept = 0
- num_samples_kept = 0
- num_top_samples_kept = 0
- num_total = 0
- num_samples_total = 0
- num_top_samples_total = 0
+ num_kept = 0
+ num_samples_kept = 0
+ num_top_samples_kept = 0
+ num_total = 0
+ num_samples_total = 0
+ num_top_samples_total = 0
- for normalized_samples, records in counts.items():
- top_sample_count, all_sample_count = _count_samples(normalized_samples)
- top_sample_count *= len(records)
- all_sample_count *= len(records)
+ for normalized_samples, records in counts.items():
+ top_sample_count, all_sample_count = _count_samples(normalized_samples)
+ top_sample_count *= len(records)
+ all_sample_count *= len(records)
- num_total += len(records)
- num_samples_total += all_sample_count
- num_top_samples_total += top_sample_count
+ num_total += len(records)
+ num_samples_total += all_sample_count
+ num_top_samples_total += top_sample_count
- if len(records) >= max_repeats:
- continue
+ if len(records) >= max_repeats:
+ continue
- num_kept += len(records)
- num_samples_kept += all_sample_count
- num_top_samples_kept += top_sample_count
- for record in records:
- yield record
+ num_kept += len(records)
+ num_samples_kept += all_sample_count
+ num_top_samples_kept += top_sample_count
+ for record in records:
+ yield record
- print(
- 'Retained {:,}/{:,} functions'.format(num_kept, num_total),
- file=summary_file)
- print(
- 'Retained {:,}/{:,} samples, total'.format(num_samples_kept,
- num_samples_total),
- file=summary_file)
- print('Retained {:,}/{:,} top-level samples' \
- .format(num_top_samples_kept, num_top_samples_total),
- file=summary_file)
+ print(
+ "Retained {:,}/{:,} functions".format(num_kept, num_total),
+ file=summary_file,
+ )
+ print(
+ "Retained {:,}/{:,} samples, total".format(
+ num_samples_kept, num_samples_total
+ ),
+ file=summary_file,
+ )
+ print(
+ "Retained {:,}/{:,} top-level samples".format(
+ num_top_samples_kept, num_top_samples_total
+ ),
+ file=summary_file,
+ )
def run(profile_input_file, summary_output_file, profile_output_file):
- profile_records = _read_textual_afdo_profile(profile_input_file)
+ profile_records = _read_textual_afdo_profile(profile_input_file)
- # Sort this so we get deterministic output. AFDO doesn't care what order it's
- # in.
- deduped = sorted(
- dedup_records(profile_records, summary_output_file),
- key=lambda r: r.function_line)
- for function_line, samples in deduped:
- print(function_line, file=profile_output_file)
- print('\n'.join(samples), file=profile_output_file)
+ # Sort this so we get deterministic output. AFDO doesn't care what order it's
+ # in.
+ deduped = sorted(
+ dedup_records(profile_records, summary_output_file),
+ key=lambda r: r.function_line,
+ )
+ for function_line, samples in deduped:
+ print(function_line, file=profile_output_file)
+ print("\n".join(samples), file=profile_output_file)
def _main():
- run(profile_input_file=sys.stdin,
- summary_output_file=sys.stderr,
- profile_output_file=sys.stdout)
+ run(
+ profile_input_file=sys.stdin,
+ summary_output_file=sys.stderr,
+ profile_output_file=sys.stdout,
+ )
-if __name__ == '__main__':
- _main()
+if __name__ == "__main__":
+ _main()
diff --git a/afdo_redaction/redact_profile_test.py b/afdo_redaction/redact_profile_test.py
index e243897..93c6551 100755
--- a/afdo_redaction/redact_profile_test.py
+++ b/afdo_redaction/redact_profile_test.py
@@ -1,136 +1,139 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for redact_profile.py."""
-from __future__ import division, print_function
import io
import unittest
from afdo_redaction import redact_profile
+
_redact_limit = redact_profile.dedup_records.__defaults__[0]
def _redact(input_lines, summary_to=None):
- if isinstance(input_lines, str):
- input_lines = input_lines.splitlines()
+ if isinstance(input_lines, str):
+ input_lines = input_lines.splitlines()
- if summary_to is None:
- summary_to = io.StringIO()
+ if summary_to is None:
+ summary_to = io.StringIO()
- output_to = io.StringIO()
- redact_profile.run(
- profile_input_file=input_lines,
- summary_output_file=summary_to,
- profile_output_file=output_to)
- return output_to.getvalue()
+ output_to = io.StringIO()
+ redact_profile.run(
+ profile_input_file=input_lines,
+ summary_output_file=summary_to,
+ profile_output_file=output_to,
+ )
+ return output_to.getvalue()
def _redact_with_summary(input_lines):
- summary = io.StringIO()
- result = _redact(input_lines, summary_to=summary)
- return result, summary.getvalue()
+ summary = io.StringIO()
+ result = _redact(input_lines, summary_to=summary)
+ return result, summary.getvalue()
-def _generate_repeated_function_body(repeats, fn_name='_some_name'):
- # Arbitrary function body ripped from a textual AFDO profile.
- function_header = fn_name + ':1234:185'
- function_body = [
- ' 6: 83',
- ' 15: 126',
- ' 62832: 126',
- ' 6: _ZNK5blink10PaintLayer14GroupedMappingEv:2349',
- ' 1: 206',
- ' 1: _ZNK5blink10PaintLayer14GroupedMappersEv:2060',
- ' 1: 206',
- ' 11: _ZNK5blink10PaintLayer25GetCompositedLayerMappingEv:800',
- ' 2.1: 80',
- ]
+def _generate_repeated_function_body(repeats, fn_name="_some_name"):
+ # Arbitrary function body ripped from a textual AFDO profile.
+ function_header = fn_name + ":1234:185"
+ function_body = [
+ " 6: 83",
+ " 15: 126",
+ " 62832: 126",
+ " 6: _ZNK5blink10PaintLayer14GroupedMappingEv:2349",
+ " 1: 206",
+ " 1: _ZNK5blink10PaintLayer14GroupedMappersEv:2060",
+ " 1: 206",
+ " 11: _ZNK5blink10PaintLayer25GetCompositedLayerMappingEv:800",
+ " 2.1: 80",
+ ]
- # Be sure to zfill this, so the functions are output in sorted order.
- num_width = len(str(repeats))
+ # Be sure to zfill this, so the functions are output in sorted order.
+ num_width = len(str(repeats))
- lines = []
- for i in range(repeats):
- num = str(i).zfill(num_width)
- lines.append(num + function_header)
- lines.extend(function_body)
- return lines
+ lines = []
+ for i in range(repeats):
+ num = str(i).zfill(num_width)
+ lines.append(num + function_header)
+ lines.extend(function_body)
+ return lines
class Tests(unittest.TestCase):
- """All of our tests for redact_profile."""
+ """All of our tests for redact_profile."""
- def test_no_input_works(self):
- self.assertEqual(_redact(''), '')
+ def test_no_input_works(self):
+ self.assertEqual(_redact(""), "")
- def test_single_function_works(self):
- lines = _generate_repeated_function_body(1)
- result_file = '\n'.join(lines) + '\n'
- self.assertEqual(_redact(lines), result_file)
+ def test_single_function_works(self):
+ lines = _generate_repeated_function_body(1)
+ result_file = "\n".join(lines) + "\n"
+ self.assertEqual(_redact(lines), result_file)
- def test_duplicate_of_single_function_works(self):
- lines = _generate_repeated_function_body(2)
- result_file = '\n'.join(lines) + '\n'
- self.assertEqual(_redact(lines), result_file)
+ def test_duplicate_of_single_function_works(self):
+ lines = _generate_repeated_function_body(2)
+ result_file = "\n".join(lines) + "\n"
+ self.assertEqual(_redact(lines), result_file)
- def test_not_too_many_duplicates_of_single_function_redacts_none(self):
- lines = _generate_repeated_function_body(_redact_limit - 1)
- result_file = '\n'.join(lines) + '\n'
- self.assertEqual(_redact(lines), result_file)
+ def test_not_too_many_duplicates_of_single_function_redacts_none(self):
+ lines = _generate_repeated_function_body(_redact_limit - 1)
+ result_file = "\n".join(lines) + "\n"
+ self.assertEqual(_redact(lines), result_file)
- def test_many_duplicates_of_single_function_redacts_them_all(self):
- lines = _generate_repeated_function_body(_redact_limit)
- self.assertEqual(_redact(lines), '')
+ def test_many_duplicates_of_single_function_redacts_them_all(self):
+ lines = _generate_repeated_function_body(_redact_limit)
+ self.assertEqual(_redact(lines), "")
- def test_many_duplicates_of_single_function_leaves_other_functions(self):
- kept_lines = _generate_repeated_function_body(1, fn_name='_keep_me')
- # Something to distinguish us from the rest. Just bump a random counter.
- kept_lines[1] += '1'
+ def test_many_duplicates_of_single_function_leaves_other_functions(self):
+ kept_lines = _generate_repeated_function_body(1, fn_name="_keep_me")
+ # Something to distinguish us from the rest. Just bump a random counter.
+ kept_lines[1] += "1"
- result_file = '\n'.join(kept_lines) + '\n'
+ result_file = "\n".join(kept_lines) + "\n"
- lines = _generate_repeated_function_body(
- _redact_limit, fn_name='_discard_me')
- self.assertEqual(_redact(kept_lines + lines), result_file)
- self.assertEqual(_redact(lines + kept_lines), result_file)
+ lines = _generate_repeated_function_body(
+ _redact_limit, fn_name="_discard_me"
+ )
+ self.assertEqual(_redact(kept_lines + lines), result_file)
+ self.assertEqual(_redact(lines + kept_lines), result_file)
- more_lines = _generate_repeated_function_body(
- _redact_limit, fn_name='_and_discard_me')
- self.assertEqual(_redact(lines + kept_lines + more_lines), result_file)
- self.assertEqual(_redact(lines + more_lines), '')
+ more_lines = _generate_repeated_function_body(
+ _redact_limit, fn_name="_and_discard_me"
+ )
+ self.assertEqual(_redact(lines + kept_lines + more_lines), result_file)
+ self.assertEqual(_redact(lines + more_lines), "")
- def test_correct_summary_is_printed_when_nothing_is_redacted(self):
- lines = _generate_repeated_function_body(1)
- _, summary = _redact_with_summary(lines)
- self.assertIn('Retained 1/1 functions', summary)
- self.assertIn('Retained 827/827 samples, total', summary)
- # Note that top-level samples == "samples without inlining taken into
- # account," not "sum(entry_counts)"
- self.assertIn('Retained 335/335 top-level samples', summary)
+ def test_correct_summary_is_printed_when_nothing_is_redacted(self):
+ lines = _generate_repeated_function_body(1)
+ _, summary = _redact_with_summary(lines)
+ self.assertIn("Retained 1/1 functions", summary)
+ self.assertIn("Retained 827/827 samples, total", summary)
+ # Note that top-level samples == "samples without inlining taken into
+ # account," not "sum(entry_counts)"
+ self.assertIn("Retained 335/335 top-level samples", summary)
- def test_correct_summary_is_printed_when_everything_is_redacted(self):
- lines = _generate_repeated_function_body(_redact_limit)
- _, summary = _redact_with_summary(lines)
- self.assertIn('Retained 0/100 functions', summary)
- self.assertIn('Retained 0/82,700 samples, total', summary)
- self.assertIn('Retained 0/33,500 top-level samples', summary)
+ def test_correct_summary_is_printed_when_everything_is_redacted(self):
+ lines = _generate_repeated_function_body(_redact_limit)
+ _, summary = _redact_with_summary(lines)
+ self.assertIn("Retained 0/100 functions", summary)
+ self.assertIn("Retained 0/82,700 samples, total", summary)
+ self.assertIn("Retained 0/33,500 top-level samples", summary)
- def test_correct_summary_is_printed_when_most_everything_is_redacted(self):
- kept_lines = _generate_repeated_function_body(1, fn_name='_keep_me')
- kept_lines[1] += '1'
+ def test_correct_summary_is_printed_when_most_everything_is_redacted(self):
+ kept_lines = _generate_repeated_function_body(1, fn_name="_keep_me")
+ kept_lines[1] += "1"
- lines = _generate_repeated_function_body(_redact_limit)
- _, summary = _redact_with_summary(kept_lines + lines)
- self.assertIn('Retained 1/101 functions', summary)
- self.assertIn('Retained 1,575/84,275 samples, total', summary)
- self.assertIn('Retained 1,083/34,583 top-level samples', summary)
+ lines = _generate_repeated_function_body(_redact_limit)
+ _, summary = _redact_with_summary(kept_lines + lines)
+ self.assertIn("Retained 1/101 functions", summary)
+ self.assertIn("Retained 1,575/84,275 samples, total", summary)
+ self.assertIn("Retained 1,083/34,583 top-level samples", summary)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/afdo_redaction/remove_cold_functions.py b/afdo_redaction/remove_cold_functions.py
index 097085d..c6043bc 100755
--- a/afdo_redaction/remove_cold_functions.py
+++ b/afdo_redaction/remove_cold_functions.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -14,7 +14,7 @@
target, so the result profile will always have less than or equal to the
given number of functions.
-The script is intended to be used on production Chrome OS profiles, after
+The script is intended to be used on production ChromeOS profiles, after
other redaction/trimming scripts. It can be used with given textual CWP
and benchmark profiles, in order to analyze how many removed functions are
from which profile (or both), which can be used an indicator of fairness
@@ -24,160 +24,191 @@
Chrome binary size. See crbug.com/1062014 for more context.
"""
-from __future__ import division, print_function
import argparse
import collections
import re
import sys
-_function_line_re = re.compile(r'^([\w\$\.@]+):(\d+)(?::\d+)?$')
+
+_function_line_re = re.compile(r"^([\w\$\.@]+):(\d+)(?::\d+)?$")
ProfileRecord = collections.namedtuple(
- 'ProfileRecord', ['function_count', 'function_body', 'function_name'])
+ "ProfileRecord", ["function_count", "function_body", "function_name"]
+)
def _read_sample_count(line):
- m = _function_line_re.match(line)
- assert m, 'Failed to interpret function line %s' % line
- return m.group(1), int(m.group(2))
+ m = _function_line_re.match(line)
+ assert m, "Failed to interpret function line %s" % line
+ return m.group(1), int(m.group(2))
def _read_textual_afdo_profile(stream):
- """Parses an AFDO profile from a line stream into ProfileRecords."""
- # ProfileRecords are actually nested, due to inlining. For the purpose of
- # this script, that doesn't matter.
- lines = (line.rstrip() for line in stream)
- function_line = None
- samples = []
- ret = []
- for line in lines:
- if not line:
- continue
+ """Parses an AFDO profile from a line stream into ProfileRecords."""
+ # ProfileRecords are actually nested, due to inlining. For the purpose of
+ # this script, that doesn't matter.
+ lines = (line.rstrip() for line in stream)
+ function_line = None
+ samples = []
+ ret = []
+ for line in lines:
+ if not line:
+ continue
- if line[0].isspace():
- assert function_line is not None, 'sample exists outside of a function?'
- samples.append(line)
- continue
+ if line[0].isspace():
+ assert (
+ function_line is not None
+ ), "sample exists outside of a function?"
+ samples.append(line)
+ continue
+
+ if function_line is not None:
+ name, count = _read_sample_count(function_line)
+ body = [function_line] + samples
+ ret.append(
+ ProfileRecord(
+ function_count=count, function_body=body, function_name=name
+ )
+ )
+ function_line = line
+ samples = []
if function_line is not None:
- name, count = _read_sample_count(function_line)
- body = [function_line] + samples
- ret.append(
- ProfileRecord(
- function_count=count, function_body=body, function_name=name))
- function_line = line
- samples = []
-
- if function_line is not None:
- name, count = _read_sample_count(function_line)
- body = [function_line] + samples
- ret.append(
- ProfileRecord(
- function_count=count, function_body=body, function_name=name))
- return ret
+ name, count = _read_sample_count(function_line)
+ body = [function_line] + samples
+ ret.append(
+ ProfileRecord(
+ function_count=count, function_body=body, function_name=name
+ )
+ )
+ return ret
def write_textual_afdo_profile(stream, records):
- for r in records:
- print('\n'.join(r.function_body), file=stream)
+ for r in records:
+ print("\n".join(r.function_body), file=stream)
def analyze_functions(records, cwp, benchmark):
- cwp_functions = {x.function_name for x in cwp}
- benchmark_functions = {x.function_name for x in benchmark}
- all_functions = {x.function_name for x in records}
- cwp_only_functions = len((all_functions & cwp_functions) -
- benchmark_functions)
- benchmark_only_functions = len((all_functions & benchmark_functions) -
- cwp_functions)
- common_functions = len(all_functions & benchmark_functions & cwp_functions)
- none_functions = len(all_functions - benchmark_functions - cwp_functions)
+ cwp_functions = {x.function_name for x in cwp}
+ benchmark_functions = {x.function_name for x in benchmark}
+ all_functions = {x.function_name for x in records}
+ cwp_only_functions = len(
+ (all_functions & cwp_functions) - benchmark_functions
+ )
+ benchmark_only_functions = len(
+ (all_functions & benchmark_functions) - cwp_functions
+ )
+ common_functions = len(all_functions & benchmark_functions & cwp_functions)
+ none_functions = len(all_functions - benchmark_functions - cwp_functions)
- assert not none_functions
- return cwp_only_functions, benchmark_only_functions, common_functions
+ assert not none_functions
+ return cwp_only_functions, benchmark_only_functions, common_functions
def run(input_stream, output_stream, goal, cwp=None, benchmark=None):
- records = _read_textual_afdo_profile(input_stream)
- num_functions = len(records)
- if not num_functions:
- return
- assert goal, "It's invalid to remove all functions in the profile"
+ records = _read_textual_afdo_profile(input_stream)
+ num_functions = len(records)
+ if not num_functions:
+ return
+ assert goal, "It's invalid to remove all functions in the profile"
- if cwp and benchmark:
- cwp_records = _read_textual_afdo_profile(cwp)
- benchmark_records = _read_textual_afdo_profile(benchmark)
- cwp_num, benchmark_num, common_num = analyze_functions(
- records, cwp_records, benchmark_records)
+ if cwp and benchmark:
+ cwp_records = _read_textual_afdo_profile(cwp)
+ benchmark_records = _read_textual_afdo_profile(benchmark)
+ cwp_num, benchmark_num, common_num = analyze_functions(
+ records, cwp_records, benchmark_records
+ )
- records.sort(key=lambda x: (-x.function_count, x.function_name))
- records = records[:goal]
+ records.sort(key=lambda x: (-x.function_count, x.function_name))
+ records = records[:goal]
- print(
- 'Retained %d/%d (%.1f%%) functions in the profile' %
- (len(records), num_functions, 100.0 * len(records) / num_functions),
- file=sys.stderr)
- write_textual_afdo_profile(output_stream, records)
-
- if cwp and benchmark:
- cwp_num_after, benchmark_num_after, common_num_after = analyze_functions(
- records, cwp_records, benchmark_records)
print(
- 'Retained %d/%d (%.1f%%) functions only appear in the CWP profile' %
- (cwp_num_after, cwp_num, 100.0 * cwp_num_after / cwp_num),
- file=sys.stderr)
- print(
- 'Retained %d/%d (%.1f%%) functions only appear in the benchmark profile'
- % (benchmark_num_after, benchmark_num,
- 100.0 * benchmark_num_after / benchmark_num),
- file=sys.stderr)
- print(
- 'Retained %d/%d (%.1f%%) functions appear in both CWP and benchmark'
- ' profiles' % (common_num_after, common_num,
- 100.0 * common_num_after / common_num),
- file=sys.stderr)
+ "Retained %d/%d (%.1f%%) functions in the profile"
+ % (len(records), num_functions, 100.0 * len(records) / num_functions),
+ file=sys.stderr,
+ )
+ write_textual_afdo_profile(output_stream, records)
+
+ if cwp and benchmark:
+ (
+ cwp_num_after,
+ benchmark_num_after,
+ common_num_after,
+ ) = analyze_functions(records, cwp_records, benchmark_records)
+ print(
+ "Retained %d/%d (%.1f%%) functions only appear in the CWP profile"
+ % (cwp_num_after, cwp_num, 100.0 * cwp_num_after / cwp_num),
+ file=sys.stderr,
+ )
+ print(
+ "Retained %d/%d (%.1f%%) functions only appear in the benchmark profile"
+ % (
+ benchmark_num_after,
+ benchmark_num,
+ 100.0 * benchmark_num_after / benchmark_num,
+ ),
+ file=sys.stderr,
+ )
+ print(
+ "Retained %d/%d (%.1f%%) functions appear in both CWP and benchmark"
+ " profiles"
+ % (
+ common_num_after,
+ common_num,
+ 100.0 * common_num_after / common_num,
+ ),
+ file=sys.stderr,
+ )
def main():
- parser = argparse.ArgumentParser(
- description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
- parser.add_argument(
- '--input',
- default='/dev/stdin',
- help='File to read from. Defaults to stdin.')
- parser.add_argument(
- '--output',
- default='/dev/stdout',
- help='File to write to. Defaults to stdout.')
- parser.add_argument(
- '--number',
- type=int,
- required=True,
- help='Number of functions to retain in the profile.')
- parser.add_argument(
- '--cwp', help='Textualized CWP profiles, used for further analysis')
- parser.add_argument(
- '--benchmark',
- help='Textualized benchmark profile, used for further analysis')
- args = parser.parse_args()
+ parser = argparse.ArgumentParser(
+ description=__doc__,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
+ parser.add_argument(
+ "--input",
+ default="/dev/stdin",
+ help="File to read from. Defaults to stdin.",
+ )
+ parser.add_argument(
+ "--output",
+ default="/dev/stdout",
+ help="File to write to. Defaults to stdout.",
+ )
+ parser.add_argument(
+ "--number",
+ type=int,
+ required=True,
+ help="Number of functions to retain in the profile.",
+ )
+ parser.add_argument(
+ "--cwp", help="Textualized CWP profiles, used for further analysis"
+ )
+ parser.add_argument(
+ "--benchmark",
+ help="Textualized benchmark profile, used for further analysis",
+ )
+ args = parser.parse_args()
- if not args.number:
- parser.error("It's invalid to remove the number of functions to 0.")
+ if not args.number:
+ parser.error("It's invalid to remove the number of functions to 0.")
- if (args.cwp and not args.benchmark) or (not args.cwp and args.benchmark):
- parser.error('Please specify both --cwp and --benchmark')
+ if (args.cwp and not args.benchmark) or (not args.cwp and args.benchmark):
+ parser.error("Please specify both --cwp and --benchmark")
- with open(args.input) as stdin:
- with open(args.output, 'w') as stdout:
- # When user specify textualized cwp and benchmark profiles, perform
- # the analysis. Otherwise, just trim the cold functions from profile.
- if args.cwp and args.benchmark:
- with open(args.cwp) as cwp:
- with open(args.benchmark) as benchmark:
- run(stdin, stdout, args.number, cwp, benchmark)
- else:
- run(stdin, stdout, args.number)
+ with open(args.input) as stdin:
+ with open(args.output, "w") as stdout:
+ # When user specify textualized cwp and benchmark profiles, perform
+ # the analysis. Otherwise, just trim the cold functions from profile.
+ if args.cwp and args.benchmark:
+ with open(args.cwp) as cwp:
+ with open(args.benchmark) as benchmark:
+ run(stdin, stdout, args.number, cwp, benchmark)
+ else:
+ run(stdin, stdout, args.number)
-if __name__ == '__main__':
- main()
+if __name__ == "__main__":
+ main()
diff --git a/afdo_redaction/remove_cold_functions_test.py b/afdo_redaction/remove_cold_functions_test.py
index 14f946b..89a87f8 100755
--- a/afdo_redaction/remove_cold_functions_test.py
+++ b/afdo_redaction/remove_cold_functions_test.py
@@ -1,28 +1,27 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for remove_cold_functions."""
-from __future__ import print_function
import io
-from unittest.mock import patch
import unittest
+from unittest.mock import patch
from afdo_redaction import remove_cold_functions
def _construct_profile(indices=None):
- real_world_profile_functions = [
- """SomeFunction1:24150:300
+ real_world_profile_functions = [
+ """SomeFunction1:24150:300
2: 75
3: 23850
39: 225
""",
- """SomeFunction2:8925:225
+ """SomeFunction2:8925:225
0: 225
0.2: 150
0.1: SomeFunction2:6300
@@ -30,7 +29,7 @@
0.2: SomeFunction2:150
3: 75
""",
- """SomeFunction3:7500:75
+ """SomeFunction3:7500:75
0: 75
0.2: 75
0.1: SomeFunction3:6600
@@ -38,7 +37,7 @@
0.2: SomeFunction3:75
1: 75
""",
- """LargerFunction4:51450:0
+ """LargerFunction4:51450:0
1: 0
3: 0
3.1: 7350
@@ -59,7 +58,7 @@
8: 0
9: 0
""",
- """SomeFakeFunction5:7500:75
+ """SomeFakeFunction5:7500:75
0: 75
0.2: 75
0.1: SomeFakeFunction5:6600
@@ -67,80 +66,87 @@
0.2: SomeFakeFunction5:75
1: 75
""",
- ]
+ ]
- ret = []
- if not indices:
- for x in real_world_profile_functions:
- ret += x.strip().splitlines()
+ ret = []
+ if not indices:
+ for x in real_world_profile_functions:
+ ret += x.strip().splitlines()
+ return ret
+
+ ret = []
+ for i in indices:
+ ret += real_world_profile_functions[i].strip().splitlines()
return ret
- ret = []
- for i in indices:
- ret += real_world_profile_functions[i].strip().splitlines()
- return ret
-
def _run_test(input_lines, goal, cwp_file=None, benchmark_file=None):
- input_buf = io.StringIO('\n'.join(input_lines))
- output_buf = io.StringIO()
- remove_cold_functions.run(input_buf, output_buf, goal, cwp_file,
- benchmark_file)
- return output_buf.getvalue().splitlines()
+ input_buf = io.StringIO("\n".join(input_lines))
+ output_buf = io.StringIO()
+ remove_cold_functions.run(
+ input_buf, output_buf, goal, cwp_file, benchmark_file
+ )
+ return output_buf.getvalue().splitlines()
class Test(unittest.TestCase):
- """Test functions in remove_cold_functions.py"""
+ """Test functions in remove_cold_functions.py"""
- def test_empty_profile(self):
- self.assertEqual(_run_test([], 0), [])
+ def test_empty_profile(self):
+ self.assertEqual(_run_test([], 0), [])
- def test_remove_all_functions_fail(self):
- input_profile_lines = _construct_profile()
- with self.assertRaises(Exception) as context:
- _run_test(input_profile_lines, 0)
- self.assertEqual(
- str(context.exception),
- "It's invalid to remove all functions in the profile")
+ def test_remove_all_functions_fail(self):
+ input_profile_lines = _construct_profile()
+ with self.assertRaises(Exception) as context:
+ _run_test(input_profile_lines, 0)
+ self.assertEqual(
+ str(context.exception),
+ "It's invalid to remove all functions in the profile",
+ )
- def test_remove_cold_functions_work(self):
- input_profile_lines = _construct_profile()
- # To make sure the cold functions are removed in order
- expected_profile_lines = {
- 5: input_profile_lines,
- # Entry 4 wins the tie breaker because the name is smaller
- # alphabetically.
- 4: _construct_profile([0, 1, 3, 4]),
- 3: _construct_profile([0, 1, 3]),
- 2: _construct_profile([0, 3]),
- 1: _construct_profile([3]),
- }
+ def test_remove_cold_functions_work(self):
+ input_profile_lines = _construct_profile()
+ # To make sure the cold functions are removed in order
+ expected_profile_lines = {
+ 5: input_profile_lines,
+ # Entry 4 wins the tie breaker because the name is smaller
+ # alphabetically.
+ 4: _construct_profile([0, 1, 3, 4]),
+ 3: _construct_profile([0, 1, 3]),
+ 2: _construct_profile([0, 3]),
+ 1: _construct_profile([3]),
+ }
- for num in expected_profile_lines:
- self.assertCountEqual(
- _run_test(input_profile_lines, num), expected_profile_lines[num])
+ for num in expected_profile_lines:
+ self.assertCountEqual(
+ _run_test(input_profile_lines, num), expected_profile_lines[num]
+ )
- def test_analyze_cwp_and_benchmark_work(self):
- input_profile_lines = _construct_profile()
- cwp_profile = _construct_profile([0, 1, 3, 4])
- benchmark_profile = _construct_profile([1, 2, 3, 4])
- cwp_buf = io.StringIO('\n'.join(cwp_profile))
- benchmark_buf = io.StringIO('\n'.join(benchmark_profile))
- with patch('sys.stderr', new=io.StringIO()) as fake_output:
- _run_test(input_profile_lines, 3, cwp_buf, benchmark_buf)
+ def test_analyze_cwp_and_benchmark_work(self):
+ input_profile_lines = _construct_profile()
+ cwp_profile = _construct_profile([0, 1, 3, 4])
+ benchmark_profile = _construct_profile([1, 2, 3, 4])
+ cwp_buf = io.StringIO("\n".join(cwp_profile))
+ benchmark_buf = io.StringIO("\n".join(benchmark_profile))
+ with patch("sys.stderr", new=io.StringIO()) as fake_output:
+ _run_test(input_profile_lines, 3, cwp_buf, benchmark_buf)
- output = fake_output.getvalue()
- self.assertIn('Retained 3/5 (60.0%) functions in the profile', output)
- self.assertIn(
- 'Retained 1/1 (100.0%) functions only appear in the CWP profile',
- output)
- self.assertIn(
- 'Retained 0/1 (0.0%) functions only appear in the benchmark profile',
- output)
- self.assertIn(
- 'Retained 2/3 (66.7%) functions appear in both CWP and benchmark'
- ' profiles', output)
+ output = fake_output.getvalue()
+ self.assertIn("Retained 3/5 (60.0%) functions in the profile", output)
+ self.assertIn(
+ "Retained 1/1 (100.0%) functions only appear in the CWP profile",
+ output,
+ )
+ self.assertIn(
+ "Retained 0/1 (0.0%) functions only appear in the benchmark profile",
+ output,
+ )
+ self.assertIn(
+ "Retained 2/3 (66.7%) functions appear in both CWP and benchmark"
+ " profiles",
+ output,
+ )
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/afdo_redaction/remove_indirect_calls.py b/afdo_redaction/remove_indirect_calls.py
index 0dc1507..32dab3f 100755
--- a/afdo_redaction/remove_indirect_calls.py
+++ b/afdo_redaction/remove_indirect_calls.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -17,21 +17,20 @@
size (or worse). See crbug.com/1005023 and crbug.com/916130.
"""
-from __future__ import division, print_function
import argparse
import re
def _remove_indirect_call_targets(lines):
- # Lines with indirect call targets look like:
- # 1.1: 1234 foo:111 bar:122
- #
- # Where 1.1 is the line info/discriminator, 1234 is the total number of
- # samples seen for that line/discriminator, foo:111 is "111 of the calls here
- # went to foo," and bar:122 is "122 of the calls here went to bar."
- call_target_re = re.compile(
- r"""
+ # Lines with indirect call targets look like:
+ # 1.1: 1234 foo:111 bar:122
+ #
+ # Where 1.1 is the line info/discriminator, 1234 is the total number of
+ # samples seen for that line/discriminator, foo:111 is "111 of the calls here
+ # went to foo," and bar:122 is "122 of the calls here went to bar."
+ call_target_re = re.compile(
+ r"""
^\s+ # Top-level lines are function records.
\d+(?:\.\d+)?: # Line info/discriminator
\s+
@@ -39,42 +38,48 @@
\s+
((?:[^\s:]+:\d+\s*)+) # Indirect call target(s)
$
- """, re.VERBOSE)
- for line in lines:
- line = line.rstrip()
+ """,
+ re.VERBOSE,
+ )
+ for line in lines:
+ line = line.rstrip()
- match = call_target_re.match(line)
- if not match:
- yield line + '\n'
- continue
+ match = call_target_re.match(line)
+ if not match:
+ yield line + "\n"
+ continue
- group_start, group_end = match.span(1)
- assert group_end == len(line)
- yield line[:group_start].rstrip() + '\n'
+ group_start, group_end = match.span(1)
+ assert group_end == len(line)
+ yield line[:group_start].rstrip() + "\n"
def run(input_stream, output_stream):
- for line in _remove_indirect_call_targets(input_stream):
- output_stream.write(line)
+ for line in _remove_indirect_call_targets(input_stream):
+ output_stream.write(line)
def main():
- parser = argparse.ArgumentParser(
- description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
- parser.add_argument(
- '--input',
- default='/dev/stdin',
- help='File to read from. Defaults to stdin.')
- parser.add_argument(
- '--output',
- default='/dev/stdout',
- help='File to write to. Defaults to stdout.')
- args = parser.parse_args()
+ parser = argparse.ArgumentParser(
+ description=__doc__,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
+ parser.add_argument(
+ "--input",
+ default="/dev/stdin",
+ help="File to read from. Defaults to stdin.",
+ )
+ parser.add_argument(
+ "--output",
+ default="/dev/stdout",
+ help="File to write to. Defaults to stdout.",
+ )
+ args = parser.parse_args()
- with open(args.input) as stdin:
- with open(args.output, 'w') as stdout:
- run(stdin, stdout)
+ with open(args.input) as stdin:
+ with open(args.output, "w") as stdout:
+ run(stdin, stdout)
-if __name__ == '__main__':
- main()
+if __name__ == "__main__":
+ main()
diff --git a/afdo_redaction/remove_indirect_calls_test.py b/afdo_redaction/remove_indirect_calls_test.py
index 164b284..640b747 100755
--- a/afdo_redaction/remove_indirect_calls_test.py
+++ b/afdo_redaction/remove_indirect_calls_test.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for remove_indirect_calls"""
-from __future__ import print_function
import io
import unittest
@@ -15,22 +14,22 @@
def _run_test(input_lines):
- input_buf = io.StringIO('\n'.join(input_lines))
- output_buf = io.StringIO()
- remove_indirect_calls.run(input_buf, output_buf)
- return output_buf.getvalue().splitlines()
+ input_buf = io.StringIO("\n".join(input_lines))
+ output_buf = io.StringIO()
+ remove_indirect_calls.run(input_buf, output_buf)
+ return output_buf.getvalue().splitlines()
class Test(unittest.TestCase):
- """Tests"""
+ """Tests"""
- def test_empty_profile(self):
- self.assertEqual(_run_test([]), [])
+ def test_empty_profile(self):
+ self.assertEqual(_run_test([]), [])
- def test_removal_on_real_world_code(self):
- # These are copied from an actual textual AFDO profile, but the names made
- # lints unhappy due to their length, so I had to be creative.
- profile_lines = """_ZLongSymbolName:52862:1766
+ def test_removal_on_real_world_code(self):
+ # These are copied from an actual textual AFDO profile, but the names made
+ # lints unhappy due to their length, so I had to be creative.
+ profile_lines = """_ZLongSymbolName:52862:1766
14: 2483
8.1: _SomeInlinedSym:45413
11: _AndAnother:35481
@@ -45,7 +44,7 @@
0: 2483
""".strip().splitlines()
- expected_lines = """_ZLongSymbolName:52862:1766
+ expected_lines = """_ZLongSymbolName:52862:1766
14: 2483
8.1: _SomeInlinedSym:45413
11: _AndAnother:35481
@@ -60,8 +59,8 @@
0: 2483
""".strip().splitlines()
- self.assertEqual(_run_test(profile_lines), expected_lines)
+ self.assertEqual(_run_test(profile_lines), expected_lines)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/afdo_tools/bisection/afdo_prof_analysis.py b/afdo_tools/bisection/afdo_prof_analysis.py
index ce8afd6..c9ca921 100755
--- a/afdo_tools/bisection/afdo_prof_analysis.py
+++ b/afdo_tools/bisection/afdo_prof_analysis.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -20,7 +20,6 @@
- >127: quit immediately
"""
-from __future__ import division, print_function
import argparse
import json
@@ -35,11 +34,12 @@
class StatusEnum(IntEnum):
- """Enum of valid statuses returned by profile decider."""
- GOOD_STATUS = 0
- BAD_STATUS = 1
- SKIP_STATUS = 125
- PROBLEM_STATUS = 127
+ """Enum of valid statuses returned by profile decider."""
+
+ GOOD_STATUS = 0
+ BAD_STATUS = 1
+ SKIP_STATUS = 125
+ PROBLEM_STATUS = 127
statuses = StatusEnum.__members__.values()
@@ -48,396 +48,442 @@
def json_to_text(json_prof):
- text_profile = []
- for func in json_prof:
- text_profile.append(func)
- text_profile.append(json_prof[func])
- return ''.join(text_profile)
+ text_profile = []
+ for func in json_prof:
+ text_profile.append(func)
+ text_profile.append(json_prof[func])
+ return "".join(text_profile)
def text_to_json(f):
- """Performs basic parsing of an AFDO text-based profile.
+ """Performs basic parsing of an AFDO text-based profile.
- This parsing expects an input file object with contents of the form generated
- by bin/llvm-profdata (within an LLVM build).
- """
- results = {}
- curr_func = None
- curr_data = []
- for line in f:
- if not line.startswith(' '):
- if curr_func:
- results[curr_func] = ''.join(curr_data)
- curr_data = []
- curr_func, rest = line.split(':', 1)
- curr_func = curr_func.strip()
- curr_data.append(':' + rest)
- else:
- curr_data.append(line)
+ This parsing expects an input file object with contents of the form generated
+ by bin/llvm-profdata (within an LLVM build).
+ """
+ results = {}
+ curr_func = None
+ curr_data = []
+ for line in f:
+ if not line.startswith(" "):
+ if curr_func:
+ results[curr_func] = "".join(curr_data)
+ curr_data = []
+ curr_func, rest = line.split(":", 1)
+ curr_func = curr_func.strip()
+ curr_data.append(":" + rest)
+ else:
+ curr_data.append(line)
- if curr_func:
- results[curr_func] = ''.join(curr_data)
- return results
+ if curr_func:
+ results[curr_func] = "".join(curr_data)
+ return results
def prof_to_tmp(prof):
- """Creates (and returns) temp filename for given JSON-based AFDO profile."""
- fd, temp_path = mkstemp()
- text_profile = json_to_text(prof)
- with open(temp_path, 'w') as f:
- f.write(text_profile)
- os.close(fd)
- return temp_path
+ """Creates (and returns) temp filename for given JSON-based AFDO profile."""
+ fd, temp_path = mkstemp()
+ text_profile = json_to_text(prof)
+ with open(temp_path, "w") as f:
+ f.write(text_profile)
+ os.close(fd)
+ return temp_path
class DeciderState(object):
- """Class for the external decider."""
+ """Class for the external decider."""
- def __init__(self, state_file, external_decider, seed):
- self.accumulated_results = [] # over this run of the script
- self.external_decider = external_decider
- self.saved_results = [] # imported from a previous run of this script
- self.state_file = state_file
- self.seed = seed if seed is not None else time.time()
+ def __init__(self, state_file, external_decider, seed):
+ self.accumulated_results = [] # over this run of the script
+ self.external_decider = external_decider
+ self.saved_results = [] # imported from a previous run of this script
+ self.state_file = state_file
+ self.seed = seed if seed is not None else time.time()
- def load_state(self):
- if not os.path.exists(self.state_file):
- logging.info('State file %s is empty, starting from beginning',
- self.state_file)
- return
+ def load_state(self):
+ if not os.path.exists(self.state_file):
+ logging.info(
+ "State file %s is empty, starting from beginning",
+ self.state_file,
+ )
+ return
- with open(self.state_file, encoding='utf-8') as f:
- try:
- data = json.load(f)
- except:
- raise ValueError('Provided state file %s to resume from does not'
- ' contain a valid JSON.' % self.state_file)
+ with open(self.state_file, encoding="utf-8") as f:
+ try:
+ data = json.load(f)
+ except:
+ raise ValueError(
+ "Provided state file %s to resume from does not"
+ " contain a valid JSON." % self.state_file
+ )
- if 'seed' not in data or 'accumulated_results' not in data:
- raise ValueError('Provided state file %s to resume from does not contain'
- ' the correct information' % self.state_file)
+ if "seed" not in data or "accumulated_results" not in data:
+ raise ValueError(
+ "Provided state file %s to resume from does not contain"
+ " the correct information" % self.state_file
+ )
- self.seed = data['seed']
- self.saved_results = data['accumulated_results']
- logging.info('Restored state from %s...', self.state_file)
+ self.seed = data["seed"]
+ self.saved_results = data["accumulated_results"]
+ logging.info("Restored state from %s...", self.state_file)
- def save_state(self):
- state = {'seed': self.seed, 'accumulated_results': self.accumulated_results}
- tmp_file = self.state_file + '.new'
- with open(tmp_file, 'w', encoding='utf-8') as f:
- json.dump(state, f, indent=2)
- os.rename(tmp_file, self.state_file)
- logging.info('Logged state to %s...', self.state_file)
+ def save_state(self):
+ state = {
+ "seed": self.seed,
+ "accumulated_results": self.accumulated_results,
+ }
+ tmp_file = self.state_file + ".new"
+ with open(tmp_file, "w", encoding="utf-8") as f:
+ json.dump(state, f, indent=2)
+ os.rename(tmp_file, self.state_file)
+ logging.info("Logged state to %s...", self.state_file)
- def run(self, prof, save_run=True):
- """Run the external deciding script on the given profile."""
- if self.saved_results and save_run:
- result = self.saved_results.pop(0)
- self.accumulated_results.append(result)
- self.save_state()
- return StatusEnum(result)
+ def run(self, prof, save_run=True):
+ """Run the external deciding script on the given profile."""
+ if self.saved_results and save_run:
+ result = self.saved_results.pop(0)
+ self.accumulated_results.append(result)
+ self.save_state()
+ return StatusEnum(result)
- filename = prof_to_tmp(prof)
+ filename = prof_to_tmp(prof)
- try:
- return_code = subprocess.call([self.external_decider, filename])
- finally:
- os.remove(filename)
+ try:
+ return_code = subprocess.call([self.external_decider, filename])
+ finally:
+ os.remove(filename)
- if return_code in statuses:
- status = StatusEnum(return_code)
- if status == StatusEnum.PROBLEM_STATUS:
- prof_file = prof_to_tmp(prof)
- raise RuntimeError('Provided decider script returned PROBLEM_STATUS '
- 'when run on profile stored at %s. AFDO Profile '
- 'analysis aborting' % prof_file)
- if save_run:
- self.accumulated_results.append(status.value)
- logging.info('Run %d of external script %s returned %s',
- len(self.accumulated_results), self.external_decider,
- status.name)
- self.save_state()
- return status
- raise ValueError(
- 'Provided external script had unexpected return code %d' % return_code)
+ if return_code in statuses:
+ status = StatusEnum(return_code)
+ if status == StatusEnum.PROBLEM_STATUS:
+ prof_file = prof_to_tmp(prof)
+ raise RuntimeError(
+ "Provided decider script returned PROBLEM_STATUS "
+ "when run on profile stored at %s. AFDO Profile "
+ "analysis aborting" % prof_file
+ )
+ if save_run:
+ self.accumulated_results.append(status.value)
+ logging.info(
+ "Run %d of external script %s returned %s",
+ len(self.accumulated_results),
+ self.external_decider,
+ status.name,
+ )
+ self.save_state()
+ return status
+ raise ValueError(
+ "Provided external script had unexpected return code %d"
+ % return_code
+ )
def bisect_profiles(decider, good, bad, common_funcs, lo, hi):
- """Recursive function which bisects good and bad profiles.
+ """Recursive function which bisects good and bad profiles.
- Args:
- decider: function which, given a JSON-based AFDO profile, returns an
- element of 'statuses' based on the status of the profile
- good: JSON-based good AFDO profile
- bad: JSON-based bad AFDO profile
- common_funcs: the list of functions which have top-level profiles in both
- 'good' and 'bad'
- lo: lower bound of range being bisected on
- hi: upper bound of range being bisected on
+ Args:
+ decider: function which, given a JSON-based AFDO profile, returns an
+ element of 'statuses' based on the status of the profile
+ good: JSON-based good AFDO profile
+ bad: JSON-based bad AFDO profile
+ common_funcs: the list of functions which have top-level profiles in both
+ 'good' and 'bad'
+ lo: lower bound of range being bisected on
+ hi: upper bound of range being bisected on
- Returns a dictionary with two keys: 'individuals' and 'ranges'.
- 'individuals': a list of individual functions found to make the profile BAD
- 'ranges': a list of lists of function names. Each list of functions is a list
- such that including all of those from the bad profile makes the good
- profile BAD. It may not be the smallest problematic combination, but
- definitely contains a problematic combination of profiles.
- """
+ Returns a dictionary with two keys: 'individuals' and 'ranges'.
+ 'individuals': a list of individual functions found to make the profile BAD
+ 'ranges': a list of lists of function names. Each list of functions is a list
+ such that including all of those from the bad profile makes the good
+ profile BAD. It may not be the smallest problematic combination, but
+ definitely contains a problematic combination of profiles.
+ """
- results = {'individuals': [], 'ranges': []}
- if hi - lo <= 1:
- logging.info('Found %s as a problematic function profile', common_funcs[lo])
- results['individuals'].append(common_funcs[lo])
+ results = {"individuals": [], "ranges": []}
+ if hi - lo <= 1:
+ logging.info(
+ "Found %s as a problematic function profile", common_funcs[lo]
+ )
+ results["individuals"].append(common_funcs[lo])
+ return results
+
+ mid = (lo + hi) // 2
+ lo_mid_prof = good.copy() # covers bad from lo:mid
+ mid_hi_prof = good.copy() # covers bad from mid:hi
+ for func in common_funcs[lo:mid]:
+ lo_mid_prof[func] = bad[func]
+ for func in common_funcs[mid:hi]:
+ mid_hi_prof[func] = bad[func]
+
+ lo_mid_verdict = decider.run(lo_mid_prof)
+ mid_hi_verdict = decider.run(mid_hi_prof)
+
+ if lo_mid_verdict == StatusEnum.BAD_STATUS:
+ result = bisect_profiles(decider, good, bad, common_funcs, lo, mid)
+ results["individuals"].extend(result["individuals"])
+ results["ranges"].extend(result["ranges"])
+ if mid_hi_verdict == StatusEnum.BAD_STATUS:
+ result = bisect_profiles(decider, good, bad, common_funcs, mid, hi)
+ results["individuals"].extend(result["individuals"])
+ results["ranges"].extend(result["ranges"])
+
+ # neither half is bad -> the issue is caused by several things occuring
+ # in conjunction, and this combination crosses 'mid'
+ if lo_mid_verdict == mid_hi_verdict == StatusEnum.GOOD_STATUS:
+ problem_range = range_search(decider, good, bad, common_funcs, lo, hi)
+ if problem_range:
+ logging.info(
+ "Found %s as a problematic combination of profiles",
+ str(problem_range),
+ )
+ results["ranges"].append(problem_range)
+
return results
- mid = (lo + hi) // 2
- lo_mid_prof = good.copy() # covers bad from lo:mid
- mid_hi_prof = good.copy() # covers bad from mid:hi
- for func in common_funcs[lo:mid]:
- lo_mid_prof[func] = bad[func]
- for func in common_funcs[mid:hi]:
- mid_hi_prof[func] = bad[func]
-
- lo_mid_verdict = decider.run(lo_mid_prof)
- mid_hi_verdict = decider.run(mid_hi_prof)
-
- if lo_mid_verdict == StatusEnum.BAD_STATUS:
- result = bisect_profiles(decider, good, bad, common_funcs, lo, mid)
- results['individuals'].extend(result['individuals'])
- results['ranges'].extend(result['ranges'])
- if mid_hi_verdict == StatusEnum.BAD_STATUS:
- result = bisect_profiles(decider, good, bad, common_funcs, mid, hi)
- results['individuals'].extend(result['individuals'])
- results['ranges'].extend(result['ranges'])
-
- # neither half is bad -> the issue is caused by several things occuring
- # in conjunction, and this combination crosses 'mid'
- if lo_mid_verdict == mid_hi_verdict == StatusEnum.GOOD_STATUS:
- problem_range = range_search(decider, good, bad, common_funcs, lo, hi)
- if problem_range:
- logging.info('Found %s as a problematic combination of profiles',
- str(problem_range))
- results['ranges'].append(problem_range)
-
- return results
-
def bisect_profiles_wrapper(decider, good, bad, perform_check=True):
- """Wrapper for recursive profile bisection."""
+ """Wrapper for recursive profile bisection."""
- # Validate good and bad profiles are such, otherwise bisection reports noise
- # Note that while decider is a random mock, these assertions may fail.
- if perform_check:
- if decider.run(good, save_run=False) != StatusEnum.GOOD_STATUS:
- raise ValueError('Supplied good profile is not actually GOOD')
- if decider.run(bad, save_run=False) != StatusEnum.BAD_STATUS:
- raise ValueError('Supplied bad profile is not actually BAD')
+ # Validate good and bad profiles are such, otherwise bisection reports noise
+ # Note that while decider is a random mock, these assertions may fail.
+ if perform_check:
+ if decider.run(good, save_run=False) != StatusEnum.GOOD_STATUS:
+ raise ValueError("Supplied good profile is not actually GOOD")
+ if decider.run(bad, save_run=False) != StatusEnum.BAD_STATUS:
+ raise ValueError("Supplied bad profile is not actually BAD")
- common_funcs = sorted(func for func in good if func in bad)
- if not common_funcs:
- return {'ranges': [], 'individuals': []}
+ common_funcs = sorted(func for func in good if func in bad)
+ if not common_funcs:
+ return {"ranges": [], "individuals": []}
- # shuffle because the results of our analysis can be quite order-dependent
- # but this list has no inherent ordering. By shuffling each time, the chances
- # of finding new, potentially interesting results are increased each time
- # the program is run
- random.shuffle(common_funcs)
- results = bisect_profiles(decider, good, bad, common_funcs, 0,
- len(common_funcs))
- results['ranges'].sort()
- results['individuals'].sort()
- return results
+ # shuffle because the results of our analysis can be quite order-dependent
+ # but this list has no inherent ordering. By shuffling each time, the chances
+ # of finding new, potentially interesting results are increased each time
+ # the program is run
+ random.shuffle(common_funcs)
+ results = bisect_profiles(
+ decider, good, bad, common_funcs, 0, len(common_funcs)
+ )
+ results["ranges"].sort()
+ results["individuals"].sort()
+ return results
def range_search(decider, good, bad, common_funcs, lo, hi):
- """Searches for problematic range crossing mid border.
+ """Searches for problematic range crossing mid border.
- The main inner algorithm is the following, which looks for the smallest
- possible ranges with problematic combinations. It starts the upper bound at
- the midpoint, and increments in halves until it gets a BAD profile.
- Then, it increments the lower bound (in halves) until the resultant profile
- is GOOD, and then we have a range that causes 'BAD'ness.
+ The main inner algorithm is the following, which looks for the smallest
+ possible ranges with problematic combinations. It starts the upper bound at
+ the midpoint, and increments in halves until it gets a BAD profile.
+ Then, it increments the lower bound (in halves) until the resultant profile
+ is GOOD, and then we have a range that causes 'BAD'ness.
- It does this _NUM_RUNS_RANGE_SEARCH times, and shuffles the functions being
- looked at uniquely each time to try and get the smallest possible range
- of functions in a reasonable timeframe.
- """
+ It does this _NUM_RUNS_RANGE_SEARCH times, and shuffles the functions being
+ looked at uniquely each time to try and get the smallest possible range
+ of functions in a reasonable timeframe.
+ """
- average = lambda x, y: int(round((x + y) // 2.0))
+ average = lambda x, y: int(round((x + y) // 2.0))
- def find_upper_border(good_copy, funcs, lo, hi, last_bad_val=None):
- """Finds the upper border of problematic range."""
- mid = average(lo, hi)
- if mid in (lo, hi):
- return last_bad_val or hi
+ def find_upper_border(good_copy, funcs, lo, hi, last_bad_val=None):
+ """Finds the upper border of problematic range."""
+ mid = average(lo, hi)
+ if mid in (lo, hi):
+ return last_bad_val or hi
- for func in funcs[lo:mid]:
- good_copy[func] = bad[func]
- verdict = decider.run(good_copy)
+ for func in funcs[lo:mid]:
+ good_copy[func] = bad[func]
+ verdict = decider.run(good_copy)
- # reset for next iteration
- for func in funcs:
- good_copy[func] = good[func]
+ # reset for next iteration
+ for func in funcs:
+ good_copy[func] = good[func]
- if verdict == StatusEnum.BAD_STATUS:
- return find_upper_border(good_copy, funcs, lo, mid, mid)
- return find_upper_border(good_copy, funcs, mid, hi, last_bad_val)
+ if verdict == StatusEnum.BAD_STATUS:
+ return find_upper_border(good_copy, funcs, lo, mid, mid)
+ return find_upper_border(good_copy, funcs, mid, hi, last_bad_val)
- def find_lower_border(good_copy, funcs, lo, hi, last_bad_val=None):
- """Finds the lower border of problematic range."""
- mid = average(lo, hi)
- if mid in (lo, hi):
- return last_bad_val or lo
+ def find_lower_border(good_copy, funcs, lo, hi, last_bad_val=None):
+ """Finds the lower border of problematic range."""
+ mid = average(lo, hi)
+ if mid in (lo, hi):
+ return last_bad_val or lo
- for func in funcs[lo:mid]:
- good_copy[func] = good[func]
- verdict = decider.run(good_copy)
+ for func in funcs[lo:mid]:
+ good_copy[func] = good[func]
+ verdict = decider.run(good_copy)
- # reset for next iteration
- for func in funcs:
- good_copy[func] = bad[func]
+ # reset for next iteration
+ for func in funcs:
+ good_copy[func] = bad[func]
- if verdict == StatusEnum.BAD_STATUS:
- return find_lower_border(good_copy, funcs, mid, hi, lo)
- return find_lower_border(good_copy, funcs, lo, mid, last_bad_val)
+ if verdict == StatusEnum.BAD_STATUS:
+ return find_lower_border(good_copy, funcs, mid, hi, lo)
+ return find_lower_border(good_copy, funcs, lo, mid, last_bad_val)
- lo_mid_funcs = []
- mid_hi_funcs = []
- min_range_funcs = []
- for _ in range(_NUM_RUNS_RANGE_SEARCH):
+ lo_mid_funcs = []
+ mid_hi_funcs = []
+ min_range_funcs = []
+ for _ in range(_NUM_RUNS_RANGE_SEARCH):
- if min_range_funcs: # only examine range we've already narrowed to
- random.shuffle(lo_mid_funcs)
- random.shuffle(mid_hi_funcs)
- else: # consider lo-mid and mid-hi separately bc must cross border
- mid = (lo + hi) // 2
- lo_mid_funcs = common_funcs[lo:mid]
- mid_hi_funcs = common_funcs[mid:hi]
+ if min_range_funcs: # only examine range we've already narrowed to
+ random.shuffle(lo_mid_funcs)
+ random.shuffle(mid_hi_funcs)
+ else: # consider lo-mid and mid-hi separately bc must cross border
+ mid = (lo + hi) // 2
+ lo_mid_funcs = common_funcs[lo:mid]
+ mid_hi_funcs = common_funcs[mid:hi]
- funcs = lo_mid_funcs + mid_hi_funcs
- hi = len(funcs)
- mid = len(lo_mid_funcs)
- lo = 0
+ funcs = lo_mid_funcs + mid_hi_funcs
+ hi = len(funcs)
+ mid = len(lo_mid_funcs)
+ lo = 0
- # because we need the problematic pair to pop up before we can narrow it
- prof = good.copy()
- for func in lo_mid_funcs:
- prof[func] = bad[func]
+ # because we need the problematic pair to pop up before we can narrow it
+ prof = good.copy()
+ for func in lo_mid_funcs:
+ prof[func] = bad[func]
- upper_border = find_upper_border(prof, funcs, mid, hi)
- for func in lo_mid_funcs + funcs[mid:upper_border]:
- prof[func] = bad[func]
+ upper_border = find_upper_border(prof, funcs, mid, hi)
+ for func in lo_mid_funcs + funcs[mid:upper_border]:
+ prof[func] = bad[func]
- lower_border = find_lower_border(prof, funcs, lo, mid)
- curr_range_funcs = funcs[lower_border:upper_border]
+ lower_border = find_lower_border(prof, funcs, lo, mid)
+ curr_range_funcs = funcs[lower_border:upper_border]
- if not min_range_funcs or len(curr_range_funcs) < len(min_range_funcs):
- min_range_funcs = curr_range_funcs
- lo_mid_funcs = lo_mid_funcs[lo_mid_funcs.index(min_range_funcs[0]):]
- mid_hi_funcs = mid_hi_funcs[:mid_hi_funcs.index(min_range_funcs[-1]) + 1]
- if len(min_range_funcs) == 2:
- min_range_funcs.sort()
- return min_range_funcs # can't get any smaller
+ if not min_range_funcs or len(curr_range_funcs) < len(min_range_funcs):
+ min_range_funcs = curr_range_funcs
+ lo_mid_funcs = lo_mid_funcs[
+ lo_mid_funcs.index(min_range_funcs[0]) :
+ ]
+ mid_hi_funcs = mid_hi_funcs[
+ : mid_hi_funcs.index(min_range_funcs[-1]) + 1
+ ]
+ if len(min_range_funcs) == 2:
+ min_range_funcs.sort()
+ return min_range_funcs # can't get any smaller
- min_range_funcs.sort()
- return min_range_funcs
+ min_range_funcs.sort()
+ return min_range_funcs
def check_good_not_bad(decider, good, bad):
- """Check if bad prof becomes GOOD by adding funcs it lacks from good prof"""
- bad_copy = bad.copy()
- for func in good:
- if func not in bad:
- bad_copy[func] = good[func]
- return decider.run(bad_copy) == StatusEnum.GOOD_STATUS
+ """Check if bad prof becomes GOOD by adding funcs it lacks from good prof"""
+ bad_copy = bad.copy()
+ for func in good:
+ if func not in bad:
+ bad_copy[func] = good[func]
+ return decider.run(bad_copy) == StatusEnum.GOOD_STATUS
def check_bad_not_good(decider, good, bad):
- """Check if good prof BAD after adding funcs bad prof has that good doesnt"""
- good_copy = good.copy()
- for func in bad:
- if func not in good:
- good_copy[func] = bad[func]
- return decider.run(good_copy) == StatusEnum.BAD_STATUS
+ """Check if good prof BAD after adding funcs bad prof has that good doesnt"""
+ good_copy = good.copy()
+ for func in bad:
+ if func not in good:
+ good_copy[func] = bad[func]
+ return decider.run(good_copy) == StatusEnum.BAD_STATUS
def parse_args():
- parser = argparse.ArgumentParser(
- description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
- parser.add_argument(
- '--good_prof',
- required=True,
- help='Text-based "Good" profile for analysis')
- parser.add_argument(
- '--bad_prof', required=True, help='Text-based "Bad" profile for analysis')
- parser.add_argument(
- '--external_decider',
- required=True,
- help='External script that, given an AFDO profile, returns '
- 'GOOD/BAD/SKIP')
- parser.add_argument(
- '--analysis_output_file',
- required=True,
- help='File to output JSON results to')
- parser.add_argument(
- '--state_file',
- default='%s/afdo_analysis_state.json' % os.getcwd(),
- help='File path containing state to load from initially, and will be '
- 'overwritten with new state on each iteration')
- parser.add_argument(
- '--no_resume',
- action='store_true',
- help='If enabled, no initial state will be loaded and the program will '
- 'run from the beginning')
- parser.add_argument(
- '--remove_state_on_completion',
- action='store_true',
- help='If enabled, state file will be removed once profile analysis is '
- 'completed')
- parser.add_argument(
- '--seed', type=float, help='Float specifying seed for randomness')
- return parser.parse_args()
+ parser = argparse.ArgumentParser(
+ description=__doc__,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
+ parser.add_argument(
+ "--good_prof",
+ required=True,
+ help='Text-based "Good" profile for analysis',
+ )
+ parser.add_argument(
+ "--bad_prof",
+ required=True,
+ help='Text-based "Bad" profile for analysis',
+ )
+ parser.add_argument(
+ "--external_decider",
+ required=True,
+ help="External script that, given an AFDO profile, returns "
+ "GOOD/BAD/SKIP",
+ )
+ parser.add_argument(
+ "--analysis_output_file",
+ required=True,
+ help="File to output JSON results to",
+ )
+ parser.add_argument(
+ "--state_file",
+ default="%s/afdo_analysis_state.json" % os.getcwd(),
+ help="File path containing state to load from initially, and will be "
+ "overwritten with new state on each iteration",
+ )
+ parser.add_argument(
+ "--no_resume",
+ action="store_true",
+ help="If enabled, no initial state will be loaded and the program will "
+ "run from the beginning",
+ )
+ parser.add_argument(
+ "--remove_state_on_completion",
+ action="store_true",
+ help="If enabled, state file will be removed once profile analysis is "
+ "completed",
+ )
+ parser.add_argument(
+ "--seed", type=float, help="Float specifying seed for randomness"
+ )
+ return parser.parse_args()
def main(flags):
- logging.getLogger().setLevel(logging.INFO)
- if not flags.no_resume and flags.seed: # conflicting seeds
- raise RuntimeError('Ambiguous seed value; do not resume from existing '
- 'state and also specify seed by command line flag')
+ logging.getLogger().setLevel(logging.INFO)
+ if not flags.no_resume and flags.seed: # conflicting seeds
+ raise RuntimeError(
+ "Ambiguous seed value; do not resume from existing "
+ "state and also specify seed by command line flag"
+ )
- decider = DeciderState(
- flags.state_file, flags.external_decider, seed=flags.seed)
- if not flags.no_resume:
- decider.load_state()
- random.seed(decider.seed)
+ decider = DeciderState(
+ flags.state_file, flags.external_decider, seed=flags.seed
+ )
+ if not flags.no_resume:
+ decider.load_state()
+ random.seed(decider.seed)
- with open(flags.good_prof) as good_f:
- good_items = text_to_json(good_f)
- with open(flags.bad_prof) as bad_f:
- bad_items = text_to_json(bad_f)
+ with open(flags.good_prof) as good_f:
+ good_items = text_to_json(good_f)
+ with open(flags.bad_prof) as bad_f:
+ bad_items = text_to_json(bad_f)
- bisect_results = bisect_profiles_wrapper(decider, good_items, bad_items)
- gnb_result = check_good_not_bad(decider, good_items, bad_items)
- bng_result = check_bad_not_good(decider, good_items, bad_items)
+ bisect_results = bisect_profiles_wrapper(decider, good_items, bad_items)
+ gnb_result = check_good_not_bad(decider, good_items, bad_items)
+ bng_result = check_bad_not_good(decider, good_items, bad_items)
- results = {
- 'seed': decider.seed,
- 'bisect_results': bisect_results,
- 'good_only_functions': gnb_result,
- 'bad_only_functions': bng_result
- }
- with open(flags.analysis_output_file, 'w', encoding='utf-8') as f:
- json.dump(results, f, indent=2)
- if flags.remove_state_on_completion:
- os.remove(flags.state_file)
- logging.info('Removed state file %s following completion of script...',
- flags.state_file)
- else:
- completed_state_file = '%s.completed.%s' % (flags.state_file,
- str(date.today()))
- os.rename(flags.state_file, completed_state_file)
- logging.info('Stored completed state file as %s...', completed_state_file)
- return results
+ results = {
+ "seed": decider.seed,
+ "bisect_results": bisect_results,
+ "good_only_functions": gnb_result,
+ "bad_only_functions": bng_result,
+ }
+ with open(flags.analysis_output_file, "w", encoding="utf-8") as f:
+ json.dump(results, f, indent=2)
+ if flags.remove_state_on_completion:
+ os.remove(flags.state_file)
+ logging.info(
+ "Removed state file %s following completion of script...",
+ flags.state_file,
+ )
+ else:
+ completed_state_file = "%s.completed.%s" % (
+ flags.state_file,
+ str(date.today()),
+ )
+ os.rename(flags.state_file, completed_state_file)
+ logging.info(
+ "Stored completed state file as %s...", completed_state_file
+ )
+ return results
-if __name__ == '__main__':
- main(parse_args())
+if __name__ == "__main__":
+ main(parse_args())
diff --git a/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py b/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py
index b293b8a..8a0dae3 100755
--- a/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py
+++ b/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""End-to-end test for afdo_prof_analysis."""
-from __future__ import absolute_import, division, print_function
import json
import os
@@ -19,263 +18,273 @@
class ObjectWithFields(object):
- """Turns kwargs given to the constructor into fields on an object.
+ """Turns kwargs given to the constructor into fields on an object.
- Examples:
- x = ObjectWithFields(a=1, b=2)
- assert x.a == 1
- assert x.b == 2
- """
+ Examples:
+ x = ObjectWithFields(a=1, b=2)
+ assert x.a == 1
+ assert x.b == 2
+ """
- def __init__(self, **kwargs):
- for key, val in kwargs.items():
- setattr(self, key, val)
+ def __init__(self, **kwargs):
+ for key, val in kwargs.items():
+ setattr(self, key, val)
class AfdoProfAnalysisE2ETest(unittest.TestCase):
- """Class for end-to-end testing of AFDO Profile Analysis"""
+ """Class for end-to-end testing of AFDO Profile Analysis"""
- # nothing significant about the values, just easier to remember even vs odd
- good_prof = {
- 'func_a': ':1\n 1: 3\n 3: 5\n 5: 7\n',
- 'func_b': ':3\n 3: 5\n 5: 7\n 7: 9\n',
- 'func_c': ':5\n 5: 7\n 7: 9\n 9: 11\n',
- 'func_d': ':7\n 7: 9\n 9: 11\n 11: 13\n',
- 'good_func_a': ':11\n',
- 'good_func_b': ':13\n'
- }
-
- bad_prof = {
- 'func_a': ':2\n 2: 4\n 4: 6\n 6: 8\n',
- 'func_b': ':4\n 4: 6\n 6: 8\n 8: 10\n',
- 'func_c': ':6\n 6: 8\n 8: 10\n 10: 12\n',
- 'func_d': ':8\n 8: 10\n 10: 12\n 12: 14\n',
- 'bad_func_a': ':12\n',
- 'bad_func_b': ':14\n'
- }
-
- expected = {
- 'good_only_functions': False,
- 'bad_only_functions': True,
- 'bisect_results': {
- 'ranges': [],
- 'individuals': ['func_a']
- }
- }
-
- def test_afdo_prof_analysis(self):
- # Individual issues take precedence by nature of our algos
- # so first, that should be caught
- good = self.good_prof.copy()
- bad = self.bad_prof.copy()
- self.run_check(good, bad, self.expected)
-
- # Now remove individuals and exclusively BAD, and check that range is caught
- bad['func_a'] = good['func_a']
- bad.pop('bad_func_a')
- bad.pop('bad_func_b')
-
- expected_cp = self.expected.copy()
- expected_cp['bad_only_functions'] = False
- expected_cp['bisect_results'] = {
- 'individuals': [],
- 'ranges': [['func_b', 'func_c', 'func_d']]
+ # nothing significant about the values, just easier to remember even vs odd
+ good_prof = {
+ "func_a": ":1\n 1: 3\n 3: 5\n 5: 7\n",
+ "func_b": ":3\n 3: 5\n 5: 7\n 7: 9\n",
+ "func_c": ":5\n 5: 7\n 7: 9\n 9: 11\n",
+ "func_d": ":7\n 7: 9\n 9: 11\n 11: 13\n",
+ "good_func_a": ":11\n",
+ "good_func_b": ":13\n",
}
- self.run_check(good, bad, expected_cp)
+ bad_prof = {
+ "func_a": ":2\n 2: 4\n 4: 6\n 6: 8\n",
+ "func_b": ":4\n 4: 6\n 6: 8\n 8: 10\n",
+ "func_c": ":6\n 6: 8\n 8: 10\n 10: 12\n",
+ "func_d": ":8\n 8: 10\n 10: 12\n 12: 14\n",
+ "bad_func_a": ":12\n",
+ "bad_func_b": ":14\n",
+ }
- def test_afdo_prof_state(self):
- """Verifies that saved state is correct replication."""
- temp_dir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
-
- good = self.good_prof.copy()
- bad = self.bad_prof.copy()
- # add more functions to data
- for x in range(400):
- good['func_%d' % x] = ''
- bad['func_%d' % x] = ''
-
- fd_first, first_result = tempfile.mkstemp(dir=temp_dir)
- os.close(fd_first)
- fd_state, state_file = tempfile.mkstemp(dir=temp_dir)
- os.close(fd_state)
- self.run_check(
- self.good_prof,
- self.bad_prof,
- self.expected,
- state_file=state_file,
- out_file=first_result)
-
- fd_second, second_result = tempfile.mkstemp(dir=temp_dir)
- os.close(fd_second)
- completed_state_file = '%s.completed.%s' % (state_file, str(date.today()))
- self.run_check(
- self.good_prof,
- self.bad_prof,
- self.expected,
- state_file=completed_state_file,
- no_resume=False,
- out_file=second_result)
-
- with open(first_result) as f:
- initial_run = json.load(f)
- with open(second_result) as f:
- loaded_run = json.load(f)
- self.assertEqual(initial_run, loaded_run)
-
- def test_exit_on_problem_status(self):
- temp_dir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
-
- fd_state, state_file = tempfile.mkstemp(dir=temp_dir)
- os.close(fd_state)
- with self.assertRaises(RuntimeError):
- self.run_check(
- self.good_prof,
- self.bad_prof,
- self.expected,
- state_file=state_file,
- extern_decider='problemstatus_external.sh')
-
- def test_state_assumption(self):
-
- def compare_runs(tmp_dir, first_ctr, second_ctr):
- """Compares given prof versions between first and second run in test."""
- first_prof = '%s/.first_run_%d' % (tmp_dir, first_ctr)
- second_prof = '%s/.second_run_%d' % (tmp_dir, second_ctr)
- with open(first_prof) as f:
- first_prof_text = f.read()
- with open(second_prof) as f:
- second_prof_text = f.read()
- self.assertEqual(first_prof_text, second_prof_text)
-
- good_prof = {'func_a': ':1\n3: 3\n5: 7\n'}
- bad_prof = {'func_a': ':2\n4: 4\n6: 8\n'}
- # add some noise to the profiles; 15 is an arbitrary choice
- for x in range(15):
- func = 'func_%d' % x
- good_prof[func] = ':%d\n' % (x)
- bad_prof[func] = ':%d\n' % (x + 1)
expected = {
- 'bisect_results': {
- 'ranges': [],
- 'individuals': ['func_a']
- },
- 'good_only_functions': False,
- 'bad_only_functions': False
+ "good_only_functions": False,
+ "bad_only_functions": True,
+ "bisect_results": {"ranges": [], "individuals": ["func_a"]},
}
- # using a static temp dir rather than a dynamic one because these files are
- # shared between the bash scripts and this Python test, and the arguments
- # to the bash scripts are fixed by afdo_prof_analysis.py so it would be
- # difficult to communicate dynamically generated directory to bash scripts
- scripts_tmp_dir = '%s/afdo_test_tmp' % os.getcwd()
- os.mkdir(scripts_tmp_dir)
- self.addCleanup(shutil.rmtree, scripts_tmp_dir, ignore_errors=True)
+ def test_afdo_prof_analysis(self):
+ # Individual issues take precedence by nature of our algos
+ # so first, that should be caught
+ good = self.good_prof.copy()
+ bad = self.bad_prof.copy()
+ self.run_check(good, bad, self.expected)
- # files used in the bash scripts used as external deciders below
- # - count_file tracks the current number of calls to the script in total
- # - local_count_file tracks the number of calls to the script without
- # interruption
- count_file = '%s/.count' % scripts_tmp_dir
- local_count_file = '%s/.local_count' % scripts_tmp_dir
+ # Now remove individuals and exclusively BAD, and check that range is caught
+ bad["func_a"] = good["func_a"]
+ bad.pop("bad_func_a")
+ bad.pop("bad_func_b")
- # runs through whole thing at once
- initial_seed = self.run_check(
- good_prof,
- bad_prof,
- expected,
- extern_decider='state_assumption_external.sh')
- with open(count_file) as f:
- num_calls = int(f.read())
- os.remove(count_file) # reset counts for second run
- finished_state_file = 'afdo_analysis_state.json.completed.%s' % str(
- date.today())
- self.addCleanup(os.remove, finished_state_file)
+ expected_cp = self.expected.copy()
+ expected_cp["bad_only_functions"] = False
+ expected_cp["bisect_results"] = {
+ "individuals": [],
+ "ranges": [["func_b", "func_c", "func_d"]],
+ }
- # runs the same analysis but interrupted each iteration
- for i in range(2 * num_calls + 1):
- no_resume_run = (i == 0)
- seed = initial_seed if no_resume_run else None
- try:
+ self.run_check(good, bad, expected_cp)
+
+ def test_afdo_prof_state(self):
+ """Verifies that saved state is correct replication."""
+ temp_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
+
+ good = self.good_prof.copy()
+ bad = self.bad_prof.copy()
+ # add more functions to data
+ for x in range(400):
+ good["func_%d" % x] = ""
+ bad["func_%d" % x] = ""
+
+ fd_first, first_result = tempfile.mkstemp(dir=temp_dir)
+ os.close(fd_first)
+ fd_state, state_file = tempfile.mkstemp(dir=temp_dir)
+ os.close(fd_state)
self.run_check(
+ self.good_prof,
+ self.bad_prof,
+ self.expected,
+ state_file=state_file,
+ out_file=first_result,
+ )
+
+ fd_second, second_result = tempfile.mkstemp(dir=temp_dir)
+ os.close(fd_second)
+ completed_state_file = "%s.completed.%s" % (
+ state_file,
+ str(date.today()),
+ )
+ self.run_check(
+ self.good_prof,
+ self.bad_prof,
+ self.expected,
+ state_file=completed_state_file,
+ no_resume=False,
+ out_file=second_result,
+ )
+
+ with open(first_result) as f:
+ initial_run = json.load(f)
+ with open(second_result) as f:
+ loaded_run = json.load(f)
+ self.assertEqual(initial_run, loaded_run)
+
+ def test_exit_on_problem_status(self):
+ temp_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
+
+ fd_state, state_file = tempfile.mkstemp(dir=temp_dir)
+ os.close(fd_state)
+ with self.assertRaises(RuntimeError):
+ self.run_check(
+ self.good_prof,
+ self.bad_prof,
+ self.expected,
+ state_file=state_file,
+ extern_decider="problemstatus_external.sh",
+ )
+
+ def test_state_assumption(self):
+ def compare_runs(tmp_dir, first_ctr, second_ctr):
+ """Compares given prof versions between first and second run in test."""
+ first_prof = "%s/.first_run_%d" % (tmp_dir, first_ctr)
+ second_prof = "%s/.second_run_%d" % (tmp_dir, second_ctr)
+ with open(first_prof) as f:
+ first_prof_text = f.read()
+ with open(second_prof) as f:
+ second_prof_text = f.read()
+ self.assertEqual(first_prof_text, second_prof_text)
+
+ good_prof = {"func_a": ":1\n3: 3\n5: 7\n"}
+ bad_prof = {"func_a": ":2\n4: 4\n6: 8\n"}
+ # add some noise to the profiles; 15 is an arbitrary choice
+ for x in range(15):
+ func = "func_%d" % x
+ good_prof[func] = ":%d\n" % (x)
+ bad_prof[func] = ":%d\n" % (x + 1)
+ expected = {
+ "bisect_results": {"ranges": [], "individuals": ["func_a"]},
+ "good_only_functions": False,
+ "bad_only_functions": False,
+ }
+
+ # using a static temp dir rather than a dynamic one because these files are
+ # shared between the bash scripts and this Python test, and the arguments
+ # to the bash scripts are fixed by afdo_prof_analysis.py so it would be
+ # difficult to communicate dynamically generated directory to bash scripts
+ scripts_tmp_dir = "%s/afdo_test_tmp" % os.getcwd()
+ os.mkdir(scripts_tmp_dir)
+ self.addCleanup(shutil.rmtree, scripts_tmp_dir, ignore_errors=True)
+
+ # files used in the bash scripts used as external deciders below
+ # - count_file tracks the current number of calls to the script in total
+ # - local_count_file tracks the number of calls to the script without
+ # interruption
+ count_file = "%s/.count" % scripts_tmp_dir
+ local_count_file = "%s/.local_count" % scripts_tmp_dir
+
+ # runs through whole thing at once
+ initial_seed = self.run_check(
good_prof,
bad_prof,
expected,
- no_resume=no_resume_run,
- extern_decider='state_assumption_interrupt.sh',
- seed=seed)
- break
- except RuntimeError:
- # script was interrupted, so we restart local count
- os.remove(local_count_file)
- else:
- raise RuntimeError('Test failed -- took too many iterations')
+ extern_decider="state_assumption_external.sh",
+ )
+ with open(count_file) as f:
+ num_calls = int(f.read())
+ os.remove(count_file) # reset counts for second run
+ finished_state_file = "afdo_analysis_state.json.completed.%s" % str(
+ date.today()
+ )
+ self.addCleanup(os.remove, finished_state_file)
- for initial_ctr in range(3): # initial runs unaffected by interruption
- compare_runs(scripts_tmp_dir, initial_ctr, initial_ctr)
+ # runs the same analysis but interrupted each iteration
+ for i in range(2 * num_calls + 1):
+ no_resume_run = i == 0
+ seed = initial_seed if no_resume_run else None
+ try:
+ self.run_check(
+ good_prof,
+ bad_prof,
+ expected,
+ no_resume=no_resume_run,
+ extern_decider="state_assumption_interrupt.sh",
+ seed=seed,
+ )
+ break
+ except RuntimeError:
+ # script was interrupted, so we restart local count
+ os.remove(local_count_file)
+ else:
+ raise RuntimeError("Test failed -- took too many iterations")
- start = 3
- for ctr in range(start, num_calls):
- # second run counter incremented by 4 for each one first run is because
- # +2 for performing initial checks on good and bad profs each time
- # +1 for PROBLEM_STATUS run which causes error and restart
- compare_runs(scripts_tmp_dir, ctr, 6 + (ctr - start) * 4)
+ for initial_ctr in range(3): # initial runs unaffected by interruption
+ compare_runs(scripts_tmp_dir, initial_ctr, initial_ctr)
- def run_check(self,
- good_prof,
- bad_prof,
- expected,
- state_file=None,
- no_resume=True,
- out_file=None,
- extern_decider=None,
- seed=None):
+ start = 3
+ for ctr in range(start, num_calls):
+ # second run counter incremented by 4 for each one first run is because
+ # +2 for performing initial checks on good and bad profs each time
+ # +1 for PROBLEM_STATUS run which causes error and restart
+ compare_runs(scripts_tmp_dir, ctr, 6 + (ctr - start) * 4)
- temp_dir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
+ def run_check(
+ self,
+ good_prof,
+ bad_prof,
+ expected,
+ state_file=None,
+ no_resume=True,
+ out_file=None,
+ extern_decider=None,
+ seed=None,
+ ):
- good_prof_file = '%s/%s' % (temp_dir, 'good_prof.txt')
- bad_prof_file = '%s/%s' % (temp_dir, 'bad_prof.txt')
- good_prof_text = analysis.json_to_text(good_prof)
- bad_prof_text = analysis.json_to_text(bad_prof)
- with open(good_prof_file, 'w') as f:
- f.write(good_prof_text)
- with open(bad_prof_file, 'w') as f:
- f.write(bad_prof_text)
+ temp_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
- dir_path = os.path.dirname(os.path.realpath(__file__)) # dir of this file
- external_script = '%s/%s' % (dir_path, extern_decider or 'e2e_external.sh')
+ good_prof_file = "%s/%s" % (temp_dir, "good_prof.txt")
+ bad_prof_file = "%s/%s" % (temp_dir, "bad_prof.txt")
+ good_prof_text = analysis.json_to_text(good_prof)
+ bad_prof_text = analysis.json_to_text(bad_prof)
+ with open(good_prof_file, "w") as f:
+ f.write(good_prof_text)
+ with open(bad_prof_file, "w") as f:
+ f.write(bad_prof_text)
- # FIXME: This test ideally shouldn't be writing to $PWD
- if state_file is None:
- state_file = '%s/afdo_analysis_state.json' % os.getcwd()
+ dir_path = os.path.dirname(
+ os.path.realpath(__file__)
+ ) # dir of this file
+ external_script = "%s/%s" % (
+ dir_path,
+ extern_decider or "e2e_external.sh",
+ )
- def rm_state():
- try:
- os.unlink(state_file)
- except OSError:
- # Probably because the file DNE. That's fine.
- pass
+ # FIXME: This test ideally shouldn't be writing to $PWD
+ if state_file is None:
+ state_file = "%s/afdo_analysis_state.json" % os.getcwd()
- self.addCleanup(rm_state)
+ def rm_state():
+ try:
+ os.unlink(state_file)
+ except OSError:
+ # Probably because the file DNE. That's fine.
+ pass
- actual = analysis.main(
- ObjectWithFields(
- good_prof=good_prof_file,
- bad_prof=bad_prof_file,
- external_decider=external_script,
- analysis_output_file=out_file or '/dev/null',
- state_file=state_file,
- no_resume=no_resume,
- remove_state_on_completion=False,
- seed=seed,
- ))
- actual_seed = actual.pop('seed') # nothing to check
- self.assertEqual(actual, expected)
- return actual_seed
+ self.addCleanup(rm_state)
+
+ actual = analysis.main(
+ ObjectWithFields(
+ good_prof=good_prof_file,
+ bad_prof=bad_prof_file,
+ external_decider=external_script,
+ analysis_output_file=out_file or "/dev/null",
+ state_file=state_file,
+ no_resume=no_resume,
+ remove_state_on_completion=False,
+ seed=seed,
+ )
+ )
+ actual_seed = actual.pop("seed") # nothing to check
+ self.assertEqual(actual, expected)
+ return actual_seed
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/afdo_tools/bisection/afdo_prof_analysis_test.py b/afdo_tools/bisection/afdo_prof_analysis_test.py
index 245edc3..babfc02 100755
--- a/afdo_tools/bisection/afdo_prof_analysis_test.py
+++ b/afdo_tools/bisection/afdo_prof_analysis_test.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for afdo_prof_analysis."""
-from __future__ import print_function
import random
import io
@@ -16,139 +15,154 @@
class AfdoProfAnalysisTest(unittest.TestCase):
- """Class for testing AFDO Profile Analysis"""
- bad_items = {'func_a': '1', 'func_b': '3', 'func_c': '5'}
- good_items = {'func_a': '2', 'func_b': '4', 'func_d': '5'}
- random.seed(13) # 13 is an arbitrary choice. just for consistency
- # add some extra info to make tests more reflective of real scenario
- for num in range(128):
- func_name = 'func_extra_%d' % num
- # 1/3 to both, 1/3 only to good, 1/3 only to bad
- rand_val = random.randint(1, 101)
- if rand_val < 67:
- bad_items[func_name] = 'test_data'
- if rand_val < 34 or rand_val >= 67:
- good_items[func_name] = 'test_data'
+ """Class for testing AFDO Profile Analysis"""
- analysis.random.seed(5) # 5 is an arbitrary choice. For consistent testing
+ bad_items = {"func_a": "1", "func_b": "3", "func_c": "5"}
+ good_items = {"func_a": "2", "func_b": "4", "func_d": "5"}
+ random.seed(13) # 13 is an arbitrary choice. just for consistency
+ # add some extra info to make tests more reflective of real scenario
+ for num in range(128):
+ func_name = "func_extra_%d" % num
+ # 1/3 to both, 1/3 only to good, 1/3 only to bad
+ rand_val = random.randint(1, 101)
+ if rand_val < 67:
+ bad_items[func_name] = "test_data"
+ if rand_val < 34 or rand_val >= 67:
+ good_items[func_name] = "test_data"
- def test_text_to_json(self):
- test_data = io.StringIO('deflate_slow:87460059:3\n'
- ' 3: 24\n'
- ' 14: 54767\n'
- ' 15: 664 fill_window:22\n'
- ' 16: 661\n'
- ' 19: 637\n'
- ' 41: 36692 longest_match:36863\n'
- ' 44: 36692\n'
- ' 44.2: 5861\n'
- ' 46: 13942\n'
- ' 46.1: 14003\n')
- expected = {
- 'deflate_slow': ':87460059:3\n'
- ' 3: 24\n'
- ' 14: 54767\n'
- ' 15: 664 fill_window:22\n'
- ' 16: 661\n'
- ' 19: 637\n'
- ' 41: 36692 longest_match:36863\n'
- ' 44: 36692\n'
- ' 44.2: 5861\n'
- ' 46: 13942\n'
- ' 46.1: 14003\n'
- }
- actual = analysis.text_to_json(test_data)
- self.assertEqual(actual, expected)
- test_data.close()
+ analysis.random.seed(5) # 5 is an arbitrary choice. For consistent testing
- def test_text_to_json_empty_afdo(self):
- expected = {}
- actual = analysis.text_to_json('')
- self.assertEqual(actual, expected)
+ def test_text_to_json(self):
+ test_data = io.StringIO(
+ "deflate_slow:87460059:3\n"
+ " 3: 24\n"
+ " 14: 54767\n"
+ " 15: 664 fill_window:22\n"
+ " 16: 661\n"
+ " 19: 637\n"
+ " 41: 36692 longest_match:36863\n"
+ " 44: 36692\n"
+ " 44.2: 5861\n"
+ " 46: 13942\n"
+ " 46.1: 14003\n"
+ )
+ expected = {
+ "deflate_slow": ":87460059:3\n"
+ " 3: 24\n"
+ " 14: 54767\n"
+ " 15: 664 fill_window:22\n"
+ " 16: 661\n"
+ " 19: 637\n"
+ " 41: 36692 longest_match:36863\n"
+ " 44: 36692\n"
+ " 44.2: 5861\n"
+ " 46: 13942\n"
+ " 46.1: 14003\n"
+ }
+ actual = analysis.text_to_json(test_data)
+ self.assertEqual(actual, expected)
+ test_data.close()
- def test_json_to_text(self):
- example_prof = {'func_a': ':1\ndata\n', 'func_b': ':2\nmore data\n'}
- expected_text = 'func_a:1\ndata\nfunc_b:2\nmore data\n'
- self.assertEqual(analysis.json_to_text(example_prof), expected_text)
+ def test_text_to_json_empty_afdo(self):
+ expected = {}
+ actual = analysis.text_to_json("")
+ self.assertEqual(actual, expected)
- def test_bisect_profiles(self):
+ def test_json_to_text(self):
+ example_prof = {"func_a": ":1\ndata\n", "func_b": ":2\nmore data\n"}
+ expected_text = "func_a:1\ndata\nfunc_b:2\nmore data\n"
+ self.assertEqual(analysis.json_to_text(example_prof), expected_text)
- # mock run of external script with arbitrarily-chosen bad profile vals
- # save_run specified and unused b/c afdo_prof_analysis.py
- # will call with argument explicitly specified
- # pylint: disable=unused-argument
- class DeciderClass(object):
- """Class for this tests's decider."""
+ def test_bisect_profiles(self):
- def run(self, prof, save_run=False):
- if '1' in prof['func_a'] or '3' in prof['func_b']:
- return analysis.StatusEnum.BAD_STATUS
- return analysis.StatusEnum.GOOD_STATUS
+ # mock run of external script with arbitrarily-chosen bad profile vals
+ # save_run specified and unused b/c afdo_prof_analysis.py
+ # will call with argument explicitly specified
+ # pylint: disable=unused-argument
+ class DeciderClass(object):
+ """Class for this tests's decider."""
- results = analysis.bisect_profiles_wrapper(DeciderClass(), self.good_items,
- self.bad_items)
- self.assertEqual(results['individuals'], sorted(['func_a', 'func_b']))
- self.assertEqual(results['ranges'], [])
+ def run(self, prof, save_run=False):
+ if "1" in prof["func_a"] or "3" in prof["func_b"]:
+ return analysis.StatusEnum.BAD_STATUS
+ return analysis.StatusEnum.GOOD_STATUS
- def test_range_search(self):
+ results = analysis.bisect_profiles_wrapper(
+ DeciderClass(), self.good_items, self.bad_items
+ )
+ self.assertEqual(results["individuals"], sorted(["func_a", "func_b"]))
+ self.assertEqual(results["ranges"], [])
- # arbitrarily chosen functions whose values in the bad profile constitute
- # a problematic pair
- # pylint: disable=unused-argument
- class DeciderClass(object):
- """Class for this tests's decider."""
+ def test_range_search(self):
- def run(self, prof, save_run=False):
- if '1' in prof['func_a'] and '3' in prof['func_b']:
- return analysis.StatusEnum.BAD_STATUS
- return analysis.StatusEnum.GOOD_STATUS
+ # arbitrarily chosen functions whose values in the bad profile constitute
+ # a problematic pair
+ # pylint: disable=unused-argument
+ class DeciderClass(object):
+ """Class for this tests's decider."""
- # put the problematic combination in separate halves of the common funcs
- # so that non-bisecting search is invoked for its actual use case
- common_funcs = [func for func in self.good_items if func in self.bad_items]
- common_funcs.remove('func_a')
- common_funcs.insert(0, 'func_a')
- common_funcs.remove('func_b')
- common_funcs.append('func_b')
+ def run(self, prof, save_run=False):
+ if "1" in prof["func_a"] and "3" in prof["func_b"]:
+ return analysis.StatusEnum.BAD_STATUS
+ return analysis.StatusEnum.GOOD_STATUS
- problem_range = analysis.range_search(DeciderClass(), self.good_items,
- self.bad_items, common_funcs, 0,
- len(common_funcs))
+ # put the problematic combination in separate halves of the common funcs
+ # so that non-bisecting search is invoked for its actual use case
+ common_funcs = [
+ func for func in self.good_items if func in self.bad_items
+ ]
+ common_funcs.remove("func_a")
+ common_funcs.insert(0, "func_a")
+ common_funcs.remove("func_b")
+ common_funcs.append("func_b")
- self.assertEqual(['func_a', 'func_b'], problem_range)
+ problem_range = analysis.range_search(
+ DeciderClass(),
+ self.good_items,
+ self.bad_items,
+ common_funcs,
+ 0,
+ len(common_funcs),
+ )
- def test_check_good_not_bad(self):
- func_in_good = 'func_c'
+ self.assertEqual(["func_a", "func_b"], problem_range)
- # pylint: disable=unused-argument
- class DeciderClass(object):
- """Class for this tests's decider."""
+ def test_check_good_not_bad(self):
+ func_in_good = "func_c"
- def run(self, prof, save_run=False):
- if func_in_good in prof:
- return analysis.StatusEnum.GOOD_STATUS
- return analysis.StatusEnum.BAD_STATUS
+ # pylint: disable=unused-argument
+ class DeciderClass(object):
+ """Class for this tests's decider."""
- self.assertTrue(
- analysis.check_good_not_bad(DeciderClass(), self.good_items,
- self.bad_items))
+ def run(self, prof, save_run=False):
+ if func_in_good in prof:
+ return analysis.StatusEnum.GOOD_STATUS
+ return analysis.StatusEnum.BAD_STATUS
- def test_check_bad_not_good(self):
- func_in_bad = 'func_d'
+ self.assertTrue(
+ analysis.check_good_not_bad(
+ DeciderClass(), self.good_items, self.bad_items
+ )
+ )
- # pylint: disable=unused-argument
- class DeciderClass(object):
- """Class for this tests's decider."""
+ def test_check_bad_not_good(self):
+ func_in_bad = "func_d"
- def run(self, prof, save_run=False):
- if func_in_bad in prof:
- return analysis.StatusEnum.BAD_STATUS
- return analysis.StatusEnum.GOOD_STATUS
+ # pylint: disable=unused-argument
+ class DeciderClass(object):
+ """Class for this tests's decider."""
- self.assertTrue(
- analysis.check_bad_not_good(DeciderClass(), self.good_items,
- self.bad_items))
+ def run(self, prof, save_run=False):
+ if func_in_bad in prof:
+ return analysis.StatusEnum.BAD_STATUS
+ return analysis.StatusEnum.GOOD_STATUS
+
+ self.assertTrue(
+ analysis.check_bad_not_good(
+ DeciderClass(), self.good_items, self.bad_items
+ )
+ )
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/afdo_tools/bisection/state_assumption_external.sh b/afdo_tools/bisection/state_assumption_external.sh
index 1ad78ee..a2076b0 100755
--- a/afdo_tools/bisection/state_assumption_external.sh
+++ b/afdo_tools/bisection/state_assumption_external.sh
@@ -1,5 +1,5 @@
#!/bin/bash -eu
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/afdo_tools/bisection/state_assumption_interrupt.sh b/afdo_tools/bisection/state_assumption_interrupt.sh
index eba3a4b..d1599d0 100755
--- a/afdo_tools/bisection/state_assumption_interrupt.sh
+++ b/afdo_tools/bisection/state_assumption_interrupt.sh
@@ -1,5 +1,5 @@
#!/bin/bash -eu
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/afdo_tools/generate_afdo_from_tryjob.py b/afdo_tools/generate_afdo_from_tryjob.py
index 3ed578e..e398f8a 100755
--- a/afdo_tools/generate_afdo_from_tryjob.py
+++ b/afdo_tools/generate_afdo_from_tryjob.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Given a tryjob and perf profile, generates an AFDO profile."""
-from __future__ import print_function
import argparse
import distutils.spawn
@@ -17,149 +16,162 @@
import sys
import tempfile
-_CREATE_LLVM_PROF = 'create_llvm_prof'
-_GS_PREFIX = 'gs://'
+
+_CREATE_LLVM_PROF = "create_llvm_prof"
+_GS_PREFIX = "gs://"
def _fetch_gs_artifact(remote_name, local_name):
- assert remote_name.startswith(_GS_PREFIX)
- subprocess.check_call(['gsutil', 'cp', remote_name, local_name])
+ assert remote_name.startswith(_GS_PREFIX)
+ subprocess.check_call(["gsutil", "cp", remote_name, local_name])
def _fetch_and_maybe_unpack(remote_name, local_name):
- unpackers = [
- ('.tar.bz2', ['tar', 'xaf']),
- ('.bz2', ['bunzip2']),
- ('.tar.xz', ['tar', 'xaf']),
- ('.xz', ['xz', '-d']),
- ]
+ unpackers = [
+ (".tar.bz2", ["tar", "xaf"]),
+ (".bz2", ["bunzip2"]),
+ (".tar.xz", ["tar", "xaf"]),
+ (".xz", ["xz", "-d"]),
+ ]
- unpack_ext = None
- unpack_cmd = None
- for ext, unpack in unpackers:
- if remote_name.endswith(ext):
- unpack_ext, unpack_cmd = ext, unpack
- break
+ unpack_ext = None
+ unpack_cmd = None
+ for ext, unpack in unpackers:
+ if remote_name.endswith(ext):
+ unpack_ext, unpack_cmd = ext, unpack
+ break
- download_to = local_name + unpack_ext if unpack_ext else local_name
- _fetch_gs_artifact(remote_name, download_to)
- if unpack_cmd is not None:
- print('Unpacking', download_to)
- subprocess.check_output(unpack_cmd + [download_to])
- assert os.path.exists(local_name)
+ download_to = local_name + unpack_ext if unpack_ext else local_name
+ _fetch_gs_artifact(remote_name, download_to)
+ if unpack_cmd is not None:
+ print("Unpacking", download_to)
+ subprocess.check_output(unpack_cmd + [download_to])
+ assert os.path.exists(local_name)
def _generate_afdo(perf_profile_loc, tryjob_loc, output_name):
- if perf_profile_loc.startswith(_GS_PREFIX):
- local_loc = 'perf.data'
- _fetch_and_maybe_unpack(perf_profile_loc, local_loc)
- perf_profile_loc = local_loc
+ if perf_profile_loc.startswith(_GS_PREFIX):
+ local_loc = "perf.data"
+ _fetch_and_maybe_unpack(perf_profile_loc, local_loc)
+ perf_profile_loc = local_loc
- chrome_in_debug_loc = 'debug/opt/google/chrome/chrome.debug'
- debug_out = 'debug.tgz'
- _fetch_gs_artifact(os.path.join(tryjob_loc, 'debug.tgz'), debug_out)
+ chrome_in_debug_loc = "debug/opt/google/chrome/chrome.debug"
+ debug_out = "debug.tgz"
+ _fetch_gs_artifact(os.path.join(tryjob_loc, "debug.tgz"), debug_out)
- print('Extracting chrome.debug.')
- # This has tons of artifacts, and we only want Chrome; don't waste time
- # extracting the rest in _fetch_and_maybe_unpack.
- subprocess.check_call(['tar', 'xaf', 'debug.tgz', chrome_in_debug_loc])
+ print("Extracting chrome.debug.")
+ # This has tons of artifacts, and we only want Chrome; don't waste time
+ # extracting the rest in _fetch_and_maybe_unpack.
+ subprocess.check_call(["tar", "xaf", "debug.tgz", chrome_in_debug_loc])
- # Note that the AFDO tool *requires* a binary named `chrome` to be present if
- # we're generating a profile for chrome. It's OK for this to be split debug
- # information.
- os.rename(chrome_in_debug_loc, 'chrome')
+ # Note that the AFDO tool *requires* a binary named `chrome` to be present if
+ # we're generating a profile for chrome. It's OK for this to be split debug
+ # information.
+ os.rename(chrome_in_debug_loc, "chrome")
- print('Generating AFDO profile.')
- subprocess.check_call([
- _CREATE_LLVM_PROF, '--out=' + output_name, '--binary=chrome',
- '--profile=' + perf_profile_loc
- ])
+ print("Generating AFDO profile.")
+ subprocess.check_call(
+ [
+ _CREATE_LLVM_PROF,
+ "--out=" + output_name,
+ "--binary=chrome",
+ "--profile=" + perf_profile_loc,
+ ]
+ )
def _abspath_or_gs_link(path):
- if path.startswith(_GS_PREFIX):
- return path
- return os.path.abspath(path)
+ if path.startswith(_GS_PREFIX):
+ return path
+ return os.path.abspath(path)
def _tryjob_arg(tryjob_arg):
- # Forward gs args through
- if tryjob_arg.startswith(_GS_PREFIX):
- return tryjob_arg
+ # Forward gs args through
+ if tryjob_arg.startswith(_GS_PREFIX):
+ return tryjob_arg
- # Clicking on the 'Artifacts' link gives us a pantheon link that's basically
- # a preamble and gs path.
- pantheon = 'https://pantheon.corp.google.com/storage/browser/'
- if tryjob_arg.startswith(pantheon):
- return _GS_PREFIX + tryjob_arg[len(pantheon):]
+ # Clicking on the 'Artifacts' link gives us a pantheon link that's basically
+ # a preamble and gs path.
+ pantheon = "https://pantheon.corp.google.com/storage/browser/"
+ if tryjob_arg.startswith(pantheon):
+ return _GS_PREFIX + tryjob_arg[len(pantheon) :]
- # Otherwise, only do things with a tryjob ID (e.g. R75-11965.0.0-b3648595)
- if not tryjob_arg.startswith('R'):
- raise ValueError('Unparseable tryjob arg; give a tryjob ID, pantheon '
- 'link, or gs:// link. Please see source for more.')
+ # Otherwise, only do things with a tryjob ID (e.g. R75-11965.0.0-b3648595)
+ if not tryjob_arg.startswith("R"):
+ raise ValueError(
+ "Unparseable tryjob arg; give a tryjob ID, pantheon "
+ "link, or gs:// link. Please see source for more."
+ )
- chell_path = 'chromeos-image-archive/chell-chrome-pfq-tryjob/'
- # ...And assume it's from chell, since that's the only thing we generate
- # profiles with today.
- return _GS_PREFIX + chell_path + tryjob_arg
+ chell_path = "chromeos-image-archive/chell-chrome-pfq-tryjob/"
+ # ...And assume it's from chell, since that's the only thing we generate
+ # profiles with today.
+ return _GS_PREFIX + chell_path + tryjob_arg
def main():
- parser = argparse.ArgumentParser(description=__doc__)
- parser.add_argument(
- '--perf_profile',
- required=True,
- help='Path to our perf profile. Accepts either a gs:// path or local '
- 'filepath.')
- parser.add_argument(
- '--tryjob',
- required=True,
- type=_tryjob_arg,
- help="Path to our tryjob's artifacts. Accepts a gs:// path, pantheon "
- 'link, or tryjob ID, e.g. R75-11965.0.0-b3648595. In the last case, '
- 'the assumption is that you ran a chell-chrome-pfq-tryjob.')
- parser.add_argument(
- '-o',
- '--output',
- default='afdo.prof',
- help='Where to put the AFDO profile. Default is afdo.prof.')
- parser.add_argument(
- '-k',
- '--keep_artifacts_on_failure',
- action='store_true',
- help="Don't remove the tempdir on failure")
- args = parser.parse_args()
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument(
+ "--perf_profile",
+ required=True,
+ help="Path to our perf profile. Accepts either a gs:// path or local "
+ "filepath.",
+ )
+ parser.add_argument(
+ "--tryjob",
+ required=True,
+ type=_tryjob_arg,
+ help="Path to our tryjob's artifacts. Accepts a gs:// path, pantheon "
+ "link, or tryjob ID, e.g. R75-11965.0.0-b3648595. In the last case, "
+ "the assumption is that you ran a chell-chrome-pfq-tryjob.",
+ )
+ parser.add_argument(
+ "-o",
+ "--output",
+ default="afdo.prof",
+ help="Where to put the AFDO profile. Default is afdo.prof.",
+ )
+ parser.add_argument(
+ "-k",
+ "--keep_artifacts_on_failure",
+ action="store_true",
+ help="Don't remove the tempdir on failure",
+ )
+ args = parser.parse_args()
- if not distutils.spawn.find_executable(_CREATE_LLVM_PROF):
- sys.exit(_CREATE_LLVM_PROF + ' not found; are you in the chroot?')
+ if not distutils.spawn.find_executable(_CREATE_LLVM_PROF):
+ sys.exit(_CREATE_LLVM_PROF + " not found; are you in the chroot?")
- profile = _abspath_or_gs_link(args.perf_profile)
- afdo_output = os.path.abspath(args.output)
+ profile = _abspath_or_gs_link(args.perf_profile)
+ afdo_output = os.path.abspath(args.output)
- initial_dir = os.getcwd()
- temp_dir = tempfile.mkdtemp(prefix='generate_afdo')
- success = True
- try:
- os.chdir(temp_dir)
- _generate_afdo(profile, args.tryjob, afdo_output)
+ initial_dir = os.getcwd()
+ temp_dir = tempfile.mkdtemp(prefix="generate_afdo")
+ success = True
+ try:
+ os.chdir(temp_dir)
+ _generate_afdo(profile, args.tryjob, afdo_output)
- # The AFDO tooling is happy to generate essentially empty profiles for us.
- # Chrome's profiles are often 8+ MB; if we only see a small fraction of
- # that, something's off. 512KB was arbitrarily selected.
- if os.path.getsize(afdo_output) < 512 * 1024:
- raise ValueError('The AFDO profile is suspiciously small for Chrome. '
- 'Something might have gone wrong.')
- except:
- success = False
- raise
- finally:
- os.chdir(initial_dir)
+ # The AFDO tooling is happy to generate essentially empty profiles for us.
+ # Chrome's profiles are often 8+ MB; if we only see a small fraction of
+ # that, something's off. 512KB was arbitrarily selected.
+ if os.path.getsize(afdo_output) < 512 * 1024:
+ raise ValueError(
+ "The AFDO profile is suspiciously small for Chrome. "
+ "Something might have gone wrong."
+ )
+ except:
+ success = False
+ raise
+ finally:
+ os.chdir(initial_dir)
- if success or not args.keep_artifacts_on_failure:
- shutil.rmtree(temp_dir, ignore_errors=True)
- else:
- print('Artifacts are available at', temp_dir)
+ if success or not args.keep_artifacts_on_failure:
+ shutil.rmtree(temp_dir, ignore_errors=True)
+ else:
+ print("Artifacts are available at", temp_dir)
-if __name__ == '__main__':
- sys.exit(main())
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/afdo_tools/run_afdo_tryjob.py b/afdo_tools/run_afdo_tryjob.py
index e14cd91..013e10c 100755
--- a/afdo_tools/run_afdo_tryjob.py
+++ b/afdo_tools/run_afdo_tryjob.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -49,7 +49,6 @@
since it's safer.
"""
-from __future__ import print_function
import argparse
import collections
@@ -60,112 +59,124 @@
def main():
- parser = argparse.ArgumentParser(
- description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
- parser.add_argument(
- '--force_no_patches',
- action='store_true',
- help='Run even if no patches are provided')
- parser.add_argument(
- '--tag_profiles_with_current_time',
- action='store_true',
- help='Perf profile names will have the current time added to them.')
- parser.add_argument(
- '--use_afdo_generation_stage',
- action='store_true',
- help='Perf profiles will be automatically converted to AFDO profiles.')
- parser.add_argument(
- '-g',
- '--patch',
- action='append',
- default=[],
- help='A patch to add to the AFDO run')
- parser.add_argument(
- '-n',
- '--dry_run',
- action='store_true',
- help='Just print the command that would be run')
- args = parser.parse_args()
+ parser = argparse.ArgumentParser(
+ description=__doc__,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
+ parser.add_argument(
+ "--force_no_patches",
+ action="store_true",
+ help="Run even if no patches are provided",
+ )
+ parser.add_argument(
+ "--tag_profiles_with_current_time",
+ action="store_true",
+ help="Perf profile names will have the current time added to them.",
+ )
+ parser.add_argument(
+ "--use_afdo_generation_stage",
+ action="store_true",
+ help="Perf profiles will be automatically converted to AFDO profiles.",
+ )
+ parser.add_argument(
+ "-g",
+ "--patch",
+ action="append",
+ default=[],
+ help="A patch to add to the AFDO run",
+ )
+ parser.add_argument(
+ "-n",
+ "--dry_run",
+ action="store_true",
+ help="Just print the command that would be run",
+ )
+ args = parser.parse_args()
- dry_run = args.dry_run
- force_no_patches = args.force_no_patches
- tag_profiles_with_current_time = args.tag_profiles_with_current_time
- use_afdo_generation_stage = args.use_afdo_generation_stage
- user_patches = args.patch
+ dry_run = args.dry_run
+ force_no_patches = args.force_no_patches
+ tag_profiles_with_current_time = args.tag_profiles_with_current_time
+ use_afdo_generation_stage = args.use_afdo_generation_stage
+ user_patches = args.patch
- if tag_profiles_with_current_time and use_afdo_generation_stage:
- raise ValueError("You can't tag profiles with the time + have "
- 'afdo-generate')
+ if tag_profiles_with_current_time and use_afdo_generation_stage:
+ raise ValueError(
+ "You can't tag profiles with the time + have " "afdo-generate"
+ )
- if not tag_profiles_with_current_time and not use_afdo_generation_stage:
- print('Neither current_time nor afdo_generate asked for. Assuming you '
- 'prefer current time tagging.')
- print('You have 5 seconds to cancel and try again.')
- print()
+ if not tag_profiles_with_current_time and not use_afdo_generation_stage:
+ print(
+ "Neither current_time nor afdo_generate asked for. Assuming you "
+ "prefer current time tagging."
+ )
+ print("You have 5 seconds to cancel and try again.")
+ print()
+ if not dry_run:
+ time.sleep(5)
+ tag_profiles_with_current_time = True
+
+ patches = [
+ # Send profiles to localmirror instead of chromeos-prebuilt. This should
+ # always be done, since sending profiles into production is bad. :)
+ # https://chromium-review.googlesource.com/c/chromiumos/third_party/autotest/+/1436158
+ 1436158,
+ # Force profile generation. Otherwise, we'll decide to not spawn off the
+ # perf hwtests.
+ # https://chromium-review.googlesource.com/c/chromiumos/chromite/+/1313291
+ 1313291,
+ ]
+
+ if tag_profiles_with_current_time:
+ # Tags the profiles with the current time of day. As detailed in the
+ # docstring, this is desirable unless you're sure that this is the only
+ # experimental profile that will be generated today.
+ # https://chromium-review.googlesource.com/c/chromiumos/third_party/autotest/+/1436157
+ patches.append(1436157)
+
+ if use_afdo_generation_stage:
+ # Make the profile generation stage look in localmirror, instead of having
+ # it look in chromeos-prebuilt. Without this, we'll never upload
+ # chrome.debug or try to generate an AFDO profile.
+ # https://chromium-review.googlesource.com/c/chromiumos/chromite/+/1436583
+ patches.append(1436583)
+
+ if not user_patches and not force_no_patches:
+ raise ValueError(
+ "No patches given; pass --force_no_patches to force a " "tryjob"
+ )
+
+ for patch in user_patches:
+ # We accept two formats. Either a URL that ends with a number, or a number.
+ if patch.startswith("http"):
+ patch = patch.split("/")[-1]
+ patches.append(int(patch))
+
+ count = collections.Counter(patches)
+ too_many = [k for k, v in count.items() if v > 1]
+ if too_many:
+ too_many.sort()
+ raise ValueError(
+ "Patch(es) asked for application more than once: %s" % too_many
+ )
+
+ args = [
+ "cros",
+ "tryjob",
+ ]
+
+ for patch in patches:
+ args += ["-g", str(patch)]
+
+ args += [
+ "--nochromesdk",
+ "--hwtest",
+ "chell-chrome-pfq-tryjob",
+ ]
+
+ print(" ".join(pipes.quote(a) for a in args))
if not dry_run:
- time.sleep(5)
- tag_profiles_with_current_time = True
-
- patches = [
- # Send profiles to localmirror instead of chromeos-prebuilt. This should
- # always be done, since sending profiles into production is bad. :)
- # https://chromium-review.googlesource.com/c/chromiumos/third_party/autotest/+/1436158
- 1436158,
- # Force profile generation. Otherwise, we'll decide to not spawn off the
- # perf hwtests.
- # https://chromium-review.googlesource.com/c/chromiumos/chromite/+/1313291
- 1313291,
- ]
-
- if tag_profiles_with_current_time:
- # Tags the profiles with the current time of day. As detailed in the
- # docstring, this is desirable unless you're sure that this is the only
- # experimental profile that will be generated today.
- # https://chromium-review.googlesource.com/c/chromiumos/third_party/autotest/+/1436157
- patches.append(1436157)
-
- if use_afdo_generation_stage:
- # Make the profile generation stage look in localmirror, instead of having
- # it look in chromeos-prebuilt. Without this, we'll never upload
- # chrome.debug or try to generate an AFDO profile.
- # https://chromium-review.googlesource.com/c/chromiumos/chromite/+/1436583
- patches.append(1436583)
-
- if not user_patches and not force_no_patches:
- raise ValueError('No patches given; pass --force_no_patches to force a '
- 'tryjob')
-
- for patch in user_patches:
- # We accept two formats. Either a URL that ends with a number, or a number.
- if patch.startswith('http'):
- patch = patch.split('/')[-1]
- patches.append(int(patch))
-
- count = collections.Counter(patches)
- too_many = [k for k, v in count.items() if v > 1]
- if too_many:
- too_many.sort()
- raise ValueError(
- 'Patch(es) asked for application more than once: %s' % too_many)
-
- args = [
- 'cros',
- 'tryjob',
- ]
-
- for patch in patches:
- args += ['-g', str(patch)]
-
- args += [
- '--nochromesdk',
- '--hwtest',
- 'chell-chrome-pfq-tryjob',
- ]
-
- print(' '.join(pipes.quote(a) for a in args))
- if not dry_run:
- sys.exit(subprocess.call(args))
+ sys.exit(subprocess.call(args))
-if __name__ == '__main__':
- main()
+if __name__ == "__main__":
+ main()
diff --git a/afdo_tools/update_kernel_afdo b/afdo_tools/update_kernel_afdo
index ff0ab22..9e4d645 100755
--- a/afdo_tools/update_kernel_afdo
+++ b/afdo_tools/update_kernel_afdo
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -9,37 +9,74 @@
#
USAGE="
-Usage: $(basename $0) [main|beta|stable|all] [--help]
+Usage: $(basename "$0") [--noupload|-upload] [main|beta|stable|all] [--help]
Description:
The script takes one optional argument which is the channel where we want
to update the kernel afdo and creates a commit (or commits with \"all\"
channels) in the corresponding branch.
No arguments defaults to \"all\".
- Follow the prompt to submit the changes.
+ Follow the prompt to upload the changes.
NO CLEAN-UP NEEDED. The script ignores any local changes and keeps
the current branch unchanged.
+
+ Args:
+ --help Show this help.
+ --upload Upload CLs when the update succeeded (default).
+ --noupload Do not upload CLs. Instead, print the upload commands.
+ main|beta|stable Update metadata only on the specified channel.
"
set -eu
set -o pipefail
-GS_BASE=gs://chromeos-prebuilt/afdo-job/vetted/kernel
-KVERS="4.4 4.14 4.19 5.4"
+AMD_GS_BASE=gs://chromeos-prebuilt/afdo-job/vetted/kernel
+ARM_GS_BASE=gs://chromeos-prebuilt/afdo-job/vetted/kernel/arm
+AMD_KVERS="4.14 4.19 5.4 5.10"
+ARM_KVERS="5.15"
failed_channels=""
# Add skipped chrome branches in ascending order here.
SKIPPED_BRANCHES="95"
+# NOTE: We enable/disable kernel AFDO starting from a particular branch.
+# For example if we want to enable kernel AFDO in 5.15, first, we do it
+# in main. In this case we want to disable it in beta and stable branches.
+# The second scenario is when we want to disable kernel AFDO (when all devices
+# move to kernelnext and there are no new profiles from the field). In this
+# case we disable AFDO in main but still keep it live in beta and stable.
+declare -A SKIPPED_KVERS_IN_BRANCHES
+# In SKIPPED_KVERS_IN_BRANCHES
+# - key is a branch number string;
+# - value is the list of kernels separated by space.
+# Example: SKIPPED_KVERS_IN_BRANCHES["105"]="4.4 4.14"
+
+# b/223115767. In M-100 there are no new profiles in 5.10. And AFDO is not
+# enabled on any 5.10 board in M-100 either.
+SKIPPED_KVERS_IN_BRANCHES["100"]="5.10"
+
script_dir=$(dirname "$0")
tc_utils_dir="${script_dir}/.."
metadata_dir="${tc_utils_dir}/afdo_metadata"
-outfile="$(realpath --relative-to="${tc_utils_dir}" \
+amd_outfile="$(realpath --relative-to="${tc_utils_dir}" \
"${metadata_dir}"/kernel_afdo.json)"
+arm_outfile="$(realpath --relative-to="${tc_utils_dir}" \
+ "${metadata_dir}"/kernel_arm_afdo.json)"
# Convert toolchain_utils into the absolute path.
-abs_tc_utils_dir="$(realpath ${tc_utils_dir})"
+abs_tc_utils_dir="$(realpath "${tc_utils_dir}")"
# Check profiles uploaded within the last week.
expected_time=$(date +%s -d "week ago")
+# Upload CLs on success.
+upload_cl=true
+
+ARCHS="amd arm"
+declare -A arch_gsbase arch_kvers arch_outfile
+arch_gsbase["amd"]="${AMD_GS_BASE}"
+arch_gsbase["arm"]="${ARM_GS_BASE}"
+arch_kvers["amd"]="${AMD_KVERS}"
+arch_kvers["arm"]="${ARM_KVERS}"
+arch_outfile["amd"]="${amd_outfile}"
+arch_outfile["arm"]="${arm_outfile}"
declare -A branch branch_number commit
remote_repo=$(git -C "${tc_utils_dir}" remote)
@@ -47,10 +84,9 @@
# Read the last two release-Rxx from remote branches
# and assign them to stable_ref and beta_ref.
# sort -V is the version sort which puts R100 after R99.
-last_branches=$(git -C "${tc_utils_dir}" ls-remote -h "${remote_repo}" \
- release-R\* | cut -f2 | sort -V | tail -n 2)
# We need `echo` to convert newlines into spaces for read.
-read stable_ref beta_ref <<< $(echo ${last_branches})
+read -r stable_ref beta_ref <<< "$(git -C "${tc_utils_dir}" ls-remote -h \
+ "${remote_repo}" release-R\* | cut -f2 | sort -V | tail -n 2 | paste -s)"
# Branch names which start from release-R.
branch["beta"]=${beta_ref##*/}
branch["stable"]=${stable_ref##*/}
@@ -62,33 +98,53 @@
branch_number["beta"]=$(echo "${branch["beta"]}" | \
sed -n -e "s/^release-R\([0-9][0-9]*\).*$/\1/p")
branch_number["canary"]="$((branch_number[beta] + 1))"
-for skipped_branch in $SKIPPED_BRANCHES ; do
- if [[ ${branch_number["canary"]} == $skipped_branch ]] ; then
+for skipped_branch in ${SKIPPED_BRANCHES} ; do
+ if [[ ${branch_number["canary"]} == "${skipped_branch}" ]] ; then
((branch_number[canary]++))
fi
done
# Without arguments the script updates all branches.
-channels=${1:-"all"}
-case "${channels}" in
+channels=""
+for arg in "$@"
+do
+ case "${arg}" in
stable | canary | beta )
+ channels="${channels} ${arg}"
;;
main )
- channels="canary"
+ channels="${channels} canary"
;;
all )
channels="canary beta stable"
;;
+ --noupload | --no-upload)
+ upload_cl=false
+ ;;
+ --upload)
+ upload_cl=true
+ ;;
--help | help | -h )
- echo "$USAGE"
+ echo "${USAGE}"
exit 0
;;
- * )
- echo "Channel \"${channels}\" is not supported.
-Must be main (or canary), beta, stable or all." >&2
- echo "$USAGE"
+ -*)
+ echo "Option \"${arg}\" is not supported." >&2
+ echo "${USAGE}"
exit 1
-esac
+ ;;
+ *)
+ echo "Channel \"${arg}\" is not supported.
+Must be main (or canary), beta, stable or all." >&2
+ echo "${USAGE}"
+ exit 1
+ esac
+done
+
+if [[ -z "${channels}" ]]
+then
+ channels="canary beta stable"
+fi
# Fetch latest branches.
git -C "${tc_utils_dir}" fetch "${remote_repo}"
@@ -99,11 +155,20 @@
# This way we don't need to clean-up and sync toolchain_utils before the
# change. Neither we should care about clean-up after the submit.
git -C "${tc_utils_dir}" worktree add --detach "${worktree_dir}"
-trap "git -C ${abs_tc_utils_dir} worktree remove ${worktree_dir}" EXIT
-cd "${worktree_dir}"
+trap 'git -C "${abs_tc_utils_dir}" worktree remove -f "${worktree_dir}"' EXIT
+pushd "${worktree_dir}"
for channel in ${channels}
do
+ set +u
+ if [[ -n "${commit[${channel}]}" ]]
+ then
+ echo "Skipping channel ${channel} which already has commit\
+ ${commit[${channel}]}."
+ continue
+ fi
+ set -u
+
errs=""
successes=0
curr_branch_number=${branch_number[${channel}]}
@@ -111,87 +176,124 @@
echo
echo "Checking \"${channel}\" channel..."
echo "branch_number=${curr_branch_number} branch=${curr_branch}"
- json="{"
- sep=""
- for kver in $KVERS
- do
- # Sort the gs output by timestamp (default ordering is by name, so
- # R86-13310.3-1594633089.gcov.xz goes after R86-13310.18-1595237847.gcov.xz)
- latest=$(gsutil.py ls -l "$GS_BASE/$kver/" | sort -k2 | \
- grep "R${curr_branch_number}" | tail -1 || true)
- if [[ -z "$latest" && "${channel}" != "stable" ]]
- then
- # if no profiles exist for the current branch, try the previous branch
- latest=$(gsutil.py ls -l "$GS_BASE/$kver/" | sort -k2 | \
- grep "R$((curr_branch_number - 1))" | tail -1)
- fi
- # Verify that the file has the expected date.
- file_time=$(echo "$latest" | awk '{print $2}')
- file_time_unix=$(date +%s -d "$file_time")
- if [ $file_time_unix -lt $expected_time ]
+ git reset --hard HEAD
+ git checkout "${remote_repo}/${curr_branch}"
+
+ for arch in ${ARCHS}
+ do
+ json="{"
+ sep=""
+ for kver in ${arch_kvers[${arch}]}
+ do
+ # Skip kernels disabled in this branch.
+ skipped=false
+ for skipped_branch in "${!SKIPPED_KVERS_IN_BRANCHES[@]}"
+ do
+ if [[ ${curr_branch_number} == "${skipped_branch}" ]]
+ then
+ # Current branch is in the keys of SKIPPED_KVERS_IN_BRANCHES.
+          # Now let's check if $kver is in the list.
+ for skipped_kver in ${SKIPPED_KVERS_IN_BRANCHES[${skipped_branch}]}
+ do
+ if [[ ${kver} == "${skipped_kver}" ]]
+ then
+ skipped=true
+ break
+ fi
+ done
+ fi
+ done
+ if ${skipped}
+ then
+ echo "${kver} is skipped in branch ${curr_branch_number}. Skip it."
+ continue
+ fi
+ # Sort the gs output by timestamp, default ordering is by name. So
+ # R86-13310.3-1594633089.gcov.xz goes after
+ # R86-13310.18-1595237847.gcov.xz.
+ latest=$(gsutil.py ls -l "${arch_gsbase[${arch}]}/${kver}/" | sort -k2 | \
+ grep "R${curr_branch_number}" | tail -1 || true)
+ if [[ -z "${latest}" && "${channel}" != "stable" ]]
+ then
+ # if no profiles exist for the current branch, try the previous branch
+ latest=$(gsutil.py ls -l "${arch_gsbase[${arch}]}/${kver}/" | \
+ sort -k2 | grep "R$((curr_branch_number - 1))" | tail -1)
+ fi
+
+ # Verify that the file has the expected date.
+ file_time=$(echo "${latest}" | awk '{print $2}')
+ file_time_unix=$(date +%s -d "${file_time}")
+ if [ "${file_time_unix}" -lt "${expected_time}" ]
+ then
+ expected=$(env TZ=UTC date +%Y-%m-%dT%H:%M:%SZ -d @"${expected_time}")
+ echo "Wrong date for ${kver}: ${file_time} is before ${expected}" >&2
+ errs="${errs} ${kver}"
+ continue
+ fi
+
+ # Generate JSON.
+ json_kver=$(echo "${kver}" | tr . _)
+ # b/147370213 (migrating profiles from gcov format) may result in the
+ # pattern below no longer doing the right thing.
+ name="$(basename "${latest%.gcov.*}")"
+ # Skip kernels with no AFDO support in the current channel.
+ if [[ "${name}" == "" ]]
+ then
+ continue
+ fi
+ json=$(cat <<EOT
+${json}${sep}
+ "chromeos-kernel-${json_kver}": {
+ "name": "${name}"
+ }
+EOT
+ )
+ sep=","
+ successes=$((successes + 1))
+ done # kvers loop
+
+ # If we did not succeed for any kvers, exit now.
+ if [[ ${successes} -eq 0 ]]
then
- expected=$(env TZ=UTC date +%Y-%m-%dT%H:%M:%SZ -d @$expected_time)
- echo "Wrong date for $kver: $file_time is before $expected" >&2
- errs="$errs $kver"
+ echo "error: AFDO profiles out of date for all kernel versions" >&2
+ failed_channels="${failed_channels} ${channel}"
continue
fi
- # Generate JSON.
- json_kver=$(echo "$kver" | tr . _)
- # b/147370213 (migrating profiles from gcov format) may result in the
- # pattern below no longer doing the right thing.
- name=$(echo "$latest" | sed 's%.*/\(.*\)\.gcov.*%\1%')
- json=$(cat <<EOT
-$json$sep
- "chromeos-kernel-$json_kver": {
- "name": "$name"
- }
-EOT
- )
- sep=","
- successes=$((successes + 1))
- done
+ # Write new JSON file.
+    # Don't use `echo` since `json` might have escape characters in it.
+ printf "%s\n}\n" "${json}" > "${arch_outfile[${arch}]}"
- # If we did not succeed for any kvers, exit now.
- if [[ $successes -eq 0 ]]
- then
- echo "error: AFDO profiles out of date for all kernel versions" >&2
- failed_channels="${failed_channels} ${channel}"
- continue
- fi
+ # If no changes were made, say so.
+ outdir=$(dirname "${arch_outfile[${arch}]}")
+ shortstat=$(cd "${outdir}" &&\
+ git status --short "$(basename "${arch_outfile[${arch}]}")")
+ [ -z "${shortstat}" ] &&\
+ echo "$(basename "${arch_outfile[${arch}]}") is up to date." \
+ && continue
- git reset --hard HEAD
- echo git checkout "${remote_repo}/${curr_branch}"
- git checkout "${remote_repo}/${curr_branch}"
+ # If we had any errors, warn about them.
+ if [[ -n "${errs}" ]]
+ then
+ echo "warning: failed to update ${errs} in ${channel}" >&2
+ failed_channels="${failed_channels} ${channel}"
+ continue
+ fi
- # Write new JSON file.
- # Don't use `echo` since `json` might have esc characters in it.
- printf "%s\n}\n" "$json" > "$outfile"
+ git add "${arch_outfile[${arch}]}"
+ done # ARCHS loop
- # If no changes were made, say so.
- outdir=$(dirname "$outfile")
- shortstat=$(cd "$outdir" && git status --short $(basename "$outfile"))
- [ -z "$shortstat" ] && echo $(basename "$outfile")" is up to date." \
- && continue
-
- # If we had any errors, warn about them.
- if [[ -n "$errs" ]]
- then
- echo "warning: failed to update $errs in ${channel}" >&2
- failed_channels="${failed_channels} ${channel}"
- continue
- fi
-
- git add afdo_metadata/kernel_afdo.json
case "${channel}" in
canary )
- commit_contents="afdo_metadata: Publish the new kernel profiles
-
-Update chromeos-kernel-4_4
-Update chromeos-kernel-4_14
-Update chromeos-kernel-4_19
-Update chromeos-kernel-5_4
+ commit_contents=$'afdo_metadata: Publish the new kernel profiles\n\n'
+ for arch in ${ARCHS} ; do
+ for kver in ${arch_kvers[${arch}]} ; do
+ commit_contents="${commit_contents}Update ${arch} profile on\
+ chromeos-kernel-${kver}"$'\n'
+ done
+ done
+ commit_contents="${commit_contents}
BUG=None
TEST=Verified in kernel-release-afdo-verify-orchestrator"
@@ -215,20 +317,30 @@
commit[${channel}]=$(git -C "${worktree_dir}" rev-parse HEAD)
done
+popd
echo
# Array size check doesn't play well with the unbound variable option.
set +u
if [[ ${#commit[@]} -gt 0 ]]
then
set -u
- echo "The change is applied in ${!commit[@]}."
- echo "Run these commands to submit the change:"
- echo
- for channel in ${!commit[@]}
- do
- echo -e "\tgit -C ${tc_utils_dir} push ${remote_repo} \
-${commit[${channel}]}:refs/for/${branch[${channel}]}"
- done
+ echo "The change is applied in ${!commit[*]}."
+ if ${upload_cl}
+ then
+ for channel in "${!commit[@]}"
+ do
+ git -C "${tc_utils_dir}" push "${remote_repo}" \
+ "${commit[${channel}]}:refs/for/${branch[${channel}]}"
+ done
+ else
+ echo "Run these commands to upload the change:"
+ echo
+ for channel in "${!commit[@]}"
+ do
+ echo -e "\tgit -C ${tc_utils_dir} push ${remote_repo} \
+ ${commit[${channel}]}:refs/for/${branch[${channel}]}"
+ done
+ fi
# Report failed channels.
if [[ -n "${failed_channels}" ]]
diff --git a/android_merge_from_upstream.sh b/android_merge_from_upstream.sh
index 4716a25..a391979 100755
--- a/android_merge_from_upstream.sh
+++ b/android_merge_from_upstream.sh
@@ -1,5 +1,5 @@
#!/bin/bash -eu
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
diff --git a/auto_delete_nightly_test_data.py b/auto_delete_nightly_test_data.py
index 6784118..0dd2dba 100755
--- a/auto_delete_nightly_test_data.py
+++ b/auto_delete_nightly_test_data.py
@@ -1,269 +1,369 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A crontab script to delete night test data."""
-from __future__ import print_function
-
-__author__ = 'shenhan@google.com (Han Shen)'
+__author__ = "shenhan@google.com (Han Shen)"
import argparse
import datetime
import os
+from pathlib import Path
import re
import shutil
-import shlex
+import stat
import sys
import time
+import traceback
+from typing import Callable
from cros_utils import command_executer
from cros_utils import constants
from cros_utils import misc
-DIR_BY_WEEKDAY = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
-NIGHTLY_TESTS_WORKSPACE = os.path.join(constants.CROSTC_WORKSPACE,
- 'nightly-tests')
+
+DIR_BY_WEEKDAY = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")
+NIGHTLY_TESTS_WORKSPACE = os.path.join(
+ constants.CROSTC_WORKSPACE, "nightly-tests"
+)
def CleanNumberedDir(s, dry_run=False):
- """Deleted directories under each dated_dir."""
- chromeos_dirs = [
- os.path.join(s, x)
- for x in os.listdir(s)
- if misc.IsChromeOsTree(os.path.join(s, x))
- ]
- ce = command_executer.GetCommandExecuter(log_level='none')
- all_succeeded = True
- for cd in chromeos_dirs:
- if misc.DeleteChromeOsTree(cd, dry_run=dry_run):
- print('Successfully removed chromeos tree "{0}".'.format(cd))
+ """Deleted directories under each dated_dir."""
+ chromeos_dirs = [
+ os.path.join(s, x)
+ for x in os.listdir(s)
+ if misc.IsChromeOsTree(os.path.join(s, x))
+ ]
+ ce = command_executer.GetCommandExecuter(log_level="none")
+ all_succeeded = True
+ for cd in chromeos_dirs:
+ if misc.DeleteChromeOsTree(cd, dry_run=dry_run):
+ print(f"Successfully removed chromeos tree {cd!r}.")
+ else:
+ all_succeeded = False
+ print(f"Failed to remove chromeos tree {cd!r}, please check.")
+
+ if not all_succeeded:
+ print("Failed to delete at least one chromeos tree, please check.")
+ return False
+
+ ## Now delete the numbered dir Before forcibly removing the directory, just
+ ## check 's' to make sure it matches the expected pattern. A valid dir to be
+ ## removed must be '/usr/local/google/crostc/(SUN|MON|TUE...|SAT)'.
+ valid_dir_pattern = (
+ "^" + NIGHTLY_TESTS_WORKSPACE + "/(" + "|".join(DIR_BY_WEEKDAY) + ")"
+ )
+ if not re.search(valid_dir_pattern, s):
+ print(
+ f"Trying to delete an invalid dir {s!r} (must match "
+ f"{valid_dir_pattern!r}), please check."
+ )
+ return False
+
+ cmd = f"rm -fr {s}"
+ if dry_run:
+ print(cmd)
else:
- all_succeeded = False
- print('Failed to remove chromeos tree "{0}", please check.'.format(cd))
-
- if not all_succeeded:
- print('Failed to delete at least one chromeos tree, please check.')
- return False
-
- ## Now delete the numbered dir Before forcibly removing the directory, just
- ## check 's' to make sure it matches the expected pattern. A valid dir to be
- ## removed must be '/usr/local/google/crostc/(SUN|MON|TUE...|SAT)'.
- valid_dir_pattern = ('^' + NIGHTLY_TESTS_WORKSPACE + '/(' +
- '|'.join(DIR_BY_WEEKDAY) + ')')
- if not re.search(valid_dir_pattern, s):
- print('Trying to delete an invalid dir "{0}" (must match "{1}"), '
- 'please check.'.format(s, valid_dir_pattern))
- return False
-
- cmd = 'rm -fr {0}'.format(s)
- if dry_run:
- print(cmd)
- else:
- if ce.RunCommand(cmd, print_to_console=False, terminated_timeout=480) == 0:
- print('Successfully removed "{0}".'.format(s))
- else:
- all_succeeded = False
- print('Failed to remove "{0}", please check.'.format(s))
- return all_succeeded
+ if (
+ ce.RunCommand(cmd, print_to_console=False, terminated_timeout=480)
+ == 0
+ ):
+ print(f"Successfully removed {s!r}.")
+ else:
+ all_succeeded = False
+ print(f"Failed to remove {s!r}, please check.")
+ return all_succeeded
def CleanDatedDir(dated_dir, dry_run=False):
- # List subdirs under dir
- subdirs = [
- os.path.join(dated_dir, x)
- for x in os.listdir(dated_dir)
- if os.path.isdir(os.path.join(dated_dir, x))
- ]
- all_succeeded = True
- for s in subdirs:
- if not CleanNumberedDir(s, dry_run):
- all_succeeded = False
- return all_succeeded
+ # List subdirs under dir
+ subdirs = [
+ os.path.join(dated_dir, x)
+ for x in os.listdir(dated_dir)
+ if os.path.isdir(os.path.join(dated_dir, x))
+ ]
+ all_succeeded = True
+ for s in subdirs:
+ if not CleanNumberedDir(s, dry_run):
+ all_succeeded = False
+ return all_succeeded
def ProcessArguments(argv):
- """Process arguments."""
- parser = argparse.ArgumentParser(
- description='Automatically delete nightly test data directories.',
- usage='auto_delete_nightly_test_data.py options')
- parser.add_argument(
- '-d',
- '--dry_run',
- dest='dry_run',
- default=False,
- action='store_true',
- help='Only print command line, do not execute anything.')
- parser.add_argument(
- '--days_to_preserve',
- dest='days_to_preserve',
- default=3,
- help=('Specify the number of days (not including today),'
- ' test data generated on these days will *NOT* be '
- 'deleted. Defaults to 3.'))
- options = parser.parse_args(argv)
- return options
+ """Process arguments."""
+ parser = argparse.ArgumentParser(
+ description="Automatically delete nightly test data directories.",
+ usage="auto_delete_nightly_test_data.py options",
+ )
+ parser.add_argument(
+ "-d",
+ "--dry_run",
+ dest="dry_run",
+ default=False,
+ action="store_true",
+ help="Only print command line, do not execute anything.",
+ )
+ parser.add_argument(
+ "--days_to_preserve",
+ dest="days_to_preserve",
+ default=3,
+ help=(
+ "Specify the number of days (not including today),"
+ " test data generated on these days will *NOT* be "
+ "deleted. Defaults to 3."
+ ),
+ )
+ options = parser.parse_args(argv)
+ return options
-def CleanChromeOsTmpFiles(chroot_tmp, days_to_preserve, dry_run):
- rv = 0
- ce = command_executer.GetCommandExecuter()
- # Clean chroot/tmp/test_that_* and chroot/tmp/tmpxxxxxx, that were last
- # accessed more than specified time.
- minutes = 1440 * days_to_preserve
- cmd = (r'find {0} -maxdepth 1 -type d '
- r'\( -name "test_that_*" -amin +{1} -o '
- r' -name "cros-update*" -amin +{1} -o '
- r' -name "CrAU_temp_data*" -amin +{1} -o '
- r' -regex "{0}/tmp......" -amin +{1} \) '
- r'-exec bash -c "echo rm -fr {{}}" \; '
- r'-exec bash -c "rm -fr {{}}" \;').format(chroot_tmp, minutes)
- if dry_run:
- print('Going to execute:\n%s' % cmd)
- else:
- rv = ce.RunCommand(cmd, print_to_console=False)
- if rv == 0:
- print('Successfully cleaned chromeos tree tmp directory '
- '"{0}".'.format(chroot_tmp))
- else:
- print('Some directories were not removed under chromeos tree '
- 'tmp directory -"{0}".'.format(chroot_tmp))
+def RemoveAllSubdirsMatchingPredicate(
+ base_dir: Path,
+ days_to_preserve: int,
+ dry_run: bool,
+ is_name_removal_worthy: Callable[[str], bool],
+) -> int:
+ """Removes all subdirs of base_dir that match the given predicate."""
+ secs_to_preserve = 60 * 60 * 24 * days_to_preserve
+ now = time.time()
+ remove_older_than_time = now - secs_to_preserve
- return rv
+ try:
+ dir_entries = list(base_dir.iterdir())
+ except FileNotFoundError as e:
+ # We get this if the directory itself doesn't exist. Since we're cleaning
+ # tempdirs, that's as good as a success. Further, the prior approach here
+ # was using the `find` binary, which exits successfully if nothing is
+ # found.
+ print(f"Error enumerating {base_dir}'s contents; skipping removal: {e}")
+ return 0
+
+ had_errors = False
+ for file in dir_entries:
+ if not is_name_removal_worthy(file.name):
+ continue
+
+ try:
+ # Take the stat here and use that later, so we only need to check for a
+ # nonexistent file once.
+ st = file.stat()
+ except FileNotFoundError:
+      # This was deleted while we were checking; ignore it.
+ continue
+
+ if not stat.S_ISDIR(st.st_mode):
+ continue
+
+ if secs_to_preserve and st.st_atime >= remove_older_than_time:
+ continue
+
+ if dry_run:
+ print(f"Would remove {file}")
+ continue
+
+ this_iteration_had_errors = False
+
+ def OnError(_func, path_name, excinfo):
+ nonlocal this_iteration_had_errors
+ this_iteration_had_errors = True
+ print(f"Failed removing path at {path_name}; traceback:")
+ traceback.print_exception(*excinfo)
+
+ shutil.rmtree(file, onerror=OnError)
+
+ # Some errors can be other processes racing with us to delete things. Don't
+ # count those as an error which we complain loudly about.
+ if this_iteration_had_errors:
+ if file.exists():
+ had_errors = True
+ else:
+ print(
+ f"Discarding removal errors for {file}; dir was still removed."
+ )
+
+ return 1 if had_errors else 0
-def CleanChromeOsImageFiles(chroot_tmp, subdir_suffix, days_to_preserve,
- dry_run):
- # Clean files that were last accessed more than the specified time.
- seconds_delta = days_to_preserve * 24 * 3600
- now = time.time()
- errors = 0
+def IsChromeOsTmpDeletionCandidate(file_name: str):
+ """Returns whether the given basename can be deleted from a chroot's /tmp."""
+ name_prefixes = (
+ "test_that_",
+ "cros-update",
+ "CrAU_temp_data",
+ )
+ if any(file_name.startswith(x) for x in name_prefixes):
+ return True
+ # Remove files that look like `tmpABCDEFGHI`.
+ return len(file_name) == 9 and file_name.startswith("tmp")
- for tmp_dir in os.listdir(chroot_tmp):
- # Directory under /tmp
- tmp_dir = os.path.join(chroot_tmp, tmp_dir)
- if tmp_dir.endswith(subdir_suffix):
- # Tmp directory which ends with subdir_suffix.
- for subdir in os.listdir(tmp_dir):
- # Subdirectories targeted for deletion.
- subdir_path = os.path.join(tmp_dir, subdir)
- if now - os.path.getatime(subdir_path) > seconds_delta:
- if dry_run:
- print('Will run:\nshutil.rmtree({})'.format(subdir_path))
- else:
- try:
- shutil.rmtree(subdir_path)
- print('Successfully cleaned chromeos image autotest directories '
- 'from "{}".'.format(subdir_path))
- except OSError:
- print('Some image autotest directories were not removed from '
- '"{}".'.format(subdir_path))
- errors += 1
- return errors
+def CleanChromeOsTmpFiles(
+ chroot_tmp: str, days_to_preserve: int, dry_run: bool
+) -> int:
+ # Clean chroot/tmp/test_that_* and chroot/tmp/tmpxxxxxx, that were last
+ # accessed more than specified time ago.
+ return RemoveAllSubdirsMatchingPredicate(
+ Path(chroot_tmp),
+ days_to_preserve,
+ dry_run,
+ IsChromeOsTmpDeletionCandidate,
+ )
+
+
+def CleanChromeOsImageFiles(
+ chroot_tmp, subdir_suffix, days_to_preserve, dry_run
+):
+ # Clean files that were last accessed more than the specified time.
+ seconds_delta = days_to_preserve * 24 * 3600
+ now = time.time()
+ errors = 0
+
+ for tmp_dir in os.listdir(chroot_tmp):
+ # Directory under /tmp
+ tmp_dir = os.path.join(chroot_tmp, tmp_dir)
+ if tmp_dir.endswith(subdir_suffix):
+ # Tmp directory which ends with subdir_suffix.
+ for subdir in os.listdir(tmp_dir):
+ # Subdirectories targeted for deletion.
+ subdir_path = os.path.join(tmp_dir, subdir)
+ if now - os.path.getatime(subdir_path) > seconds_delta:
+ if dry_run:
+ print(f"Will run:\nshutil.rmtree({subdir_path!r})")
+ else:
+ try:
+ shutil.rmtree(subdir_path)
+ print(
+ "Successfully cleaned chromeos image autotest directories "
+ f"from {subdir_path!r}."
+ )
+ except OSError:
+ print(
+ "Some image autotest directories were not removed from "
+ f'"{subdir_path}".'
+ )
+ errors += 1
+
+ return errors
def CleanChromeOsTmpAndImages(days_to_preserve=1, dry_run=False):
- """Delete temporaries, images under crostc/chromeos."""
- chromeos_chroot_tmp = os.path.join(constants.CROSTC_WORKSPACE, 'chromeos',
- 'chroot', 'tmp')
- # Clean files in tmp directory
- rv = CleanChromeOsTmpFiles(chromeos_chroot_tmp, days_to_preserve, dry_run)
- # Clean image files in *-tryjob directories
- rv += CleanChromeOsImageFiles(chromeos_chroot_tmp, '-tryjob',
- days_to_preserve, dry_run)
- # Clean image files in *-release directories
- rv += CleanChromeOsImageFiles(chromeos_chroot_tmp, '-release',
- days_to_preserve, dry_run)
- # Clean image files in *-pfq directories
- rv += CleanChromeOsImageFiles(chromeos_chroot_tmp, '-pfq', days_to_preserve,
- dry_run)
- # Clean image files in *-llvm-next-nightly directories
- rv += CleanChromeOsImageFiles(chromeos_chroot_tmp, '-llvm-next-nightly',
- days_to_preserve, dry_run)
+ """Delete temporaries, images under crostc/chromeos."""
+ chromeos_chroot_tmp = os.path.join(
+ constants.CROSTC_WORKSPACE, "chromeos", "chroot", "tmp"
+ )
+ # Clean files in tmp directory
+ rv = CleanChromeOsTmpFiles(chromeos_chroot_tmp, days_to_preserve, dry_run)
+ # Clean image files in *-tryjob directories
+ rv += CleanChromeOsImageFiles(
+ chromeos_chroot_tmp, "-tryjob", days_to_preserve, dry_run
+ )
+ # Clean image files in *-release directories
+ rv += CleanChromeOsImageFiles(
+ chromeos_chroot_tmp, "-release", days_to_preserve, dry_run
+ )
+ # Clean image files in *-pfq directories
+ rv += CleanChromeOsImageFiles(
+ chromeos_chroot_tmp, "-pfq", days_to_preserve, dry_run
+ )
+ # Clean image files in *-llvm-next-nightly directories
+ rv += CleanChromeOsImageFiles(
+ chromeos_chroot_tmp, "-llvm-next-nightly", days_to_preserve, dry_run
+ )
- return rv
+ return rv
-def CleanOldCLs(days_to_preserve='1', dry_run=False):
- """Abandon old CLs created by automation tooling."""
- ce = command_executer.GetCommandExecuter()
- chromeos_root = os.path.join(constants.CROSTC_WORKSPACE, 'chromeos')
- # Find Old CLs.
- old_cls_cmd = ('gerrit --raw search "owner:me status:open age:%sd"' %
- days_to_preserve)
- _, cls, _ = ce.ChrootRunCommandWOutput(
- chromeos_root, old_cls_cmd, print_to_console=False)
- # Convert any whitespaces to spaces.
- cls = ' '.join(cls.split())
- if not cls:
- return 0
+def CleanOldCLs(days_to_preserve="1", dry_run=False):
+ """Abandon old CLs created by automation tooling."""
+ ce = command_executer.GetCommandExecuter()
+ chromeos_root = os.path.join(constants.CROSTC_WORKSPACE, "chromeos")
+ # Find Old CLs.
+ old_cls_cmd = (
+ 'gerrit --raw search "owner:me status:open age:%sd"' % days_to_preserve
+ )
+ _, cls, _ = ce.ChrootRunCommandWOutput(
+ chromeos_root, old_cls_cmd, print_to_console=False
+ )
+ # Convert any whitespaces to spaces.
+ cls = " ".join(cls.split())
+ if not cls:
+ return 0
- abandon_cls_cmd = ('gerrit abandon %s' % cls)
- if dry_run:
- print('Going to execute: %s' % abandon_cls_cmd)
- return 0
+ abandon_cls_cmd = "gerrit abandon %s" % cls
+ if dry_run:
+ print("Going to execute: %s" % abandon_cls_cmd)
+ return 0
- return ce.ChrootRunCommand(
- chromeos_root, abandon_cls_cmd, print_to_console=False)
+ return ce.ChrootRunCommand(
+ chromeos_root, abandon_cls_cmd, print_to_console=False
+ )
-def CleanChromeTelemetryTmpFiles(dry_run):
- rv = 0
- ce = command_executer.GetCommandExecuter()
- tmp_dir = os.path.join(constants.CROSTC_WORKSPACE, 'chromeos', '.cache',
- 'distfiles', 'chrome-src-internal', 'src', 'tmp')
- cmd = f'rm -fr {shlex.quote(tmp_dir)}/tmp*telemetry_Crosperf'
- if dry_run:
- print(f'Going to execute:\n{cmd}')
- else:
- rv = ce.RunCommand(cmd, print_to_console=False)
- if rv == 0:
- print(f'Successfully cleaned chrome tree tmp directory ' f'{tmp_dir!r} .')
- else:
- print(f'Some directories were not removed under chrome tree '
- f'tmp directory {tmp_dir!r}.')
- return rv
+def CleanChromeTelemetryTmpFiles(dry_run: bool) -> int:
+ tmp_dir = (
+ Path(constants.CROSTC_WORKSPACE)
+ / "chromeos"
+ / ".cache"
+ / "distfiles"
+ / "chrome-src-internal"
+ / "src"
+ / "tmp"
+ )
+ return RemoveAllSubdirsMatchingPredicate(
+ tmp_dir,
+ days_to_preserve=0,
+ dry_run=dry_run,
+ is_name_removal_worthy=lambda x: x.startswith("tmp")
+ and x.endswith("telemetry_Crosperf"),
+ )
def Main(argv):
- """Delete nightly test data directories, tmps and test images."""
- options = ProcessArguments(argv)
- # Function 'isoweekday' returns 1(Monday) - 7 (Sunday).
- d = datetime.datetime.today().isoweekday()
- # We go back 1 week, delete from that day till we are
- # options.days_to_preserve away from today.
- s = d - 7
- e = d - int(options.days_to_preserve)
- rv = 0
- for i in range(s + 1, e):
- if i <= 0:
- ## Wrap around if index is negative. 6 is from i + 7 - 1, because
- ## DIR_BY_WEEKDAY starts from 0, while isoweekday is from 1-7.
- dated_dir = DIR_BY_WEEKDAY[i + 6]
- else:
- dated_dir = DIR_BY_WEEKDAY[i - 1]
+ """Delete nightly test data directories, tmps and test images."""
+ options = ProcessArguments(argv)
+ # Function 'isoweekday' returns 1(Monday) - 7 (Sunday).
+ d = datetime.datetime.today().isoweekday()
+ # We go back 1 week, delete from that day till we are
+ # options.days_to_preserve away from today.
+ s = d - 7
+ e = d - int(options.days_to_preserve)
+ rv = 0
+ for i in range(s + 1, e):
+ if i <= 0:
+ ## Wrap around if index is negative. 6 is from i + 7 - 1, because
+ ## DIR_BY_WEEKDAY starts from 0, while isoweekday is from 1-7.
+ dated_dir = DIR_BY_WEEKDAY[i + 6]
+ else:
+ dated_dir = DIR_BY_WEEKDAY[i - 1]
- rv += 0 if CleanDatedDir(
- os.path.join(NIGHTLY_TESTS_WORKSPACE, dated_dir),
- options.dry_run) else 1
+ rv += (
+ 0
+ if CleanDatedDir(
+ os.path.join(NIGHTLY_TESTS_WORKSPACE, dated_dir),
+ options.dry_run,
+ )
+ else 1
+ )
- ## Clean temporaries, images under crostc/chromeos
- rv2 = CleanChromeOsTmpAndImages(
- int(options.days_to_preserve), options.dry_run)
+ ## Clean temporaries, images under crostc/chromeos
+ rv2 = CleanChromeOsTmpAndImages(
+ int(options.days_to_preserve), options.dry_run
+ )
- # Clean CLs that are not updated in last 2 weeks.
- rv3 = CleanOldCLs('14', options.dry_run)
+ # Clean CLs that are not updated in last 2 weeks.
+ rv3 = CleanOldCLs("14", options.dry_run)
- # Clean telemetry temporaries from chrome source tree inside chroot.
- rv4 = CleanChromeTelemetryTmpFiles(options.dry_run)
+ # Clean telemetry temporaries from chrome source tree inside chroot.
+ rv4 = CleanChromeTelemetryTmpFiles(options.dry_run)
- return rv + rv2 + rv3 + rv4
+ return rv + rv2 + rv3 + rv4
-if __name__ == '__main__':
- retval = Main(sys.argv[1:])
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main(sys.argv[1:])
+ sys.exit(retval)
diff --git a/bestflags/example_algorithms.py b/bestflags/example_algorithms.py
index 9775d49..c39b294 100644
--- a/bestflags/example_algorithms.py
+++ b/bestflags/example_algorithms.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""An example main file running the algorithms.
@@ -10,7 +10,7 @@
processes for different modules and runs the experiment.
"""
-__author__ = 'yuhenglong@google.com (Yuheng Long)'
+__author__ = "yuhenglong@google.com (Yuheng Long)"
import json
import multiprocessing
@@ -27,170 +27,190 @@
from task import TEST_STAGE
import testing_batch
+
parser = OptionParser()
-parser.add_option('-f',
- '--file',
- dest='filename',
- help='configuration file FILE input',
- metavar='FILE')
+parser.add_option(
+ "-f",
+ "--file",
+ dest="filename",
+ help="configuration file FILE input",
+ metavar="FILE",
+)
# The meta data for the genetic algorithm.
-BUILD_CMD = 'BUILD_CMD'
-TEST_CMD = 'TEST_CMD'
-OUTPUT = 'OUTPUT'
-DEFAULT_OUTPUT = 'output'
-CONF = 'CONF'
-DEFAULT_CONF = 'conf'
-NUM_BUILDER = 'NUM_BUILDER'
+BUILD_CMD = "BUILD_CMD"
+TEST_CMD = "TEST_CMD"
+OUTPUT = "OUTPUT"
+DEFAULT_OUTPUT = "output"
+CONF = "CONF"
+DEFAULT_CONF = "conf"
+NUM_BUILDER = "NUM_BUILDER"
DEFAULT_NUM_BUILDER = 1
-NUM_TESTER = 'NUM_TESTER'
+NUM_TESTER = "NUM_TESTER"
DEFAULT_NUM_TESTER = 1
-STOP_THRESHOLD = 'STOP_THRESHOLD'
+STOP_THRESHOLD = "STOP_THRESHOLD"
DEFAULT_STOP_THRESHOLD = 1
-NUM_CHROMOSOMES = 'NUM_CHROMOSOMES'
+NUM_CHROMOSOMES = "NUM_CHROMOSOMES"
DEFAULT_NUM_CHROMOSOMES = 20
-NUM_TRIALS = 'NUM_TRIALS'
+NUM_TRIALS = "NUM_TRIALS"
DEFAULT_NUM_TRIALS = 20
-MUTATION_RATE = 'MUTATION_RATE'
+MUTATION_RATE = "MUTATION_RATE"
DEFAULT_MUTATION_RATE = 0.01
def _ProcessGA(meta_data):
- """Set up the meta data for the genetic algorithm.
+ """Set up the meta data for the genetic algorithm.
- Args:
- meta_data: the meta data for the genetic algorithm.
- """
- assert BUILD_CMD in meta_data
- build_cmd = meta_data[BUILD_CMD]
+ Args:
+ meta_data: the meta data for the genetic algorithm.
+ """
+ assert BUILD_CMD in meta_data
+ build_cmd = meta_data[BUILD_CMD]
- assert TEST_CMD in meta_data
- test_cmd = meta_data[TEST_CMD]
+ assert TEST_CMD in meta_data
+ test_cmd = meta_data[TEST_CMD]
- if OUTPUT not in meta_data:
- output_file = DEFAULT_OUTPUT
- else:
- output_file = meta_data[OUTPUT]
+ if OUTPUT not in meta_data:
+ output_file = DEFAULT_OUTPUT
+ else:
+ output_file = meta_data[OUTPUT]
- if CONF not in meta_data:
- conf_file = DEFAULT_CONF
- else:
- conf_file = meta_data[CONF]
+ if CONF not in meta_data:
+ conf_file = DEFAULT_CONF
+ else:
+ conf_file = meta_data[CONF]
- if NUM_BUILDER not in meta_data:
- num_builders = DEFAULT_NUM_BUILDER
- else:
- num_builders = meta_data[NUM_BUILDER]
+ if NUM_BUILDER not in meta_data:
+ num_builders = DEFAULT_NUM_BUILDER
+ else:
+ num_builders = meta_data[NUM_BUILDER]
- if NUM_TESTER not in meta_data:
- num_testers = DEFAULT_NUM_TESTER
- else:
- num_testers = meta_data[NUM_TESTER]
+ if NUM_TESTER not in meta_data:
+ num_testers = DEFAULT_NUM_TESTER
+ else:
+ num_testers = meta_data[NUM_TESTER]
- if STOP_THRESHOLD not in meta_data:
- stop_threshold = DEFAULT_STOP_THRESHOLD
- else:
- stop_threshold = meta_data[STOP_THRESHOLD]
+ if STOP_THRESHOLD not in meta_data:
+ stop_threshold = DEFAULT_STOP_THRESHOLD
+ else:
+ stop_threshold = meta_data[STOP_THRESHOLD]
- if NUM_CHROMOSOMES not in meta_data:
- num_chromosomes = DEFAULT_NUM_CHROMOSOMES
- else:
- num_chromosomes = meta_data[NUM_CHROMOSOMES]
+ if NUM_CHROMOSOMES not in meta_data:
+ num_chromosomes = DEFAULT_NUM_CHROMOSOMES
+ else:
+ num_chromosomes = meta_data[NUM_CHROMOSOMES]
- if NUM_TRIALS not in meta_data:
- num_trials = DEFAULT_NUM_TRIALS
- else:
- num_trials = meta_data[NUM_TRIALS]
+ if NUM_TRIALS not in meta_data:
+ num_trials = DEFAULT_NUM_TRIALS
+ else:
+ num_trials = meta_data[NUM_TRIALS]
- if MUTATION_RATE not in meta_data:
- mutation_rate = DEFAULT_MUTATION_RATE
- else:
- mutation_rate = meta_data[MUTATION_RATE]
+ if MUTATION_RATE not in meta_data:
+ mutation_rate = DEFAULT_MUTATION_RATE
+ else:
+ mutation_rate = meta_data[MUTATION_RATE]
- specs = flags.ReadConf(conf_file)
+ specs = flags.ReadConf(conf_file)
- # Initiate the build/test command and the log directory.
- Task.InitLogCommand(build_cmd, test_cmd, output_file)
+ # Initiate the build/test command and the log directory.
+ Task.InitLogCommand(build_cmd, test_cmd, output_file)
- # Initiate the build/test command and the log directory.
- GAGeneration.InitMetaData(stop_threshold, num_chromosomes, num_trials, specs,
- mutation_rate)
+ # Initiate the build/test command and the log directory.
+ GAGeneration.InitMetaData(
+ stop_threshold, num_chromosomes, num_trials, specs, mutation_rate
+ )
- # Generate the initial generations.
- generation_tasks = testing_batch.GenerateRandomGATasks(specs, num_chromosomes,
- num_trials)
- generations = [GAGeneration(generation_tasks, set([]), 0)]
+ # Generate the initial generations.
+ generation_tasks = testing_batch.GenerateRandomGATasks(
+ specs, num_chromosomes, num_trials
+ )
+ generations = [GAGeneration(generation_tasks, set([]), 0)]
- # Execute the experiment.
- _StartExperiment(num_builders, num_testers, generations)
+ # Execute the experiment.
+ _StartExperiment(num_builders, num_testers, generations)
def _ParseJson(file_name):
- """Parse the input json file.
+ """Parse the input json file.
- Parse the input json file and call the proper function to perform the
- algorithms.
+ Parse the input json file and call the proper function to perform the
+ algorithms.
- Args:
- file_name: the input json file name.
- """
+ Args:
+ file_name: the input json file name.
+ """
- experiments = json.load(open(file_name))
+ experiments = json.load(open(file_name))
- for experiment in experiments:
- if experiment == 'GA':
- # An GA experiment
- _ProcessGA(experiments[experiment])
+ for experiment in experiments:
+ if experiment == "GA":
+ # An GA experiment
+ _ProcessGA(experiments[experiment])
def _StartExperiment(num_builders, num_testers, generations):
- """Set up the experiment environment and execute the framework.
+ """Set up the experiment environment and execute the framework.
- Args:
- num_builders: number of concurrent builders.
- num_testers: number of concurrent testers.
- generations: the initial generation for the framework.
- """
+ Args:
+ num_builders: number of concurrent builders.
+ num_testers: number of concurrent testers.
+ generations: the initial generation for the framework.
+ """
- manager = multiprocessing.Manager()
+ manager = multiprocessing.Manager()
- # The queue between the steering algorithm and the builder.
- steering_build = manager.Queue()
- # The queue between the builder and the tester.
- build_test = manager.Queue()
- # The queue between the tester and the steering algorithm.
- test_steering = manager.Queue()
+ # The queue between the steering algorithm and the builder.
+ steering_build = manager.Queue()
+ # The queue between the builder and the tester.
+ build_test = manager.Queue()
+ # The queue between the tester and the steering algorithm.
+ test_steering = manager.Queue()
- # Set up the processes for the builder, tester and steering algorithm module.
- build_process = PipelineProcess(num_builders, 'builder', {}, BUILD_STAGE,
- steering_build, pipeline_worker.Helper,
- pipeline_worker.Worker, build_test)
+ # Set up the processes for the builder, tester and steering algorithm module.
+ build_process = PipelineProcess(
+ num_builders,
+ "builder",
+ {},
+ BUILD_STAGE,
+ steering_build,
+ pipeline_worker.Helper,
+ pipeline_worker.Worker,
+ build_test,
+ )
- test_process = PipelineProcess(num_testers, 'tester', {}, TEST_STAGE,
- build_test, pipeline_worker.Helper,
- pipeline_worker.Worker, test_steering)
+ test_process = PipelineProcess(
+ num_testers,
+ "tester",
+ {},
+ TEST_STAGE,
+ build_test,
+ pipeline_worker.Helper,
+ pipeline_worker.Worker,
+ test_steering,
+ )
- steer_process = multiprocessing.Process(
- target=Steering,
- args=(set([]), generations, test_steering, steering_build))
+ steer_process = multiprocessing.Process(
+ target=Steering,
+ args=(set([]), generations, test_steering, steering_build),
+ )
- # Start the processes.
- build_process.start()
- test_process.start()
- steer_process.start()
+ # Start the processes.
+ build_process.start()
+ test_process.start()
+ steer_process.start()
- # Wait for the processes to finish.
- build_process.join()
- test_process.join()
- steer_process.join()
+ # Wait for the processes to finish.
+ build_process.join()
+ test_process.join()
+ steer_process.join()
def main(argv):
- (options, _) = parser.parse_args(argv)
- assert options.filename
- _ParseJson(options.filename)
+ (options, _) = parser.parse_args(argv)
+ assert options.filename
+ _ParseJson(options.filename)
-if __name__ == '__main__':
- main(sys.argv)
+if __name__ == "__main__":
+ main(sys.argv)
diff --git a/bestflags/flags.py b/bestflags/flags.py
index b316421..b1b7999 100644
--- a/bestflags/flags.py
+++ b/bestflags/flags.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Manage bundles of flags used for the optimizing of ChromeOS.
@@ -21,177 +21,182 @@
"foo[0-9]bar" will expand to e.g. "foo5bar".
"""
-__author__ = 'yuhenglong@google.com (Yuheng Long)'
+__author__ = "yuhenglong@google.com (Yuheng Long)"
import random
import re
+
#
# This matches a [...] group in the internal representation for a flag
# specification, and is used in "filling out" flags - placing values inside
# the flag_spec. The internal flag_spec format is like "foo[0]", with
# values filled out like 5; this would be transformed by
# FormattedForUse() into "foo5".
-_FLAG_FILLOUT_VALUE_RE = re.compile(r'\[([^\]]*)\]')
+_FLAG_FILLOUT_VALUE_RE = re.compile(r"\[([^\]]*)\]")
# This matches a numeric flag flag=[start-end].
-rx = re.compile(r'\[(?P<start>\d+)-(?P<end>\d+)\]')
+rx = re.compile(r"\[(?P<start>\d+)-(?P<end>\d+)\]")
# Search the numeric flag pattern.
def Search(spec):
- return rx.search(spec)
+ return rx.search(spec)
class NoSuchFileError(Exception):
- """Define an Exception class for user providing invalid input file."""
- pass
+ """Define an Exception class for user providing invalid input file."""
+
+ pass
def ReadConf(file_name):
- """Parse the configuration file.
+ """Parse the configuration file.
- The configuration contains one flag specification in each line.
+ The configuration contains one flag specification in each line.
- Args:
- file_name: The name of the configuration file.
+ Args:
+ file_name: The name of the configuration file.
- Returns:
- A list of specs in the configuration file.
+ Returns:
+ A list of specs in the configuration file.
- Raises:
- NoSuchFileError: The caller should provide a valid configuration file.
- """
+ Raises:
+ NoSuchFileError: The caller should provide a valid configuration file.
+ """
- with open(file_name, 'r') as input_file:
- lines = input_file.readlines()
+ with open(file_name, "r") as input_file:
+ lines = input_file.readlines()
- return sorted([line.strip() for line in lines if line.strip()])
+ return sorted([line.strip() for line in lines if line.strip()])
- raise NoSuchFileError()
+ raise NoSuchFileError()
class Flag(object):
- """A class representing a particular command line flag argument.
+ """A class representing a particular command line flag argument.
- The Flag consists of two parts: The spec and the value.
- The spec is a definition of the following form: a string with escaped
- sequences of the form [<start>-<end>] where start and end is an positive
- integer for a fillable value.
+ The Flag consists of two parts: The spec and the value.
+ The spec is a definition of the following form: a string with escaped
+ sequences of the form [<start>-<end>] where start and end is an positive
+ integer for a fillable value.
- An example of a spec is "foo[0-9]".
- There are two kinds of flags, boolean flag and numeric flags. Boolean flags
- can either be turned on or off, which numeric flags can have different
- positive integer values. For example, -finline-limit=[1-1000] is a numeric
- flag and -ftree-vectorize is a boolean flag.
+ An example of a spec is "foo[0-9]".
+ There are two kinds of flags, boolean flag and numeric flags. Boolean flags
+ can either be turned on or off, which numeric flags can have different
+ positive integer values. For example, -finline-limit=[1-1000] is a numeric
+ flag and -ftree-vectorize is a boolean flag.
- A (boolean/numeric) flag is not turned on if it is not selected in the
- FlagSet.
- """
-
- def __init__(self, spec, value=-1):
- self._spec = spec
-
- # If the value is not specified, generate a random value to use.
- if value == -1:
- # If creating a boolean flag, the value will be 0.
- value = 0
-
- # Parse the spec's expression for the flag value's numeric range.
- numeric_flag_match = Search(spec)
-
- # If this is a numeric flag, a value is chosen within start and end, start
- # inclusive and end exclusive.
- if numeric_flag_match:
- start = int(numeric_flag_match.group('start'))
- end = int(numeric_flag_match.group('end'))
-
- assert start < end
- value = random.randint(start, end)
-
- self._value = value
-
- def __eq__(self, other):
- if isinstance(other, Flag):
- return self._spec == other.GetSpec() and self._value == other.GetValue()
- return False
-
- def __hash__(self):
- return hash(self._spec) + self._value
-
- def GetValue(self):
- """Get the value for this flag.
-
- Returns:
- The value.
+ A (boolean/numeric) flag is not turned on if it is not selected in the
+ FlagSet.
"""
- return self._value
+ def __init__(self, spec, value=-1):
+ self._spec = spec
- def GetSpec(self):
- """Get the spec for this flag.
+ # If the value is not specified, generate a random value to use.
+ if value == -1:
+ # If creating a boolean flag, the value will be 0.
+ value = 0
- Returns:
- The spec.
- """
+ # Parse the spec's expression for the flag value's numeric range.
+ numeric_flag_match = Search(spec)
- return self._spec
+ # If this is a numeric flag, a value is chosen within start and end, start
+ # inclusive and end exclusive.
+ if numeric_flag_match:
+ start = int(numeric_flag_match.group("start"))
+ end = int(numeric_flag_match.group("end"))
- def FormattedForUse(self):
- """Calculate the combination of flag_spec and values.
+ assert start < end
+ value = random.randint(start, end)
- For e.g. the flag_spec 'foo[0-9]' and the value equals to 5, this will
- return 'foo5'. The filled out version of the flag is the text string you use
- when you actually want to pass the flag to some binary.
+ self._value = value
- Returns:
- A string that represent the filled out flag, e.g. the flag with the
- FlagSpec '-X[0-9]Y' and value equals to 5 would return '-X5Y'.
- """
+ def __eq__(self, other):
+ if isinstance(other, Flag):
+ return (
+ self._spec == other.GetSpec()
+ and self._value == other.GetValue()
+ )
+ return False
- return _FLAG_FILLOUT_VALUE_RE.sub(str(self._value), self._spec)
+ def __hash__(self):
+ return hash(self._spec) + self._value
+
+ def GetValue(self):
+ """Get the value for this flag.
+
+ Returns:
+ The value.
+ """
+
+ return self._value
+
+ def GetSpec(self):
+ """Get the spec for this flag.
+
+ Returns:
+ The spec.
+ """
+
+ return self._spec
+
+ def FormattedForUse(self):
+ """Calculate the combination of flag_spec and values.
+
+ For e.g. the flag_spec 'foo[0-9]' and the value equals to 5, this will
+ return 'foo5'. The filled out version of the flag is the text string you use
+ when you actually want to pass the flag to some binary.
+
+ Returns:
+ A string that represent the filled out flag, e.g. the flag with the
+ FlagSpec '-X[0-9]Y' and value equals to 5 would return '-X5Y'.
+ """
+
+ return _FLAG_FILLOUT_VALUE_RE.sub(str(self._value), self._spec)
class FlagSet(object):
- """A dictionary of Flag objects.
+ """A dictionary of Flag objects.
- The flags dictionary stores the spec and flag pair.
- """
-
- def __init__(self, flag_array):
- # Store the flags as a dictionary mapping of spec -> flag object
- self._flags = dict([(flag.GetSpec(), flag) for flag in flag_array])
-
- def __eq__(self, other):
- return isinstance(other, FlagSet) and self._flags == other.GetFlags()
-
- def __hash__(self):
- return sum([hash(flag) for flag in self._flags.values()])
-
- def __getitem__(self, flag_spec):
- """Get flag with a particular flag_spec.
-
- Args:
- flag_spec: The flag_spec to find.
-
- Returns:
- A flag.
+ The flags dictionary stores the spec and flag pair.
"""
- return self._flags[flag_spec]
+ def __init__(self, flag_array):
+ # Store the flags as a dictionary mapping of spec -> flag object
+ self._flags = dict([(flag.GetSpec(), flag) for flag in flag_array])
- def __contains__(self, flag_spec):
- return self._flags.has_key(flag_spec)
+ def __eq__(self, other):
+ return isinstance(other, FlagSet) and self._flags == other.GetFlags()
- def GetFlags(self):
- return self._flags
+ def __hash__(self):
+ return sum([hash(flag) for flag in self._flags.values()])
- def FormattedForUse(self):
- """Format this for use in an application.
+ def __getitem__(self, flag_spec):
+ """Get flag with a particular flag_spec.
- Returns:
- A list of flags, sorted alphabetically and filled in with the values
- for each flag.
- """
+ Args:
+ flag_spec: The flag_spec to find.
- return sorted([f.FormattedForUse() for f in self._flags.values()])
+ Returns:
+ A flag.
+ """
+
+ return self._flags[flag_spec]
+
+ def __contains__(self, flag_spec):
+ return self._flags.has_key(flag_spec)
+
+ def GetFlags(self):
+ return self._flags
+
+ def FormattedForUse(self):
+ """Format this for use in an application.
+
+ Returns:
+ A list of flags, sorted alphabetically and filled in with the values
+ for each flag.
+ """
+
+ return sorted([f.FormattedForUse() for f in self._flags.values()])
diff --git a/bestflags/flags_test.py b/bestflags/flags_test.py
index dbbea77..231e569 100644
--- a/bestflags/flags_test.py
+++ b/bestflags/flags_test.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the classes in module 'flags'.
@@ -6,7 +6,7 @@
Part of the Chrome build flags optimization.
"""
-__author__ = 'yuhenglong@google.com (Yuheng Long)'
+__author__ = "yuhenglong@google.com (Yuheng Long)"
import random
import sys
@@ -15,176 +15,179 @@
from flags import Flag
from flags import FlagSet
+
# The number of tests to test.
NUM_TESTS = 20
class FlagTest(unittest.TestCase):
- """This class tests the Flag class."""
+ """This class tests the Flag class."""
- def testInit(self):
- """The value generated should fall within start and end of the spec.
+ def testInit(self):
+ """The value generated should fall within start and end of the spec.
- If the value is not specified, the value generated should fall within start
- and end of the spec.
- """
+ If the value is not specified, the value generated should fall within start
+ and end of the spec.
+ """
- for _ in range(NUM_TESTS):
- start = random.randint(1, sys.maxint - 1)
- end = random.randint(start + 1, sys.maxint)
+ for _ in range(NUM_TESTS):
+ start = random.randint(1, sys.maxint - 1)
+ end = random.randint(start + 1, sys.maxint)
- spec = 'flag=[%s-%s]' % (start, end)
+ spec = "flag=[%s-%s]" % (start, end)
- test_flag = Flag(spec)
+ test_flag = Flag(spec)
- value = test_flag.GetValue()
+ value = test_flag.GetValue()
- # If the value is not specified when the flag is constructed, a random
- # value is chosen. This value should fall within start and end of the
- # spec.
- assert start <= value and value < end
+ # If the value is not specified when the flag is constructed, a random
+ # value is chosen. This value should fall within start and end of the
+ # spec.
+ assert start <= value and value < end
- def testEqual(self):
- """Test the equal operator (==) of the flag.
+ def testEqual(self):
+ """Test the equal operator (==) of the flag.
- Two flags are equal if and only if their spec and value are equal.
- """
+ Two flags are equal if and only if their spec and value are equal.
+ """
- tests = range(NUM_TESTS)
+ tests = range(NUM_TESTS)
- # Two tasks having the same spec and value should be equivalent.
- for test in tests:
- assert Flag(str(test), test) == Flag(str(test), test)
+ # Two tasks having the same spec and value should be equivalent.
+ for test in tests:
+ assert Flag(str(test), test) == Flag(str(test), test)
- # Two tasks having different flag set should be different.
- for test in tests:
- flag = Flag(str(test), test)
- other_flag_sets = [other for other in tests if test != other]
- for other_test in other_flag_sets:
- assert flag != Flag(str(other_test), other_test)
+ # Two tasks having different flag set should be different.
+ for test in tests:
+ flag = Flag(str(test), test)
+ other_flag_sets = [other for other in tests if test != other]
+ for other_test in other_flag_sets:
+ assert flag != Flag(str(other_test), other_test)
- def testFormattedForUse(self):
- """Test the FormattedForUse method of the flag.
+ def testFormattedForUse(self):
+ """Test the FormattedForUse method of the flag.
- The FormattedForUse replaces the string within the [] with the actual value.
- """
+ The FormattedForUse replaces the string within the [] with the actual value.
+ """
- for _ in range(NUM_TESTS):
- start = random.randint(1, sys.maxint - 1)
- end = random.randint(start + 1, sys.maxint)
- value = random.randint(start, end - 1)
+ for _ in range(NUM_TESTS):
+ start = random.randint(1, sys.maxint - 1)
+ end = random.randint(start + 1, sys.maxint)
+ value = random.randint(start, end - 1)
- spec = 'flag=[%s-%s]' % (start, end)
+ spec = "flag=[%s-%s]" % (start, end)
- test_flag = Flag(spec, value)
+ test_flag = Flag(spec, value)
- # For numeric flag, the FormattedForUse replaces the string within the []
- # with the actual value.
- test_value = test_flag.FormattedForUse()
- actual_value = 'flag=%s' % value
+ # For numeric flag, the FormattedForUse replaces the string within the []
+ # with the actual value.
+ test_value = test_flag.FormattedForUse()
+ actual_value = "flag=%s" % value
- assert test_value == actual_value
+ assert test_value == actual_value
- for _ in range(NUM_TESTS):
- value = random.randint(1, sys.maxint - 1)
+ for _ in range(NUM_TESTS):
+ value = random.randint(1, sys.maxint - 1)
- test_flag = Flag('flag', value)
+ test_flag = Flag("flag", value)
- # For boolean flag, the FormattedForUse returns the spec.
- test_value = test_flag.FormattedForUse()
- actual_value = 'flag'
- assert test_value == actual_value
+ # For boolean flag, the FormattedForUse returns the spec.
+ test_value = test_flag.FormattedForUse()
+ actual_value = "flag"
+ assert test_value == actual_value
class FlagSetTest(unittest.TestCase):
- """This class test the FlagSet class."""
+ """This class test the FlagSet class."""
- def testEqual(self):
- """Test the equal method of the Class FlagSet.
+ def testEqual(self):
+ """Test the equal method of the Class FlagSet.
- Two FlagSet instances are equal if all their flags are equal.
- """
+ Two FlagSet instances are equal if all their flags are equal.
+ """
- flag_names = range(NUM_TESTS)
+ flag_names = range(NUM_TESTS)
- # Two flag sets having the same flags should be equivalent.
- for flag_name in flag_names:
- spec = '%s' % flag_name
+ # Two flag sets having the same flags should be equivalent.
+ for flag_name in flag_names:
+ spec = "%s" % flag_name
- assert FlagSet([Flag(spec)]) == FlagSet([Flag(spec)])
+ assert FlagSet([Flag(spec)]) == FlagSet([Flag(spec)])
- # Two flag sets having different flags should be different.
- for flag_name in flag_names:
- spec = '%s' % flag_name
- flag_set = FlagSet([Flag(spec)])
- other_flag_sets = [other for other in flag_names if flag_name != other]
- for other_name in other_flag_sets:
- other_spec = '%s' % other_name
- assert flag_set != FlagSet([Flag(other_spec)])
+ # Two flag sets having different flags should be different.
+ for flag_name in flag_names:
+ spec = "%s" % flag_name
+ flag_set = FlagSet([Flag(spec)])
+ other_flag_sets = [
+ other for other in flag_names if flag_name != other
+ ]
+ for other_name in other_flag_sets:
+ other_spec = "%s" % other_name
+ assert flag_set != FlagSet([Flag(other_spec)])
- def testGetItem(self):
- """Test the get item method of the Class FlagSet.
+ def testGetItem(self):
+ """Test the get item method of the Class FlagSet.
- The flag set is also indexed by the specs. The flag set should return the
- appropriate flag given the spec.
- """
+ The flag set is also indexed by the specs. The flag set should return the
+ appropriate flag given the spec.
+ """
- tests = range(NUM_TESTS)
+ tests = range(NUM_TESTS)
- specs = [str(spec) for spec in tests]
- flag_array = [Flag(spec) for spec in specs]
+ specs = [str(spec) for spec in tests]
+ flag_array = [Flag(spec) for spec in specs]
- flag_set = FlagSet(flag_array)
+ flag_set = FlagSet(flag_array)
- # Created a dictionary of spec and flag, the flag set should return the flag
- # the same as this dictionary.
- spec_flag = dict(zip(specs, flag_array))
+ # Created a dictionary of spec and flag, the flag set should return the flag
+ # the same as this dictionary.
+ spec_flag = dict(zip(specs, flag_array))
- for spec in spec_flag:
- assert flag_set[spec] == spec_flag[spec]
+ for spec in spec_flag:
+ assert flag_set[spec] == spec_flag[spec]
- def testContain(self):
- """Test the contain method of the Class FlagSet.
+ def testContain(self):
+ """Test the contain method of the Class FlagSet.
- The flag set is also indexed by the specs. The flag set should return true
- for spec if it contains a flag containing spec.
- """
+ The flag set is also indexed by the specs. The flag set should return true
+ for spec if it contains a flag containing spec.
+ """
- true_tests = range(NUM_TESTS)
- false_tests = range(NUM_TESTS, NUM_TESTS * 2)
+ true_tests = range(NUM_TESTS)
+ false_tests = range(NUM_TESTS, NUM_TESTS * 2)
- specs = [str(spec) for spec in true_tests]
+ specs = [str(spec) for spec in true_tests]
- flag_set = FlagSet([Flag(spec) for spec in specs])
+ flag_set = FlagSet([Flag(spec) for spec in specs])
- for spec in specs:
- assert spec in flag_set
+ for spec in specs:
+ assert spec in flag_set
- for spec in false_tests:
- assert spec not in flag_set
+ for spec in false_tests:
+ assert spec not in flag_set
- def testFormattedForUse(self):
- """Test the FormattedForUse method of the Class FlagSet.
+ def testFormattedForUse(self):
+ """Test the FormattedForUse method of the Class FlagSet.
- The output should be a sorted list of strings.
- """
+ The output should be a sorted list of strings.
+ """
- flag_names = range(NUM_TESTS)
- flag_names.reverse()
- flags = []
- result = []
+ flag_names = range(NUM_TESTS)
+ flag_names.reverse()
+ flags = []
+ result = []
- # Construct the flag set.
- for flag_name in flag_names:
- spec = '%s' % flag_name
- flags.append(Flag(spec))
- result.append(spec)
+ # Construct the flag set.
+ for flag_name in flag_names:
+ spec = "%s" % flag_name
+ flags.append(Flag(spec))
+ result.append(spec)
- flag_set = FlagSet(flags)
+ flag_set = FlagSet(flags)
- # The results string should be sorted.
- assert sorted(result) == flag_set.FormattedForUse()
+ # The results string should be sorted.
+ assert sorted(result) == flag_set.FormattedForUse()
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/bestflags/flags_util.py b/bestflags/flags_util.py
index 20be57f..c4a490e 100644
--- a/bestflags/flags_util.py
+++ b/bestflags/flags_util.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to explore the neighbor flags.
@@ -6,90 +6,91 @@
Part of the Chrome build flags optimization.
"""
-__author__ = 'yuhenglong@google.com (Yuheng Long)'
+__author__ = "yuhenglong@google.com (Yuheng Long)"
import flags
from flags import Flag
def ClimbNext(flags_dict, climb_spec):
- """Get the flags that are different from |flags_dict| by |climb_spec|.
+ """Get the flags that are different from |flags_dict| by |climb_spec|.
- Given a set of flags, |flags_dict|, return a new set of flags that are
- adjacent along the flag spec |climb_spec|.
+ Given a set of flags, |flags_dict|, return a new set of flags that are
+ adjacent along the flag spec |climb_spec|.
- An example flags_dict is {foo=[1-9]:foo=5, bar=[1-5]:bar=2} and climb_spec is
- bar=[1-5]. This method changes the flag that contains the spec bar=[1-5]. The
- results are its neighbors dictionaries, i.e., {foo=[1-9]:foo=5,
- bar=[1-5]:bar=1} and {foo=[1-9]:foo=5, bar=[1-5]:bar=3}.
+ An example flags_dict is {foo=[1-9]:foo=5, bar=[1-5]:bar=2} and climb_spec is
+ bar=[1-5]. This method changes the flag that contains the spec bar=[1-5]. The
+ results are its neighbors dictionaries, i.e., {foo=[1-9]:foo=5,
+ bar=[1-5]:bar=1} and {foo=[1-9]:foo=5, bar=[1-5]:bar=3}.
- Args:
- flags_dict: The dictionary containing the original flags whose neighbors are
- to be explored.
- climb_spec: The spec in the flags_dict is to be changed. The spec is a
- definition in the little language, a string with escaped sequences of the
- form [<start>-<end>] where start and end is an positive integer for a
- fillable value. An example of a spec is "foo[0-9]".
+ Args:
+ flags_dict: The dictionary containing the original flags whose neighbors are
+ to be explored.
+ climb_spec: The spec in the flags_dict is to be changed. The spec is a
+ definition in the little language, a string with escaped sequences of the
+ form [<start>-<end>] where start and end is an positive integer for a
+ fillable value. An example of a spec is "foo[0-9]".
- Returns:
- List of dictionaries of neighbor flags.
- """
+ Returns:
+ List of dictionaries of neighbor flags.
+ """
- # This method searches for a pattern [start-end] in the spec. If the spec
- # contains this pattern, it is a numeric flag. Otherwise it is a boolean flag.
- # For example, -finline-limit=[1-1000] is a numeric flag and -falign-jumps is
- # a boolean flag.
- numeric_flag_match = flags.Search(climb_spec)
+ # This method searches for a pattern [start-end] in the spec. If the spec
+ # contains this pattern, it is a numeric flag. Otherwise it is a boolean flag.
+ # For example, -finline-limit=[1-1000] is a numeric flag and -falign-jumps is
+ # a boolean flag.
+ numeric_flag_match = flags.Search(climb_spec)
- # If the flags do not contain the spec.
- if climb_spec not in flags_dict:
- results = flags_dict.copy()
+ # If the flags do not contain the spec.
+ if climb_spec not in flags_dict:
+ results = flags_dict.copy()
- if numeric_flag_match:
- # Numeric flags.
- results[climb_spec] = Flag(climb_spec,
- int(numeric_flag_match.group('start')))
+ if numeric_flag_match:
+ # Numeric flags.
+ results[climb_spec] = Flag(
+ climb_spec, int(numeric_flag_match.group("start"))
+ )
+ else:
+ # Boolean flags.
+ results[climb_spec] = Flag(climb_spec)
+
+ return [results]
+
+ # The flags contain the spec.
+ if not numeric_flag_match:
+ # Boolean flags.
+ results = flags_dict.copy()
+
+ # Turn off the flag. A flag is turned off if it is not presented in the
+ # flags_dict.
+ del results[climb_spec]
+ return [results]
+
+ # Numeric flags.
+ flag = flags_dict[climb_spec]
+
+ # The value of the flag having spec.
+ value = flag.GetValue()
+ results = []
+
+ if value + 1 < int(numeric_flag_match.group("end")):
+ # If the value is not the end value, explore the value that is 1 larger than
+ # the current value.
+ neighbor = flags_dict.copy()
+ neighbor[climb_spec] = Flag(climb_spec, value + 1)
+ results.append(neighbor)
+
+ if value > int(numeric_flag_match.group("start")):
+ # If the value is not the start value, explore the value that is 1 lesser
+ # than the current value.
+ neighbor = flags_dict.copy()
+ neighbor[climb_spec] = Flag(climb_spec, value - 1)
+ results.append(neighbor)
else:
- # Boolean flags.
- results[climb_spec] = Flag(climb_spec)
+ # Delete the value, i.e., turn off the flag. A flag is turned off if it is
+ # not presented in the flags_dict.
+ neighbor = flags_dict.copy()
+ del neighbor[climb_spec]
+ results.append(neighbor)
- return [results]
-
- # The flags contain the spec.
- if not numeric_flag_match:
- # Boolean flags.
- results = flags_dict.copy()
-
- # Turn off the flag. A flag is turned off if it is not presented in the
- # flags_dict.
- del results[climb_spec]
- return [results]
-
- # Numeric flags.
- flag = flags_dict[climb_spec]
-
- # The value of the flag having spec.
- value = flag.GetValue()
- results = []
-
- if value + 1 < int(numeric_flag_match.group('end')):
- # If the value is not the end value, explore the value that is 1 larger than
- # the current value.
- neighbor = flags_dict.copy()
- neighbor[climb_spec] = Flag(climb_spec, value + 1)
- results.append(neighbor)
-
- if value > int(numeric_flag_match.group('start')):
- # If the value is not the start value, explore the value that is 1 lesser
- # than the current value.
- neighbor = flags_dict.copy()
- neighbor[climb_spec] = Flag(climb_spec, value - 1)
- results.append(neighbor)
- else:
- # Delete the value, i.e., turn off the flag. A flag is turned off if it is
- # not presented in the flags_dict.
- neighbor = flags_dict.copy()
- del neighbor[climb_spec]
- results.append(neighbor)
-
- return results
+ return results
diff --git a/bestflags/generation.py b/bestflags/generation.py
index 67c379f..69622de 100644
--- a/bestflags/generation.py
+++ b/bestflags/generation.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A generation of a set of tasks.
@@ -15,125 +15,126 @@
will contains all the task t's neighbor.
"""
-__author__ = 'yuhenglong@google.com (Yuheng Long)'
+__author__ = "yuhenglong@google.com (Yuheng Long)"
class NoneOverridingError(Exception):
- """Define an Exception class for subclasses not overriding certain methods."""
- pass
+ """Define an Exception class for subclasses not overriding certain methods."""
+
+ pass
class Generation(object):
- """A generation of a framework run.
+ """A generation of a framework run.
- The base class of generation. Concrete subclasses, e.g., GAGeneration should
- override the Next and IsImproved method to implement algorithm specific
- applications.
- """
+ The base class of generation. Concrete subclasses, e.g., GAGeneration should
+ override the Next and IsImproved method to implement algorithm specific
+ applications.
+ """
- def __init__(self, exe_set, candidate_pool):
- """Set up the tasks set of this generation.
+ def __init__(self, exe_set, candidate_pool):
+ """Set up the tasks set of this generation.
- Args:
- exe_set: A set of tasks to be run.
- candidate_pool: A set of tasks to be considered to be used to generate the
+ Args:
+ exe_set: A set of tasks to be run.
+ candidate_pool: A set of tasks to be considered to be used to generate the
+ next generation.
+ """
+
+ self._exe_set = exe_set
+ self._candidate_pool = candidate_pool
+
+ # Keeping the record of how many tasks are pending. Pending tasks are the
+ # ones that have been sent out to the next stage for execution but have not
+ # finished. A generation is not ready for the reproduction of the new
+ # generations until all its pending tasks have been executed.
+ self._pending = len(exe_set)
+
+ def CandidatePool(self):
+ """Return the candidate tasks of this generation."""
+
+ return self._candidate_pool
+
+ def Pool(self):
+ """Return the task set of this generation."""
+
+ return self._exe_set
+
+ def Done(self):
+ """All the tasks in this generation are done.
+
+ Returns:
+ True if all the tasks have been executed. That is the number of pending
+ task is 0.
+ """
+
+ return self._pending == 0
+
+ def UpdateTask(self, task):
+ """Match a task t in this generation that is equal to the input task.
+
+ This method is called when the input task has just finished execution. This
+ method finds out whether there is a pending task t in the current generation
+ that is the same as the input task. Two tasks are the same if their flag
+ options are the same. A task is pending if it has not been performed.
+ If there is a pending task t that matches the input task, task t will be
+ substituted with the input task in this generation. In that case, the input
+ task, as well as its build and test results encapsulated in the task, will
+ be stored in the current generation. These results could be used to produce
+ the next generation.
+ If there is a match, the current generation will have one less pending task.
+ When there is no pending task, the generation can be used to produce the
next generation.
- """
+ The caller of this function is responsible for not calling this method on
+ the same task more than once.
- self._exe_set = exe_set
- self._candidate_pool = candidate_pool
+ Args:
+ task: A task that has its results ready.
- # Keeping the record of how many tasks are pending. Pending tasks are the
- # ones that have been sent out to the next stage for execution but have not
- # finished. A generation is not ready for the reproduction of the new
- # generations until all its pending tasks have been executed.
- self._pending = len(exe_set)
+ Returns:
+ Whether the input task belongs to this generation.
+ """
- def CandidatePool(self):
- """Return the candidate tasks of this generation."""
+ # If there is a match, the input task belongs to this generation.
+ if task not in self._exe_set:
+ return False
- return self._candidate_pool
+ # Remove the place holder task in this generation and store the new input
+ # task and its result.
+ self._exe_set.remove(task)
+ self._exe_set.add(task)
- def Pool(self):
- """Return the task set of this generation."""
+ # The current generation will have one less task to wait on.
+ self._pending -= 1
- return self._exe_set
+ assert self._pending >= 0
- def Done(self):
- """All the tasks in this generation are done.
+ return True
- Returns:
- True if all the tasks have been executed. That is the number of pending
- task is 0.
- """
+ def IsImproved(self):
+ """True if this generation has improvement upon its parent generation.
- return self._pending == 0
+ Raises:
+ NoneOverridingError: The subclass should override this method.
+ """
+ raise NoneOverridingError("Must be implemented by child class")
- def UpdateTask(self, task):
- """Match a task t in this generation that is equal to the input task.
+ def Next(self, _):
+ """Calculate the next generation.
- This method is called when the input task has just finished execution. This
- method finds out whether there is a pending task t in the current generation
- that is the same as the input task. Two tasks are the same if their flag
- options are the same. A task is pending if it has not been performed.
- If there is a pending task t that matches the input task, task t will be
- substituted with the input task in this generation. In that case, the input
- task, as well as its build and test results encapsulated in the task, will
- be stored in the current generation. These results could be used to produce
- the next generation.
- If there is a match, the current generation will have one less pending task.
- When there is no pending task, the generation can be used to produce the
- next generation.
- The caller of this function is responsible for not calling this method on
- the same task more than once.
+ This is the core of the framework implementation. It must be overridden by
+ the concrete subclass to implement algorithm specific generations.
- Args:
- task: A task that has its results ready.
+ Args:
+ _: A set of tasks that have been generated before. The overridden method
+ in the subclasses can use this so as not to generate task that has been
+ generated before.
- Returns:
- Whether the input task belongs to this generation.
- """
+ Returns:
+ A set of new generations.
- # If there is a match, the input task belongs to this generation.
- if task not in self._exe_set:
- return False
+ Raises:
+ NoneOverridingError: The subclass should override this method.
+ """
- # Remove the place holder task in this generation and store the new input
- # task and its result.
- self._exe_set.remove(task)
- self._exe_set.add(task)
-
- # The current generation will have one less task to wait on.
- self._pending -= 1
-
- assert self._pending >= 0
-
- return True
-
- def IsImproved(self):
- """True if this generation has improvement upon its parent generation.
-
- Raises:
- NoneOverridingError: The subclass should override this method.
- """
- raise NoneOverridingError('Must be implemented by child class')
-
- def Next(self, _):
- """Calculate the next generation.
-
- This is the core of the framework implementation. It must be overridden by
- the concrete subclass to implement algorithm specific generations.
-
- Args:
- _: A set of tasks that have been generated before. The overridden method
- in the subclasses can use this so as not to generate task that has been
- generated before.
-
- Returns:
- A set of new generations.
-
- Raises:
- NoneOverridingError: The subclass should override this method.
- """
-
- raise NoneOverridingError('Must be implemented by child class')
+ raise NoneOverridingError("Must be implemented by child class")
diff --git a/bestflags/generation_test.py b/bestflags/generation_test.py
index 2e042d4..0928edc 100644
--- a/bestflags/generation_test.py
+++ b/bestflags/generation_test.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generation unittest.
@@ -6,7 +6,7 @@
Part of the Chrome build flags optimization.
"""
-__author__ = 'yuhenglong@google.com (Yuheng Long)'
+__author__ = "yuhenglong@google.com (Yuheng Long)"
import random
import unittest
@@ -14,6 +14,7 @@
from generation import Generation
from mock_task import IdentifierMockTask
+
# Pick an integer at random.
TEST_STAGE = -125
@@ -26,47 +27,47 @@
class GenerationTest(unittest.TestCase):
- """This class test the Generation class.
+    """This class tests the Generation class.
- Given a set of tasks in the generation, if there is any task that is pending,
- then the Done method will return false, and true otherwise.
- """
-
- def testDone(self):
- """"Test the Done method.
-
- Produce a generation with a set of tasks. Set the cost of the task one by
- one and verify that the Done method returns false before setting the cost
- for all the tasks. After the costs of all the tasks are set, the Done method
- should return true.
+ Given a set of tasks in the generation, if there is any task that is pending,
+ then the Done method will return false, and true otherwise.
"""
- random.seed(0)
+ def testDone(self):
+        """Test the Done method.
- testing_tasks = range(NUM_TASKS)
+ Produce a generation with a set of tasks. Set the cost of the task one by
+ one and verify that the Done method returns false before setting the cost
+ for all the tasks. After the costs of all the tasks are set, the Done method
+ should return true.
+ """
- # The tasks for the generation to be tested.
- tasks = [IdentifierMockTask(TEST_STAGE, t) for t in testing_tasks]
+ random.seed(0)
- gen = Generation(set(tasks), None)
+ testing_tasks = range(NUM_TASKS)
- # Permute the list.
- permutation = [(t * STRIDE) % NUM_TASKS for t in range(NUM_TASKS)]
- permuted_tasks = [testing_tasks[index] for index in permutation]
+ # The tasks for the generation to be tested.
+ tasks = [IdentifierMockTask(TEST_STAGE, t) for t in testing_tasks]
- # The Done method of the Generation should return false before all the tasks
- # in the permuted list are set.
- for testing_task in permuted_tasks:
- assert not gen.Done()
+ gen = Generation(set(tasks), None)
- # Mark a task as done by calling the UpdateTask method of the generation.
- # Send the generation the task as well as its results.
- gen.UpdateTask(IdentifierMockTask(TEST_STAGE, testing_task))
+ # Permute the list.
+ permutation = [(t * STRIDE) % NUM_TASKS for t in range(NUM_TASKS)]
+ permuted_tasks = [testing_tasks[index] for index in permutation]
- # The Done method should return true after all the tasks in the permuted
- # list is set.
- assert gen.Done()
+ # The Done method of the Generation should return false before all the tasks
+ # in the permuted list are set.
+ for testing_task in permuted_tasks:
+ assert not gen.Done()
+
+ # Mark a task as done by calling the UpdateTask method of the generation.
+ # Send the generation the task as well as its results.
+ gen.UpdateTask(IdentifierMockTask(TEST_STAGE, testing_task))
+
+ # The Done method should return true after all the tasks in the permuted
+ # list is set.
+ assert gen.Done()
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/bestflags/genetic_algorithm.py b/bestflags/genetic_algorithm.py
index deb83f1..c2bd557 100644
--- a/bestflags/genetic_algorithm.py
+++ b/bestflags/genetic_algorithm.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The hill genetic algorithm.
@@ -6,7 +6,7 @@
Part of the Chrome build flags optimization.
"""
-__author__ = 'yuhenglong@google.com (Yuheng Long)'
+__author__ = "yuhenglong@google.com (Yuheng Long)"
import random
@@ -18,278 +18,287 @@
def CrossoverWith(first_flag, second_flag):
- """Get a crossed over gene.
+ """Get a crossed over gene.
- At present, this just picks either/or of these values. However, it could be
- implemented as an integer maskover effort, if required.
+ At present, this just picks either/or of these values. However, it could be
+ implemented as an integer maskover effort, if required.
- Args:
- first_flag: The first gene (Flag) to cross over with.
- second_flag: The second gene (Flag) to cross over with.
+ Args:
+ first_flag: The first gene (Flag) to cross over with.
+ second_flag: The second gene (Flag) to cross over with.
- Returns:
- A Flag that can be considered appropriately randomly blended between the
- first and second input flag.
- """
+ Returns:
+ A Flag that can be considered appropriately randomly blended between the
+ first and second input flag.
+ """
- return first_flag if random.randint(0, 1) else second_flag
+ return first_flag if random.randint(0, 1) else second_flag
def RandomMutate(specs, flag_set, mutation_rate):
- """Randomly mutate the content of a task.
+ """Randomly mutate the content of a task.
- Args:
- specs: A list of spec from which the flag set is created.
- flag_set: The current flag set being mutated
- mutation_rate: What fraction of genes to mutate.
+ Args:
+ specs: A list of spec from which the flag set is created.
+ flag_set: The current flag set being mutated
+ mutation_rate: What fraction of genes to mutate.
- Returns:
- A Genetic Task constructed by randomly mutating the input flag set.
- """
+ Returns:
+ A Genetic Task constructed by randomly mutating the input flag set.
+ """
- results_flags = []
+ results_flags = []
- for spec in specs:
- # Randomly choose whether this flag should be mutated.
- if random.randint(0, int(1 / mutation_rate)):
- continue
+ for spec in specs:
+ # Randomly choose whether this flag should be mutated.
+ if random.randint(0, int(1 / mutation_rate)):
+ continue
- # If the flag is not already in the flag set, it is added.
- if spec not in flag_set:
- results_flags.append(Flag(spec))
- continue
+ # If the flag is not already in the flag set, it is added.
+ if spec not in flag_set:
+ results_flags.append(Flag(spec))
+ continue
- # If the flag is already in the flag set, it is mutated.
- numeric_flag_match = flags.Search(spec)
+ # If the flag is already in the flag set, it is mutated.
+ numeric_flag_match = flags.Search(spec)
- # The value of a numeric flag will be changed, and a boolean flag will be
- # dropped.
- if not numeric_flag_match:
- continue
+ # The value of a numeric flag will be changed, and a boolean flag will be
+ # dropped.
+ if not numeric_flag_match:
+ continue
- value = flag_set[spec].GetValue()
+ value = flag_set[spec].GetValue()
- # Randomly select a nearby value of the current value of the flag.
- rand_arr = [value]
- if value + 1 < int(numeric_flag_match.group('end')):
- rand_arr.append(value + 1)
+ # Randomly select a nearby value of the current value of the flag.
+ rand_arr = [value]
+ if value + 1 < int(numeric_flag_match.group("end")):
+ rand_arr.append(value + 1)
- rand_arr.append(value - 1)
- value = random.sample(rand_arr, 1)[0]
+ rand_arr.append(value - 1)
+ value = random.sample(rand_arr, 1)[0]
- # If the value is smaller than the start of the spec, this flag will be
- # dropped.
- if value != int(numeric_flag_match.group('start')) - 1:
- results_flags.append(Flag(spec, value))
+ # If the value is smaller than the start of the spec, this flag will be
+ # dropped.
+ if value != int(numeric_flag_match.group("start")) - 1:
+ results_flags.append(Flag(spec, value))
- return GATask(FlagSet(results_flags))
+ return GATask(FlagSet(results_flags))
class GATask(Task):
+ def __init__(self, flag_set):
+ Task.__init__(self, flag_set)
- def __init__(self, flag_set):
- Task.__init__(self, flag_set)
+ def ReproduceWith(self, other, specs, mutation_rate):
+ """Reproduce with other FlagSet.
- def ReproduceWith(self, other, specs, mutation_rate):
- """Reproduce with other FlagSet.
+ Args:
+ other: A FlagSet to reproduce with.
+ specs: A list of spec from which the flag set is created.
+ mutation_rate: one in mutation_rate flags will be mutated (replaced by a
+ random version of the same flag, instead of one from either of the
+ parents). Set to 0 to disable mutation.
- Args:
- other: A FlagSet to reproduce with.
- specs: A list of spec from which the flag set is created.
- mutation_rate: one in mutation_rate flags will be mutated (replaced by a
- random version of the same flag, instead of one from either of the
- parents). Set to 0 to disable mutation.
+ Returns:
+ A GA task made by mixing self with other.
+ """
- Returns:
- A GA task made by mixing self with other.
- """
+ # Get the flag dictionary.
+ father_flags = self.GetFlags().GetFlags()
+ mother_flags = other.GetFlags().GetFlags()
- # Get the flag dictionary.
- father_flags = self.GetFlags().GetFlags()
- mother_flags = other.GetFlags().GetFlags()
+ # Flags that are common in both parents and flags that belong to only one
+ # parent.
+ self_flags = []
+ other_flags = []
+ common_flags = []
- # Flags that are common in both parents and flags that belong to only one
- # parent.
- self_flags = []
- other_flags = []
- common_flags = []
+        # Find out flags that are common to both parents and flags that belong
+        # solely to one parent.
+ for self_flag in father_flags:
+ if self_flag in mother_flags:
+ common_flags.append(self_flag)
+ else:
+ self_flags.append(self_flag)
- # Find out flags that are common to both parent and flags that belong soly
- # to one parent.
- for self_flag in father_flags:
- if self_flag in mother_flags:
- common_flags.append(self_flag)
- else:
- self_flags.append(self_flag)
+ for other_flag in mother_flags:
+ if other_flag not in father_flags:
+ other_flags.append(other_flag)
- for other_flag in mother_flags:
- if other_flag not in father_flags:
- other_flags.append(other_flag)
+ # Randomly select flags that belong to only one parent.
+ output_flags = [
+ father_flags[f] for f in self_flags if random.randint(0, 1)
+ ]
+ others = [mother_flags[f] for f in other_flags if random.randint(0, 1)]
+ output_flags.extend(others)
+ # Turn on flags that belong to both parent. Randomly choose the value of the
+ # flag from either parent.
+ for flag in common_flags:
+ output_flags.append(
+ CrossoverWith(father_flags[flag], mother_flags[flag])
+ )
- # Randomly select flags that belong to only one parent.
- output_flags = [father_flags[f] for f in self_flags if random.randint(0, 1)]
- others = [mother_flags[f] for f in other_flags if random.randint(0, 1)]
- output_flags.extend(others)
- # Turn on flags that belong to both parent. Randomly choose the value of the
- # flag from either parent.
- for flag in common_flags:
- output_flags.append(CrossoverWith(father_flags[flag], mother_flags[flag]))
+ # Mutate flags
+ if mutation_rate:
+ return RandomMutate(specs, FlagSet(output_flags), mutation_rate)
- # Mutate flags
- if mutation_rate:
- return RandomMutate(specs, FlagSet(output_flags), mutation_rate)
-
- return GATask(FlagSet(output_flags))
+ return GATask(FlagSet(output_flags))
class GAGeneration(Generation):
- """The Genetic Algorithm."""
+ """The Genetic Algorithm."""
- # The value checks whether the algorithm has converged and arrives at a fixed
- # point. If STOP_THRESHOLD of generations have not seen any performance
- # improvement, the Genetic Algorithm stops.
- STOP_THRESHOLD = None
+ # The value checks whether the algorithm has converged and arrives at a fixed
+ # point. If STOP_THRESHOLD of generations have not seen any performance
+ # improvement, the Genetic Algorithm stops.
+ STOP_THRESHOLD = None
- # Number of tasks in each generation.
- NUM_CHROMOSOMES = None
+ # Number of tasks in each generation.
+ NUM_CHROMOSOMES = None
- # The value checks whether the algorithm has converged and arrives at a fixed
- # point. If NUM_TRIALS of trials have been attempted to generate a new task
- # without a success, the Genetic Algorithm stops.
- NUM_TRIALS = None
+ # The value checks whether the algorithm has converged and arrives at a fixed
+ # point. If NUM_TRIALS of trials have been attempted to generate a new task
+ # without a success, the Genetic Algorithm stops.
+ NUM_TRIALS = None
- # The flags that can be used to generate new tasks.
- SPECS = None
+ # The flags that can be used to generate new tasks.
+ SPECS = None
- # What fraction of genes to mutate.
- MUTATION_RATE = 0
+ # What fraction of genes to mutate.
+ MUTATION_RATE = 0
- @staticmethod
- def InitMetaData(stop_threshold, num_chromosomes, num_trials, specs,
- mutation_rate):
- """Set up the meta data for the Genetic Algorithm.
+ @staticmethod
+ def InitMetaData(
+ stop_threshold, num_chromosomes, num_trials, specs, mutation_rate
+ ):
+ """Set up the meta data for the Genetic Algorithm.
- Args:
- stop_threshold: The number of generations, upon which no performance has
- seen, the Genetic Algorithm stops.
- num_chromosomes: Number of tasks in each generation.
- num_trials: The number of trials, upon which new task has been tried to
- generated without success, the Genetic Algorithm stops.
- specs: The flags that can be used to generate new tasks.
- mutation_rate: What fraction of genes to mutate.
- """
+ Args:
+ stop_threshold: The number of generations, upon which no performance has
+ seen, the Genetic Algorithm stops.
+ num_chromosomes: Number of tasks in each generation.
+ num_trials: The number of trials, upon which new task has been tried to
+ generated without success, the Genetic Algorithm stops.
+ specs: The flags that can be used to generate new tasks.
+ mutation_rate: What fraction of genes to mutate.
+ """
- GAGeneration.STOP_THRESHOLD = stop_threshold
- GAGeneration.NUM_CHROMOSOMES = num_chromosomes
- GAGeneration.NUM_TRIALS = num_trials
- GAGeneration.SPECS = specs
- GAGeneration.MUTATION_RATE = mutation_rate
+ GAGeneration.STOP_THRESHOLD = stop_threshold
+ GAGeneration.NUM_CHROMOSOMES = num_chromosomes
+ GAGeneration.NUM_TRIALS = num_trials
+ GAGeneration.SPECS = specs
+ GAGeneration.MUTATION_RATE = mutation_rate
- def __init__(self, tasks, parents, total_stucks):
- """Set up the meta data for the Genetic Algorithm.
+ def __init__(self, tasks, parents, total_stucks):
+ """Set up the meta data for the Genetic Algorithm.
- Args:
- tasks: A set of tasks to be run.
- parents: A set of tasks from which this new generation is produced. This
- set also contains the best tasks generated so far.
- total_stucks: The number of generations that have not seen improvement.
- The Genetic Algorithm will stop once the total_stucks equals to
- NUM_TRIALS defined in the GAGeneration class.
- """
+ Args:
+ tasks: A set of tasks to be run.
+ parents: A set of tasks from which this new generation is produced. This
+ set also contains the best tasks generated so far.
+ total_stucks: The number of generations that have not seen improvement.
+ The Genetic Algorithm will stop once the total_stucks equals to
+ NUM_TRIALS defined in the GAGeneration class.
+ """
- Generation.__init__(self, tasks, parents)
- self._total_stucks = total_stucks
+ Generation.__init__(self, tasks, parents)
+ self._total_stucks = total_stucks
- def IsImproved(self):
- """True if this generation has improvement upon its parent generation."""
+ def IsImproved(self):
+ """True if this generation has improvement upon its parent generation."""
- tasks = self.Pool()
- parents = self.CandidatePool()
+ tasks = self.Pool()
+ parents = self.CandidatePool()
- # The first generate does not have parents.
- if not parents:
- return True
+        # The first generation does not have parents.
+ if not parents:
+ return True
- # Found out whether a task has improvement upon the best task in the
- # parent generation.
- best_parent = sorted(parents, key=lambda task: task.GetTestResult())[0]
- best_current = sorted(tasks, key=lambda task: task.GetTestResult())[0]
+        # Find out whether a task has improvement upon the best task in the
+        # parent generation.
+ best_parent = sorted(parents, key=lambda task: task.GetTestResult())[0]
+ best_current = sorted(tasks, key=lambda task: task.GetTestResult())[0]
- # At least one task has improvement.
- if best_current.IsImproved(best_parent):
- self._total_stucks = 0
- return True
+ # At least one task has improvement.
+ if best_current.IsImproved(best_parent):
+ self._total_stucks = 0
+ return True
- # If STOP_THRESHOLD of generations have no improvement, the algorithm stops.
- if self._total_stucks >= GAGeneration.STOP_THRESHOLD:
- return False
+ # If STOP_THRESHOLD of generations have no improvement, the algorithm stops.
+ if self._total_stucks >= GAGeneration.STOP_THRESHOLD:
+ return False
- self._total_stucks += 1
- return True
+ self._total_stucks += 1
+ return True
- def Next(self, cache):
- """Calculate the next generation.
+ def Next(self, cache):
+ """Calculate the next generation.
- Generate a new generation from the a set of tasks. This set contains the
- best set seen so far and the tasks executed in the parent generation.
+        Generate a new generation from a set of tasks. This set contains the
+ best set seen so far and the tasks executed in the parent generation.
- Args:
- cache: A set of tasks that have been generated before.
+ Args:
+ cache: A set of tasks that have been generated before.
- Returns:
- A set of new generations.
- """
+ Returns:
+ A set of new generations.
+ """
- target_len = GAGeneration.NUM_CHROMOSOMES
- specs = GAGeneration.SPECS
- mutation_rate = GAGeneration.MUTATION_RATE
+ target_len = GAGeneration.NUM_CHROMOSOMES
+ specs = GAGeneration.SPECS
+ mutation_rate = GAGeneration.MUTATION_RATE
- # Collect a set of size target_len of tasks. This set will be used to
- # produce a new generation of tasks.
- gen_tasks = [task for task in self.Pool()]
+ # Collect a set of size target_len of tasks. This set will be used to
+ # produce a new generation of tasks.
+ gen_tasks = [task for task in self.Pool()]
- parents = self.CandidatePool()
- if parents:
- gen_tasks.extend(parents)
+ parents = self.CandidatePool()
+ if parents:
+ gen_tasks.extend(parents)
- # A set of tasks that are the best. This set will be used as the parent
- # generation to produce the next generation.
- sort_func = lambda task: task.GetTestResult()
- retained_tasks = sorted(gen_tasks, key=sort_func)[:target_len]
+ # A set of tasks that are the best. This set will be used as the parent
+ # generation to produce the next generation.
+ sort_func = lambda task: task.GetTestResult()
+ retained_tasks = sorted(gen_tasks, key=sort_func)[:target_len]
- child_pool = set()
- for father in retained_tasks:
- num_trials = 0
- # Try num_trials times to produce a new child.
- while num_trials < GAGeneration.NUM_TRIALS:
- # Randomly select another parent.
- mother = random.choice(retained_tasks)
- # Cross over.
- child = mother.ReproduceWith(father, specs, mutation_rate)
- if child not in child_pool and child not in cache:
- child_pool.add(child)
- break
- else:
- num_trials += 1
+ child_pool = set()
+ for father in retained_tasks:
+ num_trials = 0
+ # Try num_trials times to produce a new child.
+ while num_trials < GAGeneration.NUM_TRIALS:
+ # Randomly select another parent.
+ mother = random.choice(retained_tasks)
+ # Cross over.
+ child = mother.ReproduceWith(father, specs, mutation_rate)
+ if child not in child_pool and child not in cache:
+ child_pool.add(child)
+ break
+ else:
+ num_trials += 1
- num_trials = 0
+ num_trials = 0
- while len(child_pool) < target_len and num_trials < GAGeneration.NUM_TRIALS:
- for keep_task in retained_tasks:
- # Mutation.
- child = RandomMutate(specs, keep_task.GetFlags(), mutation_rate)
- if child not in child_pool and child not in cache:
- child_pool.add(child)
- if len(child_pool) >= target_len:
- break
- else:
- num_trials += 1
+ while (
+ len(child_pool) < target_len
+ and num_trials < GAGeneration.NUM_TRIALS
+ ):
+ for keep_task in retained_tasks:
+ # Mutation.
+ child = RandomMutate(specs, keep_task.GetFlags(), mutation_rate)
+ if child not in child_pool and child not in cache:
+ child_pool.add(child)
+ if len(child_pool) >= target_len:
+ break
+ else:
+ num_trials += 1
- # If NUM_TRIALS of tries have been attempted without generating a set of new
- # tasks, the algorithm stops.
- if num_trials >= GAGeneration.NUM_TRIALS:
- return []
+ # If NUM_TRIALS of tries have been attempted without generating a set of new
+ # tasks, the algorithm stops.
+ if num_trials >= GAGeneration.NUM_TRIALS:
+ return []
- assert len(child_pool) == target_len
+ assert len(child_pool) == target_len
- return [GAGeneration(child_pool, set(retained_tasks), self._total_stucks)]
+ return [
+ GAGeneration(child_pool, set(retained_tasks), self._total_stucks)
+ ]
diff --git a/bestflags/hill_climb_best_neighbor.py b/bestflags/hill_climb_best_neighbor.py
index 7bb5a7f..2455dd9 100644
--- a/bestflags/hill_climb_best_neighbor.py
+++ b/bestflags/hill_climb_best_neighbor.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A variation of the hill climbing algorithm.
@@ -10,7 +10,7 @@
neighbor.
"""
-__author__ = 'yuhenglong@google.com (Yuheng Long)'
+__author__ = "yuhenglong@google.com (Yuheng Long)"
from flags import FlagSet
import flags_util
@@ -19,89 +19,92 @@
class HillClimbingBestBranch(Generation):
- """A variation of the hill climbing algorithm.
+ """A variation of the hill climbing algorithm.
- Given a task, it explores all its neighbors. Pick the best neighbor for the
- next iteration.
- """
-
- def __init__(self, exe_set, parents, specs):
- """Set up the tasks set of this generation.
-
- Args:
- exe_set: A set of tasks to be run.
- parents: A set of tasks to be used to check whether their neighbors have
- improved upon them.
- specs: A list of specs to explore. The spec specifies the flags that can
- be changed to find neighbors of a task.
+ Given a task, it explores all its neighbors. Pick the best neighbor for the
+ next iteration.
"""
- Generation.__init__(self, exe_set, parents)
- self._specs = specs
+ def __init__(self, exe_set, parents, specs):
+ """Set up the tasks set of this generation.
- # This variable will be used, by the Next method, to generate the tasks for
- # the next iteration. This self._next_task contains the best task in the
- # current iteration and it will be set by the IsImproved method. The tasks
- # of the next iteration are the neighbor of self._next_task.
- self._next_task = None
+ Args:
+ exe_set: A set of tasks to be run.
+ parents: A set of tasks to be used to check whether their neighbors have
+ improved upon them.
+ specs: A list of specs to explore. The spec specifies the flags that can
+ be changed to find neighbors of a task.
+ """
- def IsImproved(self):
- """True if this generation has improvement over its parent generation.
+ Generation.__init__(self, exe_set, parents)
+ self._specs = specs
- If this generation improves upon the previous generation, this method finds
- out the best task in this generation and sets it to _next_task for the
- method Next to use.
+ # This variable will be used, by the Next method, to generate the tasks for
+ # the next iteration. This self._next_task contains the best task in the
+ # current iteration and it will be set by the IsImproved method. The tasks
+ # of the next iteration are the neighbor of self._next_task.
+ self._next_task = None
- Returns:
- True if the best neighbor improves upon the parent task.
- """
+ def IsImproved(self):
+ """True if this generation has improvement over its parent generation.
- # Find the best neighbor.
- best_task = None
- for task in self._exe_set:
- if not best_task or task.IsImproved(best_task):
- best_task = task
+ If this generation improves upon the previous generation, this method finds
+ out the best task in this generation and sets it to _next_task for the
+ method Next to use.
- if not best_task:
- return False
+ Returns:
+ True if the best neighbor improves upon the parent task.
+ """
- # The first generation may not have parent generation.
- parents = list(self._candidate_pool)
- if parents:
- assert len(parents) == 1
- self._next_task = best_task
- # If the best neighbor improves upon the parent task.
- return best_task.IsImproved(parents[0])
+ # Find the best neighbor.
+ best_task = None
+ for task in self._exe_set:
+ if not best_task or task.IsImproved(best_task):
+ best_task = task
- self._next_task = best_task
- return True
+ if not best_task:
+ return False
- def Next(self, cache):
- """Calculate the next generation.
+ # The first generation may not have parent generation.
+ parents = list(self._candidate_pool)
+ if parents:
+ assert len(parents) == 1
+ self._next_task = best_task
+ # If the best neighbor improves upon the parent task.
+ return best_task.IsImproved(parents[0])
- The best neighbor b of the current task is the parent of the next
- generation. The neighbors of b will be the set of tasks to be evaluated
- next.
+ self._next_task = best_task
+ return True
- Args:
- cache: A set of tasks that have been generated before.
+ def Next(self, cache):
+ """Calculate the next generation.
- Returns:
- A set of new generations.
- """
+ The best neighbor b of the current task is the parent of the next
+ generation. The neighbors of b will be the set of tasks to be evaluated
+ next.
- # The best neighbor.
- current_task = self._next_task
- flag_set = current_task.GetFlags()
+ Args:
+ cache: A set of tasks that have been generated before.
- # The neighbors of the best neighbor.
- children_tasks = set([])
- for spec in self._specs:
- for next_flag in flags_util.ClimbNext(flag_set.GetFlags(), spec):
- new_task = Task(FlagSet(next_flag.values()))
+ Returns:
+ A set of new generations.
+ """
- if new_task not in cache:
- children_tasks.add(new_task)
+ # The best neighbor.
+ current_task = self._next_task
+ flag_set = current_task.GetFlags()
- return [HillClimbingBestBranch(children_tasks, set([current_task]),
- self._specs)]
+ # The neighbors of the best neighbor.
+ children_tasks = set([])
+ for spec in self._specs:
+ for next_flag in flags_util.ClimbNext(flag_set.GetFlags(), spec):
+ new_task = Task(FlagSet(next_flag.values()))
+
+ if new_task not in cache:
+ children_tasks.add(new_task)
+
+ return [
+ HillClimbingBestBranch(
+ children_tasks, set([current_task]), self._specs
+ )
+ ]
diff --git a/bestflags/iterative_elimination.py b/bestflags/iterative_elimination.py
index 2f4c41d..8d54860 100644
--- a/bestflags/iterative_elimination.py
+++ b/bestflags/iterative_elimination.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Iterative flags elimination.
@@ -24,7 +24,7 @@
fitness value.
"""
-__author__ = 'yuhenglong@google.com (Yuheng Long)'
+__author__ = "yuhenglong@google.com (Yuheng Long)"
import flags
from generation import Generation
@@ -32,146 +32,148 @@
def _DecreaseFlag(flags_dict, spec):
- """Decrease the value of the flag that has the specification spec.
+ """Decrease the value of the flag that has the specification spec.
- If the flag that contains the spec is a boolean flag, it is eliminated.
- Otherwise the flag is a numeric flag, its value will be reduced by one.
+ If the flag that contains the spec is a boolean flag, it is eliminated.
+ Otherwise the flag is a numeric flag, its value will be reduced by one.
- Args:
- flags_dict: The dictionary containing the original flags whose neighbors are
- to be explored.
- spec: The spec in the flags_dict is to be changed.
+ Args:
+ flags_dict: The dictionary containing the original flags whose neighbors are
+ to be explored.
+ spec: The spec in the flags_dict is to be changed.
- Returns:
- Dictionary of neighbor flag that is only different from the original
- dictionary by the spec.
- """
+ Returns:
+ Dictionary of neighbor flag that is only different from the original
+ dictionary by the spec.
+ """
- # The specification must be held by one of the flags.
- assert spec in flags_dict
+ # The specification must be held by one of the flags.
+ assert spec in flags_dict
- # The results this method returns.
- results = flags_dict.copy()
+ # The results this method returns.
+ results = flags_dict.copy()
- # This method searches for a pattern [start-end] in the spec. If the spec
- # contains this pattern, it is a numeric flag. Otherwise it is a boolean flag.
- # For example, -finline-limit=[1-1000] is a numeric flag and -falign-jumps is
- # a boolean flag.
- numeric_flag_match = flags.Search(spec)
+ # This method searches for a pattern [start-end] in the spec. If the spec
+ # contains this pattern, it is a numeric flag. Otherwise it is a boolean flag.
+ # For example, -finline-limit=[1-1000] is a numeric flag and -falign-jumps is
+ # a boolean flag.
+ numeric_flag_match = flags.Search(spec)
- if numeric_flag_match:
- # numeric flag
- val = results[spec].GetValue()
+ if numeric_flag_match:
+ # numeric flag
+ val = results[spec].GetValue()
- # If the value of the flag is the lower boundary of the specification, this
- # flag will be turned off. Because it already contains the lowest value and
- # can not be decreased any more.
- if val == int(numeric_flag_match.group('start')):
- # Turn off the flag. A flag is turned off if it is not presented in the
- # flags_dict.
- del results[spec]
+ # If the value of the flag is the lower boundary of the specification, this
+ # flag will be turned off. Because it already contains the lowest value and
+ # can not be decreased any more.
+ if val == int(numeric_flag_match.group("start")):
+ # Turn off the flag. A flag is turned off if it is not presented in the
+ # flags_dict.
+ del results[spec]
+ else:
+ results[spec] = flags.Flag(spec, val - 1)
else:
- results[spec] = flags.Flag(spec, val - 1)
- else:
- # Turn off the flag. A flag is turned off if it is not presented in the
- # flags_dict.
- del results[spec]
+ # Turn off the flag. A flag is turned off if it is not presented in the
+ # flags_dict.
+ del results[spec]
- return results
+ return results
class IterativeEliminationGeneration(Generation):
- """The negative flag iterative elimination algorithm."""
+ """The negative flag iterative elimination algorithm."""
- def __init__(self, exe_set, parent_task):
- """Set up the base line parent task.
+ def __init__(self, exe_set, parent_task):
+ """Set up the base line parent task.
- The parent task is the base line against which the new tasks are compared.
- The new tasks are only different from the base line from one flag f by
- either turning this flag f off, or lower the flag value by 1.
- If a new task is better than the base line, one flag is identified that
- gives degradation. The flag that give the worst degradation will be removed
- or lower the value by 1 in the base in each iteration.
+ The parent task is the base line against which the new tasks are compared.
+ The new tasks are only different from the base line from one flag f by
+ either turning this flag f off, or lower the flag value by 1.
+ If a new task is better than the base line, one flag is identified that
+        gives degradation. The flag that gives the worst degradation will be removed
+ or lower the value by 1 in the base in each iteration.
- Args:
- exe_set: A set of tasks to be run. Each one only differs from the
- parent_task by one flag.
- parent_task: The base line task, against which the new tasks in exe_set
- are compared.
- """
+ Args:
+ exe_set: A set of tasks to be run. Each one only differs from the
+ parent_task by one flag.
+ parent_task: The base line task, against which the new tasks in exe_set
+ are compared.
+ """
- Generation.__init__(self, exe_set, None)
- self._parent_task = parent_task
+ Generation.__init__(self, exe_set, None)
+ self._parent_task = parent_task
- def IsImproved(self):
- """Whether any new task has improvement upon the parent task."""
+ def IsImproved(self):
+ """Whether any new task has improvement upon the parent task."""
- parent = self._parent_task
- # Whether there is any new task that has improvement over the parent base
- # line task.
- for curr in [curr for curr in self.Pool() if curr != parent]:
- if curr.IsImproved(parent):
- return True
+ parent = self._parent_task
+ # Whether there is any new task that has improvement over the parent base
+ # line task.
+ for curr in [curr for curr in self.Pool() if curr != parent]:
+ if curr.IsImproved(parent):
+ return True
- return False
+ return False
- def Next(self, cache):
- """Find out the flag that gives the worst degradation.
+ def Next(self, cache):
+ """Find out the flag that gives the worst degradation.
- Found out the flag that gives the worst degradation. Turn that flag off from
- the base line and use the new base line for the new generation.
+ Found out the flag that gives the worst degradation. Turn that flag off from
+ the base line and use the new base line for the new generation.
- Args:
- cache: A set of tasks that have been generated before.
+ Args:
+ cache: A set of tasks that have been generated before.
- Returns:
- A set of new generations.
- """
- parent_task = self._parent_task
+ Returns:
+ A set of new generations.
+ """
+ parent_task = self._parent_task
- # Find out the task that gives the worst degradation.
- worst_task = parent_task
+ # Find out the task that gives the worst degradation.
+ worst_task = parent_task
- for curr in [curr for curr in self.Pool() if curr != parent_task]:
- # The method IsImproved, which is supposed to be called before, ensures
- # that there is at least a task that improves upon the parent_task.
- if curr.IsImproved(worst_task):
- worst_task = curr
+ for curr in [curr for curr in self.Pool() if curr != parent_task]:
+ # The method IsImproved, which is supposed to be called before, ensures
+ # that there is at least a task that improves upon the parent_task.
+ if curr.IsImproved(worst_task):
+ worst_task = curr
- assert worst_task != parent_task
+ assert worst_task != parent_task
- # The flags_set of the worst task.
- work_flags_set = worst_task.GetFlags().GetFlags()
+ # The flags_set of the worst task.
+ work_flags_set = worst_task.GetFlags().GetFlags()
- results = set([])
+ results = set([])
- # If the flags_set contains no flag, i.e., all the flags have been
- # eliminated, the algorithm stops.
- if not work_flags_set:
- return []
+ # If the flags_set contains no flag, i.e., all the flags have been
+ # eliminated, the algorithm stops.
+ if not work_flags_set:
+ return []
- # Turn of the remaining flags one by one for the next generation.
- for spec in work_flags_set:
- flag_set = flags.FlagSet(_DecreaseFlag(work_flags_set, spec).values())
- new_task = task.Task(flag_set)
- if new_task not in cache:
- results.add(new_task)
+        # Turn off the remaining flags one by one for the next generation.
+ for spec in work_flags_set:
+ flag_set = flags.FlagSet(
+ _DecreaseFlag(work_flags_set, spec).values()
+ )
+ new_task = task.Task(flag_set)
+ if new_task not in cache:
+ results.add(new_task)
- return [IterativeEliminationGeneration(results, worst_task)]
+ return [IterativeEliminationGeneration(results, worst_task)]
class IterativeEliminationFirstGeneration(IterativeEliminationGeneration):
- """The first iteration of the iterative elimination algorithm.
+ """The first iteration of the iterative elimination algorithm.
- The first iteration also evaluates the base line task. The base line tasks in
- the subsequent iterations have been evaluated. Therefore,
- IterativeEliminationGeneration does not include the base line task in the
- execution set.
- """
+ The first iteration also evaluates the base line task. The base line tasks in
+ the subsequent iterations have been evaluated. Therefore,
+ IterativeEliminationGeneration does not include the base line task in the
+ execution set.
+ """
- def IsImproved(self):
- # Find out the base line task in the execution set.
- parent = next(task for task in self.Pool() if task == self._parent_task)
- self._parent_task = parent
+ def IsImproved(self):
+ # Find out the base line task in the execution set.
+ parent = next(task for task in self.Pool() if task == self._parent_task)
+ self._parent_task = parent
- return IterativeEliminationGeneration.IsImproved(self)
+ return IterativeEliminationGeneration.IsImproved(self)
diff --git a/bestflags/mock_task.py b/bestflags/mock_task.py
index 6de2b35..e25daeb 100644
--- a/bestflags/mock_task.py
+++ b/bestflags/mock_task.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module defines the common mock tasks used by various unit tests.
@@ -6,87 +6,88 @@
Part of the Chrome build flags optimization.
"""
-__author__ = 'yuhenglong@google.com (Yuheng Long)'
+__author__ = "yuhenglong@google.com (Yuheng Long)"
# Pick an integer at random.
POISONPILL = 975
class MockTask(object):
- """This class emulates an actual task.
+ """This class emulates an actual task.
- It does not do the actual work, but simply returns the result as given when
- this task is constructed.
- """
-
- def __init__(self, stage, identifier, cost=0):
- """Set up the results for this task.
-
- Args:
- stage: the stage of this test is in.
- identifier: the identifier of this task.
- cost: the mock cost of this task.
-
- The _cost field stored the cost. Once this task is performed, i.e., by
- calling the work method or by setting the result from other task, the
- _cost field will have this cost. The stage field verifies that the module
- being tested and the unitest are in the same stage. If the unitest does
- not care about cost of this task, the cost parameter should be leaved
- blank.
+ It does not do the actual work, but simply returns the result as given when
+ this task is constructed.
"""
- self._identifier = identifier
- self._cost = cost
- self._stage = stage
+ def __init__(self, stage, identifier, cost=0):
+ """Set up the results for this task.
- # Indicate that this method has not been performed yet.
- self._performed = False
+ Args:
+ stage: the stage of this test is in.
+ identifier: the identifier of this task.
+ cost: the mock cost of this task.
- def __eq__(self, other):
- if isinstance(other, MockTask):
- return (self._identifier == other.GetIdentifier(self._stage) and
- self._cost == other.GetResult(self._stage))
- return False
+ The _cost field stored the cost. Once this task is performed, i.e., by
+ calling the work method or by setting the result from other task, the
+ _cost field will have this cost. The stage field verifies that the module
+        being tested and the unit test are in the same stage. If the unit test does
+        not care about the cost of this task, the cost parameter should be left
+ blank.
+ """
- def GetIdentifier(self, stage):
- assert stage == self._stage
- return self._identifier
+ self._identifier = identifier
+ self._cost = cost
+ self._stage = stage
- def SetResult(self, stage, cost):
- assert stage == self._stage
- self._cost = cost
- self._performed = True
+ # Indicate that this method has not been performed yet.
+ self._performed = False
- def Work(self, stage):
- assert stage == self._stage
- self._performed = True
+ def __eq__(self, other):
+ if isinstance(other, MockTask):
+ return self._identifier == other.GetIdentifier(
+ self._stage
+ ) and self._cost == other.GetResult(self._stage)
+ return False
- def GetResult(self, stage):
- assert stage == self._stage
- return self._cost
+ def GetIdentifier(self, stage):
+ assert stage == self._stage
+ return self._identifier
- def Done(self, stage):
- """Indicates whether the task has been performed."""
+ def SetResult(self, stage, cost):
+ assert stage == self._stage
+ self._cost = cost
+ self._performed = True
- assert stage == self._stage
- return self._performed
+ def Work(self, stage):
+ assert stage == self._stage
+ self._performed = True
- def LogSteeringCost(self):
- pass
+ def GetResult(self, stage):
+ assert stage == self._stage
+ return self._cost
+
+ def Done(self, stage):
+ """Indicates whether the task has been performed."""
+
+ assert stage == self._stage
+ return self._performed
+
+ def LogSteeringCost(self):
+ pass
class IdentifierMockTask(MockTask):
- """This class defines the mock task that does not consider the cost.
+ """This class defines the mock task that does not consider the cost.
- The task instances will be inserted into a set. Therefore the hash and the
- equal methods are overridden. The unittests that compares identities of the
- tasks for equality can use this mock task instead of the base mock tack.
- """
+ The task instances will be inserted into a set. Therefore the hash and the
+ equal methods are overridden. The unittests that compares identities of the
+    tasks for equality can use this mock task instead of the base mock task.
+ """
- def __hash__(self):
- return self._identifier
+ def __hash__(self):
+ return self._identifier
- def __eq__(self, other):
- if isinstance(other, MockTask):
- return self._identifier == other.GetIdentifier(self._stage)
- return False
+ def __eq__(self, other):
+ if isinstance(other, MockTask):
+ return self._identifier == other.GetIdentifier(self._stage)
+ return False
diff --git a/bestflags/pipeline_process.py b/bestflags/pipeline_process.py
index 31f5f21..3aab96f 100644
--- a/bestflags/pipeline_process.py
+++ b/bestflags/pipeline_process.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pipeline process that encapsulates the actual content.
@@ -8,116 +8,138 @@
The actual stages include the builder and the executor.
"""
-__author__ = 'yuhenglong@google.com (Yuheng Long)'
+__author__ = "yuhenglong@google.com (Yuheng Long)"
import multiprocessing
+
# Pick an integer at random.
POISONPILL = 975
class PipelineProcess(multiprocessing.Process):
- """A process that encapsulates the actual content pipeline stage.
+ """A process that encapsulates the actual content pipeline stage.
- The actual pipeline stage can be the builder or the tester. This process
- continuously pull tasks from the queue until a poison pill is received.
- Once a job is received, it will hand it to the actual stage for processing.
+ The actual pipeline stage can be the builder or the tester. This process
+ continuously pull tasks from the queue until a poison pill is received.
+ Once a job is received, it will hand it to the actual stage for processing.
- Each pipeline stage contains three modules.
- The first module continuously pulls task from the input queue. It searches the
- cache to check whether the task has encountered before. If so, duplicate
- computation can be avoided.
- The second module consists of a pool of workers that do the actual work, e.g.,
- the worker will compile the source code and get the image in the builder
- pipeline stage.
- The third module is a helper that put the result cost to the cost field of the
- duplicate tasks. For example, if two tasks are equivalent, only one task, say
- t1 will be executed and the other task, say t2 will not be executed. The third
- mode gets the result from t1, when it is available and set the cost of t2 to
- be the same as that of t1.
- """
-
- def __init__(self, num_processes, name, cache, stage, task_queue, helper,
- worker, result_queue):
- """Set up input/output queue and the actual method to be called.
-
- Args:
- num_processes: Number of helpers subprocessors this stage has.
- name: The name of this stage.
- cache: The computed tasks encountered before.
- stage: An int value that specifies the stage for this pipeline stage, for
- example, build stage or test stage. This value will be used to retrieve
- the keys in different stage. I.e., the flags set is the key in build
- stage and the checksum is the key in the test stage. The key is used to
- detect duplicates.
- task_queue: The input task queue for this pipeline stage.
- helper: The method hosted by the helper module to fill up the cost of the
- duplicate tasks.
- worker: The method hosted by the worker pools to do the actual work, e.g.,
- compile the image.
- result_queue: The output task queue for this pipeline stage.
+ Each pipeline stage contains three modules.
+    The first module continuously pulls tasks from the input queue. It searches the
+    cache to check whether the task has been encountered before. If so, duplicate
+ computation can be avoided.
+ The second module consists of a pool of workers that do the actual work, e.g.,
+ the worker will compile the source code and get the image in the builder
+ pipeline stage.
+ The third module is a helper that put the result cost to the cost field of the
+ duplicate tasks. For example, if two tasks are equivalent, only one task, say
+ t1 will be executed and the other task, say t2 will not be executed. The third
+ mode gets the result from t1, when it is available and set the cost of t2 to
+ be the same as that of t1.
"""
- multiprocessing.Process.__init__(self)
+ def __init__(
+ self,
+ num_processes,
+ name,
+ cache,
+ stage,
+ task_queue,
+ helper,
+ worker,
+ result_queue,
+ ):
+ """Set up input/output queue and the actual method to be called.
- self._name = name
- self._task_queue = task_queue
- self._result_queue = result_queue
+ Args:
+ num_processes: Number of helpers subprocessors this stage has.
+ name: The name of this stage.
+ cache: The computed tasks encountered before.
+ stage: An int value that specifies the stage for this pipeline stage, for
+ example, build stage or test stage. This value will be used to retrieve
+ the keys in different stage. I.e., the flags set is the key in build
+ stage and the checksum is the key in the test stage. The key is used to
+ detect duplicates.
+ task_queue: The input task queue for this pipeline stage.
+ helper: The method hosted by the helper module to fill up the cost of the
+ duplicate tasks.
+ worker: The method hosted by the worker pools to do the actual work, e.g.,
+ compile the image.
+ result_queue: The output task queue for this pipeline stage.
+ """
- self._helper = helper
- self._worker = worker
+ multiprocessing.Process.__init__(self)
- self._cache = cache
- self._stage = stage
- self._num_processes = num_processes
+ self._name = name
+ self._task_queue = task_queue
+ self._result_queue = result_queue
- # the queues used by the modules for communication
- manager = multiprocessing.Manager()
- self._helper_queue = manager.Queue()
- self._work_queue = manager.Queue()
+ self._helper = helper
+ self._worker = worker
- def run(self):
- """Busy pulling the next task from the queue for execution.
+ self._cache = cache
+ self._stage = stage
+ self._num_processes = num_processes
- Once a job is pulled, this stage invokes the actual stage method and submits
- the result to the next pipeline stage.
+ # the queues used by the modules for communication
+ manager = multiprocessing.Manager()
+ self._helper_queue = manager.Queue()
+ self._work_queue = manager.Queue()
- The process will terminate on receiving the poison pill from previous stage.
- """
+ def run(self):
+ """Busy pulling the next task from the queue for execution.
- # the worker pool
- work_pool = multiprocessing.Pool(self._num_processes)
+ Once a job is pulled, this stage invokes the actual stage method and submits
+ the result to the next pipeline stage.
- # the helper process
- helper_process = multiprocessing.Process(
- target=self._helper,
- args=(self._stage, self._cache, self._helper_queue, self._work_queue,
- self._result_queue))
- helper_process.start()
- mycache = self._cache.keys()
+ The process will terminate on receiving the poison pill from previous stage.
+ """
- while True:
- task = self._task_queue.get()
- if task == POISONPILL:
- # Poison pill means shutdown
- self._result_queue.put(POISONPILL)
- break
+ # the worker pool
+ work_pool = multiprocessing.Pool(self._num_processes)
- task_key = task.GetIdentifier(self._stage)
- if task_key in mycache:
- # The task has been encountered before. It will be sent to the helper
- # module for further processing.
- self._helper_queue.put(task)
- else:
- # Let the workers do the actual work.
- work_pool.apply_async(
- self._worker,
- args=(self._stage, task, self._work_queue, self._result_queue))
- mycache.append(task_key)
+ # the helper process
+ helper_process = multiprocessing.Process(
+ target=self._helper,
+ args=(
+ self._stage,
+ self._cache,
+ self._helper_queue,
+ self._work_queue,
+ self._result_queue,
+ ),
+ )
+ helper_process.start()
+ mycache = self._cache.keys()
- # Shutdown the workers pool and the helper process.
- work_pool.close()
- work_pool.join()
+ while True:
+ task = self._task_queue.get()
+ if task == POISONPILL:
+ # Poison pill means shutdown
+ self._result_queue.put(POISONPILL)
+ break
- self._helper_queue.put(POISONPILL)
- helper_process.join()
+ task_key = task.GetIdentifier(self._stage)
+ if task_key in mycache:
+ # The task has been encountered before. It will be sent to the helper
+ # module for further processing.
+ self._helper_queue.put(task)
+ else:
+ # Let the workers do the actual work.
+ work_pool.apply_async(
+ self._worker,
+ args=(
+ self._stage,
+ task,
+ self._work_queue,
+ self._result_queue,
+ ),
+ )
+ mycache.append(task_key)
+
+ # Shutdown the workers pool and the helper process.
+ work_pool.close()
+ work_pool.join()
+
+ self._helper_queue.put(POISONPILL)
+ helper_process.join()
diff --git a/bestflags/pipeline_process_test.py b/bestflags/pipeline_process_test.py
index b9d8406..04e641e 100644
--- a/bestflags/pipeline_process_test.py
+++ b/bestflags/pipeline_process_test.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pipeline Process unittest.
@@ -6,7 +6,7 @@
Part of the Chrome build flags optimization.
"""
-__author__ = 'yuhenglong@google.com (Yuheng Long)'
+__author__ = "yuhenglong@google.com (Yuheng Long)"
import multiprocessing
import unittest
@@ -14,6 +14,7 @@
from mock_task import MockTask
import pipeline_process
+
# Pick an integer at random.
ERROR = -334
# Pick an integer at random.
@@ -21,69 +22,74 @@
def MockHelper(stage, done_dict, helper_queue, _, result_queue):
- """This method echos input to the output."""
+    """This method echoes input to the output."""
- assert stage == TEST_STAGE
- while True:
- if not helper_queue.empty():
- task = helper_queue.get()
- if task == pipeline_process.POISONPILL:
- # Poison pill means shutdown
- break
+ assert stage == TEST_STAGE
+ while True:
+ if not helper_queue.empty():
+ task = helper_queue.get()
+ if task == pipeline_process.POISONPILL:
+ # Poison pill means shutdown
+ break
- if task in done_dict:
- # verify that it does not get duplicate "1"s in the test.
- result_queue.put(ERROR)
- else:
- result_queue.put(('helper', task.GetIdentifier(TEST_STAGE)))
+ if task in done_dict:
+ # verify that it does not get duplicate "1"s in the test.
+ result_queue.put(ERROR)
+ else:
+ result_queue.put(("helper", task.GetIdentifier(TEST_STAGE)))
def MockWorker(stage, task, _, result_queue):
- assert stage == TEST_STAGE
- result_queue.put(('worker', task.GetIdentifier(TEST_STAGE)))
+ assert stage == TEST_STAGE
+ result_queue.put(("worker", task.GetIdentifier(TEST_STAGE)))
class PipelineProcessTest(unittest.TestCase):
- """This class test the PipelineProcess.
+ """This class test the PipelineProcess.
- All the task inserted into the input queue should be taken out and hand to the
- actual pipeline handler, except for the POISON_PILL. All these task should
- also be passed to the next pipeline stage via the output queue.
- """
-
- def testRun(self):
- """Test the run method.
-
- Ensure that all the tasks inserted into the queue are properly handled.
+    All the tasks inserted into the input queue should be taken out and handed to the
+    actual pipeline handler, except for the POISON_PILL. All these tasks should
+ also be passed to the next pipeline stage via the output queue.
"""
- manager = multiprocessing.Manager()
- inp = manager.Queue()
- output = manager.Queue()
+ def testRun(self):
+ """Test the run method.
- process = pipeline_process.PipelineProcess(
- 2, 'testing', {}, TEST_STAGE, inp, MockHelper, MockWorker, output)
+ Ensure that all the tasks inserted into the queue are properly handled.
+ """
- process.start()
- inp.put(MockTask(TEST_STAGE, 1))
- inp.put(MockTask(TEST_STAGE, 1))
- inp.put(MockTask(TEST_STAGE, 2))
- inp.put(pipeline_process.POISONPILL)
- process.join()
+ manager = multiprocessing.Manager()
+ inp = manager.Queue()
+ output = manager.Queue()
- # All tasks are processed once and only once.
- result = [('worker', 1), ('helper', 1), ('worker', 2),
- pipeline_process.POISONPILL]
- while result:
- task = output.get()
+ process = pipeline_process.PipelineProcess(
+ 2, "testing", {}, TEST_STAGE, inp, MockHelper, MockWorker, output
+ )
- # One "1"s is passed to the worker and one to the helper.
- self.assertNotEqual(task, ERROR)
+ process.start()
+ inp.put(MockTask(TEST_STAGE, 1))
+ inp.put(MockTask(TEST_STAGE, 1))
+ inp.put(MockTask(TEST_STAGE, 2))
+ inp.put(pipeline_process.POISONPILL)
+ process.join()
- # The messages received should be exactly the same as the result.
- self.assertTrue(task in result)
- result.remove(task)
+ # All tasks are processed once and only once.
+ result = [
+ ("worker", 1),
+ ("helper", 1),
+ ("worker", 2),
+ pipeline_process.POISONPILL,
+ ]
+ while result:
+ task = output.get()
+
+ # One "1"s is passed to the worker and one to the helper.
+ self.assertNotEqual(task, ERROR)
+
+ # The messages received should be exactly the same as the result.
+ self.assertTrue(task in result)
+ result.remove(task)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/bestflags/pipeline_worker.py b/bestflags/pipeline_worker.py
index e21ec2c..f18be66 100644
--- a/bestflags/pipeline_worker.py
+++ b/bestflags/pipeline_worker.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The pipeline_worker functions of the build and test stage of the framework.
@@ -13,130 +13,135 @@
The worker invokes the work method of the tasks that are not duplicate.
"""
-__author__ = 'yuhenglong@google.com (Yuheng Long)'
+__author__ = "yuhenglong@google.com (Yuheng Long)"
import pipeline_process
def Helper(stage, done_dict, helper_queue, completed_queue, result_queue):
- """Helper that filters duplicate tasks.
+ """Helper that filters duplicate tasks.
- This method Continuously pulls duplicate tasks from the helper_queue. The
- duplicate tasks need not be compiled/tested. This method also pulls completed
- tasks from the worker queue and let the results of the duplicate tasks be the
- same as their corresponding finished task.
+ This method Continuously pulls duplicate tasks from the helper_queue. The
+ duplicate tasks need not be compiled/tested. This method also pulls completed
+ tasks from the worker queue and let the results of the duplicate tasks be the
+ same as their corresponding finished task.
- Args:
- stage: The current stage of the pipeline, for example, build stage or test
- stage.
- done_dict: A dictionary of tasks that are done. The key of the dictionary is
- the identifier of the task. The value of the dictionary is the results of
- performing the corresponding task.
- helper_queue: A queue of duplicate tasks whose results need to be resolved.
- This is a communication channel between the pipeline_process and this
- helper process.
- completed_queue: A queue of tasks that have been built/tested. The results
- of these tasks are needed to resolve the results of the duplicate tasks.
- This is the communication channel between the workers and this helper
- process.
- result_queue: After the results of the duplicate tasks have been resolved,
- the duplicate tasks will be sent to the next stage via this queue.
- """
+ Args:
+ stage: The current stage of the pipeline, for example, build stage or test
+ stage.
+ done_dict: A dictionary of tasks that are done. The key of the dictionary is
+ the identifier of the task. The value of the dictionary is the results of
+ performing the corresponding task.
+ helper_queue: A queue of duplicate tasks whose results need to be resolved.
+ This is a communication channel between the pipeline_process and this
+ helper process.
+ completed_queue: A queue of tasks that have been built/tested. The results
+ of these tasks are needed to resolve the results of the duplicate tasks.
+ This is the communication channel between the workers and this helper
+ process.
+ result_queue: After the results of the duplicate tasks have been resolved,
+ the duplicate tasks will be sent to the next stage via this queue.
+ """
- # The list of duplicate tasks, the results of which need to be resolved.
- waiting_list = []
+ # The list of duplicate tasks, the results of which need to be resolved.
+ waiting_list = []
- while True:
- # Pull duplicate task from the helper queue.
- if not helper_queue.empty():
- task = helper_queue.get()
+ while True:
+ # Pull duplicate task from the helper queue.
+ if not helper_queue.empty():
+ task = helper_queue.get()
- if task == pipeline_process.POISONPILL:
- # Poison pill means no more duplicate task from the helper queue.
- break
+ if task == pipeline_process.POISONPILL:
+ # Poison pill means no more duplicate task from the helper queue.
+ break
- # The task has not been performed before.
- assert not task.Done(stage)
+ # The task has not been performed before.
+ assert not task.Done(stage)
- # The identifier of this task.
- identifier = task.GetIdentifier(stage)
+ # The identifier of this task.
+ identifier = task.GetIdentifier(stage)
- # If a duplicate task comes before the corresponding resolved results from
- # the completed_queue, it will be put in the waiting list. If the result
- # arrives before the duplicate task, the duplicate task will be resolved
- # right away.
- if identifier in done_dict:
- # This task has been encountered before and the result is available. The
- # result can be resolved right away.
- task.SetResult(stage, done_dict[identifier])
- result_queue.put(task)
- else:
- waiting_list.append(task)
+ # If a duplicate task comes before the corresponding resolved results from
+ # the completed_queue, it will be put in the waiting list. If the result
+ # arrives before the duplicate task, the duplicate task will be resolved
+ # right away.
+ if identifier in done_dict:
+ # This task has been encountered before and the result is available. The
+ # result can be resolved right away.
+ task.SetResult(stage, done_dict[identifier])
+ result_queue.put(task)
+ else:
+ waiting_list.append(task)
- # Check and get completed tasks from completed_queue.
- GetResultFromCompletedQueue(stage, completed_queue, done_dict, waiting_list,
- result_queue)
+ # Check and get completed tasks from completed_queue.
+ GetResultFromCompletedQueue(
+ stage, completed_queue, done_dict, waiting_list, result_queue
+ )
- # Wait to resolve the results of the remaining duplicate tasks.
- while waiting_list:
- GetResultFromCompletedQueue(stage, completed_queue, done_dict, waiting_list,
- result_queue)
+ # Wait to resolve the results of the remaining duplicate tasks.
+ while waiting_list:
+ GetResultFromCompletedQueue(
+ stage, completed_queue, done_dict, waiting_list, result_queue
+ )
-def GetResultFromCompletedQueue(stage, completed_queue, done_dict, waiting_list,
- result_queue):
- """Pull results from the completed queue and resolves duplicate tasks.
+def GetResultFromCompletedQueue(
+ stage, completed_queue, done_dict, waiting_list, result_queue
+):
+ """Pull results from the completed queue and resolves duplicate tasks.
- Args:
- stage: The current stage of the pipeline, for example, build stage or test
- stage.
- completed_queue: A queue of tasks that have been performed. The results of
- these tasks are needed to resolve the results of the duplicate tasks. This
- is the communication channel between the workers and this method.
- done_dict: A dictionary of tasks that are done. The key of the dictionary is
- the optimization flags of the task. The value of the dictionary is the
- compilation results of the corresponding task.
- waiting_list: The list of duplicate tasks, the results of which need to be
- resolved.
- result_queue: After the results of the duplicate tasks have been resolved,
- the duplicate tasks will be sent to the next stage via this queue.
+ Args:
+ stage: The current stage of the pipeline, for example, build stage or test
+ stage.
+ completed_queue: A queue of tasks that have been performed. The results of
+ these tasks are needed to resolve the results of the duplicate tasks. This
+ is the communication channel between the workers and this method.
+ done_dict: A dictionary of tasks that are done. The key of the dictionary is
+ the optimization flags of the task. The value of the dictionary is the
+ compilation results of the corresponding task.
+ waiting_list: The list of duplicate tasks, the results of which need to be
+ resolved.
+ result_queue: After the results of the duplicate tasks have been resolved,
+ the duplicate tasks will be sent to the next stage via this queue.
- This helper method tries to pull a completed task from the completed queue.
- If it gets a task from the queue, it resolves the results of all the relevant
- duplicate tasks in the waiting list. Relevant tasks are the tasks that have
- the same flags as the currently received results from the completed_queue.
- """
- # Pull completed task from the worker queue.
- if not completed_queue.empty():
- (identifier, result) = completed_queue.get()
- done_dict[identifier] = result
+ This helper method tries to pull a completed task from the completed queue.
+ If it gets a task from the queue, it resolves the results of all the relevant
+ duplicate tasks in the waiting list. Relevant tasks are the tasks that have
+ the same flags as the currently received results from the completed_queue.
+ """
+ # Pull completed task from the worker queue.
+ if not completed_queue.empty():
+ (identifier, result) = completed_queue.get()
+ done_dict[identifier] = result
- tasks = [t for t in waiting_list if t.GetIdentifier(stage) == identifier]
- for duplicate_task in tasks:
- duplicate_task.SetResult(stage, result)
- result_queue.put(duplicate_task)
- waiting_list.remove(duplicate_task)
+ tasks = [
+ t for t in waiting_list if t.GetIdentifier(stage) == identifier
+ ]
+ for duplicate_task in tasks:
+ duplicate_task.SetResult(stage, result)
+ result_queue.put(duplicate_task)
+ waiting_list.remove(duplicate_task)
def Worker(stage, task, helper_queue, result_queue):
- """Worker that performs the task.
+ """Worker that performs the task.
- This method calls the work method of the input task and distribute the result
- to the helper and the next stage.
+ This method calls the work method of the input task and distribute the result
+ to the helper and the next stage.
- Args:
- stage: The current stage of the pipeline, for example, build stage or test
- stage.
- task: Input task that needs to be performed.
- helper_queue: Queue that holds the completed tasks and the results. This is
- the communication channel between the worker and the helper.
- result_queue: Queue that holds the completed tasks and the results. This is
- the communication channel between the worker and the next stage.
- """
+ Args:
+ stage: The current stage of the pipeline, for example, build stage or test
+ stage.
+ task: Input task that needs to be performed.
+ helper_queue: Queue that holds the completed tasks and the results. This is
+ the communication channel between the worker and the helper.
+ result_queue: Queue that holds the completed tasks and the results. This is
+ the communication channel between the worker and the next stage.
+ """
- # The task has not been completed before.
- assert not task.Done(stage)
+ # The task has not been completed before.
+ assert not task.Done(stage)
- task.Work(stage)
- helper_queue.put((task.GetIdentifier(stage), task.GetResult(stage)))
- result_queue.put(task)
+ task.Work(stage)
+ helper_queue.put((task.GetIdentifier(stage), task.GetResult(stage)))
+ result_queue.put(task)
diff --git a/bestflags/pipeline_worker_test.py b/bestflags/pipeline_worker_test.py
index e3de5e1..15c51ec 100644
--- a/bestflags/pipeline_worker_test.py
+++ b/bestflags/pipeline_worker_test.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for the pipeline_worker functions in the build/test stage.
@@ -8,7 +8,7 @@
This module tests the helper method and the worker method.
"""
-__author__ = 'yuhenglong@google.com (Yuheng Long)'
+__author__ = "yuhenglong@google.com (Yuheng Long)"
import multiprocessing
import random
@@ -19,110 +19,117 @@
import pipeline_process
import pipeline_worker
+
# Pick an integer at random.
TEST_STAGE = -3
def MockTaskCostGenerator():
- """Calls a random number generator and returns a negative number."""
- return random.randint(-sys.maxint - 1, -1)
+ """Calls a random number generator and returns a negative number."""
+ return random.randint(-sys.maxint - 1, -1)
class PipelineWorkerTest(unittest.TestCase):
- """This class tests the pipeline_worker functions.
+ """This class tests the pipeline_worker functions.
- Given the same identifier, the cost should result the same from the
- pipeline_worker functions.
- """
-
- def testHelper(self):
- """"Test the helper.
-
- Call the helper method twice, and test the results. The results should be
- the same, i.e., the cost should be the same.
+ Given the same identifier, the cost should result the same from the
+ pipeline_worker functions.
"""
- # Set up the input, helper and output queue for the helper method.
- manager = multiprocessing.Manager()
- helper_queue = manager.Queue()
- result_queue = manager.Queue()
- completed_queue = manager.Queue()
+ def testHelper(self):
+ """ "Test the helper.
- # Set up the helper process that holds the helper method.
- helper_process = multiprocessing.Process(
- target=pipeline_worker.Helper,
- args=(TEST_STAGE, {}, helper_queue, completed_queue, result_queue))
- helper_process.start()
+ Call the helper method twice, and test the results. The results should be
+ the same, i.e., the cost should be the same.
+ """
- # A dictionary defines the mock result to the helper.
- mock_result = {1: 1995, 2: 59, 9: 1027}
+ # Set up the input, helper and output queue for the helper method.
+ manager = multiprocessing.Manager()
+ helper_queue = manager.Queue()
+ result_queue = manager.Queue()
+ completed_queue = manager.Queue()
- # Test if there is a task that is done before, whether the duplicate task
- # will have the same result. Here, two different scenarios are tested. That
- # is the mock results are added to the completed_queue before and after the
- # corresponding mock tasks being added to the input queue.
- completed_queue.put((9, mock_result[9]))
+ # Set up the helper process that holds the helper method.
+ helper_process = multiprocessing.Process(
+ target=pipeline_worker.Helper,
+ args=(TEST_STAGE, {}, helper_queue, completed_queue, result_queue),
+ )
+ helper_process.start()
- # The output of the helper should contain all the following tasks.
- results = [1, 1, 2, 9]
+ # A dictionary defines the mock result to the helper.
+ mock_result = {1: 1995, 2: 59, 9: 1027}
- # Testing the correctness of having tasks having the same identifier, here
- # 1.
- for result in results:
- helper_queue.put(MockTask(TEST_STAGE, result, MockTaskCostGenerator()))
+ # Test if there is a task that is done before, whether the duplicate task
+ # will have the same result. Here, two different scenarios are tested. That
+ # is the mock results are added to the completed_queue before and after the
+ # corresponding mock tasks being added to the input queue.
+ completed_queue.put((9, mock_result[9]))
- completed_queue.put((2, mock_result[2]))
- completed_queue.put((1, mock_result[1]))
+ # The output of the helper should contain all the following tasks.
+ results = [1, 1, 2, 9]
- # Signal there is no more duplicate task.
- helper_queue.put(pipeline_process.POISONPILL)
- helper_process.join()
+ # Testing the correctness of having tasks having the same identifier, here
+ # 1.
+ for result in results:
+ helper_queue.put(
+ MockTask(TEST_STAGE, result, MockTaskCostGenerator())
+ )
- while results:
- task = result_queue.get()
- identifier = task.GetIdentifier(TEST_STAGE)
- self.assertTrue(identifier in results)
- if identifier in mock_result:
- self.assertTrue(task.GetResult(TEST_STAGE), mock_result[identifier])
- results.remove(identifier)
+ completed_queue.put((2, mock_result[2]))
+ completed_queue.put((1, mock_result[1]))
- def testWorker(self):
- """"Test the worker method.
+ # Signal there is no more duplicate task.
+ helper_queue.put(pipeline_process.POISONPILL)
+ helper_process.join()
- The worker should process all the input tasks and output the tasks to the
- helper and result queue.
- """
+ while results:
+ task = result_queue.get()
+ identifier = task.GetIdentifier(TEST_STAGE)
+ self.assertTrue(identifier in results)
+ if identifier in mock_result:
+ self.assertTrue(
+ task.GetResult(TEST_STAGE), mock_result[identifier]
+ )
+ results.remove(identifier)
- manager = multiprocessing.Manager()
- result_queue = manager.Queue()
- completed_queue = manager.Queue()
+ def testWorker(self):
+ """ "Test the worker method.
- # A dictionary defines the mock tasks and their corresponding results.
- mock_work_tasks = {1: 86, 2: 788}
+ The worker should process all the input tasks and output the tasks to the
+ helper and result queue.
+ """
- mock_tasks = []
+ manager = multiprocessing.Manager()
+ result_queue = manager.Queue()
+ completed_queue = manager.Queue()
- for flag, cost in mock_work_tasks.iteritems():
- mock_tasks.append(MockTask(TEST_STAGE, flag, cost))
+ # A dictionary defines the mock tasks and their corresponding results.
+ mock_work_tasks = {1: 86, 2: 788}
- # Submit the mock tasks to the worker.
- for mock_task in mock_tasks:
- pipeline_worker.Worker(TEST_STAGE, mock_task, completed_queue,
- result_queue)
+ mock_tasks = []
- # The tasks, from the output queue, should be the same as the input and
- # should be performed.
- for task in mock_tasks:
- output = result_queue.get()
- self.assertEqual(output, task)
- self.assertTrue(output.Done(TEST_STAGE))
+ for flag, cost in mock_work_tasks.iteritems():
+ mock_tasks.append(MockTask(TEST_STAGE, flag, cost))
- # The tasks, from the completed_queue, should be defined in the
- # mock_work_tasks dictionary.
- for flag, cost in mock_work_tasks.iteritems():
- helper_input = completed_queue.get()
- self.assertEqual(helper_input, (flag, cost))
+ # Submit the mock tasks to the worker.
+ for mock_task in mock_tasks:
+ pipeline_worker.Worker(
+ TEST_STAGE, mock_task, completed_queue, result_queue
+ )
+
+ # The tasks, from the output queue, should be the same as the input and
+ # should be performed.
+ for task in mock_tasks:
+ output = result_queue.get()
+ self.assertEqual(output, task)
+ self.assertTrue(output.Done(TEST_STAGE))
+
+ # The tasks, from the completed_queue, should be defined in the
+ # mock_work_tasks dictionary.
+ for flag, cost in mock_work_tasks.iteritems():
+ helper_input = completed_queue.get()
+ self.assertEqual(helper_input, (flag, cost))
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/bestflags/steering.py b/bestflags/steering.py
index 320f7c3..ead2516 100644
--- a/bestflags/steering.py
+++ b/bestflags/steering.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The framework stage that produces the next generation of tasks to run.
@@ -6,111 +6,111 @@
Part of the Chrome build flags optimization.
"""
-__author__ = 'yuhenglong@google.com (Yuheng Long)'
+__author__ = "yuhenglong@google.com (Yuheng Long)"
import pipeline_process
def Steering(cache, generations, input_queue, result_queue):
- """The core method template that produces the next generation of tasks to run.
+ """The core method template that produces the next generation of tasks to run.
- This method waits for the results of the tasks from the previous generation.
- Upon the arrival of all these results, the method uses them to generate the
- next generation of tasks.
+ This method waits for the results of the tasks from the previous generation.
+ Upon the arrival of all these results, the method uses them to generate the
+ next generation of tasks.
- The main logic of producing the next generation from previous generation is
- application specific. For example, in the genetic algorithm, a task is
- produced by combining two parents tasks, while in the hill climbing algorithm,
- a task is generated by its immediate neighbor. The method 'Next' is overridden
- in the concrete subclasses of the class Generation to produce the next
- application-specific generation. The steering method invokes the 'Next'
- method, produces the next generation and submits the tasks in this generation
- to the next stage, e.g., the build/compilation stage.
+ The main logic of producing the next generation from previous generation is
+ application specific. For example, in the genetic algorithm, a task is
+ produced by combining two parents tasks, while in the hill climbing algorithm,
+ a task is generated by its immediate neighbor. The method 'Next' is overridden
+ in the concrete subclasses of the class Generation to produce the next
+ application-specific generation. The steering method invokes the 'Next'
+ method, produces the next generation and submits the tasks in this generation
+ to the next stage, e.g., the build/compilation stage.
- Args:
- cache: It stores the experiments that have been conducted before. Used to
- avoid duplicate works.
- generations: The initial generations of tasks to be run.
- input_queue: The input results from the last stage of the framework. These
- results will trigger new iteration of the algorithm.
- result_queue: The output task queue for this pipeline stage. The new tasks
- generated by the steering algorithm will be sent to the next stage via
- this queue.
- """
+ Args:
+ cache: It stores the experiments that have been conducted before. Used to
+ avoid duplicate works.
+ generations: The initial generations of tasks to be run.
+ input_queue: The input results from the last stage of the framework. These
+ results will trigger new iteration of the algorithm.
+ result_queue: The output task queue for this pipeline stage. The new tasks
+ generated by the steering algorithm will be sent to the next stage via
+ this queue.
+ """
- # Generations that have pending tasks to be executed. Pending tasks are those
- # whose results are not ready. The tasks that have their results ready are
- # referenced to as ready tasks. Once there is no pending generation, the
- # algorithm terminates.
- waiting = generations
+ # Generations that have pending tasks to be executed. Pending tasks are those
+ # whose results are not ready. The tasks that have their results ready are
+ # referenced to as ready tasks. Once there is no pending generation, the
+ # algorithm terminates.
+ waiting = generations
- # Record how many initial tasks there are. If there is no task at all, the
- # algorithm can terminate right away.
- num_tasks = 0
+ # Record how many initial tasks there are. If there is no task at all, the
+ # algorithm can terminate right away.
+ num_tasks = 0
- # Submit all the tasks in the initial generations to the next stage of the
- # framework. The next stage can be the build/compilation stage.
- for generation in generations:
- # Only send the task that has not been performed before to the next stage.
- for task in [task for task in generation.Pool() if task not in cache]:
- result_queue.put(task)
- cache.add(task)
- num_tasks += 1
+ # Submit all the tasks in the initial generations to the next stage of the
+ # framework. The next stage can be the build/compilation stage.
+ for generation in generations:
+ # Only send the task that has not been performed before to the next stage.
+ for task in [task for task in generation.Pool() if task not in cache]:
+ result_queue.put(task)
+ cache.add(task)
+ num_tasks += 1
- # If there is no task to be executed at all, the algorithm returns right away.
- if not num_tasks:
- # Inform the next stage that there will be no more task.
+ # If there is no task to be executed at all, the algorithm returns right away.
+ if not num_tasks:
+ # Inform the next stage that there will be no more task.
+ result_queue.put(pipeline_process.POISONPILL)
+ return
+
+ # The algorithm is done if there is no pending generation. A generation is
+ # pending if it has pending task.
+ while waiting:
+ # Busy-waiting for the next task.
+ if input_queue.empty():
+ continue
+
+ # If there is a task whose result is ready from the last stage of the
+ # feedback loop, there will be one less pending task.
+
+ task = input_queue.get()
+
+ # Store the result of this ready task. Intermediate results can be used to
+ # generate report for final result or be used to reboot from a crash from
+ # the failure of any module of the framework.
+ task.LogSteeringCost()
+
+ # Find out which pending generation this ready task belongs to. This pending
+ # generation will have one less pending task. The "next" expression iterates
+ # the generations in waiting until the first generation whose UpdateTask
+ # method returns true.
+ generation = next(gen for gen in waiting if gen.UpdateTask(task))
+
+ # If there is still any pending task, do nothing.
+ if not generation.Done():
+ continue
+
+ # All the tasks in the generation are finished. The generation is ready to
+ # produce the next generation.
+ waiting.remove(generation)
+
+ # Check whether a generation should generate the next generation.
+ # A generation may not generate the next generation, e.g., because a
+ # fixpoint has been reached, there has not been any improvement for a few
+ # generations or a local maxima is reached.
+ if not generation.IsImproved():
+ continue
+
+ for new_generation in generation.Next(cache):
+ # Make sure that each generation should contain at least one task.
+ assert new_generation.Pool()
+ waiting.append(new_generation)
+
+ # Send the tasks of the new generations to the next stage for execution.
+ for new_task in new_generation.Pool():
+ result_queue.put(new_task)
+ cache.add(new_task)
+
+ # Steering algorithm is finished and it informs the next stage that there will
+ # be no more task.
result_queue.put(pipeline_process.POISONPILL)
- return
-
- # The algorithm is done if there is no pending generation. A generation is
- # pending if it has pending task.
- while waiting:
- # Busy-waiting for the next task.
- if input_queue.empty():
- continue
-
- # If there is a task whose result is ready from the last stage of the
- # feedback loop, there will be one less pending task.
-
- task = input_queue.get()
-
- # Store the result of this ready task. Intermediate results can be used to
- # generate report for final result or be used to reboot from a crash from
- # the failure of any module of the framework.
- task.LogSteeringCost()
-
- # Find out which pending generation this ready task belongs to. This pending
- # generation will have one less pending task. The "next" expression iterates
- # the generations in waiting until the first generation whose UpdateTask
- # method returns true.
- generation = next(gen for gen in waiting if gen.UpdateTask(task))
-
- # If there is still any pending task, do nothing.
- if not generation.Done():
- continue
-
- # All the tasks in the generation are finished. The generation is ready to
- # produce the next generation.
- waiting.remove(generation)
-
- # Check whether a generation should generate the next generation.
- # A generation may not generate the next generation, e.g., because a
- # fixpoint has been reached, there has not been any improvement for a few
- # generations or a local maxima is reached.
- if not generation.IsImproved():
- continue
-
- for new_generation in generation.Next(cache):
- # Make sure that each generation should contain at least one task.
- assert new_generation.Pool()
- waiting.append(new_generation)
-
- # Send the tasks of the new generations to the next stage for execution.
- for new_task in new_generation.Pool():
- result_queue.put(new_task)
- cache.add(new_task)
-
- # Steering algorithm is finished and it informs the next stage that there will
- # be no more task.
- result_queue.put(pipeline_process.POISONPILL)
diff --git a/bestflags/steering_test.py b/bestflags/steering_test.py
index c96e362..28a2f10 100644
--- a/bestflags/steering_test.py
+++ b/bestflags/steering_test.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Steering stage unittest.
@@ -6,7 +6,7 @@
Part of the Chrome build flags optimization.
"""
-__author__ = 'yuhenglong@google.com (Yuheng Long)'
+__author__ = "yuhenglong@google.com (Yuheng Long)"
import multiprocessing
import unittest
@@ -16,6 +16,7 @@
import pipeline_process
import steering
+
# Pick an integer at random.
STEERING_TEST_STAGE = -8
@@ -31,140 +32,153 @@
class MockGeneration(Generation):
- """This class emulates an actual generation.
+ """This class emulates an actual generation.
- It will output the next_generations when the method Next is called. The
- next_generations is initiated when the MockGeneration instance is constructed.
- """
-
- def __init__(self, tasks, next_generations):
- """Set up the next generations for this task.
-
- Args:
- tasks: A set of tasks to be run.
- next_generations: A list of generations as the next generation of the
- current generation.
+ It will output the next_generations when the method Next is called. The
+ next_generations is initiated when the MockGeneration instance is constructed.
"""
- Generation.__init__(self, tasks, None)
- self._next_generations = next_generations
- def Next(self, _):
- return self._next_generations
+ def __init__(self, tasks, next_generations):
+ """Set up the next generations for this task.
- def IsImproved(self):
- if self._next_generations:
- return True
- return False
+ Args:
+ tasks: A set of tasks to be run.
+ next_generations: A list of generations as the next generation of the
+ current generation.
+ """
+ Generation.__init__(self, tasks, None)
+ self._next_generations = next_generations
+
+ def Next(self, _):
+ return self._next_generations
+
+ def IsImproved(self):
+ if self._next_generations:
+ return True
+ return False
class SteeringTest(unittest.TestCase):
- """This class test the steering method.
+ """This class test the steering method.
- The steering algorithm should return if there is no new task in the initial
- generation. The steering algorithm should send all the tasks to the next stage
- and should terminate once there is no pending generation. A generation is
- pending if it contains pending task. A task is pending if its (test) result
- is not ready.
- """
-
- def testSteering(self):
- """Test that the steering algorithm processes all the tasks properly.
-
- Test that the steering algorithm sends all the tasks to the next stage. Test
- that the steering algorithm terminates once all the tasks have been
- processed, i.e., the results for the tasks are all ready.
+ The steering algorithm should return if there is no new task in the initial
+ generation. The steering algorithm should send all the tasks to the next stage
+ and should terminate once there is no pending generation. A generation is
+ pending if it contains pending task. A task is pending if its (test) result
+ is not ready.
"""
- # A list of generations used to test the steering stage.
- generations = []
+ def testSteering(self):
+ """Test that the steering algorithm processes all the tasks properly.
- task_index = 0
- previous_generations = None
+ Test that the steering algorithm sends all the tasks to the next stage. Test
+ that the steering algorithm terminates once all the tasks have been
+ processed, i.e., the results for the tasks are all ready.
+ """
- # Generate a sequence of generations to be tested. Each generation will
- # output the next generation in reverse order of the list when the "Next"
- # method is called.
- for _ in range(NUMBER_OF_GENERATIONS):
- # Use a consecutive sequence of numbers as identifiers for the set of
- # tasks put into a generation.
- test_ranges = range(task_index, task_index + NUMBER_OF_TASKS)
- tasks = [IdentifierMockTask(STEERING_TEST_STAGE, t) for t in test_ranges]
- steering_tasks = set(tasks)
+ # A list of generations used to test the steering stage.
+ generations = []
- # Let the previous generation as the offspring generation of the current
- # generation.
- current_generation = MockGeneration(steering_tasks, previous_generations)
- generations.insert(0, current_generation)
- previous_generations = [current_generation]
+ task_index = 0
+ previous_generations = None
- task_index += NUMBER_OF_TASKS
+ # Generate a sequence of generations to be tested. Each generation will
+ # output the next generation in reverse order of the list when the "Next"
+ # method is called.
+ for _ in range(NUMBER_OF_GENERATIONS):
+ # Use a consecutive sequence of numbers as identifiers for the set of
+ # tasks put into a generation.
+ test_ranges = range(task_index, task_index + NUMBER_OF_TASKS)
+ tasks = [
+ IdentifierMockTask(STEERING_TEST_STAGE, t) for t in test_ranges
+ ]
+ steering_tasks = set(tasks)
- # If there is no generation at all, the unittest returns right away.
- if not current_generation:
- return
+ # Let the previous generation as the offspring generation of the current
+ # generation.
+ current_generation = MockGeneration(
+ steering_tasks, previous_generations
+ )
+ generations.insert(0, current_generation)
+ previous_generations = [current_generation]
- # Set up the input and result queue for the steering method.
- manager = multiprocessing.Manager()
- input_queue = manager.Queue()
- result_queue = manager.Queue()
+ task_index += NUMBER_OF_TASKS
- steering_process = multiprocessing.Process(
- target=steering.Steering,
- args=(set(), [current_generation], input_queue, result_queue))
- steering_process.start()
+ # If there is no generation at all, the unittest returns right away.
+ if not current_generation:
+ return
- # Test that each generation is processed properly. I.e., the generations are
- # processed in order.
- while generations:
- generation = generations.pop(0)
- tasks = [task for task in generation.Pool()]
+ # Set up the input and result queue for the steering method.
+ manager = multiprocessing.Manager()
+ input_queue = manager.Queue()
+ result_queue = manager.Queue()
- # Test that all the tasks are processed once and only once.
- while tasks:
+ steering_process = multiprocessing.Process(
+ target=steering.Steering,
+ args=(set(), [current_generation], input_queue, result_queue),
+ )
+ steering_process.start()
+
+ # Test that each generation is processed properly. I.e., the generations are
+ # processed in order.
+ while generations:
+ generation = generations.pop(0)
+ tasks = [task for task in generation.Pool()]
+
+ # Test that all the tasks are processed once and only once.
+ while tasks:
+ task = result_queue.get()
+
+ assert task in tasks
+ tasks.remove(task)
+
+ input_queue.put(task)
+
task = result_queue.get()
- assert task in tasks
- tasks.remove(task)
+ # Test that the steering algorithm returns properly after processing all
+ # the generations.
+ assert task == pipeline_process.POISONPILL
- input_queue.put(task)
+ steering_process.join()
- task = result_queue.get()
+ def testCache(self):
+ """The steering algorithm returns immediately if there is no new tasks.
- # Test that the steering algorithm returns properly after processing all
- # the generations.
- assert task == pipeline_process.POISONPILL
+ If all the new tasks have been cached before, the steering algorithm does
+ not have to execute these tasks again and thus can terminate right away.
+ """
- steering_process.join()
+ # Put a set of tasks in the cache and add this set to initial generation.
+ test_ranges = range(NUMBER_OF_TASKS)
+ tasks = [
+ IdentifierMockTask(STEERING_TEST_STAGE, t) for t in test_ranges
+ ]
+ steering_tasks = set(tasks)
- def testCache(self):
- """The steering algorithm returns immediately if there is no new tasks.
+ current_generation = MockGeneration(steering_tasks, None)
- If all the new tasks have been cached before, the steering algorithm does
- not have to execute these tasks again and thus can terminate right away.
- """
+ # Set up the input and result queue for the steering method.
+ manager = multiprocessing.Manager()
+ input_queue = manager.Queue()
+ result_queue = manager.Queue()
- # Put a set of tasks in the cache and add this set to initial generation.
- test_ranges = range(NUMBER_OF_TASKS)
- tasks = [IdentifierMockTask(STEERING_TEST_STAGE, t) for t in test_ranges]
- steering_tasks = set(tasks)
+ steering_process = multiprocessing.Process(
+ target=steering.Steering,
+ args=(
+ steering_tasks,
+ [current_generation],
+ input_queue,
+ result_queue,
+ ),
+ )
- current_generation = MockGeneration(steering_tasks, None)
+ steering_process.start()
- # Set up the input and result queue for the steering method.
- manager = multiprocessing.Manager()
- input_queue = manager.Queue()
- result_queue = manager.Queue()
-
- steering_process = multiprocessing.Process(
- target=steering.Steering,
- args=(steering_tasks, [current_generation], input_queue, result_queue))
-
- steering_process.start()
-
- # Test that the steering method returns right away.
- assert result_queue.get() == pipeline_process.POISONPILL
- steering_process.join()
+ # Test that the steering method returns right away.
+ assert result_queue.get() == pipeline_process.POISONPILL
+ steering_process.join()
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/bestflags/task.py b/bestflags/task.py
index f055fc7..a782206 100644
--- a/bestflags/task.py
+++ b/bestflags/task.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A reproducing entity.
@@ -12,18 +12,19 @@
execution output to the execution field.
"""
-__author__ = 'yuhenglong@google.com (Yuheng Long)'
+__author__ = "yuhenglong@google.com (Yuheng Long)"
import os
import subprocess
import sys
from uuid import uuid4
+
BUILD_STAGE = 1
TEST_STAGE = 2
# Message indicating that the build or test failed.
-ERROR_STRING = 'error'
+ERROR_STRING = "error"
# The maximum number of tries a build can have. Some compilations may fail due
# to unexpected environment circumstance. This variable defines how many tries
@@ -38,413 +39,456 @@
# Create the file/directory if it does not already exist.
def _CreateDirectory(file_name):
- directory = os.path.dirname(file_name)
- if not os.path.exists(directory):
- os.makedirs(directory)
+ directory = os.path.dirname(file_name)
+ if not os.path.exists(directory):
+ os.makedirs(directory)
class Task(object):
- """A single reproducing entity.
+ """A single reproducing entity.
- A single test of performance with a particular set of flags. It records the
- flag set, the image, the check sum of the image and the cost.
- """
-
- # The command that will be used in the build stage to compile the tasks.
- BUILD_COMMAND = None
- # The command that will be used in the test stage to test the tasks.
- TEST_COMMAND = None
- # The directory to log the compilation and test results.
- LOG_DIRECTORY = None
-
- @staticmethod
- def InitLogCommand(build_command, test_command, log_directory):
- """Set up the build and test command for the task and the log directory.
-
- This framework is generic. It lets the client specify application specific
- compile and test methods by passing different build_command and
- test_command.
-
- Args:
- build_command: The command that will be used in the build stage to compile
- this task.
- test_command: The command that will be used in the test stage to test this
- task.
- log_directory: The directory to log the compilation and test results.
+ A single test of performance with a particular set of flags. It records the
+ flag set, the image, the check sum of the image and the cost.
"""
- Task.BUILD_COMMAND = build_command
- Task.TEST_COMMAND = test_command
- Task.LOG_DIRECTORY = log_directory
+ # The command that will be used in the build stage to compile the tasks.
+ BUILD_COMMAND = None
+ # The command that will be used in the test stage to test the tasks.
+ TEST_COMMAND = None
+ # The directory to log the compilation and test results.
+ LOG_DIRECTORY = None
- def __init__(self, flag_set):
- """Set up the optimization flag selection for this task.
+ @staticmethod
+ def InitLogCommand(build_command, test_command, log_directory):
+ """Set up the build and test command for the task and the log directory.
- Args:
- flag_set: The optimization flag set that is encapsulated by this task.
- """
+ This framework is generic. It lets the client specify application specific
+ compile and test methods by passing different build_command and
+ test_command.
- self._flag_set = flag_set
+ Args:
+ build_command: The command that will be used in the build stage to compile
+ this task.
+ test_command: The command that will be used in the test stage to test this
+ task.
+ log_directory: The directory to log the compilation and test results.
+ """
- # A unique identifier that distinguishes this task from other tasks.
- self._task_identifier = uuid4()
+ Task.BUILD_COMMAND = build_command
+ Task.TEST_COMMAND = test_command
+ Task.LOG_DIRECTORY = log_directory
- self._log_path = (Task.LOG_DIRECTORY, self._task_identifier)
+ def __init__(self, flag_set):
+ """Set up the optimization flag selection for this task.
- # Initiate the hash value. The hash value is used so as not to recompute it
- # every time the hash method is called.
- self._hash_value = None
+ Args:
+ flag_set: The optimization flag set that is encapsulated by this task.
+ """
- # Indicate that the task has not been compiled/tested.
- self._build_cost = None
- self._exe_cost = None
- self._checksum = None
- self._image = None
- self._file_length = None
- self._text_length = None
+ self._flag_set = flag_set
- def __eq__(self, other):
- """Test whether two tasks are equal.
+ # A unique identifier that distinguishes this task from other tasks.
+ self._task_identifier = uuid4()
- Two tasks are equal if their flag_set are equal.
+ self._log_path = (Task.LOG_DIRECTORY, self._task_identifier)
- Args:
- other: The other task with which this task is tested equality.
- Returns:
- True if the encapsulated flag sets are equal.
- """
- if isinstance(other, Task):
- return self.GetFlags() == other.GetFlags()
- return False
+ # Initiate the hash value. The hash value is used so as not to recompute it
+ # every time the hash method is called.
+ self._hash_value = None
- def __hash__(self):
- if self._hash_value is None:
- # Cache the hash value of the flags, so as not to recompute them.
- self._hash_value = hash(self._flag_set)
- return self._hash_value
+ # Indicate that the task has not been compiled/tested.
+ self._build_cost = None
+ self._exe_cost = None
+ self._checksum = None
+ self._image = None
+ self._file_length = None
+ self._text_length = None
- def GetIdentifier(self, stage):
- """Get the identifier of the task in the stage.
+ def __eq__(self, other):
+ """Test whether two tasks are equal.
- The flag set uniquely identifies a task in the build stage. The checksum of
- the image of the task uniquely identifies the task in the test stage.
+ Two tasks are equal if their flag_set are equal.
- Args:
- stage: The stage (build/test) in which this method is called.
- Returns:
- Return the flag set in build stage and return the checksum in test stage.
- """
+ Args:
+ other: The other task with which this task is tested equality.
+ Returns:
+ True if the encapsulated flag sets are equal.
+ """
+ if isinstance(other, Task):
+ return self.GetFlags() == other.GetFlags()
+ return False
- # Define the dictionary for different stage function lookup.
- get_identifier_functions = {BUILD_STAGE: self.FormattedFlags,
- TEST_STAGE: self.__GetCheckSum}
+ def __hash__(self):
+ if self._hash_value is None:
+ # Cache the hash value of the flags, so as not to recompute them.
+ self._hash_value = hash(self._flag_set)
+ return self._hash_value
- assert stage in get_identifier_functions
- return get_identifier_functions[stage]()
+ def GetIdentifier(self, stage):
+ """Get the identifier of the task in the stage.
- def GetResult(self, stage):
- """Get the performance results of the task in the stage.
+ The flag set uniquely identifies a task in the build stage. The checksum of
+ the image of the task uniquely identifies the task in the test stage.
- Args:
- stage: The stage (build/test) in which this method is called.
- Returns:
- Performance results.
- """
+ Args:
+ stage: The stage (build/test) in which this method is called.
+ Returns:
+ Return the flag set in build stage and return the checksum in test stage.
+ """
- # Define the dictionary for different stage function lookup.
- get_result_functions = {BUILD_STAGE: self.__GetBuildResult,
- TEST_STAGE: self.GetTestResult}
+ # Define the dictionary for different stage function lookup.
+ get_identifier_functions = {
+ BUILD_STAGE: self.FormattedFlags,
+ TEST_STAGE: self.__GetCheckSum,
+ }
- assert stage in get_result_functions
+ assert stage in get_identifier_functions
+ return get_identifier_functions[stage]()
- return get_result_functions[stage]()
+ def GetResult(self, stage):
+ """Get the performance results of the task in the stage.
- def SetResult(self, stage, result):
- """Set the performance results of the task in the stage.
+ Args:
+ stage: The stage (build/test) in which this method is called.
+ Returns:
+ Performance results.
+ """
- This method is called by the pipeling_worker to set the results for
- duplicated tasks.
+ # Define the dictionary for different stage function lookup.
+ get_result_functions = {
+ BUILD_STAGE: self.__GetBuildResult,
+ TEST_STAGE: self.GetTestResult,
+ }
- Args:
- stage: The stage (build/test) in which this method is called.
- result: The performance results of the stage.
- """
+ assert stage in get_result_functions
- # Define the dictionary for different stage function lookup.
- set_result_functions = {BUILD_STAGE: self.__SetBuildResult,
- TEST_STAGE: self.__SetTestResult}
+ return get_result_functions[stage]()
- assert stage in set_result_functions
+ def SetResult(self, stage, result):
+ """Set the performance results of the task in the stage.
- set_result_functions[stage](result)
+ This method is called by the pipeling_worker to set the results for
+ duplicated tasks.
- def Done(self, stage):
- """Check whether the stage is done.
+ Args:
+ stage: The stage (build/test) in which this method is called.
+ result: The performance results of the stage.
+ """
- Args:
- stage: The stage to be checked, build or test.
- Returns:
- True if the stage is done.
- """
+ # Define the dictionary for different stage function lookup.
+ set_result_functions = {
+ BUILD_STAGE: self.__SetBuildResult,
+ TEST_STAGE: self.__SetTestResult,
+ }
- # Define the dictionary for different result string lookup.
- done_string = {BUILD_STAGE: self._build_cost, TEST_STAGE: self._exe_cost}
+ assert stage in set_result_functions
- assert stage in done_string
+ set_result_functions[stage](result)
- return done_string[stage] is not None
+ def Done(self, stage):
+ """Check whether the stage is done.
- def Work(self, stage):
- """Perform the task.
+ Args:
+ stage: The stage to be checked, build or test.
+ Returns:
+ True if the stage is done.
+ """
- Args:
- stage: The stage in which the task is performed, compile or test.
- """
+ # Define the dictionary for different result string lookup.
+ done_string = {
+ BUILD_STAGE: self._build_cost,
+ TEST_STAGE: self._exe_cost,
+ }
- # Define the dictionary for different stage function lookup.
- work_functions = {BUILD_STAGE: self.__Compile, TEST_STAGE: self.__Test}
+ assert stage in done_string
- assert stage in work_functions
+ return done_string[stage] is not None
- work_functions[stage]()
+ def Work(self, stage):
+ """Perform the task.
- def FormattedFlags(self):
- """Format the optimization flag set of this task.
+ Args:
+ stage: The stage in which the task is performed, compile or test.
+ """
- Returns:
- The formatted optimization flag set that is encapsulated by this task.
- """
- return str(self._flag_set.FormattedForUse())
+ # Define the dictionary for different stage function lookup.
+ work_functions = {BUILD_STAGE: self.__Compile, TEST_STAGE: self.__Test}
- def GetFlags(self):
- """Get the optimization flag set of this task.
+ assert stage in work_functions
- Returns:
- The optimization flag set that is encapsulated by this task.
- """
+ work_functions[stage]()
- return self._flag_set
+ def FormattedFlags(self):
+ """Format the optimization flag set of this task.
- def __GetCheckSum(self):
- """Get the compilation image checksum of this task.
+ Returns:
+ The formatted optimization flag set that is encapsulated by this task.
+ """
+ return str(self._flag_set.FormattedForUse())
- Returns:
- The compilation image checksum of this task.
- """
+ def GetFlags(self):
+ """Get the optimization flag set of this task.
- # The checksum should be computed before this method is called.
- assert self._checksum is not None
- return self._checksum
+ Returns:
+ The optimization flag set that is encapsulated by this task.
+ """
- def __Compile(self):
- """Run a compile.
+ return self._flag_set
- This method compile an image using the present flags, get the image,
- test the existent of the image and gathers monitoring information, and sets
- the internal cost (fitness) for this set of flags.
- """
+ def __GetCheckSum(self):
+ """Get the compilation image checksum of this task.
- # Format the flags as a string as input to compile command. The unique
- # identifier is passed to the compile command. If concurrent processes are
- # used to compile different tasks, these processes can use the identifier to
- # write to different file.
- flags = self._flag_set.FormattedForUse()
- command = '%s %s %s' % (Task.BUILD_COMMAND, ' '.join(flags),
- self._task_identifier)
+ Returns:
+ The compilation image checksum of this task.
+ """
- # Try BUILD_TRIES number of times before confirming that the build fails.
- for _ in range(BUILD_TRIES):
- try:
- # Execute the command and get the execution status/results.
- p = subprocess.Popen(command.split(),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- (out, err) = p.communicate()
+ # The checksum should be computed before this method is called.
+ assert self._checksum is not None
+ return self._checksum
+
+ def __Compile(self):
+ """Run a compile.
+
+ This method compile an image using the present flags, get the image,
+ test the existent of the image and gathers monitoring information, and sets
+ the internal cost (fitness) for this set of flags.
+ """
+
+ # Format the flags as a string as input to compile command. The unique
+ # identifier is passed to the compile command. If concurrent processes are
+ # used to compile different tasks, these processes can use the identifier to
+ # write to different file.
+ flags = self._flag_set.FormattedForUse()
+ command = "%s %s %s" % (
+ Task.BUILD_COMMAND,
+ " ".join(flags),
+ self._task_identifier,
+ )
+
+ # Try BUILD_TRIES number of times before confirming that the build fails.
+ for _ in range(BUILD_TRIES):
+ try:
+ # Execute the command and get the execution status/results.
+ p = subprocess.Popen(
+ command.split(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ (out, err) = p.communicate()
- if out:
- out = out.strip()
- if out != ERROR_STRING:
- # Each build results contains the checksum of the result image, the
- # performance cost of the build, the compilation image, the length
- # of the build, and the length of the text section of the build.
- (checksum, cost, image, file_length, text_length) = out.split()
- # Build successfully.
- break
+ if out:
+ out = out.strip()
+ if out != ERROR_STRING:
+ # Each build results contains the checksum of the result image, the
+ # performance cost of the build, the compilation image, the length
+ # of the build, and the length of the text section of the build.
+ (
+ checksum,
+ cost,
+ image,
+ file_length,
+ text_length,
+ ) = out.split()
+ # Build successfully.
+ break
- # Build failed.
- cost = ERROR_STRING
- except _:
- # If there is exception getting the cost information of the build, the
- # build failed.
- cost = ERROR_STRING
+ # Build failed.
+ cost = ERROR_STRING
+ except _:
+ # If there is exception getting the cost information of the build, the
+ # build failed.
+ cost = ERROR_STRING
- # Convert the build cost from String to integer. The build cost is used to
- # compare a task with another task. Set the build cost of the failing task
- # to the max integer. The for loop will keep trying until either there is a
- # success or BUILD_TRIES number of tries have been conducted.
- self._build_cost = sys.maxint if cost == ERROR_STRING else float(cost)
+ # Convert the build cost from String to integer. The build cost is used to
+ # compare a task with another task. Set the build cost of the failing task
+ # to the max integer. The for loop will keep trying until either there is a
+ # success or BUILD_TRIES number of tries have been conducted.
+ self._build_cost = sys.maxint if cost == ERROR_STRING else float(cost)
- self._checksum = checksum
- self._file_length = file_length
- self._text_length = text_length
- self._image = image
+ self._checksum = checksum
+ self._file_length = file_length
+ self._text_length = text_length
+ self._image = image
- self.__LogBuildCost(err)
+ self.__LogBuildCost(err)
- def __Test(self):
- """__Test the task against benchmark(s) using the input test command."""
+ def __Test(self):
+ """__Test the task against benchmark(s) using the input test command."""
- # Ensure that the task is compiled before being tested.
- assert self._image is not None
+ # Ensure that the task is compiled before being tested.
+ assert self._image is not None
- # If the task does not compile, no need to test.
- if self._image == ERROR_STRING:
- self._exe_cost = ERROR_STRING
- return
+ # If the task does not compile, no need to test.
+ if self._image == ERROR_STRING:
+ self._exe_cost = ERROR_STRING
+ return
- # The unique identifier is passed to the test command. If concurrent
- # processes are used to compile different tasks, these processes can use the
- # identifier to write to different file.
- command = '%s %s %s' % (Task.TEST_COMMAND, self._image,
- self._task_identifier)
+ # The unique identifier is passed to the test command. If concurrent
+ # processes are used to compile different tasks, these processes can use the
+ # identifier to write to different file.
+ command = "%s %s %s" % (
+ Task.TEST_COMMAND,
+ self._image,
+ self._task_identifier,
+ )
- # Try TEST_TRIES number of times before confirming that the build fails.
- for _ in range(TEST_TRIES):
- try:
- p = subprocess.Popen(command.split(),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- (out, err) = p.communicate()
+ # Try TEST_TRIES number of times before confirming that the build fails.
+ for _ in range(TEST_TRIES):
+ try:
+ p = subprocess.Popen(
+ command.split(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ (out, err) = p.communicate()
- if out:
- out = out.strip()
- if out != ERROR_STRING:
- # The test results contains the performance cost of the test.
- cost = out
- # Test successfully.
- break
+ if out:
+ out = out.strip()
+ if out != ERROR_STRING:
+ # The test results contains the performance cost of the test.
+ cost = out
+ # Test successfully.
+ break
- # Test failed.
- cost = ERROR_STRING
- except _:
- # If there is exception getting the cost information of the test, the
- # test failed. The for loop will keep trying until either there is a
- # success or TEST_TRIES number of tries have been conducted.
- cost = ERROR_STRING
+ # Test failed.
+ cost = ERROR_STRING
+ except _:
+ # If there is exception getting the cost information of the test, the
+ # test failed. The for loop will keep trying until either there is a
+ # success or TEST_TRIES number of tries have been conducted.
+ cost = ERROR_STRING
- self._exe_cost = sys.maxint if (cost == ERROR_STRING) else float(cost)
+ self._exe_cost = sys.maxint if (cost == ERROR_STRING) else float(cost)
- self.__LogTestCost(err)
+ self.__LogTestCost(err)
- def __SetBuildResult(self, (checksum, build_cost, image, file_length,
- text_length)):
- self._checksum = checksum
- self._build_cost = build_cost
- self._image = image
- self._file_length = file_length
- self._text_length = text_length
+ def __SetBuildResult(
+ self, (checksum, build_cost, image, file_length, text_length)
+ ):
+ self._checksum = checksum
+ self._build_cost = build_cost
+ self._image = image
+ self._file_length = file_length
+ self._text_length = text_length
- def __GetBuildResult(self):
- return (self._checksum, self._build_cost, self._image, self._file_length,
- self._text_length)
+ def __GetBuildResult(self):
+ return (
+ self._checksum,
+ self._build_cost,
+ self._image,
+ self._file_length,
+ self._text_length,
+ )
- def GetTestResult(self):
- return self._exe_cost
+ def GetTestResult(self):
+ return self._exe_cost
- def __SetTestResult(self, exe_cost):
- self._exe_cost = exe_cost
+ def __SetTestResult(self, exe_cost):
+ self._exe_cost = exe_cost
- def LogSteeringCost(self):
- """Log the performance results for the task.
+ def LogSteeringCost(self):
+ """Log the performance results for the task.
- This method is called by the steering stage and this method writes the
- results out to a file. The results include the build and the test results.
- """
+ This method is called by the steering stage and this method writes the
+ results out to a file. The results include the build and the test results.
+ """
- steering_log = '%s/%s/steering.txt' % self._log_path
+ steering_log = "%s/%s/steering.txt" % self._log_path
- _CreateDirectory(steering_log)
+ _CreateDirectory(steering_log)
- with open(steering_log, 'w') as out_file:
- # Include the build and the test results.
- steering_result = (self._flag_set, self._checksum, self._build_cost,
- self._image, self._file_length, self._text_length,
- self._exe_cost)
+ with open(steering_log, "w") as out_file:
+ # Include the build and the test results.
+ steering_result = (
+ self._flag_set,
+ self._checksum,
+ self._build_cost,
+ self._image,
+ self._file_length,
+ self._text_length,
+ self._exe_cost,
+ )
- # Write out the result in the comma-separated format (CSV).
- out_file.write('%s,%s,%s,%s,%s,%s,%s\n' % steering_result)
+ # Write out the result in the comma-separated format (CSV).
+ out_file.write("%s,%s,%s,%s,%s,%s,%s\n" % steering_result)
- def __LogBuildCost(self, log):
- """Log the build results for the task.
+ def __LogBuildCost(self, log):
+ """Log the build results for the task.
- The build results include the compilation time of the build, the result
- image, the checksum, the file length and the text length of the image.
- The file length of the image includes the length of the file of the image.
- The text length only includes the length of the text section of the image.
+ The build results include the compilation time of the build, the result
+ image, the checksum, the file length and the text length of the image.
+ The file length of the image includes the length of the file of the image.
+ The text length only includes the length of the text section of the image.
- Args:
- log: The build log of this task.
- """
+ Args:
+ log: The build log of this task.
+ """
- build_result_log = '%s/%s/build.txt' % self._log_path
+ build_result_log = "%s/%s/build.txt" % self._log_path
- _CreateDirectory(build_result_log)
+ _CreateDirectory(build_result_log)
- with open(build_result_log, 'w') as out_file:
- build_result = (self._flag_set, self._build_cost, self._image,
- self._checksum, self._file_length, self._text_length)
+ with open(build_result_log, "w") as out_file:
+ build_result = (
+ self._flag_set,
+ self._build_cost,
+ self._image,
+ self._checksum,
+ self._file_length,
+ self._text_length,
+ )
- # Write out the result in the comma-separated format (CSV).
- out_file.write('%s,%s,%s,%s,%s,%s\n' % build_result)
+ # Write out the result in the comma-separated format (CSV).
+ out_file.write("%s,%s,%s,%s,%s,%s\n" % build_result)
- # The build information about running the build.
- build_run_log = '%s/%s/build_log.txt' % self._log_path
- _CreateDirectory(build_run_log)
+ # The build information about running the build.
+ build_run_log = "%s/%s/build_log.txt" % self._log_path
+ _CreateDirectory(build_run_log)
- with open(build_run_log, 'w') as out_log_file:
- # Write out the execution information.
- out_log_file.write('%s' % log)
+ with open(build_run_log, "w") as out_log_file:
+ # Write out the execution information.
+ out_log_file.write("%s" % log)
- def __LogTestCost(self, log):
- """Log the test results for the task.
+ def __LogTestCost(self, log):
+ """Log the test results for the task.
- The test results include the runtime execution time of the test.
+ The test results include the runtime execution time of the test.
- Args:
- log: The test log of this task.
- """
+ Args:
+ log: The test log of this task.
+ """
- test_log = '%s/%s/test.txt' % self._log_path
+ test_log = "%s/%s/test.txt" % self._log_path
- _CreateDirectory(test_log)
+ _CreateDirectory(test_log)
- with open(test_log, 'w') as out_file:
- test_result = (self._flag_set, self._checksum, self._exe_cost)
+ with open(test_log, "w") as out_file:
+ test_result = (self._flag_set, self._checksum, self._exe_cost)
- # Write out the result in the comma-separated format (CSV).
- out_file.write('%s,%s,%s\n' % test_result)
+ # Write out the result in the comma-separated format (CSV).
+ out_file.write("%s,%s,%s\n" % test_result)
- # The execution information about running the test.
- test_run_log = '%s/%s/test_log.txt' % self._log_path
+ # The execution information about running the test.
+ test_run_log = "%s/%s/test_log.txt" % self._log_path
- _CreateDirectory(test_run_log)
+ _CreateDirectory(test_run_log)
- with open(test_run_log, 'w') as out_log_file:
- # Append the test log information.
- out_log_file.write('%s' % log)
+ with open(test_run_log, "w") as out_log_file:
+ # Append the test log information.
+ out_log_file.write("%s" % log)
- def IsImproved(self, other):
- """Compare the current task with another task.
+ def IsImproved(self, other):
+ """Compare the current task with another task.
- Args:
- other: The other task against which the current task is compared.
+ Args:
+ other: The other task against which the current task is compared.
- Returns:
- True if this task has improvement upon the other task.
- """
+ Returns:
+ True if this task has improvement upon the other task.
+ """
- # The execution costs must have been initiated.
- assert self._exe_cost is not None
- assert other.GetTestResult() is not None
+ # The execution costs must have been initiated.
+ assert self._exe_cost is not None
+ assert other.GetTestResult() is not None
- return self._exe_cost < other.GetTestResult()
+ return self._exe_cost < other.GetTestResult()
diff --git a/bestflags/task_test.py b/bestflags/task_test.py
index 68a7bf7..f151bc7 100644
--- a/bestflags/task_test.py
+++ b/bestflags/task_test.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Task unittest.
@@ -6,7 +6,7 @@
Part of the Chrome build flags optimization.
"""
-__author__ = 'yuhenglong@google.com (Yuheng Long)'
+__author__ = "yuhenglong@google.com (Yuheng Long)"
import random
import sys
@@ -15,6 +15,7 @@
import task
from task import Task
+
# The number of flags be tested.
NUM_FLAGS = 20
@@ -26,149 +27,159 @@
class MockFlagSet(object):
- """This class emulates a set of flags.
+ """This class emulates a set of flags.
- It returns the flags and hash value, when the FormattedForUse method and the
- __hash__ method is called, respectively. These values are initialized when the
- MockFlagSet instance is constructed.
- """
+ It returns the flags and hash value, when the FormattedForUse method and the
+ __hash__ method is called, respectively. These values are initialized when the
+ MockFlagSet instance is constructed.
+ """
- def __init__(self, flags=0, hash_value=-1):
- self._flags = flags
- self._hash_value = hash_value
+ def __init__(self, flags=0, hash_value=-1):
+ self._flags = flags
+ self._hash_value = hash_value
- def __eq__(self, other):
- assert isinstance(other, MockFlagSet)
- return self._flags == other.FormattedForUse()
+ def __eq__(self, other):
+ assert isinstance(other, MockFlagSet)
+ return self._flags == other.FormattedForUse()
- def FormattedForUse(self):
- return self._flags
+ def FormattedForUse(self):
+ return self._flags
- def __hash__(self):
- return self._hash_value
+ def __hash__(self):
+ return self._hash_value
- def GetHash(self):
- return self._hash_value
+ def GetHash(self):
+ return self._hash_value
class TaskTest(unittest.TestCase):
- """This class test the Task class."""
+ """This class test the Task class."""
- def testEqual(self):
- """Test the equal method of the task.
+ def testEqual(self):
+ """Test the equal method of the task.
- Two tasks are equal if and only if their encapsulated flag_sets are equal.
- """
+ Two tasks are equal if and only if their encapsulated flag_sets are equal.
+ """
- flags = range(NUM_FLAGS)
+ flags = range(NUM_FLAGS)
- # Two tasks having the same flag set should be equivalent.
- flag_sets = [MockFlagSet(flag) for flag in flags]
- for flag_set in flag_sets:
- assert Task(flag_set) == Task(flag_set)
+ # Two tasks having the same flag set should be equivalent.
+ flag_sets = [MockFlagSet(flag) for flag in flags]
+ for flag_set in flag_sets:
+ assert Task(flag_set) == Task(flag_set)
- # Two tasks having different flag set should be different.
- for flag_set in flag_sets:
- test_task = Task(flag_set)
- other_flag_sets = [flags for flags in flag_sets if flags != flag_set]
- for flag_set1 in other_flag_sets:
- assert test_task != Task(flag_set1)
+ # Two tasks having different flag set should be different.
+ for flag_set in flag_sets:
+ test_task = Task(flag_set)
+ other_flag_sets = [
+ flags for flags in flag_sets if flags != flag_set
+ ]
+ for flag_set1 in other_flag_sets:
+ assert test_task != Task(flag_set1)
- def testHash(self):
- """Test the hash method of the task.
+ def testHash(self):
+ """Test the hash method of the task.
- Two tasks are equal if and only if their encapsulated flag_sets are equal.
- """
+ Two tasks are equal if and only if their encapsulated flag_sets are equal.
+ """
- # Random identifier that is not relevant in this test.
- identifier = random.randint(-sys.maxint - 1, -1)
+ # Random identifier that is not relevant in this test.
+ identifier = random.randint(-sys.maxint - 1, -1)
- flag_sets = [MockFlagSet(identifier, value) for value in range(NUM_FLAGS)]
- for flag_set in flag_sets:
- # The hash of a task is the same as the hash of its flag set.
- hash_task = Task(flag_set)
- hash_value = hash(hash_task)
- assert hash_value == flag_set.GetHash()
+ flag_sets = [
+ MockFlagSet(identifier, value) for value in range(NUM_FLAGS)
+ ]
+ for flag_set in flag_sets:
+ # The hash of a task is the same as the hash of its flag set.
+ hash_task = Task(flag_set)
+ hash_value = hash(hash_task)
+ assert hash_value == flag_set.GetHash()
- # The hash of a task does not change.
- assert hash_value == hash(hash_task)
+ # The hash of a task does not change.
+ assert hash_value == hash(hash_task)
- def testGetIdentifier(self):
- """Test the get identifier method of the task.
+ def testGetIdentifier(self):
+ """Test the get identifier method of the task.
- The get identifier method should returns the flag set in the build stage.
- """
+ The get identifier method should returns the flag set in the build stage.
+ """
- flag_sets = [MockFlagSet(flag) for flag in range(NUM_FLAGS)]
- for flag_set in flag_sets:
- identifier_task = Task(flag_set)
+ flag_sets = [MockFlagSet(flag) for flag in range(NUM_FLAGS)]
+ for flag_set in flag_sets:
+ identifier_task = Task(flag_set)
- identifier = identifier_task.GetIdentifier(task.BUILD_STAGE)
+ identifier = identifier_task.GetIdentifier(task.BUILD_STAGE)
- # The task formats the flag set into a string.
- assert identifier == str(flag_set.FormattedForUse())
+ # The task formats the flag set into a string.
+ assert identifier == str(flag_set.FormattedForUse())
- def testGetSetResult(self):
- """Test the get and set result methods of the task.
+ def testGetSetResult(self):
+ """Test the get and set result methods of the task.
- The get result method should return the same results as were set.
- """
+ The get result method should return the same results as were set.
+ """
- flag_sets = [MockFlagSet(flag) for flag in range(NUM_FLAGS)]
- for flag_set in flag_sets:
- result_task = Task(flag_set)
+ flag_sets = [MockFlagSet(flag) for flag in range(NUM_FLAGS)]
+ for flag_set in flag_sets:
+ result_task = Task(flag_set)
- # The get result method should return the same results as were set, in
- # build stage. Currently, the build result is a 5-element tuple containing
- # the checksum of the result image, the performance cost of the build, the
- # compilation image, the length of the build, and the length of the text
- # section of the build.
- result = tuple([random.randint(0, RANDOM_BUILD_RESULT) for _ in range(5)])
- result_task.SetResult(task.BUILD_STAGE, result)
- assert result == result_task.GetResult(task.BUILD_STAGE)
+ # The get result method should return the same results as were set, in
+ # build stage. Currently, the build result is a 5-element tuple containing
+ # the checksum of the result image, the performance cost of the build, the
+ # compilation image, the length of the build, and the length of the text
+ # section of the build.
+ result = tuple(
+ [random.randint(0, RANDOM_BUILD_RESULT) for _ in range(5)]
+ )
+ result_task.SetResult(task.BUILD_STAGE, result)
+ assert result == result_task.GetResult(task.BUILD_STAGE)
- # The checksum is the identifier of the test stage.
- identifier = result_task.GetIdentifier(task.TEST_STAGE)
- # The first element of the result tuple is the checksum.
- assert identifier == result[0]
+ # The checksum is the identifier of the test stage.
+ identifier = result_task.GetIdentifier(task.TEST_STAGE)
+ # The first element of the result tuple is the checksum.
+ assert identifier == result[0]
- # The get result method should return the same results as were set, in
- # test stage.
- random_test_result = random.randint(0, RANDOM_TESTRESULT)
- result_task.SetResult(task.TEST_STAGE, random_test_result)
- test_result = result_task.GetResult(task.TEST_STAGE)
- assert test_result == random_test_result
+ # The get result method should return the same results as were set, in
+ # test stage.
+ random_test_result = random.randint(0, RANDOM_TESTRESULT)
+ result_task.SetResult(task.TEST_STAGE, random_test_result)
+ test_result = result_task.GetResult(task.TEST_STAGE)
+ assert test_result == random_test_result
- def testDone(self):
- """Test the done methods of the task.
+ def testDone(self):
+ """Test the done methods of the task.
- The done method should return false is the task has not perform and return
- true after the task is finished.
- """
+ The done method should return false is the task has not perform and return
+ true after the task is finished.
+ """
- flags = range(NUM_FLAGS)
+ flags = range(NUM_FLAGS)
- flag_sets = [MockFlagSet(flag) for flag in flags]
- for flag_set in flag_sets:
- work_task = Task(flag_set)
+ flag_sets = [MockFlagSet(flag) for flag in flags]
+ for flag_set in flag_sets:
+ work_task = Task(flag_set)
- # The task has not been compiled nor tested.
- assert not work_task.Done(task.TEST_STAGE)
- assert not work_task.Done(task.BUILD_STAGE)
+ # The task has not been compiled nor tested.
+ assert not work_task.Done(task.TEST_STAGE)
+ assert not work_task.Done(task.BUILD_STAGE)
- # After the task has been compiled, it should indicate finished in BUILD
- # stage.
- result = tuple([random.randint(0, RANDOM_BUILD_RESULT) for _ in range(5)])
- work_task.SetResult(task.BUILD_STAGE, result)
- assert not work_task.Done(task.TEST_STAGE)
- assert work_task.Done(task.BUILD_STAGE)
+ # After the task has been compiled, it should indicate finished in BUILD
+ # stage.
+ result = tuple(
+ [random.randint(0, RANDOM_BUILD_RESULT) for _ in range(5)]
+ )
+ work_task.SetResult(task.BUILD_STAGE, result)
+ assert not work_task.Done(task.TEST_STAGE)
+ assert work_task.Done(task.BUILD_STAGE)
- # After the task has been tested, it should indicate finished in TEST
- # stage.
- work_task.SetResult(task.TEST_STAGE, random.randint(0, RANDOM_TESTRESULT))
- assert work_task.Done(task.TEST_STAGE)
- assert work_task.Done(task.BUILD_STAGE)
+ # After the task has been tested, it should indicate finished in TEST
+ # stage.
+ work_task.SetResult(
+ task.TEST_STAGE, random.randint(0, RANDOM_TESTRESULT)
+ )
+ assert work_task.Done(task.TEST_STAGE)
+ assert work_task.Done(task.BUILD_STAGE)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/bestflags/testing_batch.py b/bestflags/testing_batch.py
index ffe1944..783d95b 100644
--- a/bestflags/testing_batch.py
+++ b/bestflags/testing_batch.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Hill climbing unitest.
@@ -9,7 +9,7 @@
iterative elimination algorithm.
"""
-__author__ = 'yuhenglong@google.com (Yuheng Long)'
+__author__ = "yuhenglong@google.com (Yuheng Long)"
import multiprocessing
import random
@@ -29,6 +29,7 @@
from task import Task
from task import TEST_STAGE
+
# The number of flags be tested.
NUM_FLAGS = 5
@@ -43,408 +44,413 @@
def _GenerateRandomRasks(specs):
- """Generate a task that has random values.
+ """Generate a task that has random values.
- Args:
- specs: A list of spec from which the flag set is created.
+ Args:
+ specs: A list of spec from which the flag set is created.
- Returns:
- A set containing a task that has random values.
- """
+ Returns:
+ A set containing a task that has random values.
+ """
- flag_set = []
+ flag_set = []
- for spec in specs:
- numeric_flag_match = flags.Search(spec)
- if numeric_flag_match:
- # Numeric flags.
- start = int(numeric_flag_match.group('start'))
- end = int(numeric_flag_match.group('end'))
+ for spec in specs:
+ numeric_flag_match = flags.Search(spec)
+ if numeric_flag_match:
+ # Numeric flags.
+ start = int(numeric_flag_match.group("start"))
+ end = int(numeric_flag_match.group("end"))
- value = random.randint(start - 1, end - 1)
- if value != start - 1:
- # If the value falls in the range, this flag is enabled.
- flag_set.append(Flag(spec, value))
- else:
- # Boolean flags.
- if random.randint(0, 1):
- flag_set.append(Flag(spec))
+ value = random.randint(start - 1, end - 1)
+ if value != start - 1:
+ # If the value falls in the range, this flag is enabled.
+ flag_set.append(Flag(spec, value))
+ else:
+ # Boolean flags.
+ if random.randint(0, 1):
+ flag_set.append(Flag(spec))
- return set([Task(FlagSet(flag_set))])
+ return set([Task(FlagSet(flag_set))])
def _GenerateAllFlagsTasks(specs):
- """Generate a task that all the flags are enable.
+ """Generate a task that all the flags are enable.
- All the boolean flags in the specs will be enabled and all the numeric flag
- with have the largest legal value.
+ All the boolean flags in the specs will be enabled and all the numeric flag
+ with have the largest legal value.
- Args:
- specs: A list of spec from which the flag set is created.
+ Args:
+ specs: A list of spec from which the flag set is created.
- Returns:
- A set containing a task that has all flags enabled.
- """
+ Returns:
+ A set containing a task that has all flags enabled.
+ """
- flag_set = []
+ flag_set = []
- for spec in specs:
- numeric_flag_match = flags.Search(spec)
+ for spec in specs:
+ numeric_flag_match = flags.Search(spec)
- if numeric_flag_match:
- value = (int(numeric_flag_match.group('end')) - 1)
- else:
- value = -1
- flag_set.append(Flag(spec, value))
+ if numeric_flag_match:
+ value = int(numeric_flag_match.group("end")) - 1
+ else:
+ value = -1
+ flag_set.append(Flag(spec, value))
- return set([Task(FlagSet(flag_set))])
+ return set([Task(FlagSet(flag_set))])
def _GenerateNoFlagTask():
- return set([Task(FlagSet([]))])
+ return set([Task(FlagSet([]))])
def GenerateRandomGATasks(specs, num_tasks, num_trials):
- """Generate a set of tasks for the Genetic Algorithm.
+ """Generate a set of tasks for the Genetic Algorithm.
- Args:
- specs: A list of spec from which the flag set is created.
- num_tasks: number of tasks that should be generated.
- num_trials: the maximum number of tries should be attempted to generate the
- set of tasks.
+ Args:
+ specs: A list of spec from which the flag set is created.
+ num_tasks: number of tasks that should be generated.
+ num_trials: the maximum number of tries should be attempted to generate the
+ set of tasks.
- Returns:
- A set of randomly generated tasks.
- """
+ Returns:
+ A set of randomly generated tasks.
+ """
- tasks = set([])
+ tasks = set([])
- total_trials = 0
- while len(tasks) < num_tasks and total_trials < num_trials:
- new_flag = FlagSet([Flag(spec) for spec in specs if random.randint(0, 1)])
- new_task = GATask(new_flag)
+ total_trials = 0
+ while len(tasks) < num_tasks and total_trials < num_trials:
+ new_flag = FlagSet(
+ [Flag(spec) for spec in specs if random.randint(0, 1)]
+ )
+ new_task = GATask(new_flag)
- if new_task in tasks:
- total_trials += 1
- else:
- tasks.add(new_task)
- total_trials = 0
+ if new_task in tasks:
+ total_trials += 1
+ else:
+ tasks.add(new_task)
+ total_trials = 0
- return tasks
+ return tasks
def _GenerateInitialFlags(specs, spec):
- """Generate the flag_set of a task in the flag elimination algorithm.
+ """Generate the flag_set of a task in the flag elimination algorithm.
- Set the value of all the flags to the largest value, except for the flag that
- contains spec.
+ Set the value of all the flags to the largest value, except for the flag that
+ contains spec.
- For example, if the specs are [-finline-limit=[1-1000], -fstrict-aliasing] and
- the spec is -finline-limit=[1-1000], then the result is
- [-finline-limit=[1-1000]:-finline-limit=998,
- -fstrict-aliasing:-fstrict-aliasing]
+ For example, if the specs are [-finline-limit=[1-1000], -fstrict-aliasing] and
+ the spec is -finline-limit=[1-1000], then the result is
+ [-finline-limit=[1-1000]:-finline-limit=998,
+ -fstrict-aliasing:-fstrict-aliasing]
- Args:
- specs: an array of specifications from which the result flag_set is created.
- The flag_set contains one and only one flag that contain the specification
+ Args:
+ specs: an array of specifications from which the result flag_set is created.
+ The flag_set contains one and only one flag that contain the specification
+ spec.
+ spec: The flag containing this spec should have a value that is smaller than
+ the highest value the flag can have.
+
+ Returns:
+ An array of flags, each of which contains one spec in specs. All the values
+ of the flags are the largest values in specs, expect the one that contains
spec.
- spec: The flag containing this spec should have a value that is smaller than
- the highest value the flag can have.
+ """
- Returns:
- An array of flags, each of which contains one spec in specs. All the values
- of the flags are the largest values in specs, expect the one that contains
- spec.
- """
+ flag_set = []
+ for other_spec in specs:
+ numeric_flag_match = flags.Search(other_spec)
+ # Found the spec in the array specs.
+ if other_spec == spec:
+ # Numeric flag will have a value that is smaller than the largest value
+ # and Boolean flag will be deleted.
+ if numeric_flag_match:
+ end = int(numeric_flag_match.group("end"))
+ flag_set.append(flags.Flag(other_spec, end - 2))
- flag_set = []
- for other_spec in specs:
- numeric_flag_match = flags.Search(other_spec)
- # Found the spec in the array specs.
- if other_spec == spec:
- # Numeric flag will have a value that is smaller than the largest value
- # and Boolean flag will be deleted.
- if numeric_flag_match:
- end = int(numeric_flag_match.group('end'))
- flag_set.append(flags.Flag(other_spec, end - 2))
+ continue
- continue
+ # other_spec != spec
+ if numeric_flag_match:
+ # numeric flag
+ end = int(numeric_flag_match.group("end"))
+ flag_set.append(flags.Flag(other_spec, end - 1))
+ continue
- # other_spec != spec
- if numeric_flag_match:
- # numeric flag
- end = int(numeric_flag_match.group('end'))
- flag_set.append(flags.Flag(other_spec, end - 1))
- continue
+ # boolean flag
+ flag_set.append(flags.Flag(other_spec))
- # boolean flag
- flag_set.append(flags.Flag(other_spec))
-
- return flag_set
+ return flag_set
def _GenerateAllIterativeEliminationTasks(specs):
- """Generate the initial tasks for the negative flag elimination algorithm.
+ """Generate the initial tasks for the negative flag elimination algorithm.
- Generate the base line task that turns on all the boolean flags and sets the
- value to be the largest value for the numeric flag.
+ Generate the base line task that turns on all the boolean flags and sets the
+ value to be the largest value for the numeric flag.
- For example, if the specs are [-finline-limit=[1-1000], -fstrict-aliasing],
- the base line is [-finline-limit=[1-1000]:-finline-limit=999,
- -fstrict-aliasing:-fstrict-aliasing]
+ For example, if the specs are [-finline-limit=[1-1000], -fstrict-aliasing],
+ the base line is [-finline-limit=[1-1000]:-finline-limit=999,
+ -fstrict-aliasing:-fstrict-aliasing]
- Generate a set of task, each turns off one of the flag or sets a value that is
- smaller than the largest value for the flag.
+ Generate a set of task, each turns off one of the flag or sets a value that is
+ smaller than the largest value for the flag.
- Args:
- specs: an array of specifications from which the result flag_set is created.
+ Args:
+ specs: an array of specifications from which the result flag_set is created.
- Returns:
- An array containing one generation of the initial tasks for the negative
- flag elimination algorithm.
- """
+ Returns:
+ An array containing one generation of the initial tasks for the negative
+ flag elimination algorithm.
+ """
- # The set of tasks to be generated.
- results = set([])
- flag_set = []
+ # The set of tasks to be generated.
+ results = set([])
+ flag_set = []
- for spec in specs:
- numeric_flag_match = flags.Search(spec)
- if numeric_flag_match:
- # Numeric flag.
- end_value = int(numeric_flag_match.group('end'))
- flag_set.append(flags.Flag(spec, end_value - 1))
- continue
+ for spec in specs:
+ numeric_flag_match = flags.Search(spec)
+ if numeric_flag_match:
+ # Numeric flag.
+ end_value = int(numeric_flag_match.group("end"))
+ flag_set.append(flags.Flag(spec, end_value - 1))
+ continue
- # Boolean flag.
- flag_set.append(flags.Flag(spec))
+ # Boolean flag.
+ flag_set.append(flags.Flag(spec))
- # The base line task that set all the flags to their largest values.
- parent_task = Task(flags.FlagSet(flag_set))
- results.add(parent_task)
+ # The base line task that set all the flags to their largest values.
+ parent_task = Task(flags.FlagSet(flag_set))
+ results.add(parent_task)
- for spec in specs:
- results.add(Task(flags.FlagSet(_GenerateInitialFlags(specs, spec))))
+ for spec in specs:
+ results.add(Task(flags.FlagSet(_GenerateInitialFlags(specs, spec))))
- return [IterativeEliminationFirstGeneration(results, parent_task)]
+ return [IterativeEliminationFirstGeneration(results, parent_task)]
def _ComputeCost(cost_func, specs, flag_set):
- """Compute the mock cost of the flag_set using the input cost function.
+ """Compute the mock cost of the flag_set using the input cost function.
- All the boolean flags in the specs will be enabled and all the numeric flag
- with have the largest legal value.
+ All the boolean flags in the specs will be enabled and all the numeric flag
+ with have the largest legal value.
- Args:
- cost_func: The cost function which is used to compute the mock cost of a
- dictionary of flags.
- specs: All the specs that are used in the algorithm. This is used to check
- whether certain flag is disabled in the flag_set dictionary.
- flag_set: a dictionary of the spec and flag pairs.
+ Args:
+ cost_func: The cost function which is used to compute the mock cost of a
+ dictionary of flags.
+ specs: All the specs that are used in the algorithm. This is used to check
+ whether certain flag is disabled in the flag_set dictionary.
+ flag_set: a dictionary of the spec and flag pairs.
- Returns:
- The mock cost of the input dictionary of the flags.
- """
+ Returns:
+ The mock cost of the input dictionary of the flags.
+ """
- values = []
+ values = []
- for spec in specs:
- # If a flag is enabled, its value is added. Otherwise a padding 0 is added.
- values.append(flag_set[spec].GetValue() if spec in flag_set else 0)
+ for spec in specs:
+ # If a flag is enabled, its value is added. Otherwise a padding 0 is added.
+ values.append(flag_set[spec].GetValue() if spec in flag_set else 0)
- # The cost function string can use the values array.
- return eval(cost_func)
+ # The cost function string can use the values array.
+ return eval(cost_func)
def _GenerateTestFlags(num_flags, upper_bound, file_name):
- """Generate a set of mock flags and write it to a configuration file.
+ """Generate a set of mock flags and write it to a configuration file.
- Generate a set of mock flags
+ Generate a set of mock flags
- Args:
- num_flags: Number of numeric flags to be generated.
- upper_bound: The value of the upper bound of the range.
- file_name: The configuration file name into which the mock flags are put.
- """
+ Args:
+ num_flags: Number of numeric flags to be generated.
+ upper_bound: The value of the upper bound of the range.
+ file_name: The configuration file name into which the mock flags are put.
+ """
- with open(file_name, 'w') as output_file:
- num_flags = int(num_flags)
- upper_bound = int(upper_bound)
- for i in range(num_flags):
- output_file.write('%s=[1-%d]\n' % (i, upper_bound))
+ with open(file_name, "w") as output_file:
+ num_flags = int(num_flags)
+ upper_bound = int(upper_bound)
+ for i in range(num_flags):
+ output_file.write("%s=[1-%d]\n" % (i, upper_bound))
def _TestAlgorithm(cost_func, specs, generations, best_result):
- """Test the best result the algorithm should return.
+ """Test the best result the algorithm should return.
- Set up the framework, run the input algorithm and verify the result.
+ Set up the framework, run the input algorithm and verify the result.
- Args:
- cost_func: The cost function which is used to compute the mock cost of a
- dictionary of flags.
- specs: All the specs that are used in the algorithm. This is used to check
- whether certain flag is disabled in the flag_set dictionary.
- generations: The initial generations to be evaluated.
- best_result: The expected best result of the algorithm. If best_result is
- -1, the algorithm may or may not return the best value. Therefore, no
- assertion will be inserted.
- """
+ Args:
+ cost_func: The cost function which is used to compute the mock cost of a
+ dictionary of flags.
+ specs: All the specs that are used in the algorithm. This is used to check
+ whether certain flag is disabled in the flag_set dictionary.
+ generations: The initial generations to be evaluated.
+ best_result: The expected best result of the algorithm. If best_result is
+ -1, the algorithm may or may not return the best value. Therefore, no
+ assertion will be inserted.
+ """
- # Set up the utilities to test the framework.
- manager = multiprocessing.Manager()
- input_queue = manager.Queue()
- output_queue = manager.Queue()
- pp_steer = multiprocessing.Process(
- target=Steering,
- args=(set(), generations, output_queue, input_queue))
- pp_steer.start()
+ # Set up the utilities to test the framework.
+ manager = multiprocessing.Manager()
+ input_queue = manager.Queue()
+ output_queue = manager.Queue()
+ pp_steer = multiprocessing.Process(
+ target=Steering, args=(set(), generations, output_queue, input_queue)
+ )
+ pp_steer.start()
- # The best result of the algorithm so far.
- result = sys.maxint
+ # The best result of the algorithm so far.
+ result = sys.maxint
- while True:
- task = input_queue.get()
+ while True:
+ task = input_queue.get()
- # POISONPILL signal the ends of the algorithm.
- if task == pipeline_process.POISONPILL:
- break
+ # POISONPILL signal the ends of the algorithm.
+ if task == pipeline_process.POISONPILL:
+ break
- task.SetResult(BUILD_STAGE, (0, 0, 0, 0, 0))
+ task.SetResult(BUILD_STAGE, (0, 0, 0, 0, 0))
- # Compute the mock cost for the task.
- task_result = _ComputeCost(cost_func, specs, task.GetFlags())
- task.SetResult(TEST_STAGE, task_result)
+ # Compute the mock cost for the task.
+ task_result = _ComputeCost(cost_func, specs, task.GetFlags())
+ task.SetResult(TEST_STAGE, task_result)
- # If the mock result of the current task is the best so far, set this
- # result to be the best result.
- if task_result < result:
- result = task_result
+ # If the mock result of the current task is the best so far, set this
+ # result to be the best result.
+ if task_result < result:
+ result = task_result
- output_queue.put(task)
+ output_queue.put(task)
- pp_steer.join()
+ pp_steer.join()
- # Only do this test when best_result is not -1.
- if best_result != -1:
- assert best_result == result
+ # Only do this test when best_result is not -1.
+ if best_result != -1:
+ assert best_result == result
class MockAlgorithmsTest(unittest.TestCase):
- """This class mock tests different steering algorithms.
+ """This class mock tests different steering algorithms.
- The steering algorithms are responsible for generating the next set of tasks
- to run in each iteration. This class does a functional testing on the
- algorithms. It mocks out the computation of the fitness function from the
- build and test phases by letting the user define the fitness function.
- """
-
- def _GenerateFlagSpecifications(self):
- """Generate the testing specifications."""
-
- mock_test_file = 'scale_mock_test'
- _GenerateTestFlags(NUM_FLAGS, FLAG_RANGES, mock_test_file)
- return flags.ReadConf(mock_test_file)
-
- def testBestHillClimb(self):
- """Test the best hill climb algorithm.
-
- Test whether it finds the best results as expected.
+ The steering algorithms are responsible for generating the next set of tasks
+ to run in each iteration. This class does a functional testing on the
+ algorithms. It mocks out the computation of the fitness function from the
+ build and test phases by letting the user define the fitness function.
"""
- # Initiate the build/test command and the log directory.
- Task.InitLogCommand(None, None, 'output')
+ def _GenerateFlagSpecifications(self):
+ """Generate the testing specifications."""
- # Generate the testing specs.
- specs = self._GenerateFlagSpecifications()
+ mock_test_file = "scale_mock_test"
+ _GenerateTestFlags(NUM_FLAGS, FLAG_RANGES, mock_test_file)
+ return flags.ReadConf(mock_test_file)
- # Generate the initial generations for a test whose cost function is the
- # summation of the values of all the flags.
- generation_tasks = _GenerateAllFlagsTasks(specs)
- generations = [HillClimbingBestBranch(generation_tasks, set([]), specs)]
+ def testBestHillClimb(self):
+ """Test the best hill climb algorithm.
- # Test the algorithm. The cost function is the summation of all the values
- # of all the flags. Therefore, the best value is supposed to be 0, i.e.,
- # when all the flags are disabled.
- _TestAlgorithm('sum(values[0:len(values)])', specs, generations, 0)
+ Test whether it finds the best results as expected.
+ """
- # This test uses a cost function that is the negative of the previous cost
- # function. Therefore, the best result should be found in task with all the
- # flags enabled.
- cost_function = 'sys.maxint - sum(values[0:len(values)])'
- all_flags = list(generation_tasks)[0].GetFlags()
- cost = _ComputeCost(cost_function, specs, all_flags)
+ # Initiate the build/test command and the log directory.
+ Task.InitLogCommand(None, None, "output")
- # Generate the initial generations.
- generation_tasks = _GenerateNoFlagTask()
- generations = [HillClimbingBestBranch(generation_tasks, set([]), specs)]
+ # Generate the testing specs.
+ specs = self._GenerateFlagSpecifications()
- # Test the algorithm. The cost function is negative of the summation of all
- # the values of all the flags. Therefore, the best value is supposed to be
- # 0, i.e., when all the flags are disabled.
- _TestAlgorithm(cost_function, specs, generations, cost)
+ # Generate the initial generations for a test whose cost function is the
+ # summation of the values of all the flags.
+ generation_tasks = _GenerateAllFlagsTasks(specs)
+ generations = [HillClimbingBestBranch(generation_tasks, set([]), specs)]
- def testGeneticAlgorithm(self):
- """Test the Genetic Algorithm.
+ # Test the algorithm. The cost function is the summation of all the values
+ # of all the flags. Therefore, the best value is supposed to be 0, i.e.,
+ # when all the flags are disabled.
+ _TestAlgorithm("sum(values[0:len(values)])", specs, generations, 0)
- Do a functional testing here and see how well it scales.
- """
+ # This test uses a cost function that is the negative of the previous cost
+ # function. Therefore, the best result should be found in task with all the
+ # flags enabled.
+ cost_function = "sys.maxint - sum(values[0:len(values)])"
+ all_flags = list(generation_tasks)[0].GetFlags()
+ cost = _ComputeCost(cost_function, specs, all_flags)
- # Initiate the build/test command and the log directory.
- Task.InitLogCommand(None, None, 'output')
+ # Generate the initial generations.
+ generation_tasks = _GenerateNoFlagTask()
+ generations = [HillClimbingBestBranch(generation_tasks, set([]), specs)]
- # Generate the testing specs.
- specs = self._GenerateFlagSpecifications()
- # Initiate the build/test command and the log directory.
- GAGeneration.InitMetaData(STOP_THRESHOLD, NUM_CHROMOSOMES, NUM_TRIALS,
- specs, MUTATION_RATE)
+ # Test the algorithm. The cost function is negative of the summation of all
+ # the values of all the flags. Therefore, the best value is supposed to be
+ # 0, i.e., when all the flags are disabled.
+ _TestAlgorithm(cost_function, specs, generations, cost)
- # Generate the initial generations.
- generation_tasks = GenerateRandomGATasks(specs, NUM_CHROMOSOMES, NUM_TRIALS)
- generations = [GAGeneration(generation_tasks, set([]), 0)]
+ def testGeneticAlgorithm(self):
+ """Test the Genetic Algorithm.
- # Test the algorithm.
- _TestAlgorithm('sum(values[0:len(values)])', specs, generations, -1)
- cost_func = 'sys.maxint - sum(values[0:len(values)])'
- _TestAlgorithm(cost_func, specs, generations, -1)
+ Do a functional testing here and see how well it scales.
+ """
- def testIterativeElimination(self):
- """Test the iterative elimination algorithm.
+ # Initiate the build/test command and the log directory.
+ Task.InitLogCommand(None, None, "output")
- Test whether it finds the best results as expected.
- """
+ # Generate the testing specs.
+ specs = self._GenerateFlagSpecifications()
+ # Initiate the build/test command and the log directory.
+ GAGeneration.InitMetaData(
+ STOP_THRESHOLD, NUM_CHROMOSOMES, NUM_TRIALS, specs, MUTATION_RATE
+ )
- # Initiate the build/test command and the log directory.
- Task.InitLogCommand(None, None, 'output')
+ # Generate the initial generations.
+ generation_tasks = GenerateRandomGATasks(
+ specs, NUM_CHROMOSOMES, NUM_TRIALS
+ )
+ generations = [GAGeneration(generation_tasks, set([]), 0)]
- # Generate the testing specs.
- specs = self._GenerateFlagSpecifications()
+ # Test the algorithm.
+ _TestAlgorithm("sum(values[0:len(values)])", specs, generations, -1)
+ cost_func = "sys.maxint - sum(values[0:len(values)])"
+ _TestAlgorithm(cost_func, specs, generations, -1)
- # Generate the initial generations. The generation contains the base line
- # task that turns on all the flags and tasks that each turn off one of the
- # flags.
- generations = _GenerateAllIterativeEliminationTasks(specs)
+ def testIterativeElimination(self):
+ """Test the iterative elimination algorithm.
- # Test the algorithm. The cost function is the summation of all the values
- # of all the flags. Therefore, the best value is supposed to be 0, i.e.,
- # when all the flags are disabled.
- _TestAlgorithm('sum(values[0:len(values)])', specs, generations, 0)
+ Test whether it finds the best results as expected.
+ """
- # This test uses a cost function that is the negative of the previous cost
- # function. Therefore, the best result should be found in task with all the
- # flags enabled.
- all_flags_tasks = _GenerateAllFlagsTasks(specs)
- cost_function = 'sys.maxint - sum(values[0:len(values)])'
- # Compute the cost of the task that turns on all the flags.
- all_flags = list(all_flags_tasks)[0].GetFlags()
- cost = _ComputeCost(cost_function, specs, all_flags)
+ # Initiate the build/test command and the log directory.
+ Task.InitLogCommand(None, None, "output")
- # Test the algorithm. The cost function is negative of the summation of all
- # the values of all the flags. Therefore, the best value is supposed to be
- # 0, i.e., when all the flags are disabled.
- # The concrete type of the generation decides how the next generation will
- # be generated.
- _TestAlgorithm(cost_function, specs, generations, cost)
+ # Generate the testing specs.
+ specs = self._GenerateFlagSpecifications()
+
+ # Generate the initial generations. The generation contains the base line
+ # task that turns on all the flags and tasks that each turn off one of the
+ # flags.
+ generations = _GenerateAllIterativeEliminationTasks(specs)
+
+ # Test the algorithm. The cost function is the summation of all the values
+ # of all the flags. Therefore, the best value is supposed to be 0, i.e.,
+ # when all the flags are disabled.
+ _TestAlgorithm("sum(values[0:len(values)])", specs, generations, 0)
+
+ # This test uses a cost function that is the negative of the previous cost
+ # function. Therefore, the best result should be found in task with all the
+ # flags enabled.
+ all_flags_tasks = _GenerateAllFlagsTasks(specs)
+ cost_function = "sys.maxint - sum(values[0:len(values)])"
+ # Compute the cost of the task that turns on all the flags.
+ all_flags = list(all_flags_tasks)[0].GetFlags()
+ cost = _ComputeCost(cost_function, specs, all_flags)
+
+ # Test the algorithm. The cost function is negative of the summation of all
+ # the values of all the flags. Therefore, the best value is supposed to be
+ # 0, i.e., when all the flags are disabled.
+ # The concrete type of the generation decides how the next generation will
+ # be generated.
+ _TestAlgorithm(cost_function, specs, generations, cost)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/binary_search_tool/MAINTENANCE b/binary_search_tool/MAINTENANCE
index 8f96ff1..90ac582 100644
--- a/binary_search_tool/MAINTENANCE
+++ b/binary_search_tool/MAINTENANCE
@@ -1,4 +1,4 @@
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/binary_search_tool/__init__.py b/binary_search_tool/__init__.py
index 76500de..6e3ade4 100644
--- a/binary_search_tool/__init__.py
+++ b/binary_search_tool/__init__.py
@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/binary_search_tool/android/boot_test.sh b/binary_search_tool/android/boot_test.sh
index dc87160..4c0c77e 100755
--- a/binary_search_tool/android/boot_test.sh
+++ b/binary_search_tool/android/boot_test.sh
@@ -1,6 +1,6 @@
#!/bin/bash -u
#
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
#
# This script pings the android device to determine if it successfully booted.
#
diff --git a/binary_search_tool/android/cleanup.sh b/binary_search_tool/android/cleanup.sh
index 759b3ed..480b830 100755
--- a/binary_search_tool/android/cleanup.sh
+++ b/binary_search_tool/android/cleanup.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
#
# This script is part of the Android binary search triage process.
# It should be the last script called by the user, after the user has
diff --git a/binary_search_tool/android/generate_cmd.sh b/binary_search_tool/android/generate_cmd.sh
index 78a39b1..6d0e569 100755
--- a/binary_search_tool/android/generate_cmd.sh
+++ b/binary_search_tool/android/generate_cmd.sh
@@ -1,6 +1,6 @@
#!/bin/bash -eu
-# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/binary_search_tool/android/get_initial_items.sh b/binary_search_tool/android/get_initial_items.sh
index 2a1eda3..1ed3042 100755
--- a/binary_search_tool/android/get_initial_items.sh
+++ b/binary_search_tool/android/get_initial_items.sh
@@ -1,6 +1,6 @@
#!/bin/bash -u
#
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
#
# This script is intended to be used by binary_search_state.py, as
# part of the binary search triage on the Android source tree. This script
@@ -11,4 +11,3 @@
source android/common.sh
cat ${BISECT_GOOD_BUILD}/_LIST
-
diff --git a/binary_search_tool/android/interactive_test.sh b/binary_search_tool/android/interactive_test.sh
index e506b23..0a8a4b8 100755
--- a/binary_search_tool/android/interactive_test.sh
+++ b/binary_search_tool/android/interactive_test.sh
@@ -1,6 +1,6 @@
#!/bin/bash -u
#
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
#
# This script pings the android device to determine if it successfully booted.
# It then asks the user if the image is good or not, allowing the user to
diff --git a/binary_search_tool/android/setup.sh b/binary_search_tool/android/setup.sh
index 7f8ba0e..0691822 100755
--- a/binary_search_tool/android/setup.sh
+++ b/binary_search_tool/android/setup.sh
@@ -1,6 +1,6 @@
#!/bin/bash -u
#
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
#
# This script is part of the Android binary search triage process.
# It should be the first script called by the user, after the user has set up
diff --git a/binary_search_tool/android/switch_to_bad.sh b/binary_search_tool/android/switch_to_bad.sh
index d44f9f1..2100ed4 100755
--- a/binary_search_tool/android/switch_to_bad.sh
+++ b/binary_search_tool/android/switch_to_bad.sh
@@ -1,6 +1,6 @@
#!/bin/bash -u
#
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
#
# This script is intended to be used by binary_search_state.py, as
# part of the binary search triage on the Android source tree. This script
diff --git a/binary_search_tool/android/switch_to_good.sh b/binary_search_tool/android/switch_to_good.sh
index 557553c..a5be3c3 100755
--- a/binary_search_tool/android/switch_to_good.sh
+++ b/binary_search_tool/android/switch_to_good.sh
@@ -1,6 +1,6 @@
#!/bin/bash -u
#
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
#
# This script is intended to be used by binary_search_state.py, as
# part of the binary search triage on the Android source tree. This script
diff --git a/binary_search_tool/android/test_setup.sh b/binary_search_tool/android/test_setup.sh
index 26f8ec2..be4a0b7 100755
--- a/binary_search_tool/android/test_setup.sh
+++ b/binary_search_tool/android/test_setup.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
#
# This is the test setup script for generating an Android image based off the
# current working build tree. make is called to relink the object files and
diff --git a/binary_search_tool/binary_search_perforce.py b/binary_search_tool/binary_search_perforce.py
index f2a3c8d..01756b8 100755
--- a/binary_search_tool/binary_search_perforce.py
+++ b/binary_search_tool/binary_search_perforce.py
@@ -1,15 +1,13 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module of binary serch for perforce."""
-from __future__ import division
-from __future__ import print_function
-import math
import argparse
+import math
import os
import re
import sys
@@ -18,496 +16,562 @@
from cros_utils import command_executer
from cros_utils import logger
+
verbose = True
def _GetP4ClientSpec(client_name, p4_paths):
- p4_string = ''
- for p4_path in p4_paths:
- if ' ' not in p4_path:
- p4_string += ' -a %s' % p4_path
- else:
- p4_string += ' -a "' + (' //' + client_name + '/').join(p4_path) + '"'
-
- return p4_string
-
-
-def GetP4Command(client_name, p4_port, p4_paths, checkoutdir, p4_snapshot=''):
- command = ''
-
- if p4_snapshot:
- command += 'mkdir -p ' + checkoutdir
+ p4_string = ""
for p4_path in p4_paths:
- real_path = p4_path[1]
- if real_path.endswith('...'):
- real_path = real_path.replace('/...', '')
- command += (
- '; mkdir -p ' + checkoutdir + '/' + os.path.dirname(real_path))
- command += ('&& rsync -lr ' + p4_snapshot + '/' + real_path + ' ' +
- checkoutdir + '/' + os.path.dirname(real_path))
- return command
+ if " " not in p4_path:
+ p4_string += " -a %s" % p4_path
+ else:
+ p4_string += (
+ ' -a "' + (" //" + client_name + "/").join(p4_path) + '"'
+ )
- command += ' export P4CONFIG=.p4config'
- command += ' && mkdir -p ' + checkoutdir
- command += ' && cd ' + checkoutdir
- command += ' && cp ${HOME}/.p4config .'
- command += ' && chmod u+w .p4config'
- command += ' && echo "P4PORT=' + p4_port + '" >> .p4config'
- command += ' && echo "P4CLIENT=' + client_name + '" >> .p4config'
- command += (' && g4 client ' + _GetP4ClientSpec(client_name, p4_paths))
- command += ' && g4 sync '
- command += ' && cd -'
- return command
+ return p4_string
+
+
+def GetP4Command(client_name, p4_port, p4_paths, checkoutdir, p4_snapshot=""):
+ command = ""
+
+ if p4_snapshot:
+ command += "mkdir -p " + checkoutdir
+ for p4_path in p4_paths:
+ real_path = p4_path[1]
+ if real_path.endswith("..."):
+ real_path = real_path.replace("/...", "")
+ command += (
+ "; mkdir -p "
+ + checkoutdir
+ + "/"
+ + os.path.dirname(real_path)
+ )
+ command += (
+ "&& rsync -lr "
+ + p4_snapshot
+ + "/"
+ + real_path
+ + " "
+ + checkoutdir
+ + "/"
+ + os.path.dirname(real_path)
+ )
+ return command
+
+ command += " export P4CONFIG=.p4config"
+ command += " && mkdir -p " + checkoutdir
+ command += " && cd " + checkoutdir
+ command += " && cp ${HOME}/.p4config ."
+ command += " && chmod u+w .p4config"
+ command += ' && echo "P4PORT=' + p4_port + '" >> .p4config'
+ command += ' && echo "P4CLIENT=' + client_name + '" >> .p4config'
+ command += " && g4 client " + _GetP4ClientSpec(client_name, p4_paths)
+ command += " && g4 sync "
+ command += " && cd -"
+ return command
class BinarySearchPoint(object):
- """Class of binary search point."""
+ """Class of binary search point."""
- def __init__(self, revision, status, tag=None):
- self.revision = revision
- self.status = status
- self.tag = tag
+ def __init__(self, revision, status, tag=None):
+ self.revision = revision
+ self.status = status
+ self.tag = tag
class BinarySearcherForPass(object):
- """Class of pass level binary searcher."""
+ """Class of pass level binary searcher."""
- def __init__(self, logger_to_set=None):
- self.current = 0
- self.lo = 0
- self.hi = 0
- self.total = 0
- if logger_to_set is not None:
- self.logger = logger_to_set
- else:
- self.logger = logger.GetLogger()
+ def __init__(self, logger_to_set=None):
+ self.current = 0
+ self.lo = 0
+ self.hi = 0
+ self.total = 0
+ if logger_to_set is not None:
+ self.logger = logger_to_set
+ else:
+ self.logger = logger.GetLogger()
- def GetNext(self):
- # For the first run, update self.hi with total pass/transformation count
- if self.hi == 0:
- self.hi = self.total
- self.current = (self.hi + self.lo) // 2
- message = ('Bisecting between: (%d, %d)' % (self.lo, self.hi))
- self.logger.LogOutput(message, print_to_console=verbose)
- message = ('Current limit number: %d' % self.current)
- self.logger.LogOutput(message, print_to_console=verbose)
- return self.current
+ def GetNext(self):
+ # For the first run, update self.hi with total pass/transformation count
+ if self.hi == 0:
+ self.hi = self.total
+ self.current = (self.hi + self.lo) // 2
+ message = "Bisecting between: (%d, %d)" % (self.lo, self.hi)
+ self.logger.LogOutput(message, print_to_console=verbose)
+ message = "Current limit number: %d" % self.current
+ self.logger.LogOutput(message, print_to_console=verbose)
+ return self.current
- def SetStatus(self, status):
- """Set lo/hi status based on test script result
+ def SetStatus(self, status):
+ """Set lo/hi status based on test script result
- If status == 0, it means that runtime error is not introduced until current
- pass/transformation, so we need to increase lower bound for binary search.
+ If status == 0, it means that runtime error is not introduced until current
+ pass/transformation, so we need to increase lower bound for binary search.
- If status == 1, it means that runtime error still happens with current pass/
- transformation, so we need to decrease upper bound for binary search.
+ If status == 1, it means that runtime error still happens with current pass/
+ transformation, so we need to decrease upper bound for binary search.
- Returns:
- True if we find the bad pass/transformation, or cannot find bad one after
- decreasing to the first pass/transformation. Otherwise False.
- """
- assert status in (0, 1, 125), status
+ Returns:
+ True if we find the bad pass/transformation, or cannot find bad one after
+ decreasing to the first pass/transformation. Otherwise False.
+ """
+ assert status in (0, 1, 125), status
- if self.current == 0:
- message = ('Runtime error occurs before first pass/transformation. '
- 'Stop binary searching.')
- self.logger.LogOutput(message, print_to_console=verbose)
- return True
+ if self.current == 0:
+ message = (
+ "Runtime error occurs before first pass/transformation. "
+ "Stop binary searching."
+ )
+ self.logger.LogOutput(message, print_to_console=verbose)
+ return True
- if status == 0:
- message = ('Runtime error is not reproduced, increasing lower bound.')
- self.logger.LogOutput(message, print_to_console=verbose)
- self.lo = self.current + 1
- elif status == 1:
- message = ('Runtime error is reproduced, decreasing upper bound..')
- self.logger.LogOutput(message, print_to_console=verbose)
- self.hi = self.current
+ if status == 0:
+ message = "Runtime error is not reproduced, increasing lower bound."
+ self.logger.LogOutput(message, print_to_console=verbose)
+ self.lo = self.current + 1
+ elif status == 1:
+ message = "Runtime error is reproduced, decreasing upper bound.."
+ self.logger.LogOutput(message, print_to_console=verbose)
+ self.hi = self.current
- if self.lo >= self.hi:
- return True
+ if self.lo >= self.hi:
+ return True
- return False
+ return False
class BinarySearcher(object):
- """Class of binary searcher."""
+ """Class of binary searcher."""
- def __init__(self, logger_to_set=None):
- self.sorted_list = []
- self.index_log = []
- self.status_log = []
- self.skipped_indices = []
- self.current = 0
- self.points = {}
- self.lo = 0
- self.hi = 0
- if logger_to_set is not None:
- self.logger = logger_to_set
- else:
- self.logger = logger.GetLogger()
+ def __init__(self, logger_to_set=None):
+ self.sorted_list = []
+ self.index_log = []
+ self.status_log = []
+ self.skipped_indices = []
+ self.current = 0
+ self.points = {}
+ self.lo = 0
+ self.hi = 0
+ if logger_to_set is not None:
+ self.logger = logger_to_set
+ else:
+ self.logger = logger.GetLogger()
- def SetSortedList(self, sorted_list):
- assert sorted_list
- self.sorted_list = sorted_list
- self.index_log = []
- self.hi = len(sorted_list) - 1
- self.lo = 0
- self.points = {}
- for i in range(len(self.sorted_list)):
- bsp = BinarySearchPoint(self.sorted_list[i], -1, 'Not yet done.')
- self.points[i] = bsp
+ def SetSortedList(self, sorted_list):
+ assert sorted_list
+ self.sorted_list = sorted_list
+ self.index_log = []
+ self.hi = len(sorted_list) - 1
+ self.lo = 0
+ self.points = {}
+ for i in range(len(self.sorted_list)):
+ bsp = BinarySearchPoint(self.sorted_list[i], -1, "Not yet done.")
+ self.points[i] = bsp
- def SetStatus(self, status, tag=None):
- message = ('Revision: %s index: %d returned: %d' %
- (self.sorted_list[self.current], self.current, status))
- self.logger.LogOutput(message, print_to_console=verbose)
- assert status in (0, 1, 125), status
- self.index_log.append(self.current)
- self.status_log.append(status)
- bsp = BinarySearchPoint(self.sorted_list[self.current], status, tag)
- self.points[self.current] = bsp
+ def SetStatus(self, status, tag=None):
+ message = "Revision: %s index: %d returned: %d" % (
+ self.sorted_list[self.current],
+ self.current,
+ status,
+ )
+ self.logger.LogOutput(message, print_to_console=verbose)
+ assert status in (0, 1, 125), status
+ self.index_log.append(self.current)
+ self.status_log.append(status)
+ bsp = BinarySearchPoint(self.sorted_list[self.current], status, tag)
+ self.points[self.current] = bsp
- if status == 125:
- self.skipped_indices.append(self.current)
+ if status == 125:
+ self.skipped_indices.append(self.current)
- if status in (0, 1):
- if status == 0:
- self.lo = self.current + 1
- elif status == 1:
- self.hi = self.current
- self.logger.LogOutput('lo: %d hi: %d\n' % (self.lo, self.hi))
- self.current = (self.lo + self.hi) // 2
+ if status in (0, 1):
+ if status == 0:
+ self.lo = self.current + 1
+ elif status == 1:
+ self.hi = self.current
+ self.logger.LogOutput("lo: %d hi: %d\n" % (self.lo, self.hi))
+ self.current = (self.lo + self.hi) // 2
- if self.lo == self.hi:
- message = ('Search complete. First bad version: %s'
- ' at index: %d' % (self.sorted_list[self.current], self.lo))
- self.logger.LogOutput(message)
- return True
+ if self.lo == self.hi:
+ message = (
+ "Search complete. First bad version: %s"
+ " at index: %d"
+ % (
+ self.sorted_list[self.current],
+ self.lo,
+ )
+ )
+ self.logger.LogOutput(message)
+ return True
- for index in range(self.lo, self.hi):
- if index not in self.skipped_indices:
- return False
- self.logger.LogOutput(
- 'All skipped indices between: %d and %d\n' % (self.lo, self.hi),
- print_to_console=verbose)
- return True
+ for index in range(self.lo, self.hi):
+ if index not in self.skipped_indices:
+ return False
+ self.logger.LogOutput(
+ "All skipped indices between: %d and %d\n" % (self.lo, self.hi),
+ print_to_console=verbose,
+ )
+ return True
- # Does a better job with chromeos flakiness.
- def GetNextFlakyBinary(self):
- t = (self.lo, self.current, self.hi)
- q = [t]
- while q:
- element = q.pop(0)
- if element[1] in self.skipped_indices:
- # Go top
- to_add = (element[0], (element[0] + element[1]) // 2, element[1])
- q.append(to_add)
- # Go bottom
- to_add = (element[1], (element[1] + element[2]) // 2, element[2])
- q.append(to_add)
- else:
- self.current = element[1]
- return
- assert q, 'Queue should never be 0-size!'
+ # Does a better job with chromeos flakiness.
+ def GetNextFlakyBinary(self):
+ t = (self.lo, self.current, self.hi)
+ q = [t]
+ while q:
+ element = q.pop(0)
+ if element[1] in self.skipped_indices:
+ # Go top
+ to_add = (
+ element[0],
+ (element[0] + element[1]) // 2,
+ element[1],
+ )
+ q.append(to_add)
+ # Go bottom
+ to_add = (
+ element[1],
+ (element[1] + element[2]) // 2,
+ element[2],
+ )
+ q.append(to_add)
+ else:
+ self.current = element[1]
+ return
+ assert q, "Queue should never be 0-size!"
- def GetNextFlakyLinear(self):
- current_hi = self.current
- current_lo = self.current
- while True:
- if current_hi < self.hi and current_hi not in self.skipped_indices:
- self.current = current_hi
- break
- if current_lo >= self.lo and current_lo not in self.skipped_indices:
- self.current = current_lo
- break
- if current_lo < self.lo and current_hi >= self.hi:
- break
+ def GetNextFlakyLinear(self):
+ current_hi = self.current
+ current_lo = self.current
+ while True:
+ if current_hi < self.hi and current_hi not in self.skipped_indices:
+ self.current = current_hi
+ break
+ if current_lo >= self.lo and current_lo not in self.skipped_indices:
+ self.current = current_lo
+ break
+ if current_lo < self.lo and current_hi >= self.hi:
+ break
- current_hi += 1
- current_lo -= 1
+ current_hi += 1
+ current_lo -= 1
- def GetNext(self):
- self.current = (self.hi + self.lo) // 2
- # Try going forward if current is skipped.
- if self.current in self.skipped_indices:
- self.GetNextFlakyBinary()
+ def GetNext(self):
+ self.current = (self.hi + self.lo) // 2
+ # Try going forward if current is skipped.
+ if self.current in self.skipped_indices:
+ self.GetNextFlakyBinary()
- # TODO: Add an estimated time remaining as well.
- message = ('Estimated tries: min: %d max: %d\n' % (1 + math.log(
- self.hi - self.lo, 2), self.hi - self.lo - len(self.skipped_indices)))
- self.logger.LogOutput(message, print_to_console=verbose)
- message = ('lo: %d hi: %d current: %d version: %s\n' %
- (self.lo, self.hi, self.current, self.sorted_list[self.current]))
- self.logger.LogOutput(message, print_to_console=verbose)
- self.logger.LogOutput(str(self), print_to_console=verbose)
- return self.sorted_list[self.current]
+ # TODO: Add an estimated time remaining as well.
+ message = "Estimated tries: min: %d max: %d\n" % (
+ 1 + math.log(self.hi - self.lo, 2),
+ self.hi - self.lo - len(self.skipped_indices),
+ )
+ self.logger.LogOutput(message, print_to_console=verbose)
+ message = "lo: %d hi: %d current: %d version: %s\n" % (
+ self.lo,
+ self.hi,
+ self.current,
+ self.sorted_list[self.current],
+ )
+ self.logger.LogOutput(message, print_to_console=verbose)
+ self.logger.LogOutput(str(self), print_to_console=verbose)
+ return self.sorted_list[self.current]
- def SetLoRevision(self, lo_revision):
- self.lo = self.sorted_list.index(lo_revision)
+ def SetLoRevision(self, lo_revision):
+ self.lo = self.sorted_list.index(lo_revision)
- def SetHiRevision(self, hi_revision):
- self.hi = self.sorted_list.index(hi_revision)
+ def SetHiRevision(self, hi_revision):
+ self.hi = self.sorted_list.index(hi_revision)
- def GetAllPoints(self):
- to_return = ''
- for i in range(len(self.sorted_list)):
- to_return += (
- '%d %d %s\n' % (self.points[i].status, i, self.points[i].revision))
+ def GetAllPoints(self):
+ to_return = ""
+ for i in range(len(self.sorted_list)):
+ to_return += "%d %d %s\n" % (
+ self.points[i].status,
+ i,
+ self.points[i].revision,
+ )
- return to_return
+ return to_return
- def __str__(self):
- to_return = ''
- to_return += 'Current: %d\n' % self.current
- to_return += str(self.index_log) + '\n'
- revision_log = []
- for index in self.index_log:
- revision_log.append(self.sorted_list[index])
- to_return += str(revision_log) + '\n'
- to_return += str(self.status_log) + '\n'
- to_return += 'Skipped indices:\n'
- to_return += str(self.skipped_indices) + '\n'
- to_return += self.GetAllPoints()
- return to_return
+ def __str__(self):
+ to_return = ""
+ to_return += "Current: %d\n" % self.current
+ to_return += str(self.index_log) + "\n"
+ revision_log = []
+ for index in self.index_log:
+ revision_log.append(self.sorted_list[index])
+ to_return += str(revision_log) + "\n"
+ to_return += str(self.status_log) + "\n"
+ to_return += "Skipped indices:\n"
+ to_return += str(self.skipped_indices) + "\n"
+ to_return += self.GetAllPoints()
+ return to_return
class RevisionInfo(object):
- """Class of reversion info."""
+ """Class of reversion info."""
- def __init__(self, date, client, description):
- self.date = date
- self.client = client
- self.description = description
- self.status = -1
+ def __init__(self, date, client, description):
+ self.date = date
+ self.client = client
+ self.description = description
+ self.status = -1
class VCSBinarySearcher(object):
- """Class of VCS binary searcher."""
+ """Class of VCS binary searcher."""
- def __init__(self):
- self.bs = BinarySearcher()
- self.rim = {}
- self.current_ce = None
- self.checkout_dir = None
- self.current_revision = None
+ def __init__(self):
+ self.bs = BinarySearcher()
+ self.rim = {}
+ self.current_ce = None
+ self.checkout_dir = None
+ self.current_revision = None
- def Initialize(self):
- pass
+ def Initialize(self):
+ pass
- def GetNextRevision(self):
- pass
+ def GetNextRevision(self):
+ pass
- def CheckoutRevision(self, current_revision):
- pass
+ def CheckoutRevision(self, current_revision):
+ pass
- def SetStatus(self, status):
- pass
+ def SetStatus(self, status):
+ pass
- def Cleanup(self):
- pass
+ def Cleanup(self):
+ pass
- def SetGoodRevision(self, revision):
- if revision is None:
- return
- assert revision in self.bs.sorted_list
- self.bs.SetLoRevision(revision)
+ def SetGoodRevision(self, revision):
+ if revision is None:
+ return
+ assert revision in self.bs.sorted_list
+ self.bs.SetLoRevision(revision)
- def SetBadRevision(self, revision):
- if revision is None:
- return
- assert revision in self.bs.sorted_list
- self.bs.SetHiRevision(revision)
+ def SetBadRevision(self, revision):
+ if revision is None:
+ return
+ assert revision in self.bs.sorted_list
+ self.bs.SetHiRevision(revision)
class P4BinarySearcher(VCSBinarySearcher):
- """Class of P4 binary searcher."""
+ """Class of P4 binary searcher."""
- def __init__(self, p4_port, p4_paths, test_command):
- VCSBinarySearcher.__init__(self)
- self.p4_port = p4_port
- self.p4_paths = p4_paths
- self.test_command = test_command
- self.checkout_dir = tempfile.mkdtemp()
- self.ce = command_executer.GetCommandExecuter()
- self.client_name = 'binary-searcher-$HOSTNAME-$USER'
- self.job_log_root = '/home/asharif/www/coreboot_triage/'
- self.changes = None
+ def __init__(self, p4_port, p4_paths, test_command):
+ VCSBinarySearcher.__init__(self)
+ self.p4_port = p4_port
+ self.p4_paths = p4_paths
+ self.test_command = test_command
+ self.checkout_dir = tempfile.mkdtemp()
+ self.ce = command_executer.GetCommandExecuter()
+ self.client_name = "binary-searcher-$HOSTNAME-$USER"
+ self.job_log_root = "/home/asharif/www/coreboot_triage/"
+ self.changes = None
- def Initialize(self):
- self.Cleanup()
- command = GetP4Command(self.client_name, self.p4_port, self.p4_paths, 1,
- self.checkout_dir)
- self.ce.RunCommand(command)
- command = 'cd %s && g4 changes ...' % self.checkout_dir
- _, out, _ = self.ce.RunCommandWOutput(command)
- self.changes = re.findall(r'Change (\d+)', out)
- change_infos = re.findall(
- r'Change (\d+) on ([\d/]+) by '
- r"([^\s]+) ('[^']*')", out)
- for change_info in change_infos:
- ri = RevisionInfo(change_info[1], change_info[2], change_info[3])
- self.rim[change_info[0]] = ri
- # g4 gives changes in reverse chronological order.
- self.changes.reverse()
- self.bs.SetSortedList(self.changes)
+ def Initialize(self):
+ self.Cleanup()
+ command = GetP4Command(
+ self.client_name, self.p4_port, self.p4_paths, 1, self.checkout_dir
+ )
+ self.ce.RunCommand(command)
+ command = "cd %s && g4 changes ..." % self.checkout_dir
+ _, out, _ = self.ce.RunCommandWOutput(command)
+ self.changes = re.findall(r"Change (\d+)", out)
+ change_infos = re.findall(
+ r"Change (\d+) on ([\d/]+) by " r"([^\s]+) ('[^']*')", out
+ )
+ for change_info in change_infos:
+ ri = RevisionInfo(change_info[1], change_info[2], change_info[3])
+ self.rim[change_info[0]] = ri
+ # g4 gives changes in reverse chronological order.
+ self.changes.reverse()
+ self.bs.SetSortedList(self.changes)
- def SetStatus(self, status):
- self.rim[self.current_revision].status = status
- return self.bs.SetStatus(status)
+ def SetStatus(self, status):
+ self.rim[self.current_revision].status = status
+ return self.bs.SetStatus(status)
- def GetNextRevision(self):
- next_revision = self.bs.GetNext()
- self.current_revision = next_revision
- return next_revision
+ def GetNextRevision(self):
+ next_revision = self.bs.GetNext()
+ self.current_revision = next_revision
+ return next_revision
- def CleanupCLs(self):
- if not os.path.isfile(self.checkout_dir + '/.p4config'):
- command = 'cd %s' % self.checkout_dir
- command += ' && cp ${HOME}/.p4config .'
- command += ' && echo "P4PORT=' + self.p4_port + '" >> .p4config'
- command += ' && echo "P4CLIENT=' + self.client_name + '" >> .p4config'
- self.ce.RunCommand(command)
- command = 'cd %s' % self.checkout_dir
- command += '; g4 changes -c %s' % self.client_name
- _, out, _ = self.ce.RunCommandWOutput(command)
- changes = re.findall(r'Change (\d+)', out)
- if changes:
- command = 'cd %s' % self.checkout_dir
- for change in changes:
- command += '; g4 revert -c %s' % change
- self.ce.RunCommand(command)
+ def CleanupCLs(self):
+ if not os.path.isfile(self.checkout_dir + "/.p4config"):
+ command = "cd %s" % self.checkout_dir
+ command += " && cp ${HOME}/.p4config ."
+ command += ' && echo "P4PORT=' + self.p4_port + '" >> .p4config'
+ command += (
+ ' && echo "P4CLIENT=' + self.client_name + '" >> .p4config'
+ )
+ self.ce.RunCommand(command)
+ command = "cd %s" % self.checkout_dir
+ command += "; g4 changes -c %s" % self.client_name
+ _, out, _ = self.ce.RunCommandWOutput(command)
+ changes = re.findall(r"Change (\d+)", out)
+ if changes:
+ command = "cd %s" % self.checkout_dir
+ for change in changes:
+ command += "; g4 revert -c %s" % change
+ self.ce.RunCommand(command)
- def CleanupClient(self):
- command = 'cd %s' % self.checkout_dir
- command += '; g4 revert ...'
- command += '; g4 client -d %s' % self.client_name
- self.ce.RunCommand(command)
+ def CleanupClient(self):
+ command = "cd %s" % self.checkout_dir
+ command += "; g4 revert ..."
+ command += "; g4 client -d %s" % self.client_name
+ self.ce.RunCommand(command)
- def Cleanup(self):
- self.CleanupCLs()
- self.CleanupClient()
+ def Cleanup(self):
+ self.CleanupCLs()
+ self.CleanupClient()
- def __str__(self):
- to_return = ''
- for change in self.changes:
- ri = self.rim[change]
- if ri.status == -1:
- to_return = '%s\t%d\n' % (change, ri.status)
- else:
- to_return += ('%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\n' %
- (change, ri.status, ri.date, ri.client, ri.description,
- self.job_log_root + change + '.cmd', self.job_log_root +
- change + '.out', self.job_log_root + change + '.err'))
- return to_return
+ def __str__(self):
+ to_return = ""
+ for change in self.changes:
+ ri = self.rim[change]
+ if ri.status == -1:
+ to_return = "%s\t%d\n" % (change, ri.status)
+ else:
+ to_return += "%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\n" % (
+ change,
+ ri.status,
+ ri.date,
+ ri.client,
+ ri.description,
+ self.job_log_root + change + ".cmd",
+ self.job_log_root + change + ".out",
+ self.job_log_root + change + ".err",
+ )
+ return to_return
class P4GCCBinarySearcher(P4BinarySearcher):
- """Class of P4 gcc binary searcher."""
+ """Class of P4 gcc binary searcher."""
- # TODO: eventually get these patches from g4 instead of creating them manually
- def HandleBrokenCLs(self, current_revision):
- cr = int(current_revision)
- problematic_ranges = []
- problematic_ranges.append([44528, 44539])
- problematic_ranges.append([44528, 44760])
- problematic_ranges.append([44335, 44882])
- command = 'pwd'
- for pr in problematic_ranges:
- if cr in range(pr[0], pr[1]):
- patch_file = '/home/asharif/triage_tool/%d-%d.patch' % (pr[0], pr[1])
- with open(patch_file, encoding='utf-8') as f:
- patch = f.read()
- files = re.findall('--- (//.*)', patch)
- command += '; cd %s' % self.checkout_dir
- for f in files:
- command += '; g4 open %s' % f
- command += '; patch -p2 < %s' % patch_file
- self.current_ce.RunCommand(command)
+ # TODO: eventually get these patches from g4 instead of creating them manually
+ def HandleBrokenCLs(self, current_revision):
+ cr = int(current_revision)
+ problematic_ranges = []
+ problematic_ranges.append([44528, 44539])
+ problematic_ranges.append([44528, 44760])
+ problematic_ranges.append([44335, 44882])
+ command = "pwd"
+ for pr in problematic_ranges:
+ if cr in range(pr[0], pr[1]):
+ patch_file = "/home/asharif/triage_tool/%d-%d.patch" % (
+ pr[0],
+ pr[1],
+ )
+ with open(patch_file, encoding="utf-8") as f:
+ patch = f.read()
+ files = re.findall("--- (//.*)", patch)
+ command += "; cd %s" % self.checkout_dir
+ for f in files:
+ command += "; g4 open %s" % f
+ command += "; patch -p2 < %s" % patch_file
+ self.current_ce.RunCommand(command)
- def CheckoutRevision(self, current_revision):
- job_logger = logger.Logger(
- self.job_log_root, current_revision, True, subdir='')
- self.current_ce = command_executer.GetCommandExecuter(job_logger)
+ def CheckoutRevision(self, current_revision):
+ job_logger = logger.Logger(
+ self.job_log_root, current_revision, True, subdir=""
+ )
+ self.current_ce = command_executer.GetCommandExecuter(job_logger)
- self.CleanupCLs()
- # Change the revision of only the gcc part of the toolchain.
- command = (
- 'cd %s/gcctools/google_vendor_src_branch/gcc '
- '&& g4 revert ...; g4 sync @%s' % (self.checkout_dir, current_revision))
- self.current_ce.RunCommand(command)
+ self.CleanupCLs()
+ # Change the revision of only the gcc part of the toolchain.
+ command = (
+ "cd %s/gcctools/google_vendor_src_branch/gcc "
+ "&& g4 revert ...; g4 sync @%s"
+ % (self.checkout_dir, current_revision)
+ )
+ self.current_ce.RunCommand(command)
- self.HandleBrokenCLs(current_revision)
+ self.HandleBrokenCLs(current_revision)
def Main(argv):
- """The main function."""
- # Common initializations
- ### command_executer.InitCommandExecuter(True)
- ce = command_executer.GetCommandExecuter()
+ """The main function."""
+ # Common initializations
+ ### command_executer.InitCommandExecuter(True)
+ ce = command_executer.GetCommandExecuter()
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '-n',
- '--num_tries',
- dest='num_tries',
- default='100',
- help='Number of tries.')
- parser.add_argument(
- '-g',
- '--good_revision',
- dest='good_revision',
- help='Last known good revision.')
- parser.add_argument(
- '-b',
- '--bad_revision',
- dest='bad_revision',
- help='Last known bad revision.')
- parser.add_argument(
- '-s', '--script', dest='script', help='Script to run for every version.')
- options = parser.parse_args(argv)
- # First get all revisions
- p4_paths = [
- '//depot2/gcctools/google_vendor_src_branch/gcc/gcc-4.4.3/...',
- '//depot2/gcctools/google_vendor_src_branch/binutils/'
- 'binutils-2.20.1-mobile/...',
- '//depot2/gcctools/google_vendor_src_branch/'
- 'binutils/binutils-20100303/...'
- ]
- p4gccbs = P4GCCBinarySearcher('perforce2:2666', p4_paths, '')
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "-n",
+ "--num_tries",
+ dest="num_tries",
+ default="100",
+ help="Number of tries.",
+ )
+ parser.add_argument(
+ "-g",
+ "--good_revision",
+ dest="good_revision",
+ help="Last known good revision.",
+ )
+ parser.add_argument(
+ "-b",
+ "--bad_revision",
+ dest="bad_revision",
+ help="Last known bad revision.",
+ )
+ parser.add_argument(
+ "-s", "--script", dest="script", help="Script to run for every version."
+ )
+ options = parser.parse_args(argv)
+ # First get all revisions
+ p4_paths = [
+ "//depot2/gcctools/google_vendor_src_branch/gcc/gcc-4.4.3/...",
+ "//depot2/gcctools/google_vendor_src_branch/binutils/"
+ "binutils-2.20.1-mobile/...",
+ "//depot2/gcctools/google_vendor_src_branch/"
+ "binutils/binutils-20100303/...",
+ ]
+ p4gccbs = P4GCCBinarySearcher("perforce2:2666", p4_paths, "")
- # Main loop:
- terminated = False
- num_tries = int(options.num_tries)
- script = os.path.expanduser(options.script)
+ # Main loop:
+ terminated = False
+ num_tries = int(options.num_tries)
+ script = os.path.expanduser(options.script)
- try:
- p4gccbs.Initialize()
- p4gccbs.SetGoodRevision(options.good_revision)
- p4gccbs.SetBadRevision(options.bad_revision)
- while not terminated and num_tries > 0:
- current_revision = p4gccbs.GetNextRevision()
+ try:
+ p4gccbs.Initialize()
+ p4gccbs.SetGoodRevision(options.good_revision)
+ p4gccbs.SetBadRevision(options.bad_revision)
+ while not terminated and num_tries > 0:
+ current_revision = p4gccbs.GetNextRevision()
- # Now run command to get the status
- ce = command_executer.GetCommandExecuter()
- command = '%s %s' % (script, p4gccbs.checkout_dir)
- status = ce.RunCommand(command)
- message = (
- 'Revision: %s produced: %d status\n' % (current_revision, status))
- logger.GetLogger().LogOutput(message, print_to_console=verbose)
- terminated = p4gccbs.SetStatus(status)
- num_tries -= 1
- logger.GetLogger().LogOutput(str(p4gccbs), print_to_console=verbose)
+ # Now run command to get the status
+ ce = command_executer.GetCommandExecuter()
+ command = "%s %s" % (script, p4gccbs.checkout_dir)
+ status = ce.RunCommand(command)
+ message = "Revision: %s produced: %d status\n" % (
+ current_revision,
+ status,
+ )
+ logger.GetLogger().LogOutput(message, print_to_console=verbose)
+ terminated = p4gccbs.SetStatus(status)
+ num_tries -= 1
+ logger.GetLogger().LogOutput(str(p4gccbs), print_to_console=verbose)
- if not terminated:
- logger.GetLogger().LogOutput(
- 'Tries: %d expired.' % num_tries, print_to_console=verbose)
- logger.GetLogger().LogOutput(str(p4gccbs.bs), print_to_console=verbose)
- except (KeyboardInterrupt, SystemExit):
- logger.GetLogger().LogOutput('Cleaning up...')
- finally:
- logger.GetLogger().LogOutput(str(p4gccbs.bs), print_to_console=verbose)
- p4gccbs.Cleanup()
+ if not terminated:
+ logger.GetLogger().LogOutput(
+ "Tries: %d expired." % num_tries, print_to_console=verbose
+ )
+ logger.GetLogger().LogOutput(str(p4gccbs.bs), print_to_console=verbose)
+ except (KeyboardInterrupt, SystemExit):
+ logger.GetLogger().LogOutput("Cleaning up...")
+ finally:
+ logger.GetLogger().LogOutput(str(p4gccbs.bs), print_to_console=verbose)
+ p4gccbs.Cleanup()
-if __name__ == '__main__':
- Main(sys.argv[1:])
+if __name__ == "__main__":
+ Main(sys.argv[1:])
diff --git a/binary_search_tool/binary_search_state.py b/binary_search_tool/binary_search_state.py
index 1ddd65c..1b423b5 100755
--- a/binary_search_tool/binary_search_state.py
+++ b/binary_search_tool/binary_search_state.py
@@ -1,13 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The binary search wrapper."""
-from __future__ import division
-from __future__ import print_function
import argparse
import contextlib
@@ -30,871 +28,993 @@
from cros_utils import command_executer
from cros_utils import logger
-GOOD_SET_VAR = 'BISECT_GOOD_SET'
-BAD_SET_VAR = 'BISECT_BAD_SET'
-STATE_FILE = '%s.state' % sys.argv[0]
+GOOD_SET_VAR = "BISECT_GOOD_SET"
+BAD_SET_VAR = "BISECT_BAD_SET"
+
+STATE_FILE = "%s.state" % sys.argv[0]
HIDDEN_STATE_FILE = os.path.join(
- os.path.dirname(STATE_FILE), '.%s' % os.path.basename(STATE_FILE))
+ os.path.dirname(STATE_FILE), ".%s" % os.path.basename(STATE_FILE)
+)
@contextlib.contextmanager
def SetFile(env_var, items):
- """Generate set files that can be used by switch/test scripts.
+ """Generate set files that can be used by switch/test scripts.
- Generate temporary set file (good/bad) holding contents of good/bad items for
- the current binary search iteration. Store the name of each file as an
- environment variable so all child processes can access it.
+ Generate temporary set file (good/bad) holding contents of good/bad items for
+ the current binary search iteration. Store the name of each file as an
+ environment variable so all child processes can access it.
- This function is a contextmanager, meaning it's meant to be used with the
- "with" statement in Python. This is so cleanup and setup happens automatically
- and cleanly. Execution of the outer "with" statement happens at the "yield"
- statement.
+ This function is a contextmanager, meaning it's meant to be used with the
+ "with" statement in Python. This is so cleanup and setup happens automatically
+ and cleanly. Execution of the outer "with" statement happens at the "yield"
+ statement.
- Args:
- env_var: What environment variable to store the file name in.
- items: What items are in this set.
- """
- with tempfile.NamedTemporaryFile('w', encoding='utf-8') as f:
- os.environ[env_var] = f.name
- f.write('\n'.join(items))
- f.flush()
- yield
+ Args:
+ env_var: What environment variable to store the file name in.
+ items: What items are in this set.
+ """
+ with tempfile.NamedTemporaryFile("w", encoding="utf-8") as f:
+ os.environ[env_var] = f.name
+ f.write("\n".join(items))
+ f.flush()
+ yield
class BinarySearchState(object):
- """The binary search state class."""
+ """The binary search state class."""
- def __init__(self, get_initial_items, switch_to_good, switch_to_bad,
- test_setup_script, test_script, incremental, prune, pass_bisect,
- ir_diff, iterations, prune_iterations, verify, file_args,
- verbose):
- """BinarySearchState constructor, see Run for full args documentation."""
- self.get_initial_items = get_initial_items
- self.switch_to_good = switch_to_good
- self.switch_to_bad = switch_to_bad
- self.test_setup_script = test_setup_script
- self.test_script = test_script
- self.incremental = incremental
- self.prune = prune
- self.pass_bisect = pass_bisect
- self.ir_diff = ir_diff
- self.iterations = iterations
- self.prune_iterations = prune_iterations
- self.verify = verify
- self.file_args = file_args
- self.verbose = verbose
+ def __init__(
+ self,
+ get_initial_items,
+ switch_to_good,
+ switch_to_bad,
+ test_setup_script,
+ test_script,
+ incremental,
+ prune,
+ pass_bisect,
+ ir_diff,
+ iterations,
+ prune_iterations,
+ verify,
+ file_args,
+ verbose,
+ ):
+ """BinarySearchState constructor, see Run for full args documentation."""
+ self.get_initial_items = get_initial_items
+ self.switch_to_good = switch_to_good
+ self.switch_to_bad = switch_to_bad
+ self.test_setup_script = test_setup_script
+ self.test_script = test_script
+ self.incremental = incremental
+ self.prune = prune
+ self.pass_bisect = pass_bisect
+ self.ir_diff = ir_diff
+ self.iterations = iterations
+ self.prune_iterations = prune_iterations
+ self.verify = verify
+ self.file_args = file_args
+ self.verbose = verbose
- self.l = logger.GetLogger()
- self.ce = command_executer.GetCommandExecuter()
+ self.l = logger.GetLogger()
+ self.ce = command_executer.GetCommandExecuter()
- self.resumed = False
- self.prune_cycles = 0
- self.search_cycles = 0
- self.binary_search = None
- self.all_items = None
- self.cmd_script = None
- self.mode = None
- self.PopulateItemsUsingCommand(self.get_initial_items)
- self.currently_good_items = set()
- self.currently_bad_items = set()
- self.found_items = set()
- self.known_good = set()
+ self.resumed = False
+ self.prune_cycles = 0
+ self.search_cycles = 0
+ self.binary_search = None
+ self.all_items = None
+ self.cmd_script = None
+ self.mode = None
+ self.PopulateItemsUsingCommand(self.get_initial_items)
+ self.currently_good_items = set()
+ self.currently_bad_items = set()
+ self.found_items = set()
+ self.known_good = set()
- self.start_time = time.time()
+ self.start_time = time.time()
- def SwitchToGood(self, item_list):
- """Switch given items to "good" set."""
- if self.incremental:
- self.l.LogOutput(
- 'Incremental set. Wanted to switch %s to good' % str(item_list),
- print_to_console=self.verbose)
- incremental_items = [
- item for item in item_list if item not in self.currently_good_items
- ]
- item_list = incremental_items
- self.l.LogOutput(
- 'Incremental set. Actually switching %s to good' % str(item_list),
- print_to_console=self.verbose)
+ def SwitchToGood(self, item_list):
+ """Switch given items to "good" set."""
+ if self.incremental:
+ self.l.LogOutput(
+ "Incremental set. Wanted to switch %s to good" % str(item_list),
+ print_to_console=self.verbose,
+ )
+ incremental_items = [
+ item
+ for item in item_list
+ if item not in self.currently_good_items
+ ]
+ item_list = incremental_items
+ self.l.LogOutput(
+ "Incremental set. Actually switching %s to good"
+ % str(item_list),
+ print_to_console=self.verbose,
+ )
- if not item_list:
- return
+ if not item_list:
+ return
- self.l.LogOutput(
- 'Switching %s to good' % str(item_list), print_to_console=self.verbose)
- self.RunSwitchScript(self.switch_to_good, item_list)
- self.currently_good_items = self.currently_good_items.union(set(item_list))
- self.currently_bad_items.difference_update(set(item_list))
-
- def SwitchToBad(self, item_list):
- """Switch given items to "bad" set."""
- if self.incremental:
- self.l.LogOutput(
- 'Incremental set. Wanted to switch %s to bad' % str(item_list),
- print_to_console=self.verbose)
- incremental_items = [
- item for item in item_list if item not in self.currently_bad_items
- ]
- item_list = incremental_items
- self.l.LogOutput(
- 'Incremental set. Actually switching %s to bad' % str(item_list),
- print_to_console=self.verbose)
-
- if not item_list:
- return
-
- self.l.LogOutput(
- 'Switching %s to bad' % str(item_list), print_to_console=self.verbose)
- self.RunSwitchScript(self.switch_to_bad, item_list)
- self.currently_bad_items = self.currently_bad_items.union(set(item_list))
- self.currently_good_items.difference_update(set(item_list))
-
- def RunSwitchScript(self, switch_script, item_list):
- """Pass given items to switch script.
-
- Args:
- switch_script: path to switch script
- item_list: list of all items to be switched
- """
- if self.file_args:
- with tempfile.NamedTemporaryFile('w', encoding='utf-8') as f:
- f.write('\n'.join(item_list))
- f.flush()
- command = '%s %s' % (switch_script, f.name)
- ret, _, _ = self.ce.RunCommandWExceptionCleanup(
- command, print_to_console=self.verbose)
- else:
- command = '%s %s' % (switch_script, ' '.join(item_list))
- try:
- ret, _, _ = self.ce.RunCommandWExceptionCleanup(
- command, print_to_console=self.verbose)
- except OSError as e:
- if e.errno == errno.E2BIG:
- raise RuntimeError('Too many arguments for switch script! Use '
- '--file_args')
- assert ret == 0, 'Switch script %s returned %d' % (switch_script, ret)
-
- def TestScript(self):
- """Run test script and return exit code from script."""
- command = self.test_script
- ret, _, _ = self.ce.RunCommandWExceptionCleanup(command)
- return ret
-
- def TestSetupScript(self):
- """Run test setup script and return exit code from script."""
- if not self.test_setup_script:
- return 0
-
- command = self.test_setup_script
- ret, _, _ = self.ce.RunCommandWExceptionCleanup(command)
- return ret
-
- def GenerateBadCommandScript(self, bad_items):
- """Generate command line script for building bad item."""
- assert not self.prune, 'Prune must be false if pass_bisect is set.'
- assert len(bad_items) == 1, 'Pruning is off, but number of bad ' \
- 'items found was not 1.'
- item = list(bad_items)[0]
- command = '%s %s' % (self.pass_bisect, item)
- ret, _, _ = self.ce.RunCommandWExceptionCleanup(
- command, print_to_console=self.verbose)
- return ret
-
- def DoVerify(self):
- """Verify correctness of test environment.
-
- Verify that a "good" set of items produces a "good" result and that a "bad"
- set of items produces a "bad" result. To be run directly before running
- DoSearch. If verify is False this step is skipped.
- """
- if not self.verify:
- return
-
- self.l.LogOutput('VERIFICATION')
- self.l.LogOutput('Beginning tests to verify good/bad sets\n')
-
- self._OutputProgress('Verifying items from GOOD set\n')
- with SetFile(GOOD_SET_VAR, self.all_items), SetFile(BAD_SET_VAR, []):
- self.l.LogOutput('Resetting all items to good to verify.')
- self.SwitchToGood(self.all_items)
- status = self.TestSetupScript()
- assert status == 0, 'When reset_to_good, test setup should succeed.'
- status = self.TestScript()
- assert status == 0, 'When reset_to_good, status should be 0.'
-
- self._OutputProgress('Verifying items from BAD set\n')
- with SetFile(GOOD_SET_VAR, []), SetFile(BAD_SET_VAR, self.all_items):
- self.l.LogOutput('Resetting all items to bad to verify.')
- self.SwitchToBad(self.all_items)
- status = self.TestSetupScript()
- # The following assumption is not true; a bad image might not
- # successfully push onto a device.
- # assert status == 0, 'When reset_to_bad, test setup should succeed.'
- if status == 0:
- status = self.TestScript()
- assert status == 1, 'When reset_to_bad, status should be 1.'
-
- def DoSearchBadItems(self):
- """Perform full search for bad items.
-
- Perform full search until prune_iterations number of bad items are found.
- """
- while (True and len(self.all_items) > 1 and
- self.prune_cycles < self.prune_iterations):
- terminated = self.DoBinarySearchBadItems()
- self.prune_cycles += 1
- if not terminated:
- break
- # Prune is set.
- prune_index = self.binary_search.current
-
- # If found item is last item, no new items can be found
- if prune_index == len(self.all_items) - 1:
- self.l.LogOutput('First bad item is the last item. Breaking.')
- self.l.LogOutput('Bad items are: %s' % self.all_items[-1])
- self.found_items.add(self.all_items[-1])
- break
-
- # If already seen item we have no new bad items to find, finish up
- if self.all_items[prune_index] in self.found_items:
self.l.LogOutput(
- 'Found item already found before: %s.' %
- self.all_items[prune_index],
- print_to_console=self.verbose)
- self.l.LogOutput('No more bad items remaining. Done searching.')
- self.l.LogOutput('Bad items are: %s' % ' '.join(self.found_items))
- break
+ "Switching %s to good" % str(item_list),
+ print_to_console=self.verbose,
+ )
+ self.RunSwitchScript(self.switch_to_good, item_list)
+ self.currently_good_items = self.currently_good_items.union(
+ set(item_list)
+ )
+ self.currently_bad_items.difference_update(set(item_list))
- new_all_items = list(self.all_items)
- # Move prune item to the end of the list.
- new_all_items.append(new_all_items.pop(prune_index))
- self.found_items.add(new_all_items[-1])
+ def SwitchToBad(self, item_list):
+ """Switch given items to "bad" set."""
+ if self.incremental:
+ self.l.LogOutput(
+ "Incremental set. Wanted to switch %s to bad" % str(item_list),
+ print_to_console=self.verbose,
+ )
+ incremental_items = [
+ item
+ for item in item_list
+ if item not in self.currently_bad_items
+ ]
+ item_list = incremental_items
+ self.l.LogOutput(
+ "Incremental set. Actually switching %s to bad"
+ % str(item_list),
+ print_to_console=self.verbose,
+ )
- # Everything below newly found bad item is now known to be a good item.
- # Take these good items out of the equation to save time on the next
- # search. We save these known good items so they are still sent to the
- # switch_to_good script.
- if prune_index:
- self.known_good.update(new_all_items[:prune_index])
- new_all_items = new_all_items[prune_index:]
+ if not item_list:
+ return
- self.l.LogOutput(
- 'Old list: %s. New list: %s' % (str(self.all_items),
- str(new_all_items)),
- print_to_console=self.verbose)
+ self.l.LogOutput(
+ "Switching %s to bad" % str(item_list),
+ print_to_console=self.verbose,
+ )
+ self.RunSwitchScript(self.switch_to_bad, item_list)
+ self.currently_bad_items = self.currently_bad_items.union(
+ set(item_list)
+ )
+ self.currently_good_items.difference_update(set(item_list))
- if not self.prune:
- self.l.LogOutput('Not continuning further, --prune is not set')
- break
- # FIXME: Do we need to Convert the currently good items to bad
- self.PopulateItemsUsingList(new_all_items)
+ def RunSwitchScript(self, switch_script, item_list):
+ """Pass given items to switch script.
- # If pass level bisecting is set, generate a script which contains command
- # line options to rebuild bad item.
- if self.pass_bisect:
- status = self.GenerateBadCommandScript(self.found_items)
- if status == 0:
- self.cmd_script = os.path.join(
- os.path.dirname(self.pass_bisect), 'cmd_script.sh')
- self.l.LogOutput('Command script generated at %s.' % self.cmd_script)
- else:
- raise RuntimeError('Error while generating command script.')
+ Args:
+ switch_script: path to switch script
+ item_list: list of all items to be switched
+ """
+ if self.file_args:
+ with tempfile.NamedTemporaryFile("w", encoding="utf-8") as f:
+ f.write("\n".join(item_list))
+ f.flush()
+ command = "%s %s" % (switch_script, f.name)
+ ret, _, _ = self.ce.RunCommandWExceptionCleanup(
+ command, print_to_console=self.verbose
+ )
+ else:
+ command = "%s %s" % (switch_script, " ".join(item_list))
+ try:
+ ret, _, _ = self.ce.RunCommandWExceptionCleanup(
+ command, print_to_console=self.verbose
+ )
+ except OSError as e:
+ if e.errno == errno.E2BIG:
+ raise RuntimeError(
+ "Too many arguments for switch script! Use "
+ "--file_args"
+ )
+ assert ret == 0, "Switch script %s returned %d" % (switch_script, ret)
- def DoBinarySearchBadItems(self):
- """Perform single iteration of binary search."""
- # If in resume mode don't reset search_cycles
- if not self.resumed:
- self.search_cycles = 0
- else:
- self.resumed = False
+ def TestScript(self):
+ """Run test script and return exit code from script."""
+ command = self.test_script
+ ret, _, _ = self.ce.RunCommandWExceptionCleanup(command)
+ return ret
- terminated = False
- while self.search_cycles < self.iterations and not terminated:
- self.SaveState()
- self.OutputIterationProgressBadItem()
+ def TestSetupScript(self):
+ """Run test setup script and return exit code from script."""
+ if not self.test_setup_script:
+ return 0
- self.search_cycles += 1
- [bad_items, good_items] = self.GetNextItems()
+ command = self.test_setup_script
+ ret, _, _ = self.ce.RunCommandWExceptionCleanup(command)
+ return ret
- with SetFile(GOOD_SET_VAR, good_items), SetFile(BAD_SET_VAR, bad_items):
- # TODO: bad_items should come first.
- self.SwitchToGood(good_items)
- self.SwitchToBad(bad_items)
- status = self.TestSetupScript()
- if status == 0:
- status = self.TestScript()
- terminated = self.binary_search.SetStatus(status)
+ def GenerateBadCommandScript(self, bad_items):
+ """Generate command line script for building bad item."""
+ assert not self.prune, "Prune must be false if pass_bisect is set."
+ assert len(bad_items) == 1, (
+ "Pruning is off, but number of bad " "items found was not 1."
+ )
+ item = list(bad_items)[0]
+ command = "%s %s" % (self.pass_bisect, item)
+ ret, _, _ = self.ce.RunCommandWExceptionCleanup(
+ command, print_to_console=self.verbose
+ )
+ return ret
- if terminated:
- self.l.LogOutput('Terminated!', print_to_console=self.verbose)
- if not terminated:
- self.l.LogOutput('Ran out of iterations searching...')
- self.l.LogOutput(str(self), print_to_console=self.verbose)
- return terminated
+ def DoVerify(self):
+ """Verify correctness of test environment.
- def CollectPassName(self, pass_info):
- """Mapping opt-bisect output of pass info to debugcounter name."""
- self.l.LogOutput('Pass info: %s' % pass_info, print_to_console=self.verbose)
+ Verify that a "good" set of items produces a "good" result and that a "bad"
+ set of items produces a "bad" result. To be run directly before running
+ DoSearch. If verify is False this step is skipped.
+ """
+ if not self.verify:
+ return
- for desc in pass_mapping.pass_name:
- if desc in pass_info:
- return pass_mapping.pass_name[desc]
+ self.l.LogOutput("VERIFICATION")
+ self.l.LogOutput("Beginning tests to verify good/bad sets\n")
- # If pass not found, return None
- return None
+ self._OutputProgress("Verifying items from GOOD set\n")
+ with SetFile(GOOD_SET_VAR, self.all_items), SetFile(BAD_SET_VAR, []):
+ self.l.LogOutput("Resetting all items to good to verify.")
+ self.SwitchToGood(self.all_items)
+ status = self.TestSetupScript()
+ assert status == 0, "When reset_to_good, test setup should succeed."
+ status = self.TestScript()
+ assert status == 0, "When reset_to_good, status should be 0."
- def BuildWithPassLimit(self, limit, generate_ir=False):
- """Rebuild bad item with pass level bisect limit
+ self._OutputProgress("Verifying items from BAD set\n")
+ with SetFile(GOOD_SET_VAR, []), SetFile(BAD_SET_VAR, self.all_items):
+ self.l.LogOutput("Resetting all items to bad to verify.")
+ self.SwitchToBad(self.all_items)
+ status = self.TestSetupScript()
+ # The following assumption is not true; a bad image might not
+ # successfully push onto a device.
+ # assert status == 0, 'When reset_to_bad, test setup should succeed.'
+ if status == 0:
+ status = self.TestScript()
+ assert status == 1, "When reset_to_bad, status should be 1."
- Run command line script generated by GenerateBadCommandScript(), with
- pass level limit flags.
+ def DoSearchBadItems(self):
+ """Perform full search for bad items.
- Returns:
- pass_num: current number of the pass, or total number of passes if
- limit set to -1.
- pass_name: The debugcounter name of current limit pass.
- """
- os.environ['LIMIT_FLAGS'] = '-mllvm -opt-bisect-limit=' + str(limit)
- if generate_ir:
- os.environ['LIMIT_FLAGS'] += ' -S -emit-llvm'
- self.l.LogOutput(
- 'Limit flags: %s' % os.environ['LIMIT_FLAGS'],
- print_to_console=self.verbose)
- command = self.cmd_script
- _, _, msg = self.ce.RunCommandWOutput(command, print_to_console=False)
+ Perform full search until prune_iterations number of bad items are found.
+ """
+ while (
+ True
+ and len(self.all_items) > 1
+ and self.prune_cycles < self.prune_iterations
+ ):
+ terminated = self.DoBinarySearchBadItems()
+ self.prune_cycles += 1
+ if not terminated:
+ break
+ # Prune is set.
+ prune_index = self.binary_search.current
- # Massages we get will be like this:
- # BISECT: running pass (9) <Pass Description> on <function> (<file>)
- # BISECT: running pass (10) <Pass Description> on <module> (<file>)
- # BISECT: NOT running pass (11) <Pass Description> on <SCG> (<file>)
- # BISECT: NOT running pass (12) <Pass Description> on <SCG> (<file>)
- # We want to get the pass description of last running pass, to have
- # transformation level bisect on it.
- if 'BISECT: ' not in msg:
- raise RuntimeError('No bisect info printed, OptBisect may not be '
- 'supported by the compiler.')
+ # If found item is last item, no new items can be found
+ if prune_index == len(self.all_items) - 1:
+ self.l.LogOutput("First bad item is the last item. Breaking.")
+ self.l.LogOutput("Bad items are: %s" % self.all_items[-1])
+ self.found_items.add(self.all_items[-1])
+ break
- lines = msg.split('\n')
- pass_num = 0
- last_pass = ''
- for l in lines:
- if 'running pass' in l:
- # For situation of limit==-1, we want the total number of passes
- if limit != -1 and 'BISECT: NOT ' in l:
- break
- pass_num += 1
- last_pass = l
- if limit not in (-1, pass_num):
- raise ValueError('[Error] While building, limit number does not match.')
- return pass_num, self.CollectPassName(last_pass)
+ # If already seen item we have no new bad items to find, finish up
+ if self.all_items[prune_index] in self.found_items:
+ self.l.LogOutput(
+ "Found item already found before: %s."
+ % self.all_items[prune_index],
+ print_to_console=self.verbose,
+ )
+ self.l.LogOutput("No more bad items remaining. Done searching.")
+ self.l.LogOutput(
+ "Bad items are: %s" % " ".join(self.found_items)
+ )
+ break
- def BuildWithTransformLimit(self,
- limit,
- pass_name=None,
- pass_limit=-1,
- generate_ir=False):
- """Rebuild bad item with transformation level bisect limit
+ new_all_items = list(self.all_items)
+ # Move prune item to the end of the list.
+ new_all_items.append(new_all_items.pop(prune_index))
+ self.found_items.add(new_all_items[-1])
- Run command line script generated by GenerateBadCommandScript(), with
- pass level limit flags and transformation level limit flags.
+ # Everything below newly found bad item is now known to be a good item.
+ # Take these good items out of the equation to save time on the next
+ # search. We save these known good items so they are still sent to the
+ # switch_to_good script.
+ if prune_index:
+ self.known_good.update(new_all_items[:prune_index])
+ new_all_items = new_all_items[prune_index:]
- Args:
- limit: transformation level limit for bad item.
- pass_name: name of bad pass debugcounter from pass level bisect result.
- pass_limit: pass level limit from pass level bisect result.
- generate_ir: Whether to generate IR comparison.
+ self.l.LogOutput(
+ "Old list: %s. New list: %s"
+ % (str(self.all_items), str(new_all_items)),
+ print_to_console=self.verbose,
+ )
- Returns:
- Total number of transformations if limit set to -1, else return 0.
- """
- counter_name = pass_name
+ if not self.prune:
+ self.l.LogOutput("Not continuning further, --prune is not set")
+ break
+ # FIXME: Do we need to Convert the currently good items to bad
+ self.PopulateItemsUsingList(new_all_items)
- os.environ['LIMIT_FLAGS'] = '-mllvm -opt-bisect-limit=' + \
- str(pass_limit) + \
- ' -mllvm -debug-counter=' + counter_name + \
- '-count=' + str(limit) + \
- ' -mllvm -print-debug-counter'
- if generate_ir:
- os.environ['LIMIT_FLAGS'] += ' -S -emit-llvm'
- self.l.LogOutput(
- 'Limit flags: %s' % os.environ['LIMIT_FLAGS'],
- print_to_console=self.verbose)
- command = self.cmd_script
- _, _, msg = self.ce.RunCommandWOutput(command, print_to_console=False)
+ # If pass level bisecting is set, generate a script which contains command
+ # line options to rebuild bad item.
+ if self.pass_bisect:
+ status = self.GenerateBadCommandScript(self.found_items)
+ if status == 0:
+ self.cmd_script = os.path.join(
+ os.path.dirname(self.pass_bisect), "cmd_script.sh"
+ )
+ self.l.LogOutput(
+ "Command script generated at %s." % self.cmd_script
+ )
+ else:
+ raise RuntimeError("Error while generating command script.")
- if 'Counters and values:' not in msg:
- # Print pass level IR diff only if transformation level bisection does
- # not work.
- if self.ir_diff:
- self.PrintIRDiff(pass_limit)
- raise RuntimeError('No bisect info printed, DebugCounter may not be '
- 'supported by the compiler.')
+ def DoBinarySearchBadItems(self):
+ """Perform single iteration of binary search."""
+ # If in resume mode don't reset search_cycles
+ if not self.resumed:
+ self.search_cycles = 0
+ else:
+ self.resumed = False
- # With debugcounter enabled, there will be DebugCounter counting info in
- # the output.
- lines = msg.split('\n')
- for l in lines:
- if pass_name in l:
- # Output of debugcounter will be like:
- # instcombine-visit: {10, 0, 20}
- # dce-transform: {1, 0, -1}
- # which indicates {Count, Skip, StopAfter}.
- # The last number should be the limit we set.
- # We want the first number as the total transformation count.
- # Split each line by ,|{|} and we can get l_list as:
- # ['instcombine: ', '10', '0', '20', '']
- # and we will need the second item in it.
- l_list = re.split(',|{|}', l)
- count = int(l_list[1])
- if limit == -1:
- return count
- # The returned value is only useful when limit == -1, which shows total
- # transformation count.
- return 0
+ terminated = False
+ while self.search_cycles < self.iterations and not terminated:
+ self.SaveState()
+ self.OutputIterationProgressBadItem()
+
+ self.search_cycles += 1
+ [bad_items, good_items] = self.GetNextItems()
+
+ with SetFile(GOOD_SET_VAR, good_items), SetFile(
+ BAD_SET_VAR, bad_items
+ ):
+ # TODO: bad_items should come first.
+ self.SwitchToGood(good_items)
+ self.SwitchToBad(bad_items)
+ status = self.TestSetupScript()
+ if status == 0:
+ status = self.TestScript()
+ terminated = self.binary_search.SetStatus(status)
+
+ if terminated:
+ self.l.LogOutput("Terminated!", print_to_console=self.verbose)
+ if not terminated:
+ self.l.LogOutput("Ran out of iterations searching...")
+ self.l.LogOutput(str(self), print_to_console=self.verbose)
+ return terminated
+
+ def CollectPassName(self, pass_info):
+ """Mapping opt-bisect output of pass info to debugcounter name."""
+ self.l.LogOutput(
+ "Pass info: %s" % pass_info, print_to_console=self.verbose
+ )
+
+ for desc in pass_mapping.pass_name:
+ if desc in pass_info:
+ return pass_mapping.pass_name[desc]
+
+ # If pass not found, return None
+ return None
+
+ def BuildWithPassLimit(self, limit, generate_ir=False):
+ """Rebuild bad item with pass level bisect limit
+
+ Run command line script generated by GenerateBadCommandScript(), with
+ pass level limit flags.
+
+ Returns:
+ pass_num: current number of the pass, or total number of passes if
+ limit set to -1.
+ pass_name: The debugcounter name of current limit pass.
+ """
+ os.environ["LIMIT_FLAGS"] = "-mllvm -opt-bisect-limit=" + str(limit)
+ if generate_ir:
+ os.environ["LIMIT_FLAGS"] += " -S -emit-llvm"
+ self.l.LogOutput(
+ "Limit flags: %s" % os.environ["LIMIT_FLAGS"],
+ print_to_console=self.verbose,
+ )
+ command = self.cmd_script
+ _, _, msg = self.ce.RunCommandWOutput(command, print_to_console=False)
+
+ # Massages we get will be like this:
+ # BISECT: running pass (9) <Pass Description> on <function> (<file>)
+ # BISECT: running pass (10) <Pass Description> on <module> (<file>)
+ # BISECT: NOT running pass (11) <Pass Description> on <SCG> (<file>)
+ # BISECT: NOT running pass (12) <Pass Description> on <SCG> (<file>)
+ # We want to get the pass description of last running pass, to have
+ # transformation level bisect on it.
+ if "BISECT: " not in msg:
+ raise RuntimeError(
+ "No bisect info printed, OptBisect may not be "
+ "supported by the compiler."
+ )
+
+ lines = msg.split("\n")
+ pass_num = 0
+ last_pass = ""
+ for l in lines:
+ if "running pass" in l:
+ # For situation of limit==-1, we want the total number of passes
+ if limit != -1 and "BISECT: NOT " in l:
+ break
+ pass_num += 1
+ last_pass = l
+ if limit not in (-1, pass_num):
+ raise ValueError(
+ "[Error] While building, limit number does not match."
+ )
+ return pass_num, self.CollectPassName(last_pass)
+
+ def BuildWithTransformLimit(
+ self, limit, pass_name=None, pass_limit=-1, generate_ir=False
+ ):
+ """Rebuild bad item with transformation level bisect limit
+
+ Run command line script generated by GenerateBadCommandScript(), with
+ pass level limit flags and transformation level limit flags.
+
+ Args:
+ limit: transformation level limit for bad item.
+ pass_name: name of bad pass debugcounter from pass level bisect result.
+ pass_limit: pass level limit from pass level bisect result.
+ generate_ir: Whether to generate IR comparison.
+
+ Returns:
+ Total number of transformations if limit set to -1, else return 0.
+ """
+ counter_name = pass_name
+
+ os.environ["LIMIT_FLAGS"] = (
+ "-mllvm -opt-bisect-limit="
+ + str(pass_limit)
+ + " -mllvm -debug-counter="
+ + counter_name
+ + "-count="
+ + str(limit)
+ + " -mllvm -print-debug-counter"
+ )
+ if generate_ir:
+ os.environ["LIMIT_FLAGS"] += " -S -emit-llvm"
+ self.l.LogOutput(
+ "Limit flags: %s" % os.environ["LIMIT_FLAGS"],
+ print_to_console=self.verbose,
+ )
+ command = self.cmd_script
+ _, _, msg = self.ce.RunCommandWOutput(command, print_to_console=False)
+
+ if "Counters and values:" not in msg:
+ # Print pass level IR diff only if transformation level bisection does
+ # not work.
+ if self.ir_diff:
+ self.PrintIRDiff(pass_limit)
+ raise RuntimeError(
+ "No bisect info printed, DebugCounter may not be "
+ "supported by the compiler."
+ )
+
+ # With debugcounter enabled, there will be DebugCounter counting info in
+ # the output.
+ lines = msg.split("\n")
+ for l in lines:
+ if pass_name in l:
+ # Output of debugcounter will be like:
+ # instcombine-visit: {10, 0, 20}
+ # dce-transform: {1, 0, -1}
+ # which indicates {Count, Skip, StopAfter}.
+ # The last number should be the limit we set.
+ # We want the first number as the total transformation count.
+ # Split each line by ,|{|} and we can get l_list as:
+ # ['instcombine: ', '10', '0', '20', '']
+ # and we will need the second item in it.
+ l_list = re.split(",|{|}", l)
+ count = int(l_list[1])
+ if limit == -1:
+ return count
+ # The returned value is only useful when limit == -1, which shows total
+ # transformation count.
+ return 0
- def PrintIRDiff(self, pass_index, pass_name=None, trans_index=-1):
- bad_item = list(self.found_items)[0]
- self.l.LogOutput(
- 'IR difference before and after bad pass/transformation:',
- print_to_console=self.verbose)
+ def PrintIRDiff(self, pass_index, pass_name=None, trans_index=-1):
+ bad_item = list(self.found_items)[0]
+ self.l.LogOutput(
+ "IR difference before and after bad pass/transformation:",
+ print_to_console=self.verbose,
+ )
- if trans_index == -1:
- # Pass level IR diff
- self.BuildWithPassLimit(pass_index, self.ir_diff)
- good_ir = os.path.join(tempfile.tempdir, 'good.s')
- shutil.copyfile(bad_item, good_ir)
- pass_index += 1
- self.BuildWithPassLimit(pass_index, self.ir_diff)
- else:
- # Transformation level IR diff
- self.BuildWithTransformLimit(trans_index, pass_name, pass_index,
- self.ir_diff)
- good_ir = os.path.join(tempfile.tempdir, 'good.s')
- shutil.copyfile(bad_item, good_ir)
- trans_index += 1
- self.BuildWithTransformLimit(trans_index, pass_name, pass_index,
- self.ir_diff)
+ if trans_index == -1:
+ # Pass level IR diff
+ self.BuildWithPassLimit(pass_index, self.ir_diff)
+ good_ir = os.path.join(tempfile.tempdir, "good.s")
+ shutil.copyfile(bad_item, good_ir)
+ pass_index += 1
+ self.BuildWithPassLimit(pass_index, self.ir_diff)
+ else:
+ # Transformation level IR diff
+ self.BuildWithTransformLimit(
+ trans_index, pass_name, pass_index, self.ir_diff
+ )
+ good_ir = os.path.join(tempfile.tempdir, "good.s")
+ shutil.copyfile(bad_item, good_ir)
+ trans_index += 1
+ self.BuildWithTransformLimit(
+ trans_index, pass_name, pass_index, self.ir_diff
+ )
- bad_ir = os.path.join(tempfile.tempdir, 'bad.s')
- shutil.copyfile(bad_item, bad_ir)
+ bad_ir = os.path.join(tempfile.tempdir, "bad.s")
+ shutil.copyfile(bad_item, bad_ir)
- command = 'diff %s %s' % (good_ir, bad_ir)
- _, _, _ = self.ce.RunCommandWOutput(command, print_to_console=self.verbose)
+ command = "diff %s %s" % (good_ir, bad_ir)
+ _, _, _ = self.ce.RunCommandWOutput(
+ command, print_to_console=self.verbose
+ )
- def DoSearchBadPass(self):
- """Perform full search for bad pass of bad item."""
- logger.GetLogger().LogOutput('Starting to bisect bad pass for bad item.')
+ def DoSearchBadPass(self):
+ """Perform full search for bad pass of bad item."""
+ logger.GetLogger().LogOutput(
+ "Starting to bisect bad pass for bad item."
+ )
- # Pass level bisection
- self.mode = 'pass'
- self.binary_search = binary_search_perforce.BinarySearcherForPass(
- logger_to_set=self.l)
- self.binary_search.total, _ = self.BuildWithPassLimit(-1)
- logger.GetLogger().LogOutput(
- 'Total %s number: %d' % (self.mode, self.binary_search.total))
+ # Pass level bisection
+ self.mode = "pass"
+ self.binary_search = binary_search_perforce.BinarySearcherForPass(
+ logger_to_set=self.l
+ )
+ self.binary_search.total, _ = self.BuildWithPassLimit(-1)
+ logger.GetLogger().LogOutput(
+ "Total %s number: %d" % (self.mode, self.binary_search.total)
+ )
- pass_index, pass_name = self.DoBinarySearchBadPass()
+ pass_index, pass_name = self.DoBinarySearchBadPass()
- if (not pass_name and pass_index == 0):
- raise ValueError('Bisecting passes cannot reproduce good result.')
- logger.GetLogger().LogOutput('Bad pass found: %s.' % pass_name)
+ if not pass_name and pass_index == 0:
+ raise ValueError("Bisecting passes cannot reproduce good result.")
+ logger.GetLogger().LogOutput("Bad pass found: %s." % pass_name)
- # Transformation level bisection.
- logger.GetLogger().LogOutput('Starting to bisect at transformation level.')
+ # Transformation level bisection.
+ logger.GetLogger().LogOutput(
+ "Starting to bisect at transformation level."
+ )
- self.mode = 'transform'
- self.binary_search = binary_search_perforce.BinarySearcherForPass(
- logger_to_set=self.l)
- self.binary_search.total = self.BuildWithTransformLimit(
- -1, pass_name, pass_index)
- logger.GetLogger().LogOutput(
- 'Total %s number: %d' % (self.mode, self.binary_search.total))
+ self.mode = "transform"
+ self.binary_search = binary_search_perforce.BinarySearcherForPass(
+ logger_to_set=self.l
+ )
+ self.binary_search.total = self.BuildWithTransformLimit(
+ -1, pass_name, pass_index
+ )
+ logger.GetLogger().LogOutput(
+ "Total %s number: %d" % (self.mode, self.binary_search.total)
+ )
- trans_index, _ = self.DoBinarySearchBadPass(pass_index, pass_name)
- if trans_index == 0:
- raise ValueError('Bisecting %s cannot reproduce good result.' % pass_name)
+ trans_index, _ = self.DoBinarySearchBadPass(pass_index, pass_name)
+ if trans_index == 0:
+ raise ValueError(
+ "Bisecting %s cannot reproduce good result." % pass_name
+ )
- if self.ir_diff:
- self.PrintIRDiff(pass_index, pass_name, trans_index)
+ if self.ir_diff:
+ self.PrintIRDiff(pass_index, pass_name, trans_index)
- logger.GetLogger().LogOutput(
- 'Bisection result for bad item %s:\n'
- 'Bad pass: %s at number %d\n'
- 'Bad transformation number: %d' % (self.found_items, pass_name,
- pass_index, trans_index))
+ logger.GetLogger().LogOutput(
+ "Bisection result for bad item %s:\n"
+ "Bad pass: %s at number %d\n"
+ "Bad transformation number: %d"
+ % (self.found_items, pass_name, pass_index, trans_index)
+ )
- def DoBinarySearchBadPass(self, pass_index=-1, pass_name=None):
- """Perform single iteration of binary search at pass level
+ def DoBinarySearchBadPass(self, pass_index=-1, pass_name=None):
+ """Perform single iteration of binary search at pass level
- Args:
- pass_index: Works for transformation level bisection, indicates the limit
- number of pass from pass level bisecting result.
- pass_name: Works for transformation level bisection, indicates
- DebugCounter name of the bad pass from pass level bisecting
- result.
+ Args:
+ pass_index: Works for transformation level bisection, indicates the limit
+ number of pass from pass level bisecting result.
+ pass_name: Works for transformation level bisection, indicates
+ DebugCounter name of the bad pass from pass level bisecting
+ result.
- Returns:
- index: Index of problematic pass/transformation.
- pass_name: Works for pass level bisection, returns DebugCounter name for
- bad pass.
- """
- # If in resume mode don't reset search_cycles
- if not self.resumed:
- self.search_cycles = 0
- else:
- self.resumed = False
+ Returns:
+ index: Index of problematic pass/transformation.
+ pass_name: Works for pass level bisection, returns DebugCounter name for
+ bad pass.
+ """
+ # If in resume mode don't reset search_cycles
+ if not self.resumed:
+ self.search_cycles = 0
+ else:
+ self.resumed = False
- terminated = False
- index = 0
- while self.search_cycles < self.iterations and not terminated:
- self.SaveState()
- self.OutputIterationProgressBadPass()
+ terminated = False
+ index = 0
+ while self.search_cycles < self.iterations and not terminated:
+ self.SaveState()
+ self.OutputIterationProgressBadPass()
- self.search_cycles += 1
- current = self.binary_search.GetNext()
+ self.search_cycles += 1
+ current = self.binary_search.GetNext()
- if self.mode == 'pass':
- index, pass_name = self.BuildWithPassLimit(current)
- else:
- self.BuildWithTransformLimit(current, pass_name, pass_index)
- index = current
+ if self.mode == "pass":
+ index, pass_name = self.BuildWithPassLimit(current)
+ else:
+ self.BuildWithTransformLimit(current, pass_name, pass_index)
+ index = current
- # TODO: Newly generated object should not directly replace original
- # one, need to put it somewhere and symbol link original one to it.
- # Will update cmd_script to do it.
+ # TODO: Newly generated object should not directly replace original
+ # one, need to put it somewhere and symbol link original one to it.
+ # Will update cmd_script to do it.
- status = self.TestSetupScript()
- assert status == 0, 'Test setup should succeed.'
- status = self.TestScript()
- terminated = self.binary_search.SetStatus(status)
+ status = self.TestSetupScript()
+ assert status == 0, "Test setup should succeed."
+ status = self.TestScript()
+ terminated = self.binary_search.SetStatus(status)
- if terminated:
- self.l.LogOutput('Terminated!', print_to_console=self.verbose)
- if not terminated:
- self.l.LogOutput('Ran out of iterations searching...')
- self.l.LogOutput(str(self), print_to_console=self.verbose)
- return index, pass_name
+ if terminated:
+ self.l.LogOutput("Terminated!", print_to_console=self.verbose)
+ if not terminated:
+ self.l.LogOutput("Ran out of iterations searching...")
+ self.l.LogOutput(str(self), print_to_console=self.verbose)
+ return index, pass_name
- def PopulateItemsUsingCommand(self, command):
- """Update all_items and binary search logic from executable.
+ def PopulateItemsUsingCommand(self, command):
+ """Update all_items and binary search logic from executable.
- This method is mainly required for enumerating the initial list of items
- from the get_initial_items script.
+ This method is mainly required for enumerating the initial list of items
+ from the get_initial_items script.
- Args:
- command: path to executable that will enumerate items.
- """
- ce = command_executer.GetCommandExecuter()
- _, out, _ = ce.RunCommandWExceptionCleanup(
- command, return_output=True, print_to_console=self.verbose)
- all_items = out.split()
- self.PopulateItemsUsingList(all_items)
+ Args:
+ command: path to executable that will enumerate items.
+ """
+ ce = command_executer.GetCommandExecuter()
+ _, out, _ = ce.RunCommandWExceptionCleanup(
+ command, return_output=True, print_to_console=self.verbose
+ )
+ all_items = out.split()
+ self.PopulateItemsUsingList(all_items)
- def PopulateItemsUsingList(self, all_items):
- """Update all_items and binary searching logic from list.
+ def PopulateItemsUsingList(self, all_items):
+ """Update all_items and binary searching logic from list.
- Args:
- all_items: new list of all_items
- """
- self.all_items = all_items
- self.binary_search = binary_search_perforce.BinarySearcher(
- logger_to_set=self.l)
- self.binary_search.SetSortedList(self.all_items)
+ Args:
+ all_items: new list of all_items
+ """
+ self.all_items = all_items
+ self.binary_search = binary_search_perforce.BinarySearcher(
+ logger_to_set=self.l
+ )
+ self.binary_search.SetSortedList(self.all_items)
- def SaveState(self):
- """Save state to STATE_FILE.
+ def SaveState(self):
+ """Save state to STATE_FILE.
- SaveState will create a new unique, hidden state file to hold data from
- object. Then atomically overwrite the STATE_FILE symlink to point to the
- new data.
+ SaveState will create a new unique, hidden state file to hold data from
+ object. Then atomically overwrite the STATE_FILE symlink to point to the
+ new data.
- Raises:
- OSError if STATE_FILE already exists but is not a symlink.
- """
- ce, l = self.ce, self.l
- self.ce, self.l, self.binary_search.logger = None, None, None
- old_state = None
+ Raises:
+ OSError if STATE_FILE already exists but is not a symlink.
+ """
+ ce, l = self.ce, self.l
+ self.ce, self.l, self.binary_search.logger = None, None, None
+ old_state = None
- _, path = tempfile.mkstemp(prefix=HIDDEN_STATE_FILE, dir='.')
- with open(path, 'wb') as f:
- pickle.dump(self, f)
+ _, path = tempfile.mkstemp(prefix=HIDDEN_STATE_FILE, dir=".")
+ with open(path, "wb") as f:
+ pickle.dump(self, f)
- if os.path.exists(STATE_FILE):
- if os.path.islink(STATE_FILE):
- old_state = os.readlink(STATE_FILE)
- else:
- raise OSError(('%s already exists and is not a symlink!\n'
- 'State file saved to %s' % (STATE_FILE, path)))
+ if os.path.exists(STATE_FILE):
+ if os.path.islink(STATE_FILE):
+ old_state = os.readlink(STATE_FILE)
+ else:
+ raise OSError(
+ (
+ "%s already exists and is not a symlink!\n"
+ "State file saved to %s" % (STATE_FILE, path)
+ )
+ )
- # Create new link and atomically overwrite old link
- temp_link = '%s.link' % HIDDEN_STATE_FILE
- os.symlink(path, temp_link)
- os.rename(temp_link, STATE_FILE)
+ # Create new link and atomically overwrite old link
+ temp_link = "%s.link" % HIDDEN_STATE_FILE
+ os.symlink(path, temp_link)
+ os.rename(temp_link, STATE_FILE)
- if old_state:
- os.remove(old_state)
+ if old_state:
+ os.remove(old_state)
- self.ce, self.l, self.binary_search.logger = ce, l, l
+ self.ce, self.l, self.binary_search.logger = ce, l, l
- @classmethod
- def LoadState(cls):
- """Create BinarySearchState object from STATE_FILE."""
- if not os.path.isfile(STATE_FILE):
- return None
- try:
- with open(STATE_FILE, 'rb') as f:
- bss = pickle.load(f)
- bss.l = logger.GetLogger()
- bss.ce = command_executer.GetCommandExecuter()
- bss.binary_search.logger = bss.l
- bss.start_time = time.time()
+ @classmethod
+ def LoadState(cls):
+ """Create BinarySearchState object from STATE_FILE."""
+ if not os.path.isfile(STATE_FILE):
+ return None
+ try:
+ with open(STATE_FILE, "rb") as f:
+ bss = pickle.load(f)
+ bss.l = logger.GetLogger()
+ bss.ce = command_executer.GetCommandExecuter()
+ bss.binary_search.logger = bss.l
+ bss.start_time = time.time()
- # Set resumed to be True so we can enter DoBinarySearch without the
- # method resetting our current search_cycles to 0.
- bss.resumed = True
+ # Set resumed to be True so we can enter DoBinarySearch without the
+ # method resetting our current search_cycles to 0.
+ bss.resumed = True
- # Set currently_good_items and currently_bad_items to empty so that the
- # first iteration after resuming will always be non-incremental. This
- # is just in case the environment changes, the user makes manual
- # changes, or a previous switch_script corrupted the environment.
- bss.currently_good_items = set()
- bss.currently_bad_items = set()
+ # Set currently_good_items and currently_bad_items to empty so that the
+ # first iteration after resuming will always be non-incremental. This
+ # is just in case the environment changes, the user makes manual
+ # changes, or a previous switch_script corrupted the environment.
+ bss.currently_good_items = set()
+ bss.currently_bad_items = set()
- binary_search_perforce.verbose = bss.verbose
- return bss
- except Exception:
- return None
+ binary_search_perforce.verbose = bss.verbose
+ return bss
+ except Exception:
+ return None
- def RemoveState(self):
- """Remove STATE_FILE and its symlinked data from file system."""
- if os.path.exists(STATE_FILE):
- if os.path.islink(STATE_FILE):
- real_file = os.readlink(STATE_FILE)
- os.remove(real_file)
- os.remove(STATE_FILE)
+ def RemoveState(self):
+ """Remove STATE_FILE and its symlinked data from file system."""
+ if os.path.exists(STATE_FILE):
+ if os.path.islink(STATE_FILE):
+ real_file = os.readlink(STATE_FILE)
+ os.remove(real_file)
+ os.remove(STATE_FILE)
- def GetNextItems(self):
- """Get next items for binary search based on result of the last test run."""
- border_item = self.binary_search.GetNext()
- index = self.all_items.index(border_item)
+ def GetNextItems(self):
+ """Get next items for binary search based on result of the last test run."""
+ border_item = self.binary_search.GetNext()
+ index = self.all_items.index(border_item)
- next_bad_items = self.all_items[:index + 1]
- next_good_items = self.all_items[index + 1:] + list(self.known_good)
+ next_bad_items = self.all_items[: index + 1]
+ next_good_items = self.all_items[index + 1 :] + list(self.known_good)
- return [next_bad_items, next_good_items]
+ return [next_bad_items, next_good_items]
- def ElapsedTimeString(self):
- """Return h m s format of elapsed time since execution has started."""
- diff = int(time.time() - self.start_time)
- seconds = diff % 60
- minutes = (diff // 60) % 60
- hours = diff // (60 * 60)
+ def ElapsedTimeString(self):
+ """Return h m s format of elapsed time since execution has started."""
+ diff = int(time.time() - self.start_time)
+ seconds = diff % 60
+ minutes = (diff // 60) % 60
+ hours = diff // (60 * 60)
- seconds = str(seconds).rjust(2)
- minutes = str(minutes).rjust(2)
- hours = str(hours).rjust(2)
+ seconds = str(seconds).rjust(2)
+ minutes = str(minutes).rjust(2)
+ hours = str(hours).rjust(2)
- return '%sh %sm %ss' % (hours, minutes, seconds)
+ return "%sh %sm %ss" % (hours, minutes, seconds)
- def _OutputProgress(self, progress_text):
- """Output current progress of binary search to console and logs.
+ def _OutputProgress(self, progress_text):
+ """Output current progress of binary search to console and logs.
- Args:
- progress_text: The progress to display to the user.
- """
- progress = ('\n***** PROGRESS (elapsed time: %s) *****\n'
- '%s'
- '************************************************')
- progress = progress % (self.ElapsedTimeString(), progress_text)
- self.l.LogOutput(progress)
+ Args:
+ progress_text: The progress to display to the user.
+ """
+ progress = (
+ "\n***** PROGRESS (elapsed time: %s) *****\n"
+ "%s"
+ "************************************************"
+ )
+ progress = progress % (self.ElapsedTimeString(), progress_text)
+ self.l.LogOutput(progress)
- def OutputIterationProgressBadItem(self):
- out = ('Search %d of estimated %d.\n'
- 'Prune %d of max %d.\n'
- 'Current bad items found:\n'
- '%s\n')
- out = out % (self.search_cycles + 1,
- math.ceil(math.log(len(self.all_items), 2)), self.prune_cycles
- + 1, self.prune_iterations, ', '.join(self.found_items))
- self._OutputProgress(out)
+ def OutputIterationProgressBadItem(self):
+ out = (
+ "Search %d of estimated %d.\n"
+ "Prune %d of max %d.\n"
+ "Current bad items found:\n"
+ "%s\n"
+ )
+ out = out % (
+ self.search_cycles + 1,
+ math.ceil(math.log(len(self.all_items), 2)),
+ self.prune_cycles + 1,
+ self.prune_iterations,
+ ", ".join(self.found_items),
+ )
+ self._OutputProgress(out)
- def OutputIterationProgressBadPass(self):
- out = ('Search %d of estimated %d.\n' 'Current limit: %s\n')
- out = out % (self.search_cycles + 1,
- math.ceil(math.log(self.binary_search.total, 2)),
- self.binary_search.current)
- self._OutputProgress(out)
+ def OutputIterationProgressBadPass(self):
+ out = "Search %d of estimated %d.\n" "Current limit: %s\n"
+ out = out % (
+ self.search_cycles + 1,
+ math.ceil(math.log(self.binary_search.total, 2)),
+ self.binary_search.current,
+ )
+ self._OutputProgress(out)
- def __str__(self):
- ret = ''
- ret += 'all: %s\n' % str(self.all_items)
- ret += 'currently_good: %s\n' % str(self.currently_good_items)
- ret += 'currently_bad: %s\n' % str(self.currently_bad_items)
- ret += str(self.binary_search)
- return ret
+ def __str__(self):
+ ret = ""
+ ret += "all: %s\n" % str(self.all_items)
+ ret += "currently_good: %s\n" % str(self.currently_good_items)
+ ret += "currently_bad: %s\n" % str(self.currently_bad_items)
+ ret += str(self.binary_search)
+ return ret
class MockBinarySearchState(BinarySearchState):
- """Mock class for BinarySearchState."""
+ """Mock class for BinarySearchState."""
- def __init__(self, **kwargs):
- # Initialize all arguments to None
- default_kwargs = {
- 'get_initial_items': 'echo "1"',
- 'switch_to_good': None,
- 'switch_to_bad': None,
- 'test_setup_script': None,
- 'test_script': None,
- 'incremental': True,
- 'prune': False,
- 'pass_bisect': None,
- 'ir_diff': False,
- 'iterations': 50,
- 'prune_iterations': 100,
- 'verify': True,
- 'file_args': False,
- 'verbose': False
- }
- default_kwargs.update(kwargs)
- super(MockBinarySearchState, self).__init__(**default_kwargs)
+ def __init__(self, **kwargs):
+ # Initialize all arguments to None
+ default_kwargs = {
+ "get_initial_items": 'echo "1"',
+ "switch_to_good": None,
+ "switch_to_bad": None,
+ "test_setup_script": None,
+ "test_script": None,
+ "incremental": True,
+ "prune": False,
+ "pass_bisect": None,
+ "ir_diff": False,
+ "iterations": 50,
+ "prune_iterations": 100,
+ "verify": True,
+ "file_args": False,
+ "verbose": False,
+ }
+ default_kwargs.update(kwargs)
+ super(MockBinarySearchState, self).__init__(**default_kwargs)
def _CanonicalizeScript(script_name):
- """Return canonical path to script.
+ """Return canonical path to script.
- Args:
- script_name: Relative or absolute path to script
+ Args:
+ script_name: Relative or absolute path to script
- Returns:
- Canonicalized script path
- """
- script_name = os.path.expanduser(script_name)
- if not script_name.startswith('/'):
- return os.path.join('.', script_name)
+ Returns:
+ Canonicalized script path
+ """
+ script_name = os.path.expanduser(script_name)
+ if not script_name.startswith("/"):
+ return os.path.join(".", script_name)
-def Run(get_initial_items,
- switch_to_good,
- switch_to_bad,
- test_script,
- test_setup_script=None,
- iterations=50,
- prune=False,
- pass_bisect=None,
- ir_diff=False,
- noincremental=False,
- file_args=False,
- verify=True,
- prune_iterations=100,
- verbose=False,
- resume=False):
- """Run binary search tool.
+def Run(
+ get_initial_items,
+ switch_to_good,
+ switch_to_bad,
+ test_script,
+ test_setup_script=None,
+ iterations=50,
+ prune=False,
+ pass_bisect=None,
+ ir_diff=False,
+ noincremental=False,
+ file_args=False,
+ verify=True,
+ prune_iterations=100,
+ verbose=False,
+ resume=False,
+):
+ """Run binary search tool.
- Equivalent to running through terminal.
+ Equivalent to running through terminal.
- Args:
- get_initial_items: Script to enumerate all items being binary searched
- switch_to_good: Script that will take items as input and switch them to good
- set
- switch_to_bad: Script that will take items as input and switch them to bad
- set
- test_script: Script that will determine if the current combination of good
- and bad items make a "good" or "bad" result.
- test_setup_script: Script to do necessary setup (building, compilation,
- etc.) for test_script.
- iterations: How many binary search iterations to run before exiting.
- prune: If False the binary search tool will stop when the first bad item is
- found. Otherwise then binary search tool will continue searching until all
- bad items are found (or prune_iterations is reached).
- pass_bisect: Script that takes single bad item from POPULATE_BAD and returns
- the compiler command used to generate the bad item. This will turn on
- pass/ transformation level bisection for the bad item. Requires that
- 'prune' be set to False, and needs support of `-opt-bisect-limit`(pass)
- and `-print-debug-counter`(transformation) from LLVM.
- ir_diff: Whether to print IR differences before and after bad
- pass/transformation to verbose output. Defaults to False, only works when
- pass_bisect is enabled.
- noincremental: Whether to send "diffs" of good/bad items to switch scripts.
- file_args: If True then arguments to switch scripts will be a file name
- containing a newline separated list of the items to switch.
- verify: If True, run tests to ensure initial good/bad sets actually produce
- a good/bad result.
- prune_iterations: Max number of bad items to search for.
- verbose: If True will print extra debug information to user.
- resume: If True will resume using STATE_FILE.
+ Args:
+ get_initial_items: Script to enumerate all items being binary searched
+ switch_to_good: Script that will take items as input and switch them to good
+ set
+ switch_to_bad: Script that will take items as input and switch them to bad
+ set
+ test_script: Script that will determine if the current combination of good
+ and bad items make a "good" or "bad" result.
+ test_setup_script: Script to do necessary setup (building, compilation,
+ etc.) for test_script.
+ iterations: How many binary search iterations to run before exiting.
+ prune: If False the binary search tool will stop when the first bad item is
+ found. Otherwise then binary search tool will continue searching until all
+ bad items are found (or prune_iterations is reached).
+ pass_bisect: Script that takes single bad item from POPULATE_BAD and returns
+ the compiler command used to generate the bad item. This will turn on
+ pass/ transformation level bisection for the bad item. Requires that
+ 'prune' be set to False, and needs support of `-opt-bisect-limit`(pass)
+ and `-print-debug-counter`(transformation) from LLVM.
+ ir_diff: Whether to print IR differences before and after bad
+ pass/transformation to verbose output. Defaults to False, only works when
+ pass_bisect is enabled.
+ noincremental: Whether to send "diffs" of good/bad items to switch scripts.
+ file_args: If True then arguments to switch scripts will be a file name
+ containing a newline separated list of the items to switch.
+ verify: If True, run tests to ensure initial good/bad sets actually produce
+ a good/bad result.
+ prune_iterations: Max number of bad items to search for.
+ verbose: If True will print extra debug information to user.
+ resume: If True will resume using STATE_FILE.
- Returns:
- 0 for success, error otherwise
- """
- # Notice that all the argument checks are in the Run() function rather than
- # in the Main() function. It is not common to do so but some wrappers are
- # going to call Run() directly and bypass checks in Main() function.
- if resume:
- logger.GetLogger().LogOutput('Resuming from %s' % STATE_FILE)
- bss = BinarySearchState.LoadState()
- if not bss:
- logger.GetLogger().LogOutput(
- '%s is not a valid binary_search_tool state file, cannot resume!' %
- STATE_FILE)
- return 1
- logger.GetLogger().LogOutput('Note: resuming from previous state, '
- 'ignoring given options and loading saved '
- 'options instead.')
- else:
- if not (get_initial_items and switch_to_good and switch_to_bad and
- test_script):
- logger.GetLogger().LogOutput('The following options are required: '
- '[-i, -g, -b, -t] | [-r]')
- return 1
- if pass_bisect and prune:
- logger.GetLogger().LogOutput('"--pass_bisect" only works when '
- '"--prune" is set to be False.')
- return 1
- if not pass_bisect and ir_diff:
- logger.GetLogger().LogOutput('"--ir_diff" only works when '
- '"--pass_bisect" is enabled.')
+ Returns:
+ 0 for success, error otherwise
+ """
+ # Notice that all the argument checks are in the Run() function rather than
+ # in the Main() function. It is not common to do so but some wrappers are
+ # going to call Run() directly and bypass checks in Main() function.
+ if resume:
+ logger.GetLogger().LogOutput("Resuming from %s" % STATE_FILE)
+ bss = BinarySearchState.LoadState()
+ if not bss:
+ logger.GetLogger().LogOutput(
+ "%s is not a valid binary_search_tool state file, cannot resume!"
+ % STATE_FILE
+ )
+ return 1
+ logger.GetLogger().LogOutput(
+ "Note: resuming from previous state, "
+ "ignoring given options and loading saved "
+ "options instead."
+ )
+ else:
+ if not (
+ get_initial_items
+ and switch_to_good
+ and switch_to_bad
+ and test_script
+ ):
+ logger.GetLogger().LogOutput(
+ "The following options are required: " "[-i, -g, -b, -t] | [-r]"
+ )
+ return 1
+ if pass_bisect and prune:
+ logger.GetLogger().LogOutput(
+ '"--pass_bisect" only works when '
+ '"--prune" is set to be False.'
+ )
+ return 1
+ if not pass_bisect and ir_diff:
+ logger.GetLogger().LogOutput(
+ '"--ir_diff" only works when ' '"--pass_bisect" is enabled.'
+ )
- switch_to_good = _CanonicalizeScript(switch_to_good)
- switch_to_bad = _CanonicalizeScript(switch_to_bad)
- if test_setup_script:
- test_setup_script = _CanonicalizeScript(test_setup_script)
+ switch_to_good = _CanonicalizeScript(switch_to_good)
+ switch_to_bad = _CanonicalizeScript(switch_to_bad)
+ if test_setup_script:
+ test_setup_script = _CanonicalizeScript(test_setup_script)
+ if pass_bisect:
+ pass_bisect = _CanonicalizeScript(pass_bisect)
+ test_script = _CanonicalizeScript(test_script)
+ get_initial_items = _CanonicalizeScript(get_initial_items)
+ incremental = not noincremental
+
+ binary_search_perforce.verbose = verbose
+
+ bss = BinarySearchState(
+ get_initial_items,
+ switch_to_good,
+ switch_to_bad,
+ test_setup_script,
+ test_script,
+ incremental,
+ prune,
+ pass_bisect,
+ ir_diff,
+ iterations,
+ prune_iterations,
+ verify,
+ file_args,
+ verbose,
+ )
+ bss.DoVerify()
+
+ bss.DoSearchBadItems()
if pass_bisect:
- pass_bisect = _CanonicalizeScript(pass_bisect)
- test_script = _CanonicalizeScript(test_script)
- get_initial_items = _CanonicalizeScript(get_initial_items)
- incremental = not noincremental
+ bss.DoSearchBadPass()
+ bss.RemoveState()
+ logger.GetLogger().LogOutput(
+ "Total execution time: %s" % bss.ElapsedTimeString()
+ )
- binary_search_perforce.verbose = verbose
-
- bss = BinarySearchState(get_initial_items, switch_to_good, switch_to_bad,
- test_setup_script, test_script, incremental, prune,
- pass_bisect, ir_diff, iterations, prune_iterations,
- verify, file_args, verbose)
- bss.DoVerify()
-
- bss.DoSearchBadItems()
- if pass_bisect:
- bss.DoSearchBadPass()
- bss.RemoveState()
- logger.GetLogger().LogOutput(
- 'Total execution time: %s' % bss.ElapsedTimeString())
-
- return 0
+ return 0
def Main(argv):
- """The main function."""
- # Common initializations
+ """The main function."""
+ # Common initializations
- parser = argparse.ArgumentParser()
- common.BuildArgParser(parser)
- logger.GetLogger().LogOutput(' '.join(argv))
- options = parser.parse_args(argv)
+ parser = argparse.ArgumentParser()
+ common.BuildArgParser(parser)
+ logger.GetLogger().LogOutput(" ".join(argv))
+ options = parser.parse_args(argv)
- # Get dictionary of all options
- args = vars(options)
- return Run(**args)
+ # Get dictionary of all options
+ args = vars(options)
+ return Run(**args)
-if __name__ == '__main__':
- sys.exit(Main(sys.argv[1:]))
+if __name__ == "__main__":
+ sys.exit(Main(sys.argv[1:]))
diff --git a/binary_search_tool/bisect_driver.py b/binary_search_tool/bisect_driver.py
index ac37ad9..8feb1a3 100644
--- a/binary_search_tool/bisect_driver.py
+++ b/binary_search_tool/bisect_driver.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
@@ -19,400 +19,413 @@
https://docs.google.com/document/d/1yDgaUIa2O5w6dc3sSTe1ry-1ehKajTGJGQCbyn0fcEM
"""
-from __future__ import print_function
import contextlib
import fcntl
import os
import shutil
-import subprocess
import stat
+import subprocess
import sys
-VALID_MODES = ('POPULATE_GOOD', 'POPULATE_BAD', 'TRIAGE')
-GOOD_CACHE = 'good'
-BAD_CACHE = 'bad'
-LIST_FILE = os.path.join(GOOD_CACHE, '_LIST')
-CONTINUE_ON_MISSING = os.environ.get('BISECT_CONTINUE_ON_MISSING', None) == '1'
-CONTINUE_ON_REDUNDANCY = os.environ.get('BISECT_CONTINUE_ON_REDUNDANCY',
- None) == '1'
-WRAPPER_SAFE_MODE = os.environ.get('BISECT_WRAPPER_SAFE_MODE', None) == '1'
+VALID_MODES = ("POPULATE_GOOD", "POPULATE_BAD", "TRIAGE")
+GOOD_CACHE = "good"
+BAD_CACHE = "bad"
+LIST_FILE = os.path.join(GOOD_CACHE, "_LIST")
+
+CONTINUE_ON_MISSING = os.environ.get("BISECT_CONTINUE_ON_MISSING", None) == "1"
+CONTINUE_ON_REDUNDANCY = (
+ os.environ.get("BISECT_CONTINUE_ON_REDUNDANCY", None) == "1"
+)
+WRAPPER_SAFE_MODE = os.environ.get("BISECT_WRAPPER_SAFE_MODE", None) == "1"
class Error(Exception):
- """The general compiler wrapper error class."""
+ """The general compiler wrapper error class."""
@contextlib.contextmanager
def lock_file(path, mode):
- """Lock file and block if other process has lock on file.
+ """Lock file and block if other process has lock on file.
- Acquire exclusive lock for file. Only blocks other processes if they attempt
- to also acquire lock through this method. If only reading (modes 'r' and 'rb')
- then the lock is shared (i.e. many reads can happen concurrently, but only one
- process may write at a time).
+ Acquire exclusive lock for file. Only blocks other processes if they attempt
+ to also acquire lock through this method. If only reading (modes 'r' and 'rb')
+ then the lock is shared (i.e. many reads can happen concurrently, but only one
+ process may write at a time).
- This function is a contextmanager, meaning it's meant to be used with the
- "with" statement in Python. This is so cleanup and setup happens automatically
- and cleanly. Execution of the outer "with" statement happens at the "yield"
- statement. Execution resumes after the yield when the outer "with" statement
- ends.
+ This function is a contextmanager, meaning it's meant to be used with the
+ "with" statement in Python. This is so cleanup and setup happens automatically
+ and cleanly. Execution of the outer "with" statement happens at the "yield"
+ statement. Execution resumes after the yield when the outer "with" statement
+ ends.
- Args:
- path: path to file being locked
- mode: mode to open file with ('w', 'r', etc.)
- """
- with open(path, mode) as f:
- # Apply FD_CLOEXEC argument to fd. This ensures that the file descriptor
- # won't be leaked to any child processes.
- current_args = fcntl.fcntl(f.fileno(), fcntl.F_GETFD)
- fcntl.fcntl(f.fileno(), fcntl.F_SETFD, current_args | fcntl.FD_CLOEXEC)
+ Args:
+ path: path to file being locked
+ mode: mode to open file with ('w', 'r', etc.)
+ """
+ with open(path, mode) as f:
+ # Apply FD_CLOEXEC argument to fd. This ensures that the file descriptor
+ # won't be leaked to any child processes.
+ current_args = fcntl.fcntl(f.fileno(), fcntl.F_GETFD)
+ fcntl.fcntl(f.fileno(), fcntl.F_SETFD, current_args | fcntl.FD_CLOEXEC)
- # Reads can share the lock as no race conditions exist. If write is needed,
- # give writing process exclusive access to the file.
- if f.mode == 'r' or f.mode == 'rb':
- lock_type = fcntl.LOCK_SH
- else:
- lock_type = fcntl.LOCK_EX
+ # Reads can share the lock as no race conditions exist. If write is needed,
+ # give writing process exclusive access to the file.
+ if f.mode == "r" or f.mode == "rb":
+ lock_type = fcntl.LOCK_SH
+ else:
+ lock_type = fcntl.LOCK_EX
- try:
- fcntl.lockf(f, lock_type)
- yield f
- f.flush()
- finally:
- fcntl.lockf(f, fcntl.LOCK_UN)
+ try:
+ fcntl.lockf(f, lock_type)
+ yield f
+ f.flush()
+ finally:
+ fcntl.lockf(f, fcntl.LOCK_UN)
def log_to_file(path, execargs, link_from=None, link_to=None):
- """Common logging function.
+ """Common logging function.
- Log current working directory, current execargs, and a from-to relationship
- between files.
- """
- with lock_file(path, 'a') as log:
- log.write('cd: %s; %s\n' % (os.getcwd(), ' '.join(execargs)))
- if link_from and link_to:
- log.write('%s -> %s\n' % (link_from, link_to))
+ Log current working directory, current execargs, and a from-to relationship
+ between files.
+ """
+ with lock_file(path, "a") as log:
+ log.write("cd: %s; %s\n" % (os.getcwd(), " ".join(execargs)))
+ if link_from and link_to:
+ log.write("%s -> %s\n" % (link_from, link_to))
def exec_and_return(execargs):
- """Execute process and return.
+ """Execute process and return.
- Execute according to execargs and return immediately. Don't inspect
- stderr or stdout.
- """
- return subprocess.call(execargs)
+ Execute according to execargs and return immediately. Don't inspect
+ stderr or stdout.
+ """
+ return subprocess.call(execargs)
def which_cache(obj_file):
- """Determine which cache an object belongs to.
+ """Determine which cache an object belongs to.
- The binary search tool creates two files for each search iteration listing
- the full set of bad objects and full set of good objects. We use this to
- determine where an object file should be linked from (good or bad).
- """
- bad_set_file = os.environ.get('BISECT_BAD_SET')
- if in_object_list(obj_file, bad_set_file):
- return BAD_CACHE
- else:
- return GOOD_CACHE
+ The binary search tool creates two files for each search iteration listing
+ the full set of bad objects and full set of good objects. We use this to
+ determine where an object file should be linked from (good or bad).
+ """
+ bad_set_file = os.environ.get("BISECT_BAD_SET")
+ if in_object_list(obj_file, bad_set_file):
+ return BAD_CACHE
+ else:
+ return GOOD_CACHE
def makedirs(path):
- """Try to create directories in path."""
- try:
- os.makedirs(path)
- except os.error:
- if not os.path.isdir(path):
- raise
+ """Try to create directories in path."""
+ try:
+ os.makedirs(path)
+ except os.error:
+ if not os.path.isdir(path):
+ raise
def get_obj_path(execargs):
- """Get the object path for the object file in the list of arguments.
+ """Get the object path for the object file in the list of arguments.
- Returns:
- Absolute object path from execution args (-o argument). If no object being
- outputted, then return empty string. -o argument is checked only if -c is
- also present.
- """
- try:
- i = execargs.index('-o')
- _ = execargs.index('-c')
- except ValueError:
- return ''
+ Returns:
+ Absolute object path from execution args (-o argument). If no object being
+ outputted, then return empty string. -o argument is checked only if -c is
+ also present.
+ """
+ try:
+ i = execargs.index("-o")
+ _ = execargs.index("-c")
+ except ValueError:
+ return ""
- obj_path = execargs[i + 1]
- # Ignore args that do not create a file.
- if obj_path in (
- '-',
- '/dev/null',
- ):
- return ''
- # Ignore files ending in .tmp.
- if obj_path.endswith(('.tmp',)):
- return ''
- # Ignore configuration files generated by Automake/Autoconf/CMake etc.
- if (obj_path.endswith('conftest.o') or
- obj_path.endswith('CMakeFiles/test.o') or
- obj_path.find('CMakeTmp') != -1 or
- os.path.abspath(obj_path).find('CMakeTmp') != -1):
- return ''
+ obj_path = execargs[i + 1]
+ # Ignore args that do not create a file.
+ if obj_path in (
+ "-",
+ "/dev/null",
+ ):
+ return ""
+ # Ignore files ending in .tmp.
+ if obj_path.endswith((".tmp",)):
+ return ""
+ # Ignore configuration files generated by Automake/Autoconf/CMake etc.
+ if (
+ obj_path.endswith("conftest.o")
+ or obj_path.endswith("CMakeFiles/test.o")
+ or obj_path.find("CMakeTmp") != -1
+ or os.path.abspath(obj_path).find("CMakeTmp") != -1
+ ):
+ return ""
- return os.path.abspath(obj_path)
+ return os.path.abspath(obj_path)
def get_dep_path(execargs):
- """Get the dep file path for the dep file in the list of arguments.
+ """Get the dep file path for the dep file in the list of arguments.
- Returns:
- Absolute path of dependency file path from execution args (-o argument). If
- no dependency being outputted then return empty string.
- """
- if '-MD' not in execargs and '-MMD' not in execargs:
- return ''
+ Returns:
+      Absolute path of the dependency file from execution args (-o argument). If
+ no dependency being outputted then return empty string.
+ """
+ if "-MD" not in execargs and "-MMD" not in execargs:
+ return ""
- # If -MF is given this is the path of the dependency file. Otherwise the
- # dependency file is the value of -o but with a .d extension
- if '-MF' in execargs:
- i = execargs.index('-MF')
- dep_path = execargs[i + 1]
- return os.path.abspath(dep_path)
+ # If -MF is given this is the path of the dependency file. Otherwise the
+ # dependency file is the value of -o but with a .d extension
+ if "-MF" in execargs:
+ i = execargs.index("-MF")
+ dep_path = execargs[i + 1]
+ return os.path.abspath(dep_path)
- full_obj_path = get_obj_path(execargs)
- if not full_obj_path:
- return ''
+ full_obj_path = get_obj_path(execargs)
+ if not full_obj_path:
+ return ""
- return full_obj_path[:-2] + '.d'
+ return full_obj_path[:-2] + ".d"
def get_dwo_path(execargs):
- """Get the dwo file path for the dwo file in the list of arguments.
+ """Get the dwo file path for the dwo file in the list of arguments.
- Returns:
- Absolute dwo file path from execution args (-gsplit-dwarf argument) If no
- dwo file being outputted then return empty string.
- """
- if '-gsplit-dwarf' not in execargs:
- return ''
+ Returns:
+      Absolute dwo file path from execution args (-gsplit-dwarf argument). If no
+ dwo file being outputted then return empty string.
+ """
+ if "-gsplit-dwarf" not in execargs:
+ return ""
- full_obj_path = get_obj_path(execargs)
- if not full_obj_path:
- return ''
+ full_obj_path = get_obj_path(execargs)
+ if not full_obj_path:
+ return ""
- return full_obj_path[:-2] + '.dwo'
+ return full_obj_path[:-2] + ".dwo"
def in_object_list(obj_name, list_filename):
- """Check if object file name exist in file with object list."""
- if not obj_name:
- return False
+ """Check if object file name exist in file with object list."""
+ if not obj_name:
+ return False
- with lock_file(list_filename, 'r') as list_file:
- for line in list_file:
- if line.strip() == obj_name:
- return True
+ with lock_file(list_filename, "r") as list_file:
+ for line in list_file:
+ if line.strip() == obj_name:
+ return True
- return False
+ return False
def get_side_effects(execargs):
- """Determine side effects generated by compiler
+ """Determine side effects generated by compiler
- Returns:
- List of paths of objects that the compiler generates as side effects.
- """
- side_effects = []
+ Returns:
+ List of paths of objects that the compiler generates as side effects.
+ """
+ side_effects = []
- # Cache dependency files
- full_dep_path = get_dep_path(execargs)
- if full_dep_path:
- side_effects.append(full_dep_path)
+ # Cache dependency files
+ full_dep_path = get_dep_path(execargs)
+ if full_dep_path:
+ side_effects.append(full_dep_path)
- # Cache dwo files
- full_dwo_path = get_dwo_path(execargs)
- if full_dwo_path:
- side_effects.append(full_dwo_path)
+ # Cache dwo files
+ full_dwo_path = get_dwo_path(execargs)
+ if full_dwo_path:
+ side_effects.append(full_dwo_path)
- return side_effects
+ return side_effects
def cache_file(execargs, bisect_dir, cache, abs_file_path):
- """Cache compiler output file (.o/.d/.dwo).
+ """Cache compiler output file (.o/.d/.dwo).
- Args:
- execargs: compiler execution arguments.
- bisect_dir: The directory where bisection caches live.
- cache: Which cache the file will be cached to (GOOD/BAD).
- abs_file_path: Absolute path to file being cached.
+ Args:
+ execargs: compiler execution arguments.
+ bisect_dir: The directory where bisection caches live.
+ cache: Which cache the file will be cached to (GOOD/BAD).
+ abs_file_path: Absolute path to file being cached.
- Returns:
- True if caching was successful, False otherwise.
- """
- # os.path.join fails with absolute paths, use + instead
- bisect_path = os.path.join(bisect_dir, cache) + abs_file_path
- bisect_path_dir = os.path.dirname(bisect_path)
- makedirs(bisect_path_dir)
- pop_log = os.path.join(bisect_dir, cache, '_POPULATE_LOG')
- log_to_file(pop_log, execargs, abs_file_path, bisect_path)
+ Returns:
+ True if caching was successful, False otherwise.
+ """
+ # os.path.join fails with absolute paths, use + instead
+ bisect_path = os.path.join(bisect_dir, cache) + abs_file_path
+ bisect_path_dir = os.path.dirname(bisect_path)
+ makedirs(bisect_path_dir)
+ pop_log = os.path.join(bisect_dir, cache, "_POPULATE_LOG")
+ log_to_file(pop_log, execargs, abs_file_path, bisect_path)
- try:
- if os.path.exists(abs_file_path):
- if os.path.exists(bisect_path):
- # File exists
- population_dir = os.path.join(bisect_dir, cache)
- with lock_file(os.path.join(population_dir, '_DUPS'),
- 'a') as dup_object_list:
- dup_object_list.write('%s\n' % abs_file_path)
- if CONTINUE_ON_REDUNDANCY:
- return True
- raise Exception(
- 'Trying to cache file %s multiple times. To avoid the error, set ' \
- 'BISECT_CONTINUE_ON_REDUNDANCY to 1. For reference, the list of ' \
- 'such files will be written to %s' % (abs_file_path, os.path.join(
- population_dir, '_DUPS')))
+ try:
+ if os.path.exists(abs_file_path):
+ if os.path.exists(bisect_path):
+ # File exists
+ population_dir = os.path.join(bisect_dir, cache)
+ with lock_file(
+ os.path.join(population_dir, "_DUPS"), "a"
+ ) as dup_object_list:
+ dup_object_list.write("%s\n" % abs_file_path)
+ if CONTINUE_ON_REDUNDANCY:
+ return True
+ raise Exception(
+ "Trying to cache file %s multiple times. To avoid the error, set "
+ "BISECT_CONTINUE_ON_REDUNDANCY to 1. For reference, the list of "
+ "such files will be written to %s"
+ % (abs_file_path, os.path.join(population_dir, "_DUPS"))
+ )
- shutil.copy2(abs_file_path, bisect_path)
- # Set cache object to be read-only so later compilations can't
- # accidentally overwrite it.
- os.chmod(bisect_path, 0o444)
- return True
- else:
- # File not found (happens when compilation fails but error code is still
- # 0)
- return False
- except Exception:
- print('Could not cache file %s' % abs_file_path, file=sys.stderr)
- raise
+ shutil.copy2(abs_file_path, bisect_path)
+ # Set cache object to be read-only so later compilations can't
+ # accidentally overwrite it.
+ os.chmod(bisect_path, 0o444)
+ return True
+ else:
+ # File not found (happens when compilation fails but error code is still
+ # 0)
+ return False
+ except Exception:
+ print("Could not cache file %s" % abs_file_path, file=sys.stderr)
+ raise
def restore_file(bisect_dir, cache, abs_file_path):
- """Restore file from cache (.o/.d/.dwo).
+ """Restore file from cache (.o/.d/.dwo).
- Args:
- bisect_dir: The directory where bisection caches live.
- cache: Which cache the file will be restored from (GOOD/BAD).
- abs_file_path: Absolute path to file being restored.
- """
- # os.path.join fails with absolute paths, use + instead
- cached_path = os.path.join(bisect_dir, cache) + abs_file_path
- if os.path.exists(cached_path):
- if os.path.exists(abs_file_path):
- os.remove(abs_file_path)
- shutil.copy2(cached_path, abs_file_path)
- # Add write permission to the restored object files as some packages
- # (such as kernels) may need write permission to delete files.
- os.chmod(abs_file_path, os.stat(abs_file_path).st_mode | stat.S_IWUSR)
- else:
- raise Error(('%s is missing from %s cache! Unsure how to proceed. Make '
- 'will now crash.' % (cache, cached_path)))
+ Args:
+ bisect_dir: The directory where bisection caches live.
+ cache: Which cache the file will be restored from (GOOD/BAD).
+ abs_file_path: Absolute path to file being restored.
+ """
+ # os.path.join fails with absolute paths, use + instead
+ cached_path = os.path.join(bisect_dir, cache) + abs_file_path
+ if os.path.exists(cached_path):
+ if os.path.exists(abs_file_path):
+ os.remove(abs_file_path)
+ shutil.copy2(cached_path, abs_file_path)
+ # Add write permission to the restored object files as some packages
+ # (such as kernels) may need write permission to delete files.
+ os.chmod(abs_file_path, os.stat(abs_file_path).st_mode | stat.S_IWUSR)
+ else:
+ raise Error(
+ (
+ "%s is missing from %s cache! Unsure how to proceed. Make "
+ "will now crash." % (cache, cached_path)
+ )
+ )
def bisect_populate(execargs, bisect_dir, population_name):
- """Add necessary information to the bisect cache for the given execution.
+ """Add necessary information to the bisect cache for the given execution.
- Extract the necessary information for bisection from the compiler
- execution arguments and put it into the bisection cache. This
- includes copying the created object file, adding the object
- file path to the cache list and keeping a log of the execution.
+ Extract the necessary information for bisection from the compiler
+ execution arguments and put it into the bisection cache. This
+ includes copying the created object file, adding the object
+ file path to the cache list and keeping a log of the execution.
- Args:
- execargs: compiler execution arguments.
- bisect_dir: bisection directory.
- population_name: name of the cache being populated (good/bad).
- """
- retval = exec_and_return(execargs)
- if retval:
+ Args:
+ execargs: compiler execution arguments.
+ bisect_dir: bisection directory.
+ population_name: name of the cache being populated (good/bad).
+ """
+ retval = exec_and_return(execargs)
+ if retval:
+ return retval
+
+ full_obj_path = get_obj_path(execargs)
+ # This is not a normal compiler call because it doesn't have a -o argument,
+ # or the -o argument has an unusable output file.
+ # It's likely that this compiler call was actually made to invoke the linker,
+    # or as part of a configuration test. In this case we want to simply call the
+ # compiler and return.
+ if not full_obj_path:
+ return retval
+
+ # Return if not able to cache the object file
+ if not cache_file(execargs, bisect_dir, population_name, full_obj_path):
+ return retval
+
+ population_dir = os.path.join(bisect_dir, population_name)
+ with lock_file(os.path.join(population_dir, "_LIST"), "a") as object_list:
+ object_list.write("%s\n" % full_obj_path)
+
+ for side_effect in get_side_effects(execargs):
+ _ = cache_file(execargs, bisect_dir, population_name, side_effect)
+
return retval
- full_obj_path = get_obj_path(execargs)
- # This is not a normal compiler call because it doesn't have a -o argument,
- # or the -o argument has an unusable output file.
- # It's likely that this compiler call was actually made to invoke the linker,
- # or as part of a configuratoin test. In this case we want to simply call the
- # compiler and return.
- if not full_obj_path:
- return retval
-
- # Return if not able to cache the object file
- if not cache_file(execargs, bisect_dir, population_name, full_obj_path):
- return retval
-
- population_dir = os.path.join(bisect_dir, population_name)
- with lock_file(os.path.join(population_dir, '_LIST'), 'a') as object_list:
- object_list.write('%s\n' % full_obj_path)
-
- for side_effect in get_side_effects(execargs):
- _ = cache_file(execargs, bisect_dir, population_name, side_effect)
-
- return retval
-
def bisect_triage(execargs, bisect_dir):
- """Use object object file from appropriate cache (good/bad).
+ """Use object object file from appropriate cache (good/bad).
- Given a populated bisection directory, use the object file saved
- into one of the caches (good/bad) according to what is specified
- in the good/bad sets. The good/bad sets are generated by the
- high level binary search tool. Additionally restore any possible
- side effects of compiler.
+ Given a populated bisection directory, use the object file saved
+ into one of the caches (good/bad) according to what is specified
+ in the good/bad sets. The good/bad sets are generated by the
+ high level binary search tool. Additionally restore any possible
+ side effects of compiler.
- Args:
- execargs: compiler execution arguments.
- bisect_dir: populated bisection directory.
- """
- full_obj_path = get_obj_path(execargs)
- obj_list = os.path.join(bisect_dir, LIST_FILE)
+ Args:
+ execargs: compiler execution arguments.
+ bisect_dir: populated bisection directory.
+ """
+ full_obj_path = get_obj_path(execargs)
+ obj_list = os.path.join(bisect_dir, LIST_FILE)
- # If the output isn't an object file just call compiler
- if not full_obj_path:
- return exec_and_return(execargs)
+ # If the output isn't an object file just call compiler
+ if not full_obj_path:
+ return exec_and_return(execargs)
- # If this isn't a bisected object just call compiler
- # This shouldn't happen!
- if not in_object_list(full_obj_path, obj_list):
- if CONTINUE_ON_MISSING:
- log_file = os.path.join(bisect_dir, '_MISSING_CACHED_OBJ_LOG')
- log_to_file(log_file, execargs, '? compiler', full_obj_path)
- return exec_and_return(execargs)
- else:
- raise Error(('%s is missing from cache! To ignore export '
- 'BISECT_CONTINUE_ON_MISSING=1. See documentation for more '
- 'details on this option.' % full_obj_path))
+ # If this isn't a bisected object just call compiler
+ # This shouldn't happen!
+ if not in_object_list(full_obj_path, obj_list):
+ if CONTINUE_ON_MISSING:
+ log_file = os.path.join(bisect_dir, "_MISSING_CACHED_OBJ_LOG")
+ log_to_file(log_file, execargs, "? compiler", full_obj_path)
+ return exec_and_return(execargs)
+ else:
+ raise Error(
+ (
+ "%s is missing from cache! To ignore export "
+ "BISECT_CONTINUE_ON_MISSING=1. See documentation for more "
+ "details on this option." % full_obj_path
+ )
+ )
- cache = which_cache(full_obj_path)
+ cache = which_cache(full_obj_path)
- # If using safe WRAPPER_SAFE_MODE option call compiler and overwrite the
- # result from the good/bad cache. This option is safe and covers all compiler
- # side effects, but is very slow!
- if WRAPPER_SAFE_MODE:
- retval = exec_and_return(execargs)
- if retval:
- return retval
- os.remove(full_obj_path)
- restore_file(bisect_dir, cache, full_obj_path)
- return retval
+ # If using safe WRAPPER_SAFE_MODE option call compiler and overwrite the
+ # result from the good/bad cache. This option is safe and covers all compiler
+ # side effects, but is very slow!
+ if WRAPPER_SAFE_MODE:
+ retval = exec_and_return(execargs)
+ if retval:
+ return retval
+ os.remove(full_obj_path)
+ restore_file(bisect_dir, cache, full_obj_path)
+ return retval
- # Generate compiler side effects. Trick Make into thinking compiler was
- # actually executed.
- for side_effect in get_side_effects(execargs):
- restore_file(bisect_dir, cache, side_effect)
+ # Generate compiler side effects. Trick Make into thinking compiler was
+ # actually executed.
+ for side_effect in get_side_effects(execargs):
+ restore_file(bisect_dir, cache, side_effect)
- # If generated object file happened to be pruned/cleaned by Make then link it
- # over from cache again.
- if not os.path.exists(full_obj_path):
- restore_file(bisect_dir, cache, full_obj_path)
+ # If generated object file happened to be pruned/cleaned by Make then link it
+ # over from cache again.
+ if not os.path.exists(full_obj_path):
+ restore_file(bisect_dir, cache, full_obj_path)
- return 0
+ return 0
def bisect_driver(bisect_stage, bisect_dir, execargs):
- """Call appropriate bisection stage according to value in bisect_stage."""
- if bisect_stage == 'POPULATE_GOOD':
- return bisect_populate(execargs, bisect_dir, GOOD_CACHE)
- elif bisect_stage == 'POPULATE_BAD':
- return bisect_populate(execargs, bisect_dir, BAD_CACHE)
- elif bisect_stage == 'TRIAGE':
- return bisect_triage(execargs, bisect_dir)
- else:
- raise ValueError('wrong value for BISECT_STAGE: %s' % bisect_stage)
+ """Call appropriate bisection stage according to value in bisect_stage."""
+ if bisect_stage == "POPULATE_GOOD":
+ return bisect_populate(execargs, bisect_dir, GOOD_CACHE)
+ elif bisect_stage == "POPULATE_BAD":
+ return bisect_populate(execargs, bisect_dir, BAD_CACHE)
+ elif bisect_stage == "TRIAGE":
+ return bisect_triage(execargs, bisect_dir)
+ else:
+ raise ValueError("wrong value for BISECT_STAGE: %s" % bisect_stage)
diff --git a/binary_search_tool/common.py b/binary_search_tool/common.py
index 85cd478..f616584 100644
--- a/binary_search_tool/common.py
+++ b/binary_search_tool/common.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -21,276 +21,303 @@
created so the help text is made properly.
"""
-from __future__ import print_function
import collections
import os
import sys
+
# Programatically adding utils python path to PYTHONPATH
if os.path.isabs(sys.argv[0]):
- utils_pythonpath = os.path.abspath('{0}/..'.format(
- os.path.dirname(sys.argv[0])))
+ utils_pythonpath = os.path.abspath(
+ "{0}/..".format(os.path.dirname(sys.argv[0]))
+ )
else:
- wdir = os.getcwd()
- utils_pythonpath = os.path.abspath('{0}/{1}/..'.format(
- wdir, os.path.dirname(sys.argv[0])))
+ wdir = os.getcwd()
+ utils_pythonpath = os.path.abspath(
+ "{0}/{1}/..".format(wdir, os.path.dirname(sys.argv[0]))
+ )
sys.path.append(utils_pythonpath)
class ArgumentDict(collections.OrderedDict):
- """Wrapper around OrderedDict, represents CLI arguments for program.
+ """Wrapper around OrderedDict, represents CLI arguments for program.
- AddArgument enforces the following layout:
- {
- ['-n', '--iterations'] : {
- 'dest': 'iterations',
- 'type': int,
- 'help': 'Number of iterations to try in the search.',
- 'default': 50
- }
- [arg_name1, arg_name2, ...] : {
- arg_option1 : arg_option_val1,
- ...
- },
- ...
- }
- """
- _POSSIBLE_OPTIONS = [
- 'action', 'nargs', 'const', 'default', 'type', 'choices', 'required',
- 'help', 'metavar', 'dest'
- ]
-
- def AddArgument(self, *args, **kwargs):
- """Add argument to ArgsDict, has same signature as argparse.add_argument
-
- Emulates the the argparse.add_argument method so the internal OrderedDict
- can be safely and easily populated. Each call to this method will have a 1-1
- corresponding call to argparse.add_argument once BuildArgParser is called.
-
- Args:
- *args: The names for the argument (-V, --verbose, etc.)
- **kwargs: The options for the argument, corresponds to the args of
- argparse.add_argument
-
- Returns:
- None
-
- Raises:
- TypeError: if args is empty or if option in kwargs is not a valid
- option for argparse.add_argument.
+ AddArgument enforces the following layout:
+ {
+ ['-n', '--iterations'] : {
+ 'dest': 'iterations',
+ 'type': int,
+ 'help': 'Number of iterations to try in the search.',
+ 'default': 50
+ }
+ [arg_name1, arg_name2, ...] : {
+ arg_option1 : arg_option_val1,
+ ...
+ },
+ ...
+ }
"""
- if not args:
- raise TypeError('Argument needs at least one name')
- for key in kwargs:
- if key not in self._POSSIBLE_OPTIONS:
- raise TypeError('Invalid option "%s" for argument %s' % (key, args[0]))
+ _POSSIBLE_OPTIONS = [
+ "action",
+ "nargs",
+ "const",
+ "default",
+ "type",
+ "choices",
+ "required",
+ "help",
+ "metavar",
+ "dest",
+ ]
- self[args] = kwargs
+ def AddArgument(self, *args, **kwargs):
+ """Add argument to ArgsDict, has same signature as argparse.add_argument
+
+    Emulates the argparse.add_argument method so the internal OrderedDict
+ can be safely and easily populated. Each call to this method will have a 1-1
+ corresponding call to argparse.add_argument once BuildArgParser is called.
+
+ Args:
+ *args: The names for the argument (-V, --verbose, etc.)
+ **kwargs: The options for the argument, corresponds to the args of
+ argparse.add_argument
+
+ Returns:
+ None
+
+ Raises:
+ TypeError: if args is empty or if option in kwargs is not a valid
+ option for argparse.add_argument.
+ """
+ if not args:
+ raise TypeError("Argument needs at least one name")
+
+ for key in kwargs:
+ if key not in self._POSSIBLE_OPTIONS:
+ raise TypeError(
+ 'Invalid option "%s" for argument %s' % (key, args[0])
+ )
+
+ self[args] = kwargs
_ArgsDict = ArgumentDict()
def GetArgsDict():
- """_ArgsDict singleton method"""
- if not _ArgsDict:
- _BuildArgsDict(_ArgsDict)
- return _ArgsDict
+ """_ArgsDict singleton method"""
+ if not _ArgsDict:
+ _BuildArgsDict(_ArgsDict)
+ return _ArgsDict
def BuildArgParser(parser, override=False):
- """Add all arguments from singleton ArgsDict to parser.
+ """Add all arguments from singleton ArgsDict to parser.
- Will take argparse parser and add all arguments in ArgsDict. Will ignore
- the default and required options if override is set to True.
+ Will take argparse parser and add all arguments in ArgsDict. Will ignore
+ the default and required options if override is set to True.
- Args:
- parser: type argparse.ArgumentParser, will call add_argument for every item
- in _ArgsDict
- override: True if being called from run_bisect.py. Used to say that default
- and required options are to be ignored
+ Args:
+ parser: type argparse.ArgumentParser, will call add_argument for every item
+ in _ArgsDict
+ override: True if being called from run_bisect.py. Used to say that default
+ and required options are to be ignored
- Returns:
- None
- """
- ArgsDict = GetArgsDict()
+ Returns:
+ None
+ """
+ ArgsDict = GetArgsDict()
- # Have no defaults when overriding
- for arg_names, arg_options in ArgsDict.items():
- if override:
- arg_options = arg_options.copy()
- arg_options.pop('default', None)
- arg_options.pop('required', None)
+ # Have no defaults when overriding
+ for arg_names, arg_options in ArgsDict.items():
+ if override:
+ arg_options = arg_options.copy()
+ arg_options.pop("default", None)
+ arg_options.pop("required", None)
- parser.add_argument(*arg_names, **arg_options)
+ parser.add_argument(*arg_names, **arg_options)
def StrToBool(str_in):
- if str_in.lower() in ['true', 't', '1']:
- return True
- if str_in.lower() in ['false', 'f', '0']:
- return False
+ if str_in.lower() in ["true", "t", "1"]:
+ return True
+ if str_in.lower() in ["false", "f", "0"]:
+ return False
- raise AttributeError('%s is not a valid boolean string' % str_in)
+ raise AttributeError("%s is not a valid boolean string" % str_in)
def _BuildArgsDict(args):
- """Populate ArgumentDict with all arguments"""
- args.AddArgument(
- '-n',
- '--iterations',
- dest='iterations',
- type=int,
- help='Number of iterations to try in the search.',
- default=50)
- args.AddArgument(
- '-i',
- '--get_initial_items',
- dest='get_initial_items',
- help='Script to run to get the initial objects. '
- 'If your script requires user input '
- 'the --verbose option must be used')
- args.AddArgument(
- '-g',
- '--switch_to_good',
- dest='switch_to_good',
- help='Script to run to switch to good. '
- 'If your switch script requires user input '
- 'the --verbose option must be used')
- args.AddArgument(
- '-b',
- '--switch_to_bad',
- dest='switch_to_bad',
- help='Script to run to switch to bad. '
- 'If your switch script requires user input '
- 'the --verbose option must be used')
- args.AddArgument(
- '-I',
- '--test_setup_script',
- dest='test_setup_script',
- help='Optional script to perform building, flashing, '
- 'and other setup before the test script runs.')
- args.AddArgument(
- '-t',
- '--test_script',
- dest='test_script',
- help='Script to run to test the '
- 'output after packages are built.')
- # No input (evals to False),
- # --prune (evals to True),
- # --prune=False,
- # --prune=True
- args.AddArgument(
- '-p',
- '--prune',
- dest='prune',
- nargs='?',
- const=True,
- default=False,
- type=StrToBool,
- metavar='bool',
- help='If True, continue until all bad items are found. '
- 'Defaults to False.')
- args.AddArgument(
- '-P',
- '--pass_bisect',
- dest='pass_bisect',
- default=None,
- help='Script to generate another script for pass level bisect, '
- 'which contains command line options to build bad item. '
- 'This will also turn on pass/transformation level bisection. '
- 'Needs support of `-opt-bisect-limit`(pass) and '
- '`-print-debug-counter`(transformation) from LLVM. '
- 'For now it only supports one single bad item, so to use it, '
- 'prune must be set to False.')
- # No input (evals to False),
- # --ir_diff (evals to True),
- # --ir_diff=False,
- # --ir_diff=True
- args.AddArgument(
- '-d',
- '--ir_diff',
- dest='ir_diff',
- nargs='?',
- const=True,
- default=False,
- type=StrToBool,
- metavar='bool',
- help='Whether to print IR differences before and after bad '
- 'pass/transformation to verbose output. Defaults to False, '
- 'only works when pass_bisect is enabled.')
- # No input (evals to False),
- # --noincremental (evals to True),
- # --noincremental=False,
- # --noincremental=True
- args.AddArgument(
- '-c',
- '--noincremental',
- dest='noincremental',
- nargs='?',
- const=True,
- default=False,
- type=StrToBool,
- metavar='bool',
- help="If True, don't propagate good/bad changes "
- 'incrementally. Defaults to False.')
- # No input (evals to False),
- # --file_args (evals to True),
- # --file_args=False,
- # --file_args=True
- args.AddArgument(
- '-f',
- '--file_args',
- dest='file_args',
- nargs='?',
- const=True,
- default=False,
- type=StrToBool,
- metavar='bool',
- help='Whether to use a file to pass arguments to scripts. '
- 'Defaults to False.')
- # No input (evals to True),
- # --verify (evals to True),
- # --verify=False,
- # --verify=True
- args.AddArgument(
- '--verify',
- dest='verify',
- nargs='?',
- const=True,
- default=True,
- type=StrToBool,
- metavar='bool',
- help='Whether to run verify iterations before searching. '
- 'Defaults to True.')
- args.AddArgument(
- '-N',
- '--prune_iterations',
- dest='prune_iterations',
- type=int,
- help='Number of prune iterations to try in the search.',
- default=100)
- # No input (evals to False),
- # --verbose (evals to True),
- # --verbose=False,
- # --verbose=True
- args.AddArgument(
- '-V',
- '--verbose',
- dest='verbose',
- nargs='?',
- const=True,
- default=False,
- type=StrToBool,
- metavar='bool',
- help='If True, print full output to console.')
- args.AddArgument(
- '-r',
- '--resume',
- dest='resume',
- action='store_true',
- help='Resume bisection tool execution from state file.'
- 'Useful if the last bisection was terminated '
- 'before it could properly finish.')
+ """Populate ArgumentDict with all arguments"""
+ args.AddArgument(
+ "-n",
+ "--iterations",
+ dest="iterations",
+ type=int,
+ help="Number of iterations to try in the search.",
+ default=50,
+ )
+ args.AddArgument(
+ "-i",
+ "--get_initial_items",
+ dest="get_initial_items",
+ help="Script to run to get the initial objects. "
+ "If your script requires user input "
+ "the --verbose option must be used",
+ )
+ args.AddArgument(
+ "-g",
+ "--switch_to_good",
+ dest="switch_to_good",
+ help="Script to run to switch to good. "
+ "If your switch script requires user input "
+ "the --verbose option must be used",
+ )
+ args.AddArgument(
+ "-b",
+ "--switch_to_bad",
+ dest="switch_to_bad",
+ help="Script to run to switch to bad. "
+ "If your switch script requires user input "
+ "the --verbose option must be used",
+ )
+ args.AddArgument(
+ "-I",
+ "--test_setup_script",
+ dest="test_setup_script",
+ help="Optional script to perform building, flashing, "
+ "and other setup before the test script runs.",
+ )
+ args.AddArgument(
+ "-t",
+ "--test_script",
+ dest="test_script",
+ help="Script to run to test the " "output after packages are built.",
+ )
+ # No input (evals to False),
+ # --prune (evals to True),
+ # --prune=False,
+ # --prune=True
+ args.AddArgument(
+ "-p",
+ "--prune",
+ dest="prune",
+ nargs="?",
+ const=True,
+ default=False,
+ type=StrToBool,
+ metavar="bool",
+ help="If True, continue until all bad items are found. "
+ "Defaults to False.",
+ )
+ args.AddArgument(
+ "-P",
+ "--pass_bisect",
+ dest="pass_bisect",
+ default=None,
+ help="Script to generate another script for pass level bisect, "
+ "which contains command line options to build bad item. "
+ "This will also turn on pass/transformation level bisection. "
+ "Needs support of `-opt-bisect-limit`(pass) and "
+ "`-print-debug-counter`(transformation) from LLVM. "
+ "For now it only supports one single bad item, so to use it, "
+ "prune must be set to False.",
+ )
+ # No input (evals to False),
+ # --ir_diff (evals to True),
+ # --ir_diff=False,
+ # --ir_diff=True
+ args.AddArgument(
+ "-d",
+ "--ir_diff",
+ dest="ir_diff",
+ nargs="?",
+ const=True,
+ default=False,
+ type=StrToBool,
+ metavar="bool",
+ help="Whether to print IR differences before and after bad "
+ "pass/transformation to verbose output. Defaults to False, "
+ "only works when pass_bisect is enabled.",
+ )
+ # No input (evals to False),
+ # --noincremental (evals to True),
+ # --noincremental=False,
+ # --noincremental=True
+ args.AddArgument(
+ "-c",
+ "--noincremental",
+ dest="noincremental",
+ nargs="?",
+ const=True,
+ default=False,
+ type=StrToBool,
+ metavar="bool",
+ help="If True, don't propagate good/bad changes "
+ "incrementally. Defaults to False.",
+ )
+ # No input (evals to False),
+ # --file_args (evals to True),
+ # --file_args=False,
+ # --file_args=True
+ args.AddArgument(
+ "-f",
+ "--file_args",
+ dest="file_args",
+ nargs="?",
+ const=True,
+ default=False,
+ type=StrToBool,
+ metavar="bool",
+ help="Whether to use a file to pass arguments to scripts. "
+ "Defaults to False.",
+ )
+ # No input (evals to True),
+ # --verify (evals to True),
+ # --verify=False,
+ # --verify=True
+ args.AddArgument(
+ "--verify",
+ dest="verify",
+ nargs="?",
+ const=True,
+ default=True,
+ type=StrToBool,
+ metavar="bool",
+ help="Whether to run verify iterations before searching. "
+ "Defaults to True.",
+ )
+ args.AddArgument(
+ "-N",
+ "--prune_iterations",
+ dest="prune_iterations",
+ type=int,
+ help="Number of prune iterations to try in the search.",
+ default=100,
+ )
+ # No input (evals to False),
+ # --verbose (evals to True),
+ # --verbose=False,
+ # --verbose=True
+ args.AddArgument(
+ "-V",
+ "--verbose",
+ dest="verbose",
+ nargs="?",
+ const=True,
+ default=False,
+ type=StrToBool,
+ metavar="bool",
+ help="If True, print full output to console.",
+ )
+ args.AddArgument(
+ "-r",
+ "--resume",
+ dest="resume",
+ action="store_true",
+ help="Resume bisection tool execution from state file."
+ "Useful if the last bisection was terminated "
+ "before it could properly finish.",
+ )
diff --git a/binary_search_tool/common/boot_test.sh b/binary_search_tool/common/boot_test.sh
index 8f6d9a7..384712b 100755
--- a/binary_search_tool/common/boot_test.sh
+++ b/binary_search_tool/common/boot_test.sh
@@ -1,6 +1,6 @@
#!/bin/bash -u
#
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
#
# This script pings the chromebook to determine if it has successfully booted.
#
diff --git a/binary_search_tool/common/hash_test.sh b/binary_search_tool/common/hash_test.sh
index 5450988..338ee02 100755
--- a/binary_search_tool/common/hash_test.sh
+++ b/binary_search_tool/common/hash_test.sh
@@ -1,6 +1,6 @@
#!/bin/bash -u
#
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
#
# This script is intended to be used by binary_search_state.py. It is to
# be used for testing/development of the binary search triage tool
diff --git a/binary_search_tool/common/interactive_test.sh b/binary_search_tool/common/interactive_test.sh
index 8773dd1..05d47b7 100755
--- a/binary_search_tool/common/interactive_test.sh
+++ b/binary_search_tool/common/interactive_test.sh
@@ -1,6 +1,6 @@
#!/bin/bash -u
#
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
#
# This script pings the chromebook to determine if it successfully booted.
# It then asks the user if the image is good or not, allowing the user to
diff --git a/binary_search_tool/common/interactive_test_noping.sh b/binary_search_tool/common/interactive_test_noping.sh
index bb01b95..d4e77d7 100755
--- a/binary_search_tool/common/interactive_test_noping.sh
+++ b/binary_search_tool/common/interactive_test_noping.sh
@@ -1,6 +1,6 @@
#!/bin/bash -u
#
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
#
# This script asks the user if the image is good or not, allowing the user to
# conduct whatever tests the user wishes, and waiting for a response.
diff --git a/binary_search_tool/common/test_setup.sh b/binary_search_tool/common/test_setup.sh
index 3ea7327..0645234 100755
--- a/binary_search_tool/common/test_setup.sh
+++ b/binary_search_tool/common/test_setup.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
diff --git a/binary_search_tool/compiler_wrapper.py b/binary_search_tool/compiler_wrapper.py
index 0fd92c6..c32826b 100755
--- a/binary_search_tool/compiler_wrapper.py
+++ b/binary_search_tool/compiler_wrapper.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -20,7 +20,6 @@
https://docs.google.com/document/d/1yDgaUIa2O5w6dc3sSTe1ry-1ehKajTGJGQCbyn0fcEM
"""
-from __future__ import print_function
import os
import shlex
@@ -28,41 +27,42 @@
from binary_search_tool import bisect_driver
-WRAPPED = '%s.real' % sys.argv[0]
-BISECT_STAGE = os.environ.get('BISECT_STAGE')
-DEFAULT_BISECT_DIR = os.path.expanduser('~/ANDROID_BISECT')
-BISECT_DIR = os.environ.get('BISECT_DIR') or DEFAULT_BISECT_DIR
+
+WRAPPED = "%s.real" % sys.argv[0]
+BISECT_STAGE = os.environ.get("BISECT_STAGE")
+DEFAULT_BISECT_DIR = os.path.expanduser("~/ANDROID_BISECT")
+BISECT_DIR = os.environ.get("BISECT_DIR") or DEFAULT_BISECT_DIR
def ProcessArgFile(arg_file):
- args = []
- # Read in entire file at once and parse as if in shell
- with open(arg_file, 'r', encoding='utf-8') as f:
- args.extend(shlex.split(f.read()))
+ args = []
+ # Read in entire file at once and parse as if in shell
+ with open(arg_file, "r", encoding="utf-8") as f:
+ args.extend(shlex.split(f.read()))
- return args
+ return args
def Main(_):
- if not os.path.islink(sys.argv[0]):
- print("Compiler wrapper can't be called directly!")
- return 1
+ if not os.path.islink(sys.argv[0]):
+ print("Compiler wrapper can't be called directly!")
+ return 1
- execargs = [WRAPPED] + sys.argv[1:]
+ execargs = [WRAPPED] + sys.argv[1:]
- if BISECT_STAGE not in bisect_driver.VALID_MODES or '-o' not in execargs:
- os.execv(WRAPPED, [WRAPPED] + sys.argv[1:])
+ if BISECT_STAGE not in bisect_driver.VALID_MODES or "-o" not in execargs:
+ os.execv(WRAPPED, [WRAPPED] + sys.argv[1:])
- # Handle @file argument syntax with compiler
- for idx, _ in enumerate(execargs):
- # @file can be nested in other @file arguments, use While to re-evaluate
- # the first argument of the embedded file.
- while execargs[idx][0] == '@':
- args_in_file = ProcessArgFile(execargs[idx][1:])
- execargs = execargs[0:idx] + args_in_file + execargs[idx + 1:]
+ # Handle @file argument syntax with compiler
+ for idx, _ in enumerate(execargs):
+ # @file can be nested in other @file arguments, use While to re-evaluate
+ # the first argument of the embedded file.
+ while execargs[idx][0] == "@":
+ args_in_file = ProcessArgFile(execargs[idx][1:])
+ execargs = execargs[0:idx] + args_in_file + execargs[idx + 1 :]
- bisect_driver.bisect_driver(BISECT_STAGE, BISECT_DIR, execargs)
+ bisect_driver.bisect_driver(BISECT_STAGE, BISECT_DIR, execargs)
-if __name__ == '__main__':
- sys.exit(Main(sys.argv[1:]))
+if __name__ == "__main__":
+ sys.exit(Main(sys.argv[1:]))
diff --git a/binary_search_tool/cros_pkg/create_cleanup_script.py b/binary_search_tool/cros_pkg/create_cleanup_script.py
index 62ee38f..abfea5e 100755
--- a/binary_search_tool/cros_pkg/create_cleanup_script.py
+++ b/binary_search_tool/cros_pkg/create_cleanup_script.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -13,20 +13,19 @@
original state.
"""
-from __future__ import print_function
import argparse
import sys
def Usage(parser, msg):
- print('ERROR: ' + msg)
- parser.print_help()
- sys.exit(1)
+ print("ERROR: " + msg)
+ parser.print_help()
+ sys.exit(1)
def Main(argv):
- """Generate a script to undo changes done by setup.sh
+ """Generate a script to undo changes done by setup.sh
The script setup.sh makes a change that needs to be
undone, namely it creates a soft link making /build/${board} point
@@ -40,80 +39,91 @@
This function takes arguments that tell it exactly what setup.sh
actually did, then generates a script to undo those exact changes.
- """
+ """
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--board',
- dest='board',
- required=True,
- help='Chromeos board for packages/image.')
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--board",
+ dest="board",
+ required=True,
+ help="Chromeos board for packages/image.",
+ )
- parser.add_argument(
- '--old_tree_missing',
- dest='tree_existed',
- action='store_false',
- help='Did /build/${BOARD} exist.',
- default=True)
+ parser.add_argument(
+ "--old_tree_missing",
+ dest="tree_existed",
+ action="store_false",
+ help="Did /build/${BOARD} exist.",
+ default=True,
+ )
- parser.add_argument(
- '--renamed_tree',
- dest='renamed_tree',
- action='store_true',
- help='Was /build/${BOARD} saved & renamed.',
- default=False)
+ parser.add_argument(
+ "--renamed_tree",
+ dest="renamed_tree",
+ action="store_true",
+ help="Was /build/${BOARD} saved & renamed.",
+ default=False,
+ )
- parser.add_argument(
- '--old_link',
- dest='old_link',
- help=('The original build tree soft link.'))
+ parser.add_argument(
+ "--old_link",
+ dest="old_link",
+ help=("The original build tree soft link."),
+ )
- options = parser.parse_args(argv[1:])
+ options = parser.parse_args(argv[1:])
- if options.old_link or options.renamed_tree:
- if not options.tree_existed:
- Usage(
- parser, 'If --tree_existed is False, cannot have '
- '--renamed_tree or --old_link')
+ if options.old_link or options.renamed_tree:
+ if not options.tree_existed:
+ Usage(
+ parser,
+ "If --tree_existed is False, cannot have "
+ "--renamed_tree or --old_link",
+ )
- if options.old_link and options.renamed_tree:
- Usage(parser, '--old_link and --renamed_tree are incompatible options.')
+ if options.old_link and options.renamed_tree:
+ Usage(parser, "--old_link and --renamed_tree are incompatible options.")
- if options.tree_existed:
- if not options.old_link and not options.renamed_tree:
- Usage(
- parser, 'If --tree_existed is True, then must have either '
- '--old_link or --renamed_tree')
-
- out_filename = 'cros_pkg/' + options.board + '_cleanup.sh'
-
- with open(out_filename, 'w', encoding='utf-8') as out_file:
- out_file.write('#!/bin/bash\n\n')
- # First, remove the 'new' soft link.
- out_file.write('sudo rm /build/%s\n' % options.board)
if options.tree_existed:
- if options.renamed_tree:
- # Old build tree existed and was a real tree, so it got
- # renamed. Move the renamed tree back to the original tree.
- out_file.write('sudo mv /build/%s.save /build/%s\n' % (options.board,
- options.board))
- else:
- # Old tree existed and was already a soft link. Re-create the
- # original soft link.
- original_link = options.old_link
- if original_link[0] == "'":
- original_link = original_link[1:]
- if original_link[-1] == "'":
- original_link = original_link[:-1]
- out_file.write(
- 'sudo ln -s %s /build/%s\n' % (original_link, options.board))
- out_file.write('\n')
- # Remove common.sh file
- out_file.write('rm common/common.sh\n')
+ if not options.old_link and not options.renamed_tree:
+ Usage(
+ parser,
+ "If --tree_existed is True, then must have either "
+ "--old_link or --renamed_tree",
+ )
- return 0
+ out_filename = "cros_pkg/" + options.board + "_cleanup.sh"
+
+ with open(out_filename, "w", encoding="utf-8") as out_file:
+ out_file.write("#!/bin/bash\n\n")
+ # First, remove the 'new' soft link.
+ out_file.write("sudo rm /build/%s\n" % options.board)
+ if options.tree_existed:
+ if options.renamed_tree:
+ # Old build tree existed and was a real tree, so it got
+ # renamed. Move the renamed tree back to the original tree.
+ out_file.write(
+ "sudo mv /build/%s.save /build/%s\n"
+ % (options.board, options.board)
+ )
+ else:
+ # Old tree existed and was already a soft link. Re-create the
+ # original soft link.
+ original_link = options.old_link
+ if original_link[0] == "'":
+ original_link = original_link[1:]
+ if original_link[-1] == "'":
+ original_link = original_link[:-1]
+ out_file.write(
+ "sudo ln -s %s /build/%s\n" % (original_link, options.board)
+ )
+ out_file.write("\n")
+ # Remove common.sh file
+ out_file.write("rm common/common.sh\n")
+
+ return 0
-if __name__ == '__main__':
- retval = Main(sys.argv)
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main(sys.argv)
+ sys.exit(retval)
diff --git a/binary_search_tool/cros_pkg/get_initial_items.sh b/binary_search_tool/cros_pkg/get_initial_items.sh
index 49ca3d1..bc0fd2e 100755
--- a/binary_search_tool/cros_pkg/get_initial_items.sh
+++ b/binary_search_tool/cros_pkg/get_initial_items.sh
@@ -1,6 +1,6 @@
#!/bin/bash -u
#
-# Copyright 2015 Google Inc. All Rights Reserved.
+# Copyright 2015 Google LLC
#
# This script is intended to be used by binary_search_state.py, as
# part of the binary search triage on ChromeOS packages. This script
@@ -13,4 +13,3 @@
cd ${GOOD_BUILD}/packages
find . -name "*.tbz2"
-
diff --git a/binary_search_tool/cros_pkg/setup.sh b/binary_search_tool/cros_pkg/setup.sh
index ae31fa8..30a3a42 100755
--- a/binary_search_tool/cros_pkg/setup.sh
+++ b/binary_search_tool/cros_pkg/setup.sh
@@ -1,6 +1,6 @@
#!/bin/bash -u
#
-# Copyright 2015 Google Inc. All Rights Reserved.
+# Copyright 2015 Google LLC
#
# This script is part of the ChromeOS package binary search triage process.
# It should be the first script called by the user, after the user has set up
diff --git a/binary_search_tool/cros_pkg/switch_to_bad.sh b/binary_search_tool/cros_pkg/switch_to_bad.sh
index 126425f..b4156a0 100755
--- a/binary_search_tool/cros_pkg/switch_to_bad.sh
+++ b/binary_search_tool/cros_pkg/switch_to_bad.sh
@@ -1,6 +1,6 @@
#!/bin/bash -u
#
-# Copyright 2015 Google Inc. All Rights Reserved.
+# Copyright 2015 Google LLC
#
# This script is intended to be used by binary_search_state.py, as
# part of the binary search triage on ChromeOS packages. This script
diff --git a/binary_search_tool/cros_pkg/switch_to_good.sh b/binary_search_tool/cros_pkg/switch_to_good.sh
index a9095e9..5f7c2d7 100755
--- a/binary_search_tool/cros_pkg/switch_to_good.sh
+++ b/binary_search_tool/cros_pkg/switch_to_good.sh
@@ -1,6 +1,6 @@
#!/bin/bash -u
#
-# Copyright 2015 Google Inc. All Rights Reserved.
+# Copyright 2015 Google LLC
#
# This script is intended to be used by binary_search_state.py, as
# part of the binary search triage on ChromeOS packages. This script
diff --git a/binary_search_tool/cros_pkg/test_setup_usb.sh b/binary_search_tool/cros_pkg/test_setup_usb.sh
index fec66f8..54d0baa 100755
--- a/binary_search_tool/cros_pkg/test_setup_usb.sh
+++ b/binary_search_tool/cros_pkg/test_setup_usb.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
#
# This is a generic ChromeOS package/image test setup script. It is meant to
# be used for the package bisection tool, in particular when there is a booting
diff --git a/binary_search_tool/ndk/DO_BISECTION.sh b/binary_search_tool/ndk/DO_BISECTION.sh
index 298d574..e6eed76 100755
--- a/binary_search_tool/ndk/DO_BISECTION.sh
+++ b/binary_search_tool/ndk/DO_BISECTION.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
#
# This is an example script to show users the steps for bisecting an NDK
# application for Android. Our example is the Teapot app that comes bundled with
diff --git a/binary_search_tool/ndk/boot_test.sh b/binary_search_tool/ndk/boot_test.sh
index b8c34aa..0b66ddf 100755
--- a/binary_search_tool/ndk/boot_test.sh
+++ b/binary_search_tool/ndk/boot_test.sh
@@ -1,6 +1,6 @@
#!/bin/bash -u
#
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
#
# This script checks the android device to determine if the app is currently
# running. For our specific test case we will be checking if the Teapot app
diff --git a/binary_search_tool/ndk/get_initial_items.sh b/binary_search_tool/ndk/get_initial_items.sh
index bc2d05c..5dd3396 100755
--- a/binary_search_tool/ndk/get_initial_items.sh
+++ b/binary_search_tool/ndk/get_initial_items.sh
@@ -1,6 +1,6 @@
#!/bin/bash -u
#
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
#
# This script is intended to be used by binary_search_state.py, as
# part of the binary search triage on the Android NDK apps. This script
@@ -9,4 +9,3 @@
#
cat ${BISECT_DIR}/good/_LIST
-
diff --git a/binary_search_tool/ndk/switch_to_good.sh b/binary_search_tool/ndk/switch_to_good.sh
index cb8d5fd..c98de67 100755
--- a/binary_search_tool/ndk/switch_to_good.sh
+++ b/binary_search_tool/ndk/switch_to_good.sh
@@ -1,6 +1,6 @@
#!/bin/bash -u
#
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
#
# This script is intended to be used by binary_search_state.py, as
# part of the binary search triage on Android NDK apps. This script simply
@@ -43,4 +43,3 @@
cat $OBJ_LIST_FILE | xargs rm
exit 0
-
diff --git a/binary_search_tool/ndk/test_setup.sh b/binary_search_tool/ndk/test_setup.sh
index 477bcb2..8f3ce04 100755
--- a/binary_search_tool/ndk/test_setup.sh
+++ b/binary_search_tool/ndk/test_setup.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
#
# This is the setup script for generating and installing the ndk app.
#
diff --git a/binary_search_tool/pass_mapping.py b/binary_search_tool/pass_mapping.py
index 2678fd6..33c023a 100644
--- a/binary_search_tool/pass_mapping.py
+++ b/binary_search_tool/pass_mapping.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -15,20 +15,12 @@
# For users who make local changes to passes, please add a map from pass
# description to newly introduced DebugCounter name for transformation
# level bisection purpose.
- 'Hoist/decompose integer division and remainder':
- 'div-rem-pairs-transform',
- 'Early CSE':
- 'early-cse',
- 'Falkor HW Prefetch Fix Late Phase':
- 'falkor-hwpf',
- 'Combine redundant instructions':
- 'instcombine-visit',
- 'Machine Copy Propagation Pass':
- 'machine-cp-fwd',
- 'Global Value Numbering':
- 'newgvn-phi',
- 'PredicateInfo Printer':
- 'predicateinfo-rename',
- 'SI Insert Waitcnts':
- 'si-insert-waitcnts-forceexp',
+ "Hoist/decompose integer division and remainder": "div-rem-pairs-transform",
+ "Early CSE": "early-cse",
+ "Falkor HW Prefetch Fix Late Phase": "falkor-hwpf",
+ "Combine redundant instructions": "instcombine-visit",
+ "Machine Copy Propagation Pass": "machine-cp-fwd",
+ "Global Value Numbering": "newgvn-phi",
+ "PredicateInfo Printer": "predicateinfo-rename",
+ "SI Insert Waitcnts": "si-insert-waitcnts-forceexp",
}
diff --git a/binary_search_tool/run_bisect.py b/binary_search_tool/run_bisect.py
index 249b9cf..f54e00e 100755
--- a/binary_search_tool/run_bisect.py
+++ b/binary_search_tool/run_bisect.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The unified package/object bisecting tool."""
-from __future__ import print_function
import abc
import argparse
@@ -17,293 +16,336 @@
from binary_search_tool import binary_search_state
from binary_search_tool import common
-
from cros_utils import command_executer
from cros_utils import logger
class Bisector(object, metaclass=abc.ABCMeta):
- """The abstract base class for Bisectors."""
+ """The abstract base class for Bisectors."""
- def __init__(self, options, overrides=None):
- """Constructor for Bisector abstract base class
+ def __init__(self, options, overrides=None):
+ """Constructor for Bisector abstract base class
- Args:
- options: positional arguments for specific mode (board, remote, etc.)
- overrides: optional dict of overrides for argument defaults
- """
- self.options = options
- self.overrides = overrides
- if not overrides:
- self.overrides = {}
- self.logger = logger.GetLogger()
- self.ce = command_executer.GetCommandExecuter()
+ Args:
+ options: positional arguments for specific mode (board, remote, etc.)
+ overrides: optional dict of overrides for argument defaults
+ """
+ self.options = options
+ self.overrides = overrides
+ if not overrides:
+ self.overrides = {}
+ self.logger = logger.GetLogger()
+ self.ce = command_executer.GetCommandExecuter()
- def _PrettyPrintArgs(self, args, overrides):
- """Output arguments in a nice, human readable format
+ def _PrettyPrintArgs(self, args, overrides):
+ """Output arguments in a nice, human readable format
- Will print and log all arguments for the bisecting tool and make note of
- which arguments have been overridden.
+ Will print and log all arguments for the bisecting tool and make note of
+ which arguments have been overridden.
- Example output:
- ./run_bisect.py package daisy 172.17.211.184 -I "" -t cros_pkg/my_test.sh
- Performing ChromeOS Package bisection
- Method Config:
- board : daisy
- remote : 172.17.211.184
+ Example output:
+ ./run_bisect.py package daisy 172.17.211.184 -I "" -t cros_pkg/my_test.sh
+ Performing ChromeOS Package bisection
+ Method Config:
+ board : daisy
+ remote : 172.17.211.184
- Bisection Config: (* = overridden)
- get_initial_items : cros_pkg/get_initial_items.sh
- switch_to_good : cros_pkg/switch_to_good.sh
- switch_to_bad : cros_pkg/switch_to_bad.sh
- * test_setup_script :
- * test_script : cros_pkg/my_test.sh
- prune : True
- noincremental : False
- file_args : True
+ Bisection Config: (* = overridden)
+ get_initial_items : cros_pkg/get_initial_items.sh
+ switch_to_good : cros_pkg/switch_to_good.sh
+ switch_to_bad : cros_pkg/switch_to_bad.sh
+ * test_setup_script :
+ * test_script : cros_pkg/my_test.sh
+ prune : True
+ noincremental : False
+ file_args : True
- Args:
- args: The args to be given to binary_search_state.Run. This represents
- how the bisection tool will run (with overridden arguments already
- added in).
- overrides: The dict of overriden arguments provided by the user. This is
- provided so the user can be told which arguments were
- overriden and with what value.
- """
- # Output method config (board, remote, etc.)
- options = vars(self.options)
- out = '\nPerforming %s bisection\n' % self.method_name
- out += 'Method Config:\n'
- max_key_len = max([len(str(x)) for x in options.keys()])
- for key in sorted(options):
- val = options[key]
- key_str = str(key).rjust(max_key_len)
- val_str = str(val)
- out += ' %s : %s\n' % (key_str, val_str)
+ Args:
+ args: The args to be given to binary_search_state.Run. This represents
+ how the bisection tool will run (with overridden arguments already
+ added in).
+ overrides: The dict of overriden arguments provided by the user. This is
+ provided so the user can be told which arguments were
+ overriden and with what value.
+ """
+ # Output method config (board, remote, etc.)
+ options = vars(self.options)
+ out = "\nPerforming %s bisection\n" % self.method_name
+ out += "Method Config:\n"
+ max_key_len = max([len(str(x)) for x in options.keys()])
+ for key in sorted(options):
+ val = options[key]
+ key_str = str(key).rjust(max_key_len)
+ val_str = str(val)
+ out += " %s : %s\n" % (key_str, val_str)
- # Output bisection config (scripts, prune, etc.)
- out += '\nBisection Config: (* = overridden)\n'
- max_key_len = max([len(str(x)) for x in args.keys()])
- # Print args in common._ArgsDict order
- args_order = [x['dest'] for x in common.GetArgsDict().values()]
- for key in sorted(args, key=args_order.index):
- val = args[key]
- key_str = str(key).rjust(max_key_len)
- val_str = str(val)
- changed_str = '*' if key in overrides else ' '
+ # Output bisection config (scripts, prune, etc.)
+ out += "\nBisection Config: (* = overridden)\n"
+ max_key_len = max([len(str(x)) for x in args.keys()])
+ # Print args in common._ArgsDict order
+ args_order = [x["dest"] for x in common.GetArgsDict().values()]
+ for key in sorted(args, key=args_order.index):
+ val = args[key]
+ key_str = str(key).rjust(max_key_len)
+ val_str = str(val)
+ changed_str = "*" if key in overrides else " "
- out += ' %s %s : %s\n' % (changed_str, key_str, val_str)
+ out += " %s %s : %s\n" % (changed_str, key_str, val_str)
- out += '\n'
- self.logger.LogOutput(out)
+ out += "\n"
+ self.logger.LogOutput(out)
- def ArgOverride(self, args, overrides, pretty_print=True):
- """Override arguments based on given overrides and provide nice output
+ def ArgOverride(self, args, overrides, pretty_print=True):
+ """Override arguments based on given overrides and provide nice output
- Args:
- args: dict of arguments to be passed to binary_search_state.Run (runs
- dict.update, causing args to be mutated).
- overrides: dict of arguments to update args with
- pretty_print: if True print out args/overrides to user in pretty format
- """
- args.update(overrides)
- if pretty_print:
- self._PrettyPrintArgs(args, overrides)
+ Args:
+ args: dict of arguments to be passed to binary_search_state.Run (runs
+ dict.update, causing args to be mutated).
+ overrides: dict of arguments to update args with
+ pretty_print: if True print out args/overrides to user in pretty format
+ """
+ args.update(overrides)
+ if pretty_print:
+ self._PrettyPrintArgs(args, overrides)
- @abc.abstractmethod
- def PreRun(self):
- pass
+ @abc.abstractmethod
+ def PreRun(self):
+ pass
- @abc.abstractmethod
- def Run(self):
- pass
+ @abc.abstractmethod
+ def Run(self):
+ pass
- @abc.abstractmethod
- def PostRun(self):
- pass
+ @abc.abstractmethod
+ def PostRun(self):
+ pass
class BisectPackage(Bisector):
- """The class for package bisection steps."""
+ """The class for package bisection steps."""
- cros_pkg_setup = 'cros_pkg/setup.sh'
- cros_pkg_cleanup = 'cros_pkg/%s_cleanup.sh'
+ cros_pkg_setup = "cros_pkg/setup.sh"
+ cros_pkg_cleanup = "cros_pkg/%s_cleanup.sh"
- def __init__(self, options, overrides):
- super(BisectPackage, self).__init__(options, overrides)
- self.method_name = 'ChromeOS Package'
- self.default_kwargs = {
- 'get_initial_items': 'cros_pkg/get_initial_items.sh',
- 'switch_to_good': 'cros_pkg/switch_to_good.sh',
- 'switch_to_bad': 'cros_pkg/switch_to_bad.sh',
- 'test_setup_script': 'cros_pkg/test_setup.sh',
- 'test_script': 'cros_pkg/interactive_test.sh',
- 'noincremental': False,
- 'prune': True,
- 'file_args': True
- }
- self.setup_cmd = ' '.join(
- (self.cros_pkg_setup, self.options.board, self.options.remote))
- self.ArgOverride(self.default_kwargs, self.overrides)
+ def __init__(self, options, overrides):
+ super(BisectPackage, self).__init__(options, overrides)
+ self.method_name = "ChromeOS Package"
+ self.default_kwargs = {
+ "get_initial_items": "cros_pkg/get_initial_items.sh",
+ "switch_to_good": "cros_pkg/switch_to_good.sh",
+ "switch_to_bad": "cros_pkg/switch_to_bad.sh",
+ "test_setup_script": "cros_pkg/test_setup.sh",
+ "test_script": "cros_pkg/interactive_test.sh",
+ "noincremental": False,
+ "prune": True,
+ "file_args": True,
+ }
+ self.setup_cmd = " ".join(
+ (self.cros_pkg_setup, self.options.board, self.options.remote)
+ )
+ self.ArgOverride(self.default_kwargs, self.overrides)
- def PreRun(self):
- ret, _, _ = self.ce.RunCommandWExceptionCleanup(
- self.setup_cmd, print_to_console=True)
- if ret:
- self.logger.LogError('Package bisector setup failed w/ error %d' % ret)
- return 1
- return 0
+ def PreRun(self):
+ ret, _, _ = self.ce.RunCommandWExceptionCleanup(
+ self.setup_cmd, print_to_console=True
+ )
+ if ret:
+ self.logger.LogError(
+ "Package bisector setup failed w/ error %d" % ret
+ )
+ return 1
+ return 0
- def Run(self):
- return binary_search_state.Run(**self.default_kwargs)
+ def Run(self):
+ return binary_search_state.Run(**self.default_kwargs)
- def PostRun(self):
- cmd = self.cros_pkg_cleanup % self.options.board
- ret, _, _ = self.ce.RunCommandWExceptionCleanup(cmd, print_to_console=True)
- if ret:
- self.logger.LogError('Package bisector cleanup failed w/ error %d' % ret)
- return 1
+ def PostRun(self):
+ cmd = self.cros_pkg_cleanup % self.options.board
+ ret, _, _ = self.ce.RunCommandWExceptionCleanup(
+ cmd, print_to_console=True
+ )
+ if ret:
+ self.logger.LogError(
+ "Package bisector cleanup failed w/ error %d" % ret
+ )
+ return 1
- self.logger.LogOutput(('Cleanup successful! To restore the bisection '
- 'environment run the following:\n'
- ' cd %s; %s') % (os.getcwd(), self.setup_cmd))
- return 0
+ self.logger.LogOutput(
+ (
+ "Cleanup successful! To restore the bisection "
+ "environment run the following:\n"
+ " cd %s; %s"
+ )
+ % (os.getcwd(), self.setup_cmd)
+ )
+ return 0
class BisectObject(Bisector):
- """The class for object bisection steps."""
+ """The class for object bisection steps."""
- sysroot_wrapper_setup = 'sysroot_wrapper/setup.sh'
- sysroot_wrapper_cleanup = 'sysroot_wrapper/cleanup.sh'
+ sysroot_wrapper_setup = "sysroot_wrapper/setup.sh"
+ sysroot_wrapper_cleanup = "sysroot_wrapper/cleanup.sh"
- def __init__(self, options, overrides):
- super(BisectObject, self).__init__(options, overrides)
- self.method_name = 'ChromeOS Object'
- self.default_kwargs = {
- 'get_initial_items': 'sysroot_wrapper/get_initial_items.sh',
- 'switch_to_good': 'sysroot_wrapper/switch_to_good.sh',
- 'switch_to_bad': 'sysroot_wrapper/switch_to_bad.sh',
- 'test_setup_script': 'sysroot_wrapper/test_setup.sh',
- 'test_script': 'sysroot_wrapper/interactive_test.sh',
- 'noincremental': False,
- 'prune': True,
- 'file_args': True
- }
- self.options = options
- if options.dir:
- os.environ['BISECT_DIR'] = options.dir
- self.options.dir = os.environ.get('BISECT_DIR', '/tmp/sysroot_bisect')
- self.setup_cmd = ' '.join(
- (self.sysroot_wrapper_setup, self.options.board, self.options.remote,
- self.options.package, str(self.options.reboot).lower(),
- shlex.quote(self.options.use_flags)))
+ def __init__(self, options, overrides):
+ super(BisectObject, self).__init__(options, overrides)
+ self.method_name = "ChromeOS Object"
+ self.default_kwargs = {
+ "get_initial_items": "sysroot_wrapper/get_initial_items.sh",
+ "switch_to_good": "sysroot_wrapper/switch_to_good.sh",
+ "switch_to_bad": "sysroot_wrapper/switch_to_bad.sh",
+ "test_setup_script": "sysroot_wrapper/test_setup.sh",
+ "test_script": "sysroot_wrapper/interactive_test.sh",
+ "noincremental": False,
+ "prune": True,
+ "file_args": True,
+ }
+ self.options = options
+ if options.dir:
+ os.environ["BISECT_DIR"] = options.dir
+ self.options.dir = os.environ.get("BISECT_DIR", "/tmp/sysroot_bisect")
+ self.setup_cmd = " ".join(
+ (
+ self.sysroot_wrapper_setup,
+ self.options.board,
+ self.options.remote,
+ self.options.package,
+ str(self.options.reboot).lower(),
+ shlex.quote(self.options.use_flags),
+ )
+ )
- self.ArgOverride(self.default_kwargs, overrides)
+ self.ArgOverride(self.default_kwargs, overrides)
- def PreRun(self):
- ret, _, _ = self.ce.RunCommandWExceptionCleanup(
- self.setup_cmd, print_to_console=True)
- if ret:
- self.logger.LogError('Object bisector setup failed w/ error %d' % ret)
- return 1
+ def PreRun(self):
+ ret, _, _ = self.ce.RunCommandWExceptionCleanup(
+ self.setup_cmd, print_to_console=True
+ )
+ if ret:
+ self.logger.LogError(
+ "Object bisector setup failed w/ error %d" % ret
+ )
+ return 1
- os.environ['BISECT_STAGE'] = 'TRIAGE'
- return 0
+ os.environ["BISECT_STAGE"] = "TRIAGE"
+ return 0
- def Run(self):
- return binary_search_state.Run(**self.default_kwargs)
+ def Run(self):
+ return binary_search_state.Run(**self.default_kwargs)
- def PostRun(self):
- cmd = self.sysroot_wrapper_cleanup
- ret, _, _ = self.ce.RunCommandWExceptionCleanup(cmd, print_to_console=True)
- if ret:
- self.logger.LogError('Object bisector cleanup failed w/ error %d' % ret)
- return 1
- self.logger.LogOutput(('Cleanup successful! To restore the bisection '
- 'environment run the following:\n'
- ' cd %s; %s') % (os.getcwd(), self.setup_cmd))
- return 0
+ def PostRun(self):
+ cmd = self.sysroot_wrapper_cleanup
+ ret, _, _ = self.ce.RunCommandWExceptionCleanup(
+ cmd, print_to_console=True
+ )
+ if ret:
+ self.logger.LogError(
+ "Object bisector cleanup failed w/ error %d" % ret
+ )
+ return 1
+ self.logger.LogOutput(
+ (
+ "Cleanup successful! To restore the bisection "
+ "environment run the following:\n"
+ " cd %s; %s"
+ )
+ % (os.getcwd(), self.setup_cmd)
+ )
+ return 0
class BisectAndroid(Bisector):
- """The class for Android bisection steps."""
+ """The class for Android bisection steps."""
- android_setup = 'android/setup.sh'
- android_cleanup = 'android/cleanup.sh'
- default_dir = os.path.expanduser('~/ANDROID_BISECT')
+ android_setup = "android/setup.sh"
+ android_cleanup = "android/cleanup.sh"
+ default_dir = os.path.expanduser("~/ANDROID_BISECT")
- def __init__(self, options, overrides):
- super(BisectAndroid, self).__init__(options, overrides)
- self.method_name = 'Android'
- self.default_kwargs = {
- 'get_initial_items': 'android/get_initial_items.sh',
- 'switch_to_good': 'android/switch_to_good.sh',
- 'switch_to_bad': 'android/switch_to_bad.sh',
- 'test_setup_script': 'android/test_setup.sh',
- 'test_script': 'android/interactive_test.sh',
- 'prune': True,
- 'file_args': True,
- 'noincremental': False,
- }
- self.options = options
- if options.dir:
- os.environ['BISECT_DIR'] = options.dir
- self.options.dir = os.environ.get('BISECT_DIR', self.default_dir)
+ def __init__(self, options, overrides):
+ super(BisectAndroid, self).__init__(options, overrides)
+ self.method_name = "Android"
+ self.default_kwargs = {
+ "get_initial_items": "android/get_initial_items.sh",
+ "switch_to_good": "android/switch_to_good.sh",
+ "switch_to_bad": "android/switch_to_bad.sh",
+ "test_setup_script": "android/test_setup.sh",
+ "test_script": "android/interactive_test.sh",
+ "prune": True,
+ "file_args": True,
+ "noincremental": False,
+ }
+ self.options = options
+ if options.dir:
+ os.environ["BISECT_DIR"] = options.dir
+ self.options.dir = os.environ.get("BISECT_DIR", self.default_dir)
- num_jobs = "NUM_JOBS='%s'" % self.options.num_jobs
- device_id = ''
- if self.options.device_id:
- device_id = "ANDROID_SERIAL='%s'" % self.options.device_id
+ num_jobs = "NUM_JOBS='%s'" % self.options.num_jobs
+ device_id = ""
+ if self.options.device_id:
+ device_id = "ANDROID_SERIAL='%s'" % self.options.device_id
- self.setup_cmd = ' '.join(
- (num_jobs, device_id, self.android_setup, self.options.android_src))
+ self.setup_cmd = " ".join(
+ (num_jobs, device_id, self.android_setup, self.options.android_src)
+ )
- self.ArgOverride(self.default_kwargs, overrides)
+ self.ArgOverride(self.default_kwargs, overrides)
- def PreRun(self):
- ret, _, _ = self.ce.RunCommandWExceptionCleanup(
- self.setup_cmd, print_to_console=True)
- if ret:
- self.logger.LogError('Android bisector setup failed w/ error %d' % ret)
- return 1
+ def PreRun(self):
+ ret, _, _ = self.ce.RunCommandWExceptionCleanup(
+ self.setup_cmd, print_to_console=True
+ )
+ if ret:
+ self.logger.LogError(
+ "Android bisector setup failed w/ error %d" % ret
+ )
+ return 1
- os.environ['BISECT_STAGE'] = 'TRIAGE'
- return 0
+ os.environ["BISECT_STAGE"] = "TRIAGE"
+ return 0
- def Run(self):
- return binary_search_state.Run(**self.default_kwargs)
+ def Run(self):
+ return binary_search_state.Run(**self.default_kwargs)
- def PostRun(self):
- cmd = self.android_cleanup
- ret, _, _ = self.ce.RunCommandWExceptionCleanup(cmd, print_to_console=True)
- if ret:
- self.logger.LogError('Android bisector cleanup failed w/ error %d' % ret)
- return 1
- self.logger.LogOutput(('Cleanup successful! To restore the bisection '
- 'environment run the following:\n'
- ' cd %s; %s') % (os.getcwd(), self.setup_cmd))
- return 0
+ def PostRun(self):
+ cmd = self.android_cleanup
+ ret, _, _ = self.ce.RunCommandWExceptionCleanup(
+ cmd, print_to_console=True
+ )
+ if ret:
+ self.logger.LogError(
+ "Android bisector cleanup failed w/ error %d" % ret
+ )
+ return 1
+ self.logger.LogOutput(
+ (
+ "Cleanup successful! To restore the bisection "
+ "environment run the following:\n"
+ " cd %s; %s"
+ )
+ % (os.getcwd(), self.setup_cmd)
+ )
+ return 0
def Run(bisector):
- log = logger.GetLogger()
+ log = logger.GetLogger()
- log.LogOutput('Setting up Bisection tool')
- ret = bisector.PreRun()
- if ret:
- return ret
+ log.LogOutput("Setting up Bisection tool")
+ ret = bisector.PreRun()
+ if ret:
+ return ret
- log.LogOutput('Running Bisection tool')
- ret = bisector.Run()
- if ret:
- return ret
+ log.LogOutput("Running Bisection tool")
+ ret = bisector.Run()
+ if ret:
+ return ret
- log.LogOutput('Cleaning up Bisection tool')
- ret = bisector.PostRun()
- if ret:
- return ret
+ log.LogOutput("Cleaning up Bisection tool")
+ ret = bisector.PostRun()
+ if ret:
+ return ret
- return 0
+ return 0
_HELP_EPILOG = """
@@ -318,92 +360,113 @@
def Main(argv):
- override_parser = argparse.ArgumentParser(
- add_help=False,
- argument_default=argparse.SUPPRESS,
- usage='run_bisect.py {mode} [options]')
- common.BuildArgParser(override_parser, override=True)
+ override_parser = argparse.ArgumentParser(
+ add_help=False,
+ argument_default=argparse.SUPPRESS,
+ usage="run_bisect.py {mode} [options]",
+ )
+ common.BuildArgParser(override_parser, override=True)
- epilog = _HELP_EPILOG + override_parser.format_help()
- parser = argparse.ArgumentParser(
- epilog=epilog, formatter_class=RawTextHelpFormatter)
- subparsers = parser.add_subparsers(
- title='Bisect mode',
- description=('Which bisection method to '
- 'use. Each method has '
- 'specific setup and '
- 'arguments. Please consult '
- 'the README for more '
- 'information.'))
+ epilog = _HELP_EPILOG + override_parser.format_help()
+ parser = argparse.ArgumentParser(
+ epilog=epilog, formatter_class=RawTextHelpFormatter
+ )
+ subparsers = parser.add_subparsers(
+ title="Bisect mode",
+ description=(
+ "Which bisection method to "
+ "use. Each method has "
+ "specific setup and "
+ "arguments. Please consult "
+ "the README for more "
+ "information."
+ ),
+ )
- parser_package = subparsers.add_parser('package')
- parser_package.add_argument('board', help='Board to target')
- parser_package.add_argument('remote', help='Remote machine to test on')
- parser_package.set_defaults(handler=BisectPackage)
+ parser_package = subparsers.add_parser("package")
+ parser_package.add_argument("board", help="Board to target")
+ parser_package.add_argument("remote", help="Remote machine to test on")
+ parser_package.set_defaults(handler=BisectPackage)
- parser_object = subparsers.add_parser('object')
- parser_object.add_argument('board', help='Board to target')
- parser_object.add_argument('remote', help='Remote machine to test on')
- parser_object.add_argument('package', help='Package to emerge and test')
- parser_object.add_argument(
- '--use_flags',
- required=False,
- default='',
- help='Use flags passed to emerge')
- parser_object.add_argument(
- '--noreboot',
- action='store_false',
- dest='reboot',
- help='Do not reboot after updating the package (default: False)')
- parser_object.add_argument(
- '--dir',
- help=('Bisection directory to use, sets '
- '$BISECT_DIR if provided. Defaults to '
- 'current value of $BISECT_DIR (or '
- '/tmp/sysroot_bisect if $BISECT_DIR is '
- 'empty).'))
- parser_object.set_defaults(handler=BisectObject)
+ parser_object = subparsers.add_parser("object")
+ parser_object.add_argument("board", help="Board to target")
+ parser_object.add_argument("remote", help="Remote machine to test on")
+ parser_object.add_argument("package", help="Package to emerge and test")
+ parser_object.add_argument(
+ "--use_flags",
+ required=False,
+ default="",
+ help="Use flags passed to emerge",
+ )
+ parser_object.add_argument(
+ "--noreboot",
+ action="store_false",
+ dest="reboot",
+ help="Do not reboot after updating the package (default: False)",
+ )
+ parser_object.add_argument(
+ "--dir",
+ help=(
+ "Bisection directory to use, sets "
+ "$BISECT_DIR if provided. Defaults to "
+ "current value of $BISECT_DIR (or "
+ "/tmp/sysroot_bisect if $BISECT_DIR is "
+ "empty)."
+ ),
+ )
+ parser_object.set_defaults(handler=BisectObject)
- parser_android = subparsers.add_parser('android')
- parser_android.add_argument('android_src', help='Path to android source tree')
- parser_android.add_argument(
- '--dir',
- help=('Bisection directory to use, sets '
- '$BISECT_DIR if provided. Defaults to '
- 'current value of $BISECT_DIR (or '
- '~/ANDROID_BISECT/ if $BISECT_DIR is '
- 'empty).'))
- parser_android.add_argument(
- '-j',
- '--num_jobs',
- type=int,
- default=1,
- help=('Number of jobs that make and various '
- 'scripts for bisector can spawn. Setting '
- 'this value too high can freeze up your '
- 'machine!'))
- parser_android.add_argument(
- '--device_id',
- default='',
- help=('Device id for device used for testing. '
- 'Use this if you have multiple Android '
- 'devices plugged into your machine.'))
- parser_android.set_defaults(handler=BisectAndroid)
+ parser_android = subparsers.add_parser("android")
+ parser_android.add_argument(
+ "android_src", help="Path to android source tree"
+ )
+ parser_android.add_argument(
+ "--dir",
+ help=(
+ "Bisection directory to use, sets "
+ "$BISECT_DIR if provided. Defaults to "
+ "current value of $BISECT_DIR (or "
+ "~/ANDROID_BISECT/ if $BISECT_DIR is "
+ "empty)."
+ ),
+ )
+ parser_android.add_argument(
+ "-j",
+ "--num_jobs",
+ type=int,
+ default=1,
+ help=(
+ "Number of jobs that make and various "
+ "scripts for bisector can spawn. Setting "
+ "this value too high can freeze up your "
+ "machine!"
+ ),
+ )
+ parser_android.add_argument(
+ "--device_id",
+ default="",
+ help=(
+ "Device id for device used for testing. "
+ "Use this if you have multiple Android "
+ "devices plugged into your machine."
+ ),
+ )
+ parser_android.set_defaults(handler=BisectAndroid)
- options, remaining = parser.parse_known_args(argv)
- if remaining:
- overrides = override_parser.parse_args(remaining)
- overrides = vars(overrides)
- else:
- overrides = {}
+ options, remaining = parser.parse_known_args(argv)
+ if remaining:
+ overrides = override_parser.parse_args(remaining)
+ overrides = vars(overrides)
+ else:
+ overrides = {}
- subcmd = options.handler
- del options.handler
+ subcmd = options.handler
+ del options.handler
- bisector = subcmd(options, overrides)
- return Run(bisector)
+ bisector = subcmd(options, overrides)
+ return Run(bisector)
-if __name__ == '__main__':
- os.chdir(os.path.dirname(__file__))
- sys.exit(Main(sys.argv[1:]))
+if __name__ == "__main__":
+ os.chdir(os.path.dirname(__file__))
+ sys.exit(Main(sys.argv[1:]))
diff --git a/binary_search_tool/run_bisect_tests.py b/binary_search_tool/run_bisect_tests.py
index 9172d67..ca7077d 100755
--- a/binary_search_tool/run_bisect_tests.py
+++ b/binary_search_tool/run_bisect_tests.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run full bisection test."""
-from __future__ import print_function
import argparse
import os
@@ -14,81 +13,88 @@
from cros_utils import command_executer
-TEST_DIR = 'full_bisect_test'
-DEFAULT_BISECT_DIR = '/tmp/sysroot_bisect'
+
+TEST_DIR = "full_bisect_test"
+DEFAULT_BISECT_DIR = "/tmp/sysroot_bisect"
def populate_good_files(top_dir, ce, bisect_dir=DEFAULT_BISECT_DIR):
- # 'make clean'
- work_dir = os.path.join(top_dir, TEST_DIR, 'work')
- cmd = 'rm -f %s/*.o' % work_dir
- status = ce.RunCommand(cmd)
- if status != 0:
- print('Error trying to clean out work directory: %s' % cmd)
- return status
+ # 'make clean'
+ work_dir = os.path.join(top_dir, TEST_DIR, "work")
+ cmd = "rm -f %s/*.o" % work_dir
+ status = ce.RunCommand(cmd)
+ if status != 0:
+ print("Error trying to clean out work directory: %s" % cmd)
+ return status
- # set up the 'good' source files
- script = os.path.join(top_dir, TEST_DIR, 'make_sources_good.sh')
- status = ce.RunCommand(script)
- if status != 0:
- print('Error setting up "good" source files: %s' % script)
- return status
+ # set up the 'good' source files
+ script = os.path.join(top_dir, TEST_DIR, "make_sources_good.sh")
+ status = ce.RunCommand(script)
+ if status != 0:
+ print('Error setting up "good" source files: %s' % script)
+ return status
- export_bisect = 'export BISECT_DIR=%s; ' % bisect_dir
- # build the good source files
- script_path = os.path.join(top_dir, TEST_DIR)
- if os.path.exists('/usr/bin/x86_64-cros-linux-gnu-gcc'):
- build_script = 'chromeos_build.sh'
- else:
- build_script = 'build.sh'
- cmd = ('%s export BISECT_STAGE=POPULATE_GOOD; pushd %s; ./%s; popd' %
- (export_bisect, script_path, build_script))
- status = ce.RunCommand(cmd)
- return status
+ export_bisect = "export BISECT_DIR=%s; " % bisect_dir
+ # build the good source files
+ script_path = os.path.join(top_dir, TEST_DIR)
+ if os.path.exists("/usr/bin/x86_64-cros-linux-gnu-gcc"):
+ build_script = "chromeos_build.sh"
+ else:
+ build_script = "build.sh"
+ cmd = "%s export BISECT_STAGE=POPULATE_GOOD; pushd %s; ./%s; popd" % (
+ export_bisect,
+ script_path,
+ build_script,
+ )
+ status = ce.RunCommand(cmd)
+ return status
def populate_bad_files(top_dir, ce, bisect_dir=DEFAULT_BISECT_DIR):
- # 'make clean'
- work_dir = os.path.join(top_dir, TEST_DIR, 'work')
- cmd = 'rm -f %s/*.o' % work_dir
- status = ce.RunCommand(cmd)
- if status != 0:
- print('Error trying to clean out work directory: %s' % cmd)
- return status
+ # 'make clean'
+ work_dir = os.path.join(top_dir, TEST_DIR, "work")
+ cmd = "rm -f %s/*.o" % work_dir
+ status = ce.RunCommand(cmd)
+ if status != 0:
+ print("Error trying to clean out work directory: %s" % cmd)
+ return status
- # set up the 'bad' source files
- script = os.path.join(top_dir, TEST_DIR, 'make_sources_bad.sh')
- status = ce.RunCommand(script)
- if status != 0:
- print('Error setting up "bad" source files: %s' % script)
- return status
+ # set up the 'bad' source files
+ script = os.path.join(top_dir, TEST_DIR, "make_sources_bad.sh")
+ status = ce.RunCommand(script)
+ if status != 0:
+ print('Error setting up "bad" source files: %s' % script)
+ return status
- export_bisect = 'export BISECT_DIR=%s; ' % bisect_dir
- # build the bad source files
- script_path = os.path.join(top_dir, TEST_DIR)
- if os.path.exists('/usr/bin/x86_64-cros-linux-gnu-gcc'):
- build_script = 'chromeos_build.sh'
- else:
- build_script = 'build.sh'
- cmd = ('%s export BISECT_STAGE=POPULATE_BAD; pushd %s; ./%s ; popd' %
- (export_bisect, script_path, build_script))
- status = ce.RunCommand(cmd)
- return status
+ export_bisect = "export BISECT_DIR=%s; " % bisect_dir
+ # build the bad source files
+ script_path = os.path.join(top_dir, TEST_DIR)
+ if os.path.exists("/usr/bin/x86_64-cros-linux-gnu-gcc"):
+ build_script = "chromeos_build.sh"
+ else:
+ build_script = "build.sh"
+ cmd = "%s export BISECT_STAGE=POPULATE_BAD; pushd %s; ./%s ; popd" % (
+ export_bisect,
+ script_path,
+ build_script,
+ )
+ status = ce.RunCommand(cmd)
+ return status
def run_main_bisection_test(top_dir, ce):
- test_script = os.path.join(top_dir, TEST_DIR, 'main-bisect-test.sh')
- status = ce.RunCommand(test_script)
- return status
+ test_script = os.path.join(top_dir, TEST_DIR, "main-bisect-test.sh")
+ status = ce.RunCommand(test_script)
+ return status
def verify_compiler_and_wrapper():
- # We don't need to do any special setup if running inside a ChromeOS
- # chroot.
- if os.path.exists('/usr/bin/x86_64-cros-linux-gnu-gcc'):
- return True
+ # We don't need to do any special setup if running inside a ChromeOS
+ # chroot.
+ if os.path.exists("/usr/bin/x86_64-cros-linux-gnu-gcc"):
+ return True
- message = """
+ message = """
*** IMPORTANT --- READ THIS CAREFULLY!! ***
This test uses the command 'gcc' to compile the good/bad versions of the
@@ -100,78 +106,93 @@
Is your compiler wrapper properly set up? [Y/n]
"""
- print(message)
- inp = sys.stdin.readline()
- inp = inp.strip()
- inp = inp.lower()
- return not inp or inp == 'y' or inp == 'yes'
+ print(message)
+ inp = sys.stdin.readline()
+ inp = inp.strip()
+ inp = inp.lower()
+ return not inp or inp == "y" or inp == "yes"
def Main(argv):
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--dir',
- dest='directory',
- help='Bisection work tree, where good & bad object '
- 'files go. Default is /tmp/sysroot_bisect')
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--dir",
+ dest="directory",
+ help="Bisection work tree, where good & bad object "
+ "files go. Default is /tmp/sysroot_bisect",
+ )
- options = parser.parse_args(argv)
+ options = parser.parse_args(argv)
- # Make sure the compiler wrapper & soft links are properly set up.
- wrapper_is_setup = verify_compiler_and_wrapper()
- if not wrapper_is_setup:
- print('Exiting now. Please re-run after you have set up the compiler '
- 'wrapper.')
- return 0
+ # Make sure the compiler wrapper & soft links are properly set up.
+ wrapper_is_setup = verify_compiler_and_wrapper()
+ if not wrapper_is_setup:
+ print(
+ "Exiting now. Please re-run after you have set up the compiler "
+ "wrapper."
+ )
+ return 0
- # Make sure we're in the correct directory for running this test.
- cwd = os.getcwd()
- if not os.path.exists(os.path.join(cwd, 'full_bisect_test')):
- print('Error: Wrong directory. This script must be run from the top level'
- ' of the binary_search_tool tree (under toolchain_utils).')
- return 1
+ # Make sure we're in the correct directory for running this test.
+ cwd = os.getcwd()
+ if not os.path.exists(os.path.join(cwd, "full_bisect_test")):
+ print(
+ "Error: Wrong directory. This script must be run from the top level"
+ " of the binary_search_tool tree (under toolchain_utils)."
+ )
+ return 1
- ce = command_executer.GetCommandExecuter()
- bisect_dir = options.directory
- if not bisect_dir:
- bisect_dir = DEFAULT_BISECT_DIR
+ ce = command_executer.GetCommandExecuter()
+ bisect_dir = options.directory
+ if not bisect_dir:
+ bisect_dir = DEFAULT_BISECT_DIR
- # Make sure BISECT_DIR is clean
- if os.path.exists(bisect_dir):
- cmd = 'rm -Rf %s/*' % bisect_dir
- retv = ce.RunCommand(cmd)
+ # Make sure BISECT_DIR is clean
+ if os.path.exists(bisect_dir):
+ cmd = "rm -Rf %s/*" % bisect_dir
+ retv = ce.RunCommand(cmd)
+ if retv != 0:
+ return retv
+
+ retv = populate_good_files(cwd, ce, bisect_dir)
if retv != 0:
- return retv
+ return retv
- retv = populate_good_files(cwd, ce, bisect_dir)
- if retv != 0:
+ retv = populate_bad_files(cwd, ce, bisect_dir)
+ if retv != 0:
+ return retv
+
+ # Set up good/bad work soft links
+ cmd = "rm -f %s/%s/good-objects; ln -s %s/good %s/%s/good-objects" % (
+ cwd,
+ TEST_DIR,
+ bisect_dir,
+ cwd,
+ TEST_DIR,
+ )
+
+ status = ce.RunCommand(cmd)
+ if status != 0:
+ print("Error executing: %s; exiting now." % cmd)
+ return status
+
+ cmd = "rm -f %s/%s/bad-objects; ln -s %s/bad %s/%s/bad-objects" % (
+ cwd,
+ TEST_DIR,
+ bisect_dir,
+ cwd,
+ TEST_DIR,
+ )
+
+ status = ce.RunCommand(cmd)
+ if status != 0:
+ print("Error executing: %s; exiting now." % cmd)
+ return status
+
+ retv = run_main_bisection_test(cwd, ce)
return retv
- retv = populate_bad_files(cwd, ce, bisect_dir)
- if retv != 0:
- return retv
- # Set up good/bad work soft links
- cmd = ('rm -f %s/%s/good-objects; ln -s %s/good %s/%s/good-objects' %
- (cwd, TEST_DIR, bisect_dir, cwd, TEST_DIR))
-
- status = ce.RunCommand(cmd)
- if status != 0:
- print('Error executing: %s; exiting now.' % cmd)
- return status
-
- cmd = ('rm -f %s/%s/bad-objects; ln -s %s/bad %s/%s/bad-objects' %
- (cwd, TEST_DIR, bisect_dir, cwd, TEST_DIR))
-
- status = ce.RunCommand(cmd)
- if status != 0:
- print('Error executing: %s; exiting now.' % cmd)
- return status
-
- retv = run_main_bisection_test(cwd, ce)
- return retv
-
-
-if __name__ == '__main__':
- retval = Main(sys.argv[1:])
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main(sys.argv[1:])
+ sys.exit(retval)
diff --git a/binary_search_tool/sysroot_wrapper/cleanup.sh b/binary_search_tool/sysroot_wrapper/cleanup.sh
index 5066d63..b3ae2dd 100755
--- a/binary_search_tool/sysroot_wrapper/cleanup.sh
+++ b/binary_search_tool/sysroot_wrapper/cleanup.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright 2016 Google Inc. All Rights Reserved.
+# Copyright 2016 Google LLC
#
# This script is part of the ChromeOS object binary search triage process.
# It should be the last script called by the user, after the user has
diff --git a/binary_search_tool/sysroot_wrapper/interactive_test_host.sh b/binary_search_tool/sysroot_wrapper/interactive_test_host.sh
index 58adffc..bd84936 100755
--- a/binary_search_tool/sysroot_wrapper/interactive_test_host.sh
+++ b/binary_search_tool/sysroot_wrapper/interactive_test_host.sh
@@ -1,6 +1,6 @@
#!/bin/bash -u
#
-# Copyright 2017 Google Inc. All Rights Reserved.
+# Copyright 2017 Google LLC
#
# This script is intended to be used by binary_search_state.py, as
# part of the binary search triage on ChromeOS package and object files for a
diff --git a/binary_search_tool/sysroot_wrapper/setup.sh b/binary_search_tool/sysroot_wrapper/setup.sh
index 6b9b48f..f9ecb0e 100755
--- a/binary_search_tool/sysroot_wrapper/setup.sh
+++ b/binary_search_tool/sysroot_wrapper/setup.sh
@@ -1,6 +1,6 @@
#!/bin/bash -u
#
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
diff --git a/binary_search_tool/sysroot_wrapper/test_setup_host.sh b/binary_search_tool/sysroot_wrapper/test_setup_host.sh
index b5169ee..e61bc36 100755
--- a/binary_search_tool/sysroot_wrapper/test_setup_host.sh
+++ b/binary_search_tool/sysroot_wrapper/test_setup_host.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright 2017 Google Inc. All Rights Reserved.
+# Copyright 2017 Google LLC
#
# This is a generic ChromeOS package/image test setup script. It is meant to
# be used for either the object file or package bisection tools. This script
diff --git a/binary_search_tool/sysroot_wrapper/testing_test.py b/binary_search_tool/sysroot_wrapper/testing_test.py
index b5ceec1..af884be 100755
--- a/binary_search_tool/sysroot_wrapper/testing_test.py
+++ b/binary_search_tool/sysroot_wrapper/testing_test.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -11,32 +11,35 @@
will need to change the base_path accordingly.
"""
-from __future__ import print_function
import subprocess
import sys
import os
-base_path = ('/var/cache/chromeos-chrome/chrome-src-internal/src/out_daisy/'
- 'Release/obj/')
+base_path = (
+ "/var/cache/chromeos-chrome/chrome-src-internal/src/out_daisy/"
+ "Release/obj/"
+)
bad_files = [
- os.path.join(base_path, 'base/base.cpu.o'),
- os.path.join(base_path, 'base/base.version.o'),
- os.path.join(base_path, 'apps/apps.launcher.o')
+ os.path.join(base_path, "base/base.cpu.o"),
+ os.path.join(base_path, "base/base.version.o"),
+ os.path.join(base_path, "apps/apps.launcher.o"),
]
-bisect_dir = os.environ.get('BISECT_DIR', '/tmp/sysroot_bisect')
+bisect_dir = os.environ.get("BISECT_DIR", "/tmp/sysroot_bisect")
def Main(_):
- for test_file in bad_files:
- test_file = test_file.strip()
- cmd = ['grep', test_file, os.path.join(bisect_dir, 'BAD_SET')]
- ret = subprocess.call(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- if not ret:
- return 1
- return 0
+ for test_file in bad_files:
+ test_file = test_file.strip()
+ cmd = ["grep", test_file, os.path.join(bisect_dir, "BAD_SET")]
+ ret = subprocess.call(
+ cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ )
+ if not ret:
+ return 1
+ return 0
-if __name__ == '__main__':
- sys.exit(Main(sys.argv[1:]))
+if __name__ == "__main__":
+ sys.exit(Main(sys.argv[1:]))
diff --git a/binary_search_tool/test/__init__.py b/binary_search_tool/test/__init__.py
index 76500de..6e3ade4 100644
--- a/binary_search_tool/test/__init__.py
+++ b/binary_search_tool/test/__init__.py
@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/binary_search_tool/test/binary_search_tool_test.py b/binary_search_tool/test/binary_search_tool_test.py
index 6f5b514..a79c9a1 100755
--- a/binary_search_tool/test/binary_search_tool_test.py
+++ b/binary_search_tool/test/binary_search_tool_test.py
@@ -1,15 +1,13 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for bisecting tool."""
-from __future__ import division
-from __future__ import print_function
-__author__ = 'shenhan@google.com (Han Shen)'
+__author__ = "shenhan@google.com (Han Shen)"
import os
import random
@@ -25,545 +23,597 @@
def GenObj():
- obj_num = random.randint(100, 1000)
- bad_obj_num = random.randint(obj_num // 100, obj_num // 20)
- if bad_obj_num == 0:
- bad_obj_num = 1
- gen_obj.Main(['--obj_num', str(obj_num), '--bad_obj_num', str(bad_obj_num)])
+ obj_num = random.randint(100, 1000)
+ bad_obj_num = random.randint(obj_num // 100, obj_num // 20)
+ if bad_obj_num == 0:
+ bad_obj_num = 1
+ gen_obj.Main(["--obj_num", str(obj_num), "--bad_obj_num", str(bad_obj_num)])
def CleanObj():
- os.remove(common.OBJECTS_FILE)
- os.remove(common.WORKING_SET_FILE)
- print('Deleted "{0}" and "{1}"'.format(common.OBJECTS_FILE,
- common.WORKING_SET_FILE))
+ os.remove(common.OBJECTS_FILE)
+ os.remove(common.WORKING_SET_FILE)
+ print(
+ 'Deleted "{0}" and "{1}"'.format(
+ common.OBJECTS_FILE, common.WORKING_SET_FILE
+ )
+ )
class BisectTest(unittest.TestCase):
- """Tests for run_bisect.py"""
+ """Tests for run_bisect.py"""
- def setUp(self):
- with open('./is_setup', 'w', encoding='utf-8'):
- pass
+ def setUp(self):
+ with open("./is_setup", "w", encoding="utf-8"):
+ pass
- try:
- os.remove(binary_search_state.STATE_FILE)
- except OSError:
- pass
+ try:
+ os.remove(binary_search_state.STATE_FILE)
+ except OSError:
+ pass
- def tearDown(self):
- try:
- os.remove('./is_setup')
- os.remove(os.readlink(binary_search_state.STATE_FILE))
- os.remove(binary_search_state.STATE_FILE)
- except OSError:
- pass
+ def tearDown(self):
+ try:
+ os.remove("./is_setup")
+ os.remove(os.readlink(binary_search_state.STATE_FILE))
+ os.remove(binary_search_state.STATE_FILE)
+ except OSError:
+ pass
- class FullBisector(run_bisect.Bisector):
- """Test bisector to test run_bisect.py with"""
+ class FullBisector(run_bisect.Bisector):
+ """Test bisector to test run_bisect.py with"""
- def __init__(self, options, overrides):
- super(BisectTest.FullBisector, self).__init__(options, overrides)
+ def __init__(self, options, overrides):
+ super(BisectTest.FullBisector, self).__init__(options, overrides)
- def PreRun(self):
- GenObj()
- return 0
+ def PreRun(self):
+ GenObj()
+ return 0
- def Run(self):
- return binary_search_state.Run(
- get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- prune=True,
- file_args=True)
+ def Run(self):
+ return binary_search_state.Run(
+ get_initial_items="./gen_init_list.py",
+ switch_to_good="./switch_to_good.py",
+ switch_to_bad="./switch_to_bad.py",
+ test_script="./is_good.py",
+ prune=True,
+ file_args=True,
+ )
- def PostRun(self):
- CleanObj()
- return 0
+ def PostRun(self):
+ CleanObj()
+ return 0
- def test_full_bisector(self):
- ret = run_bisect.Run(self.FullBisector({}, {}))
- self.assertEqual(ret, 0)
- self.assertFalse(os.path.exists(common.OBJECTS_FILE))
- self.assertFalse(os.path.exists(common.WORKING_SET_FILE))
+ def test_full_bisector(self):
+ ret = run_bisect.Run(self.FullBisector({}, {}))
+ self.assertEqual(ret, 0)
+ self.assertFalse(os.path.exists(common.OBJECTS_FILE))
+ self.assertFalse(os.path.exists(common.WORKING_SET_FILE))
- def check_output(self):
- _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
- ('grep "Bad items are: " logs/binary_search_tool_test.py.out | '
- 'tail -n1'))
- ls = out.splitlines()
- self.assertEqual(len(ls), 1)
- line = ls[0]
+ def check_output(self):
+ _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
+ (
+ 'grep "Bad items are: " logs/binary_search_tool_test.py.out | '
+ "tail -n1"
+ )
+ )
+ ls = out.splitlines()
+ self.assertEqual(len(ls), 1)
+ line = ls[0]
- _, _, bad_ones = line.partition('Bad items are: ')
- bad_ones = bad_ones.split()
- expected_result = common.ReadObjectsFile()
+ _, _, bad_ones = line.partition("Bad items are: ")
+ bad_ones = bad_ones.split()
+ expected_result = common.ReadObjectsFile()
- # Reconstruct objects file from bad_ones and compare
- actual_result = [0] * len(expected_result)
- for bad_obj in bad_ones:
- actual_result[int(bad_obj)] = 1
+ # Reconstruct objects file from bad_ones and compare
+ actual_result = [0] * len(expected_result)
+ for bad_obj in bad_ones:
+ actual_result[int(bad_obj)] = 1
- self.assertEqual(actual_result, expected_result)
+ self.assertEqual(actual_result, expected_result)
class BisectingUtilsTest(unittest.TestCase):
- """Tests for bisecting tool."""
+ """Tests for bisecting tool."""
- def setUp(self):
- """Generate [100-1000] object files, and 1-5% of which are bad ones."""
- GenObj()
+ def setUp(self):
+ """Generate [100-1000] object files, and 1-5% of which are bad ones."""
+ GenObj()
- with open('./is_setup', 'w', encoding='utf-8'):
- pass
+ with open("./is_setup", "w", encoding="utf-8"):
+ pass
- try:
- os.remove(binary_search_state.STATE_FILE)
- except OSError:
- pass
+ try:
+ os.remove(binary_search_state.STATE_FILE)
+ except OSError:
+ pass
- def tearDown(self):
- """Cleanup temp files."""
- CleanObj()
+ def tearDown(self):
+ """Cleanup temp files."""
+ CleanObj()
- try:
- os.remove(os.readlink(binary_search_state.STATE_FILE))
- except OSError:
- pass
+ try:
+ os.remove(os.readlink(binary_search_state.STATE_FILE))
+ except OSError:
+ pass
- cleanup_list = [
- './is_setup', binary_search_state.STATE_FILE, 'noinc_prune_bad',
- 'noinc_prune_good', './cmd_script.sh'
- ]
- for f in cleanup_list:
- if os.path.exists(f):
- os.remove(f)
+ cleanup_list = [
+ "./is_setup",
+ binary_search_state.STATE_FILE,
+ "noinc_prune_bad",
+ "noinc_prune_good",
+ "./cmd_script.sh",
+ ]
+ for f in cleanup_list:
+ if os.path.exists(f):
+ os.remove(f)
- def runTest(self):
- ret = binary_search_state.Run(
- get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- prune=True,
- file_args=True)
- self.assertEqual(ret, 0)
- self.check_output()
+ def runTest(self):
+ ret = binary_search_state.Run(
+ get_initial_items="./gen_init_list.py",
+ switch_to_good="./switch_to_good.py",
+ switch_to_bad="./switch_to_bad.py",
+ test_script="./is_good.py",
+ prune=True,
+ file_args=True,
+ )
+ self.assertEqual(ret, 0)
+ self.check_output()
- def test_arg_parse(self):
- args = [
- '--get_initial_items', './gen_init_list.py', '--switch_to_good',
- './switch_to_good.py', '--switch_to_bad', './switch_to_bad.py',
- '--test_script', './is_good.py', '--prune', '--file_args'
- ]
- ret = binary_search_state.Main(args)
- self.assertEqual(ret, 0)
- self.check_output()
+ def test_arg_parse(self):
+ args = [
+ "--get_initial_items",
+ "./gen_init_list.py",
+ "--switch_to_good",
+ "./switch_to_good.py",
+ "--switch_to_bad",
+ "./switch_to_bad.py",
+ "--test_script",
+ "./is_good.py",
+ "--prune",
+ "--file_args",
+ ]
+ ret = binary_search_state.Main(args)
+ self.assertEqual(ret, 0)
+ self.check_output()
- def test_test_setup_script(self):
- os.remove('./is_setup')
- with self.assertRaises(AssertionError):
- ret = binary_search_state.Run(
- get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- prune=True,
- file_args=True)
+ def test_test_setup_script(self):
+ os.remove("./is_setup")
+ with self.assertRaises(AssertionError):
+ ret = binary_search_state.Run(
+ get_initial_items="./gen_init_list.py",
+ switch_to_good="./switch_to_good.py",
+ switch_to_bad="./switch_to_bad.py",
+ test_script="./is_good.py",
+ prune=True,
+ file_args=True,
+ )
- ret = binary_search_state.Run(
- get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- test_setup_script='./test_setup.py',
- prune=True,
- file_args=True)
- self.assertEqual(ret, 0)
- self.check_output()
+ ret = binary_search_state.Run(
+ get_initial_items="./gen_init_list.py",
+ switch_to_good="./switch_to_good.py",
+ switch_to_bad="./switch_to_bad.py",
+ test_script="./is_good.py",
+ test_setup_script="./test_setup.py",
+ prune=True,
+ file_args=True,
+ )
+ self.assertEqual(ret, 0)
+ self.check_output()
- def test_bad_test_setup_script(self):
- with self.assertRaises(AssertionError):
- binary_search_state.Run(
- get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- test_setup_script='./test_setup_bad.py',
- prune=True,
- file_args=True)
+ def test_bad_test_setup_script(self):
+ with self.assertRaises(AssertionError):
+ binary_search_state.Run(
+ get_initial_items="./gen_init_list.py",
+ switch_to_good="./switch_to_good.py",
+ switch_to_bad="./switch_to_bad.py",
+ test_script="./is_good.py",
+ test_setup_script="./test_setup_bad.py",
+ prune=True,
+ file_args=True,
+ )
- def test_bad_save_state(self):
- state_file = binary_search_state.STATE_FILE
- hidden_state_file = os.path.basename(binary_search_state.HIDDEN_STATE_FILE)
+ def test_bad_save_state(self):
+ state_file = binary_search_state.STATE_FILE
+ hidden_state_file = os.path.basename(
+ binary_search_state.HIDDEN_STATE_FILE
+ )
- with open(state_file, 'w', encoding='utf-8') as f:
- f.write('test123')
+ with open(state_file, "w", encoding="utf-8") as f:
+ f.write("test123")
- bss = binary_search_state.MockBinarySearchState()
- with self.assertRaises(OSError):
- bss.SaveState()
+ bss = binary_search_state.MockBinarySearchState()
+ with self.assertRaises(OSError):
+ bss.SaveState()
- with open(state_file, 'r', encoding='utf-8') as f:
- self.assertEqual(f.read(), 'test123')
+ with open(state_file, "r", encoding="utf-8") as f:
+ self.assertEqual(f.read(), "test123")
- os.remove(state_file)
+ os.remove(state_file)
- # Cleanup generated save state that has no symlink
- files = os.listdir(os.getcwd())
- save_states = [x for x in files if x.startswith(hidden_state_file)]
- _ = [os.remove(x) for x in save_states]
+ # Cleanup generated save state that has no symlink
+ files = os.listdir(os.getcwd())
+ save_states = [x for x in files if x.startswith(hidden_state_file)]
+ _ = [os.remove(x) for x in save_states]
- def test_save_state(self):
- state_file = binary_search_state.STATE_FILE
+ def test_save_state(self):
+ state_file = binary_search_state.STATE_FILE
- bss = binary_search_state.MockBinarySearchState()
- bss.SaveState()
- self.assertTrue(os.path.exists(state_file))
- first_state = os.readlink(state_file)
+ bss = binary_search_state.MockBinarySearchState()
+ bss.SaveState()
+ self.assertTrue(os.path.exists(state_file))
+ first_state = os.readlink(state_file)
- bss.SaveState()
- second_state = os.readlink(state_file)
- self.assertTrue(os.path.exists(state_file))
- self.assertTrue(second_state != first_state)
- self.assertFalse(os.path.exists(first_state))
+ bss.SaveState()
+ second_state = os.readlink(state_file)
+ self.assertTrue(os.path.exists(state_file))
+ self.assertTrue(second_state != first_state)
+ self.assertFalse(os.path.exists(first_state))
- bss.RemoveState()
- self.assertFalse(os.path.islink(state_file))
- self.assertFalse(os.path.exists(second_state))
+ bss.RemoveState()
+ self.assertFalse(os.path.islink(state_file))
+ self.assertFalse(os.path.exists(second_state))
- def test_load_state(self):
- test_items = [1, 2, 3, 4, 5]
+ def test_load_state(self):
+ test_items = [1, 2, 3, 4, 5]
- bss = binary_search_state.MockBinarySearchState()
- bss.all_items = test_items
- bss.currently_good_items = set([1, 2, 3])
- bss.currently_bad_items = set([4, 5])
- bss.SaveState()
+ bss = binary_search_state.MockBinarySearchState()
+ bss.all_items = test_items
+ bss.currently_good_items = set([1, 2, 3])
+ bss.currently_bad_items = set([4, 5])
+ bss.SaveState()
- bss = None
+ bss = None
- bss2 = binary_search_state.MockBinarySearchState.LoadState()
- self.assertEqual(bss2.all_items, test_items)
- self.assertEqual(bss2.currently_good_items, set([]))
- self.assertEqual(bss2.currently_bad_items, set([]))
+ bss2 = binary_search_state.MockBinarySearchState.LoadState()
+ self.assertEqual(bss2.all_items, test_items)
+ self.assertEqual(bss2.currently_good_items, set([]))
+ self.assertEqual(bss2.currently_bad_items, set([]))
- def test_tmp_cleanup(self):
- bss = binary_search_state.MockBinarySearchState(
- get_initial_items='echo "0\n1\n2\n3"',
- switch_to_good='./switch_tmp.py',
- file_args=True)
- bss.SwitchToGood(['0', '1', '2', '3'])
+ def test_tmp_cleanup(self):
+ bss = binary_search_state.MockBinarySearchState(
+ get_initial_items='echo "0\n1\n2\n3"',
+ switch_to_good="./switch_tmp.py",
+ file_args=True,
+ )
+ bss.SwitchToGood(["0", "1", "2", "3"])
- tmp_file = None
- with open('tmp_file', 'r', encoding='utf-8') as f:
- tmp_file = f.read()
- os.remove('tmp_file')
+ tmp_file = None
+ with open("tmp_file", "r", encoding="utf-8") as f:
+ tmp_file = f.read()
+ os.remove("tmp_file")
- self.assertFalse(os.path.exists(tmp_file))
- ws = common.ReadWorkingSet()
- for i in range(3):
- self.assertEqual(ws[i], 42)
+ self.assertFalse(os.path.exists(tmp_file))
+ ws = common.ReadWorkingSet()
+ for i in range(3):
+ self.assertEqual(ws[i], 42)
- def test_verify_fail(self):
- bss = binary_search_state.MockBinarySearchState(
- get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_bad.py',
- switch_to_bad='./switch_to_good.py',
- test_script='./is_good.py',
- prune=True,
- file_args=True,
- verify=True)
- with self.assertRaises(AssertionError):
- bss.DoVerify()
+ def test_verify_fail(self):
+ bss = binary_search_state.MockBinarySearchState(
+ get_initial_items="./gen_init_list.py",
+ switch_to_good="./switch_to_bad.py",
+ switch_to_bad="./switch_to_good.py",
+ test_script="./is_good.py",
+ prune=True,
+ file_args=True,
+ verify=True,
+ )
+ with self.assertRaises(AssertionError):
+ bss.DoVerify()
- def test_early_terminate(self):
- bss = binary_search_state.MockBinarySearchState(
- get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- prune=True,
- file_args=True,
- iterations=1)
- bss.DoSearchBadItems()
- self.assertFalse(bss.found_items)
+ def test_early_terminate(self):
+ bss = binary_search_state.MockBinarySearchState(
+ get_initial_items="./gen_init_list.py",
+ switch_to_good="./switch_to_good.py",
+ switch_to_bad="./switch_to_bad.py",
+ test_script="./is_good.py",
+ prune=True,
+ file_args=True,
+ iterations=1,
+ )
+ bss.DoSearchBadItems()
+ self.assertFalse(bss.found_items)
- def test_no_prune(self):
- bss = binary_search_state.MockBinarySearchState(
- get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- test_setup_script='./test_setup.py',
- prune=False,
- file_args=True)
- bss.DoSearchBadItems()
- self.assertEqual(len(bss.found_items), 1)
+ def test_no_prune(self):
+ bss = binary_search_state.MockBinarySearchState(
+ get_initial_items="./gen_init_list.py",
+ switch_to_good="./switch_to_good.py",
+ switch_to_bad="./switch_to_bad.py",
+ test_script="./is_good.py",
+ test_setup_script="./test_setup.py",
+ prune=False,
+ file_args=True,
+ )
+ bss.DoSearchBadItems()
+ self.assertEqual(len(bss.found_items), 1)
- bad_objs = common.ReadObjectsFile()
- found_obj = int(bss.found_items.pop())
- self.assertEqual(bad_objs[found_obj], 1)
+ bad_objs = common.ReadObjectsFile()
+ found_obj = int(bss.found_items.pop())
+ self.assertEqual(bad_objs[found_obj], 1)
- def test_set_file(self):
- binary_search_state.Run(
- get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good_set_file.py',
- switch_to_bad='./switch_to_bad_set_file.py',
- test_script='./is_good.py',
- prune=True,
- file_args=True,
- verify=True)
- self.check_output()
+ def test_set_file(self):
+ binary_search_state.Run(
+ get_initial_items="./gen_init_list.py",
+ switch_to_good="./switch_to_good_set_file.py",
+ switch_to_bad="./switch_to_bad_set_file.py",
+ test_script="./is_good.py",
+ prune=True,
+ file_args=True,
+ verify=True,
+ )
+ self.check_output()
- def test_noincremental_prune(self):
- ret = binary_search_state.Run(
- get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good_noinc_prune.py',
- switch_to_bad='./switch_to_bad_noinc_prune.py',
- test_script='./is_good_noinc_prune.py',
- test_setup_script='./test_setup.py',
- prune=True,
- noincremental=True,
- file_args=True,
- verify=False)
- self.assertEqual(ret, 0)
- self.check_output()
+ def test_noincremental_prune(self):
+ ret = binary_search_state.Run(
+ get_initial_items="./gen_init_list.py",
+ switch_to_good="./switch_to_good_noinc_prune.py",
+ switch_to_bad="./switch_to_bad_noinc_prune.py",
+ test_script="./is_good_noinc_prune.py",
+ test_setup_script="./test_setup.py",
+ prune=True,
+ noincremental=True,
+ file_args=True,
+ verify=False,
+ )
+ self.assertEqual(ret, 0)
+ self.check_output()
- def check_output(self):
- _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
- ('grep "Bad items are: " logs/binary_search_tool_test.py.out | '
- 'tail -n1'))
- ls = out.splitlines()
- self.assertEqual(len(ls), 1)
- line = ls[0]
+ def check_output(self):
+ _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
+ (
+ 'grep "Bad items are: " logs/binary_search_tool_test.py.out | '
+ "tail -n1"
+ )
+ )
+ ls = out.splitlines()
+ self.assertEqual(len(ls), 1)
+ line = ls[0]
- _, _, bad_ones = line.partition('Bad items are: ')
- bad_ones = bad_ones.split()
- expected_result = common.ReadObjectsFile()
+ _, _, bad_ones = line.partition("Bad items are: ")
+ bad_ones = bad_ones.split()
+ expected_result = common.ReadObjectsFile()
- # Reconstruct objects file from bad_ones and compare
- actual_result = [0] * len(expected_result)
- for bad_obj in bad_ones:
- actual_result[int(bad_obj)] = 1
+ # Reconstruct objects file from bad_ones and compare
+ actual_result = [0] * len(expected_result)
+ for bad_obj in bad_ones:
+ actual_result[int(bad_obj)] = 1
- self.assertEqual(actual_result, expected_result)
+ self.assertEqual(actual_result, expected_result)
class BisectingUtilsPassTest(BisectingUtilsTest):
- """Tests for bisecting tool at pass/transformation level."""
+ """Tests for bisecting tool at pass/transformation level."""
- def check_pass_output(self, pass_name, pass_num, trans_num):
- _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
- ('grep "Bad pass: " logs/binary_search_tool_test.py.out | '
- 'tail -n1'))
- ls = out.splitlines()
- self.assertEqual(len(ls), 1)
- line = ls[0]
- _, _, bad_info = line.partition('Bad pass: ')
- actual_info = pass_name + ' at number ' + str(pass_num)
- self.assertEqual(actual_info, bad_info)
+ def check_pass_output(self, pass_name, pass_num, trans_num):
+ _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
+ (
+ 'grep "Bad pass: " logs/binary_search_tool_test.py.out | '
+ "tail -n1"
+ )
+ )
+ ls = out.splitlines()
+ self.assertEqual(len(ls), 1)
+ line = ls[0]
+ _, _, bad_info = line.partition("Bad pass: ")
+ actual_info = pass_name + " at number " + str(pass_num)
+ self.assertEqual(actual_info, bad_info)
- _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
- ('grep "Bad transformation number: '
- '" logs/binary_search_tool_test.py.out | '
- 'tail -n1'))
- ls = out.splitlines()
- self.assertEqual(len(ls), 1)
- line = ls[0]
- _, _, bad_info = line.partition('Bad transformation number: ')
- actual_info = str(trans_num)
- self.assertEqual(actual_info, bad_info)
+ _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
+ (
+ 'grep "Bad transformation number: '
+ '" logs/binary_search_tool_test.py.out | '
+ "tail -n1"
+ )
+ )
+ ls = out.splitlines()
+ self.assertEqual(len(ls), 1)
+ line = ls[0]
+ _, _, bad_info = line.partition("Bad transformation number: ")
+ actual_info = str(trans_num)
+ self.assertEqual(actual_info, bad_info)
- def test_with_prune(self):
- ret = binary_search_state.Run(
- get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- pass_bisect='./generate_cmd.py',
- prune=True,
- file_args=True)
- self.assertEqual(ret, 1)
+ def test_with_prune(self):
+ ret = binary_search_state.Run(
+ get_initial_items="./gen_init_list.py",
+ switch_to_good="./switch_to_good.py",
+ switch_to_bad="./switch_to_bad.py",
+ test_script="./is_good.py",
+ pass_bisect="./generate_cmd.py",
+ prune=True,
+ file_args=True,
+ )
+ self.assertEqual(ret, 1)
- def test_gen_cmd_script(self):
- bss = binary_search_state.MockBinarySearchState(
- get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- pass_bisect='./generate_cmd.py',
- prune=False,
- file_args=True)
- bss.DoSearchBadItems()
- cmd_script_path = bss.cmd_script
- self.assertTrue(os.path.exists(cmd_script_path))
+ def test_gen_cmd_script(self):
+ bss = binary_search_state.MockBinarySearchState(
+ get_initial_items="./gen_init_list.py",
+ switch_to_good="./switch_to_good.py",
+ switch_to_bad="./switch_to_bad.py",
+ test_script="./is_good.py",
+ pass_bisect="./generate_cmd.py",
+ prune=False,
+ file_args=True,
+ )
+ bss.DoSearchBadItems()
+ cmd_script_path = bss.cmd_script
+ self.assertTrue(os.path.exists(cmd_script_path))
- def test_no_pass_support(self):
- bss = binary_search_state.MockBinarySearchState(
- get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- pass_bisect='./generate_cmd.py',
- prune=False,
- file_args=True)
- bss.cmd_script = './cmd_script_no_support.py'
- # No support for -opt-bisect-limit
- with self.assertRaises(RuntimeError):
- bss.BuildWithPassLimit(-1)
+ def test_no_pass_support(self):
+ bss = binary_search_state.MockBinarySearchState(
+ get_initial_items="./gen_init_list.py",
+ switch_to_good="./switch_to_good.py",
+ switch_to_bad="./switch_to_bad.py",
+ test_script="./is_good.py",
+ pass_bisect="./generate_cmd.py",
+ prune=False,
+ file_args=True,
+ )
+ bss.cmd_script = "./cmd_script_no_support.py"
+ # No support for -opt-bisect-limit
+ with self.assertRaises(RuntimeError):
+ bss.BuildWithPassLimit(-1)
- def test_no_transform_support(self):
- bss = binary_search_state.MockBinarySearchState(
- get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- pass_bisect='./generate_cmd.py',
- prune=False,
- file_args=True)
- bss.cmd_script = './cmd_script_no_support.py'
- # No support for -print-debug-counter
- with self.assertRaises(RuntimeError):
- bss.BuildWithTransformLimit(-1, 'counter_name')
+ def test_no_transform_support(self):
+ bss = binary_search_state.MockBinarySearchState(
+ get_initial_items="./gen_init_list.py",
+ switch_to_good="./switch_to_good.py",
+ switch_to_bad="./switch_to_bad.py",
+ test_script="./is_good.py",
+ pass_bisect="./generate_cmd.py",
+ prune=False,
+ file_args=True,
+ )
+ bss.cmd_script = "./cmd_script_no_support.py"
+ # No support for -print-debug-counter
+ with self.assertRaises(RuntimeError):
+ bss.BuildWithTransformLimit(-1, "counter_name")
- def test_pass_transform_bisect(self):
- bss = binary_search_state.MockBinarySearchState(
- get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- pass_bisect='./generate_cmd.py',
- prune=False,
- file_args=True)
- pass_num = 4
- trans_num = 19
- bss.cmd_script = './cmd_script.py %d %d' % (pass_num, trans_num)
- bss.DoSearchBadPass()
- self.check_pass_output('instcombine-visit', pass_num, trans_num)
+ def test_pass_transform_bisect(self):
+ bss = binary_search_state.MockBinarySearchState(
+ get_initial_items="./gen_init_list.py",
+ switch_to_good="./switch_to_good.py",
+ switch_to_bad="./switch_to_bad.py",
+ test_script="./is_good.py",
+ pass_bisect="./generate_cmd.py",
+ prune=False,
+ file_args=True,
+ )
+ pass_num = 4
+ trans_num = 19
+ bss.cmd_script = "./cmd_script.py %d %d" % (pass_num, trans_num)
+ bss.DoSearchBadPass()
+ self.check_pass_output("instcombine-visit", pass_num, trans_num)
- def test_result_not_reproduced_pass(self):
- bss = binary_search_state.MockBinarySearchState(
- get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- pass_bisect='./generate_cmd.py',
- prune=False,
- file_args=True)
- # Fails reproducing at pass level.
- pass_num = 0
- trans_num = 19
- bss.cmd_script = './cmd_script.py %d %d' % (pass_num, trans_num)
- with self.assertRaises(ValueError):
- bss.DoSearchBadPass()
+ def test_result_not_reproduced_pass(self):
+ bss = binary_search_state.MockBinarySearchState(
+ get_initial_items="./gen_init_list.py",
+ switch_to_good="./switch_to_good.py",
+ switch_to_bad="./switch_to_bad.py",
+ test_script="./is_good.py",
+ pass_bisect="./generate_cmd.py",
+ prune=False,
+ file_args=True,
+ )
+ # Fails reproducing at pass level.
+ pass_num = 0
+ trans_num = 19
+ bss.cmd_script = "./cmd_script.py %d %d" % (pass_num, trans_num)
+ with self.assertRaises(ValueError):
+ bss.DoSearchBadPass()
- def test_result_not_reproduced_transform(self):
- bss = binary_search_state.MockBinarySearchState(
- get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- pass_bisect='./generate_cmd.py',
- prune=False,
- file_args=True)
- # Fails reproducing at transformation level.
- pass_num = 4
- trans_num = 0
- bss.cmd_script = './cmd_script.py %d %d' % (pass_num, trans_num)
- with self.assertRaises(ValueError):
- bss.DoSearchBadPass()
+ def test_result_not_reproduced_transform(self):
+ bss = binary_search_state.MockBinarySearchState(
+ get_initial_items="./gen_init_list.py",
+ switch_to_good="./switch_to_good.py",
+ switch_to_bad="./switch_to_bad.py",
+ test_script="./is_good.py",
+ pass_bisect="./generate_cmd.py",
+ prune=False,
+ file_args=True,
+ )
+ # Fails reproducing at transformation level.
+ pass_num = 4
+ trans_num = 0
+ bss.cmd_script = "./cmd_script.py %d %d" % (pass_num, trans_num)
+ with self.assertRaises(ValueError):
+ bss.DoSearchBadPass()
class BisectStressTest(unittest.TestCase):
- """Stress tests for bisecting tool."""
+ """Stress tests for bisecting tool."""
- def test_every_obj_bad(self):
- amt = 25
- gen_obj.Main(['--obj_num', str(amt), '--bad_obj_num', str(amt)])
- ret = binary_search_state.Run(
- get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- prune=True,
- file_args=True,
- verify=False)
- self.assertEqual(ret, 0)
- self.check_output()
+ def test_every_obj_bad(self):
+ amt = 25
+ gen_obj.Main(["--obj_num", str(amt), "--bad_obj_num", str(amt)])
+ ret = binary_search_state.Run(
+ get_initial_items="./gen_init_list.py",
+ switch_to_good="./switch_to_good.py",
+ switch_to_bad="./switch_to_bad.py",
+ test_script="./is_good.py",
+ prune=True,
+ file_args=True,
+ verify=False,
+ )
+ self.assertEqual(ret, 0)
+ self.check_output()
- def test_every_index_is_bad(self):
- amt = 25
- for i in range(amt):
- obj_list = ['0'] * amt
- obj_list[i] = '1'
- obj_list = ','.join(obj_list)
- gen_obj.Main(['--obj_list', obj_list])
- ret = binary_search_state.Run(
- get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_setup_script='./test_setup.py',
- test_script='./is_good.py',
- prune=True,
- file_args=True)
- self.assertEqual(ret, 0)
- self.check_output()
+ def test_every_index_is_bad(self):
+ amt = 25
+ for i in range(amt):
+ obj_list = ["0"] * amt
+ obj_list[i] = "1"
+ obj_list = ",".join(obj_list)
+ gen_obj.Main(["--obj_list", obj_list])
+ ret = binary_search_state.Run(
+ get_initial_items="./gen_init_list.py",
+ switch_to_good="./switch_to_good.py",
+ switch_to_bad="./switch_to_bad.py",
+ test_setup_script="./test_setup.py",
+ test_script="./is_good.py",
+ prune=True,
+ file_args=True,
+ )
+ self.assertEqual(ret, 0)
+ self.check_output()
- def check_output(self):
- _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
- ('grep "Bad items are: " logs/binary_search_tool_test.py.out | '
- 'tail -n1'))
- ls = out.splitlines()
- self.assertEqual(len(ls), 1)
- line = ls[0]
+ def check_output(self):
+ _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
+ (
+ 'grep "Bad items are: " logs/binary_search_tool_test.py.out | '
+ "tail -n1"
+ )
+ )
+ ls = out.splitlines()
+ self.assertEqual(len(ls), 1)
+ line = ls[0]
- _, _, bad_ones = line.partition('Bad items are: ')
- bad_ones = bad_ones.split()
- expected_result = common.ReadObjectsFile()
+ _, _, bad_ones = line.partition("Bad items are: ")
+ bad_ones = bad_ones.split()
+ expected_result = common.ReadObjectsFile()
- # Reconstruct objects file from bad_ones and compare
- actual_result = [0] * len(expected_result)
- for bad_obj in bad_ones:
- actual_result[int(bad_obj)] = 1
+ # Reconstruct objects file from bad_ones and compare
+ actual_result = [0] * len(expected_result)
+ for bad_obj in bad_ones:
+ actual_result[int(bad_obj)] = 1
- self.assertEqual(actual_result, expected_result)
+ self.assertEqual(actual_result, expected_result)
def Main(argv):
- num_tests = 2
- if len(argv) > 1:
- num_tests = int(argv[1])
+ num_tests = 2
+ if len(argv) > 1:
+ num_tests = int(argv[1])
- suite = unittest.TestSuite()
- for _ in range(0, num_tests):
- suite.addTest(BisectingUtilsTest())
- suite.addTest(BisectingUtilsTest('test_arg_parse'))
- suite.addTest(BisectingUtilsTest('test_test_setup_script'))
- suite.addTest(BisectingUtilsTest('test_bad_test_setup_script'))
- suite.addTest(BisectingUtilsTest('test_bad_save_state'))
- suite.addTest(BisectingUtilsTest('test_save_state'))
- suite.addTest(BisectingUtilsTest('test_load_state'))
- suite.addTest(BisectingUtilsTest('test_tmp_cleanup'))
- suite.addTest(BisectingUtilsTest('test_verify_fail'))
- suite.addTest(BisectingUtilsTest('test_early_terminate'))
- suite.addTest(BisectingUtilsTest('test_no_prune'))
- suite.addTest(BisectingUtilsTest('test_set_file'))
- suite.addTest(BisectingUtilsTest('test_noincremental_prune'))
- suite.addTest(BisectingUtilsPassTest('test_with_prune'))
- suite.addTest(BisectingUtilsPassTest('test_gen_cmd_script'))
- suite.addTest(BisectingUtilsPassTest('test_no_pass_support'))
- suite.addTest(BisectingUtilsPassTest('test_no_transform_support'))
- suite.addTest(BisectingUtilsPassTest('test_pass_transform_bisect'))
- suite.addTest(BisectingUtilsPassTest('test_result_not_reproduced_pass'))
- suite.addTest(BisectingUtilsPassTest('test_result_not_reproduced_transform'))
- suite.addTest(BisectTest('test_full_bisector'))
- suite.addTest(BisectStressTest('test_every_obj_bad'))
- suite.addTest(BisectStressTest('test_every_index_is_bad'))
- runner = unittest.TextTestRunner()
- runner.run(suite)
+ suite = unittest.TestSuite()
+ for _ in range(0, num_tests):
+ suite.addTest(BisectingUtilsTest())
+ suite.addTest(BisectingUtilsTest("test_arg_parse"))
+ suite.addTest(BisectingUtilsTest("test_test_setup_script"))
+ suite.addTest(BisectingUtilsTest("test_bad_test_setup_script"))
+ suite.addTest(BisectingUtilsTest("test_bad_save_state"))
+ suite.addTest(BisectingUtilsTest("test_save_state"))
+ suite.addTest(BisectingUtilsTest("test_load_state"))
+ suite.addTest(BisectingUtilsTest("test_tmp_cleanup"))
+ suite.addTest(BisectingUtilsTest("test_verify_fail"))
+ suite.addTest(BisectingUtilsTest("test_early_terminate"))
+ suite.addTest(BisectingUtilsTest("test_no_prune"))
+ suite.addTest(BisectingUtilsTest("test_set_file"))
+ suite.addTest(BisectingUtilsTest("test_noincremental_prune"))
+ suite.addTest(BisectingUtilsPassTest("test_with_prune"))
+ suite.addTest(BisectingUtilsPassTest("test_gen_cmd_script"))
+ suite.addTest(BisectingUtilsPassTest("test_no_pass_support"))
+ suite.addTest(BisectingUtilsPassTest("test_no_transform_support"))
+ suite.addTest(BisectingUtilsPassTest("test_pass_transform_bisect"))
+ suite.addTest(BisectingUtilsPassTest("test_result_not_reproduced_pass"))
+ suite.addTest(
+ BisectingUtilsPassTest("test_result_not_reproduced_transform")
+ )
+ suite.addTest(BisectTest("test_full_bisector"))
+ suite.addTest(BisectStressTest("test_every_obj_bad"))
+ suite.addTest(BisectStressTest("test_every_index_is_bad"))
+ runner = unittest.TextTestRunner()
+ runner.run(suite)
-if __name__ == '__main__':
- Main(sys.argv)
+if __name__ == "__main__":
+ Main(sys.argv)
diff --git a/binary_search_tool/test/cmd_script.py b/binary_search_tool/test/cmd_script.py
index bfd5605..b0475c7 100755
--- a/binary_search_tool/test/cmd_script.py
+++ b/binary_search_tool/test/cmd_script.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -11,7 +11,6 @@
compiler.
"""
-from __future__ import print_function
import os
import sys
@@ -20,57 +19,62 @@
def Main(argv):
- if not os.path.exists('./is_setup'):
- return 1
+ if not os.path.exists("./is_setup"):
+ return 1
- if len(argv) != 3:
- return 1
+ if len(argv) != 3:
+ return 1
- limit_flags = os.environ['LIMIT_FLAGS']
- opt_bisect_exist = False
- debug_counter_exist = False
+ limit_flags = os.environ["LIMIT_FLAGS"]
+ opt_bisect_exist = False
+ debug_counter_exist = False
- for option in limit_flags.split():
- if '-opt-bisect-limit' in option:
- opt_bisect_limit = int(option.split('=')[-1])
- opt_bisect_exist = True
- if '-debug-counter=' in option:
- debug_counter = int(option.split('=')[-1])
- debug_counter_exist = True
+ for option in limit_flags.split():
+ if "-opt-bisect-limit" in option:
+ opt_bisect_limit = int(option.split("=")[-1])
+ opt_bisect_exist = True
+ if "-debug-counter=" in option:
+ debug_counter = int(option.split("=")[-1])
+ debug_counter_exist = True
- if not opt_bisect_exist:
- return 1
+ if not opt_bisect_exist:
+ return 1
- # Manually set total number and bad number
- total_pass = 10
- total_transform = 20
- bad_pass = int(argv[1])
- bad_transform = int(argv[2])
+ # Manually set total number and bad number
+ total_pass = 10
+ total_transform = 20
+ bad_pass = int(argv[1])
+ bad_transform = int(argv[2])
- if opt_bisect_limit == -1:
- opt_bisect_limit = total_pass
+ if opt_bisect_limit == -1:
+ opt_bisect_limit = total_pass
- for i in range(1, total_pass + 1):
- bisect_str = 'BISECT: %srunning pass (%d) Combine redundant ' \
- 'instructions on function (f1)' \
- % ('NOT ' if i > opt_bisect_limit else '', i)
- print(bisect_str, file=sys.stderr)
+ for i in range(1, total_pass + 1):
+ bisect_str = (
+ "BISECT: %srunning pass (%d) Combine redundant "
+ "instructions on function (f1)"
+ % ("NOT " if i > opt_bisect_limit else "", i)
+ )
+ print(bisect_str, file=sys.stderr)
- if debug_counter_exist:
- print('Counters and values:', file=sys.stderr)
- print(
- 'instcombine-visit : {%d, 0, %d}' % (total_transform, debug_counter),
- file=sys.stderr)
+ if debug_counter_exist:
+ print("Counters and values:", file=sys.stderr)
+ print(
+ "instcombine-visit : {%d, 0, %d}"
+ % (total_transform, debug_counter),
+ file=sys.stderr,
+ )
- if opt_bisect_limit > bad_pass or \
- (debug_counter_exist and debug_counter > bad_transform):
- common.WriteWorkingSet([1])
- else:
- common.WriteWorkingSet([0])
+ if opt_bisect_limit > bad_pass or (
+ debug_counter_exist and debug_counter > bad_transform
+ ):
+ common.WriteWorkingSet([1])
+ else:
+ common.WriteWorkingSet([0])
- return 0
+ return 0
-if __name__ == '__main__':
- retval = Main(sys.argv)
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main(sys.argv)
+ sys.exit(retval)
diff --git a/binary_search_tool/test/cmd_script_no_support.py b/binary_search_tool/test/cmd_script_no_support.py
index badbedc..f1c2bcb 100644
--- a/binary_search_tool/test/cmd_script_no_support.py
+++ b/binary_search_tool/test/cmd_script_no_support.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -9,21 +9,21 @@
supported by compiler.
"""
-from __future__ import print_function
import os
import sys
def Main():
- if not os.path.exists('./is_setup'):
- return 1
- print(
- 'No support for -opt-bisect-limit or -print-debug-counter.',
- file=sys.stderr)
- return 0
+ if not os.path.exists("./is_setup"):
+ return 1
+ print(
+ "No support for -opt-bisect-limit or -print-debug-counter.",
+ file=sys.stderr,
+ )
+ return 0
-if __name__ == '__main__':
- retval = Main()
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main()
+ sys.exit(retval)
diff --git a/binary_search_tool/test/common.py b/binary_search_tool/test/common.py
index cf5300f..6632a4c 100755
--- a/binary_search_tool/test/common.py
+++ b/binary_search_tool/test/common.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -8,35 +8,35 @@
DEFAULT_OBJECT_NUMBER = 1238
DEFAULT_BAD_OBJECT_NUMBER = 23
-OBJECTS_FILE = 'objects.txt'
-WORKING_SET_FILE = 'working_set.txt'
+OBJECTS_FILE = "objects.txt"
+WORKING_SET_FILE = "working_set.txt"
def ReadWorkingSet():
- working_set = []
- with open(WORKING_SET_FILE, 'r', encoding='utf-8') as f:
- for l in f:
- working_set.append(int(l))
- return working_set
+ working_set = []
+ with open(WORKING_SET_FILE, "r", encoding="utf-8") as f:
+ for l in f:
+ working_set.append(int(l))
+ return working_set
def WriteWorkingSet(working_set):
- with open(WORKING_SET_FILE, 'w', encoding='utf-8') as f:
- for o in working_set:
- f.write('{0}\n'.format(o))
+ with open(WORKING_SET_FILE, "w", encoding="utf-8") as f:
+ for o in working_set:
+ f.write("{0}\n".format(o))
def ReadObjectsFile():
- objects_file = []
- with open(OBJECTS_FILE, 'r', encoding='utf-8') as f:
- for l in f:
- objects_file.append(int(l))
- return objects_file
+ objects_file = []
+ with open(OBJECTS_FILE, "r", encoding="utf-8") as f:
+ for l in f:
+ objects_file.append(int(l))
+ return objects_file
def ReadObjectIndex(filename):
- object_index = []
- with open(filename, 'r', encoding='utf-8') as f:
- for o in f:
- object_index.append(int(o))
- return object_index
+ object_index = []
+ with open(filename, "r", encoding="utf-8") as f:
+ for o in f:
+ object_index.append(int(o))
+ return object_index
diff --git a/binary_search_tool/test/gen_init_list.py b/binary_search_tool/test/gen_init_list.py
index bc5dd8f..138e949 100755
--- a/binary_search_tool/test/gen_init_list.py
+++ b/binary_search_tool/test/gen_init_list.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints out index for every object file, starting from 0."""
-from __future__ import print_function
import sys
@@ -15,13 +14,14 @@
def Main():
- ce = command_executer.GetCommandExecuter()
- _, l, _ = ce.RunCommandWOutput(
- 'cat {0} | wc -l'.format(common.OBJECTS_FILE), print_to_console=False)
- for i in range(0, int(l)):
- print(i)
+ ce = command_executer.GetCommandExecuter()
+ _, l, _ = ce.RunCommandWOutput(
+ "cat {0} | wc -l".format(common.OBJECTS_FILE), print_to_console=False
+ )
+ for i in range(0, int(l)):
+ print(i)
-if __name__ == '__main__':
- Main()
- sys.exit(0)
+if __name__ == "__main__":
+ Main()
+ sys.exit(0)
diff --git a/binary_search_tool/test/gen_obj.py b/binary_search_tool/test/gen_obj.py
index 4f65c71..394445f 100755
--- a/binary_search_tool/test/gen_obj.py
+++ b/binary_search_tool/test/gen_obj.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -10,7 +10,6 @@
1 represents a bad object file.
"""
-from __future__ import print_function
import argparse
import os
@@ -21,81 +20,91 @@
def Main(argv):
- """Generates a list, the value of each element is 0 or 1.
+ """Generates a list, the value of each element is 0 or 1.
- The number of 1s in the list is specified by bad_obj_num.
- The others are all 0s. The total number of 0s and 1s is specified by obj_num.
+ The number of 1s in the list is specified by bad_obj_num.
+ The others are all 0s. The total number of 0s and 1s is specified by obj_num.
- Args:
- argv: argument from command line
+ Args:
+ argv: argument from command line
- Returns:
- 0 always.
- """
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '-n',
- '--obj_num',
- dest='obj_num',
- default=common.DEFAULT_OBJECT_NUMBER,
- help=('Number of total objects.'))
- parser.add_argument(
- '-b',
- '--bad_obj_num',
- dest='bad_obj_num',
- default=common.DEFAULT_BAD_OBJECT_NUMBER,
- help=('Number of bad objects. Must be great than or '
- 'equal to zero and less than total object '
- 'number.'))
- parser.add_argument(
- '-o',
- '--obj_list',
- dest='obj_list',
- default='',
- help=('List of comma seperated objects to generate. '
- 'A 0 means the object is good, a 1 means the '
- 'object is bad.'))
- options = parser.parse_args(argv)
+ Returns:
+ 0 always.
+ """
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "-n",
+ "--obj_num",
+ dest="obj_num",
+ default=common.DEFAULT_OBJECT_NUMBER,
+ help=("Number of total objects."),
+ )
+ parser.add_argument(
+ "-b",
+ "--bad_obj_num",
+ dest="bad_obj_num",
+ default=common.DEFAULT_BAD_OBJECT_NUMBER,
+ help=(
+            "Number of bad objects. Must be greater than or "
+ "equal to zero and less than total object "
+ "number."
+ ),
+ )
+ parser.add_argument(
+ "-o",
+ "--obj_list",
+ dest="obj_list",
+ default="",
+ help=(
+            "List of comma separated objects to generate. "
+ "A 0 means the object is good, a 1 means the "
+ "object is bad."
+ ),
+ )
+ options = parser.parse_args(argv)
- obj_num = int(options.obj_num)
- bad_obj_num = int(options.bad_obj_num)
- bad_to_gen = int(options.bad_obj_num)
- obj_list = options.obj_list
- if not obj_list:
- obj_list = []
- for i in range(obj_num):
- if bad_to_gen > 0 and random.randint(1, obj_num) <= bad_obj_num:
- obj_list.append(1)
- bad_to_gen -= 1
- else:
- obj_list.append(0)
- while bad_to_gen > 0:
- t = random.randint(0, obj_num - 1)
- if obj_list[t] == 0:
- obj_list[t] = 1
- bad_to_gen -= 1
- else:
- obj_list = obj_list.split(',')
+ obj_num = int(options.obj_num)
+ bad_obj_num = int(options.bad_obj_num)
+ bad_to_gen = int(options.bad_obj_num)
+ obj_list = options.obj_list
+ if not obj_list:
+ obj_list = []
+ for i in range(obj_num):
+ if bad_to_gen > 0 and random.randint(1, obj_num) <= bad_obj_num:
+ obj_list.append(1)
+ bad_to_gen -= 1
+ else:
+ obj_list.append(0)
+ while bad_to_gen > 0:
+ t = random.randint(0, obj_num - 1)
+ if obj_list[t] == 0:
+ obj_list[t] = 1
+ bad_to_gen -= 1
+ else:
+ obj_list = obj_list.split(",")
- if os.path.isfile(common.OBJECTS_FILE):
- os.remove(common.OBJECTS_FILE)
- if os.path.isfile(common.WORKING_SET_FILE):
- os.remove(common.WORKING_SET_FILE)
+ if os.path.isfile(common.OBJECTS_FILE):
+ os.remove(common.OBJECTS_FILE)
+ if os.path.isfile(common.WORKING_SET_FILE):
+ os.remove(common.WORKING_SET_FILE)
- with open(common.OBJECTS_FILE, 'w', encoding='utf-8') as f:
- with open(common.WORKING_SET_FILE, 'w', encoding='utf-8') as w:
- for i in obj_list:
- f.write('{0}\n'.format(i))
- w.write('{0}\n'.format(i))
+ with open(common.OBJECTS_FILE, "w", encoding="utf-8") as f:
+ with open(common.WORKING_SET_FILE, "w", encoding="utf-8") as w:
+ for i in obj_list:
+ f.write("{0}\n".format(i))
+ w.write("{0}\n".format(i))
- obj_num = len(obj_list)
- bad_obj_num = obj_list.count(1)
- print('Generated {0} object files, with {1} bad ones.'.format(
- obj_num, bad_obj_num))
+ obj_num = len(obj_list)
+ bad_obj_num = obj_list.count(1)
+ print(
+ "Generated {0} object files, with {1} bad ones.".format(
+ obj_num, bad_obj_num
+ )
+ )
- return 0
+ return 0
-if __name__ == '__main__':
- retval = Main(sys.argv[1:])
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main(sys.argv[1:])
+ sys.exit(retval)
diff --git a/binary_search_tool/test/generate_cmd.py b/binary_search_tool/test/generate_cmd.py
index 51b36b0..96fa720 100755
--- a/binary_search_tool/test/generate_cmd.py
+++ b/binary_search_tool/test/generate_cmd.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -10,21 +10,20 @@
this script to verify if cmd_script.sh is generated correctly.
"""
-from __future__ import print_function
import os
import sys
def Main():
- if not os.path.exists('./is_setup'):
- return 1
- file_name = 'cmd_script.sh'
- with open(file_name, 'w', encoding='utf-8') as f:
- f.write('Generated by generate_cmd.py')
- return 0
+ if not os.path.exists("./is_setup"):
+ return 1
+ file_name = "cmd_script.sh"
+ with open(file_name, "w", encoding="utf-8") as f:
+ f.write("Generated by generate_cmd.py")
+ return 0
-if __name__ == '__main__':
- retval = Main()
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main()
+ sys.exit(retval)
diff --git a/binary_search_tool/test/is_good.py b/binary_search_tool/test/is_good.py
index 662921e..fd3f908 100755
--- a/binary_search_tool/test/is_good.py
+++ b/binary_search_tool/test/is_good.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Check to see if the working set produces a good executable."""
-from __future__ import print_function
import os
import sys
@@ -15,15 +14,15 @@
def Main():
- if not os.path.exists('./is_setup'):
- return 1
- working_set = common.ReadWorkingSet()
- for w in working_set:
- if w == 1:
- return 1 ## False, linking failure
- return 0
+ if not os.path.exists("./is_setup"):
+ return 1
+ working_set = common.ReadWorkingSet()
+ for w in working_set:
+ if w == 1:
+ return 1 ## False, linking failure
+ return 0
-if __name__ == '__main__':
- retval = Main()
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main()
+ sys.exit(retval)
diff --git a/binary_search_tool/test/is_good_noinc_prune.py b/binary_search_tool/test/is_good_noinc_prune.py
index c0e42bb..654fcd2 100755
--- a/binary_search_tool/test/is_good_noinc_prune.py
+++ b/binary_search_tool/test/is_good_noinc_prune.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -12,7 +12,6 @@
that noincremental always holds).
"""
-from __future__ import print_function
import os
import sys
@@ -21,31 +20,31 @@
def Main():
- working_set = common.ReadWorkingSet()
+ working_set = common.ReadWorkingSet()
- with open('noinc_prune_good', 'r', encoding='utf-8') as good_args:
- num_good_args = len(good_args.readlines())
+ with open("noinc_prune_good", "r", encoding="utf-8") as good_args:
+ num_good_args = len(good_args.readlines())
- with open('noinc_prune_bad', 'r', encoding='utf-8') as bad_args:
- num_bad_args = len(bad_args.readlines())
+ with open("noinc_prune_bad", "r", encoding="utf-8") as bad_args:
+ num_bad_args = len(bad_args.readlines())
- num_args = num_good_args + num_bad_args
- if num_args != len(working_set):
- print('Only %d args, expected %d' % (num_args, len(working_set)))
- print('%d good args, %d bad args' % (num_good_args, num_bad_args))
- return 3
+ num_args = num_good_args + num_bad_args
+ if num_args != len(working_set):
+ print("Only %d args, expected %d" % (num_args, len(working_set)))
+ print("%d good args, %d bad args" % (num_good_args, num_bad_args))
+ return 3
- os.remove('noinc_prune_bad')
- os.remove('noinc_prune_good')
+ os.remove("noinc_prune_bad")
+ os.remove("noinc_prune_good")
- if not os.path.exists('./is_setup'):
- return 1
- for w in working_set:
- if w == 1:
- return 1 ## False, linking failure
- return 0
+ if not os.path.exists("./is_setup"):
+ return 1
+ for w in working_set:
+ if w == 1:
+ return 1 ## False, linking failure
+ return 0
-if __name__ == '__main__':
- retval = Main()
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main()
+ sys.exit(retval)
diff --git a/binary_search_tool/test/switch_tmp.py b/binary_search_tool/test/switch_tmp.py
index 0f3c423..acc0393 100755
--- a/binary_search_tool/test/switch_tmp.py
+++ b/binary_search_tool/test/switch_tmp.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -12,7 +12,6 @@
set to good (mark as 42).
"""
-from __future__ import print_function
import sys
@@ -20,20 +19,20 @@
def Main(argv):
- working_set = common.ReadWorkingSet()
- object_index = common.ReadObjectIndex(argv[1])
+ working_set = common.ReadWorkingSet()
+ object_index = common.ReadObjectIndex(argv[1])
- # Random number so the results can be checked
- for oi in object_index:
- working_set[int(oi)] = 42
+ # Random number so the results can be checked
+ for oi in object_index:
+ working_set[int(oi)] = 42
- common.WriteWorkingSet(working_set)
- with open('tmp_file', 'w', encoding='utf-8') as f:
- f.write(argv[1])
+ common.WriteWorkingSet(working_set)
+ with open("tmp_file", "w", encoding="utf-8") as f:
+ f.write(argv[1])
- return 0
+ return 0
-if __name__ == '__main__':
- retval = Main(sys.argv)
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main(sys.argv)
+ sys.exit(retval)
diff --git a/binary_search_tool/test/switch_to_bad.py b/binary_search_tool/test/switch_to_bad.py
index e3553eb..bc32f3c 100755
--- a/binary_search_tool/test/switch_to_bad.py
+++ b/binary_search_tool/test/switch_to_bad.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Switch part of the objects file in working set to (possible) bad ones."""
-from __future__ import print_function
import sys
@@ -14,19 +13,19 @@
def Main(argv):
- """Switch part of the objects file in working set to (possible) bad ones."""
- working_set = common.ReadWorkingSet()
- objects_file = common.ReadObjectsFile()
- object_index = common.ReadObjectIndex(argv[1])
+ """Switch part of the objects file in working set to (possible) bad ones."""
+ working_set = common.ReadWorkingSet()
+ objects_file = common.ReadObjectsFile()
+ object_index = common.ReadObjectIndex(argv[1])
- for oi in object_index:
- working_set[oi] = objects_file[oi]
+ for oi in object_index:
+ working_set[oi] = objects_file[oi]
- common.WriteWorkingSet(working_set)
+ common.WriteWorkingSet(working_set)
- return 0
+ return 0
-if __name__ == '__main__':
- retval = Main(sys.argv)
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main(sys.argv)
+ sys.exit(retval)
diff --git a/binary_search_tool/test/switch_to_bad_noinc_prune.py b/binary_search_tool/test/switch_to_bad_noinc_prune.py
index 81b558e..e5574f9 100755
--- a/binary_search_tool/test/switch_to_bad_noinc_prune.py
+++ b/binary_search_tool/test/switch_to_bad_noinc_prune.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -18,7 +18,6 @@
Warning: This switch script assumes the --file_args option
"""
-from __future__ import print_function
import shutil
import sys
@@ -27,21 +26,21 @@
def Main(argv):
- """Switch part of the objects file in working set to (possible) bad ones."""
- working_set = common.ReadWorkingSet()
- objects_file = common.ReadObjectsFile()
- object_index = common.ReadObjectIndex(argv[1])
+ """Switch part of the objects file in working set to (possible) bad ones."""
+ working_set = common.ReadWorkingSet()
+ objects_file = common.ReadObjectsFile()
+ object_index = common.ReadObjectIndex(argv[1])
- for oi in object_index:
- working_set[oi] = objects_file[oi]
+ for oi in object_index:
+ working_set[oi] = objects_file[oi]
- shutil.copy(argv[1], './noinc_prune_bad')
+ shutil.copy(argv[1], "./noinc_prune_bad")
- common.WriteWorkingSet(working_set)
+ common.WriteWorkingSet(working_set)
- return 0
+ return 0
-if __name__ == '__main__':
- retval = Main(sys.argv)
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main(sys.argv)
+ sys.exit(retval)
diff --git a/binary_search_tool/test/switch_to_bad_set_file.py b/binary_search_tool/test/switch_to_bad_set_file.py
index 5b941c6..9d4bee6 100755
--- a/binary_search_tool/test/switch_to_bad_set_file.py
+++ b/binary_search_tool/test/switch_to_bad_set_file.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -10,7 +10,6 @@
the set files generated by binary_search_state to do the switching.
"""
-from __future__ import print_function
import os
import sys
@@ -19,24 +18,24 @@
def Main(_):
- """Switch part of the objects file in working set to (possible) bad ones."""
- working_set = common.ReadWorkingSet()
- objects_file = common.ReadObjectsFile()
+ """Switch part of the objects file in working set to (possible) bad ones."""
+ working_set = common.ReadWorkingSet()
+ objects_file = common.ReadObjectsFile()
- if not os.path.exists(os.environ['BISECT_BAD_SET']):
- print('Bad set file does not exist!')
- return 1
+ if not os.path.exists(os.environ["BISECT_BAD_SET"]):
+ print("Bad set file does not exist!")
+ return 1
- object_index = common.ReadObjectIndex(os.environ['BISECT_BAD_SET'])
+ object_index = common.ReadObjectIndex(os.environ["BISECT_BAD_SET"])
- for oi in object_index:
- working_set[int(oi)] = objects_file[oi]
+ for oi in object_index:
+ working_set[int(oi)] = objects_file[oi]
- common.WriteWorkingSet(working_set)
+ common.WriteWorkingSet(working_set)
- return 0
+ return 0
-if __name__ == '__main__':
- retval = Main(sys.argv)
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main(sys.argv)
+ sys.exit(retval)
diff --git a/binary_search_tool/test/switch_to_good.py b/binary_search_tool/test/switch_to_good.py
index 9747932..61a59a2 100755
--- a/binary_search_tool/test/switch_to_good.py
+++ b/binary_search_tool/test/switch_to_good.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -11,7 +11,6 @@
set to good (mark as 0).
"""
-from __future__ import print_function
import sys
@@ -19,17 +18,17 @@
def Main(argv):
- working_set = common.ReadWorkingSet()
- object_index = common.ReadObjectIndex(argv[1])
+ working_set = common.ReadWorkingSet()
+ object_index = common.ReadObjectIndex(argv[1])
- for oi in object_index:
- working_set[int(oi)] = 0
+ for oi in object_index:
+ working_set[int(oi)] = 0
- common.WriteWorkingSet(working_set)
+ common.WriteWorkingSet(working_set)
- return 0
+ return 0
-if __name__ == '__main__':
- retval = Main(sys.argv)
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main(sys.argv)
+ sys.exit(retval)
diff --git a/binary_search_tool/test/switch_to_good_noinc_prune.py b/binary_search_tool/test/switch_to_good_noinc_prune.py
index 0b91a0d..3bda1d7 100755
--- a/binary_search_tool/test/switch_to_good_noinc_prune.py
+++ b/binary_search_tool/test/switch_to_good_noinc_prune.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -18,7 +18,6 @@
Warning: This switch script assumes the --file_args option
"""
-from __future__ import print_function
import shutil
import sys
@@ -27,19 +26,19 @@
def Main(argv):
- working_set = common.ReadWorkingSet()
- object_index = common.ReadObjectIndex(argv[1])
+ working_set = common.ReadWorkingSet()
+ object_index = common.ReadObjectIndex(argv[1])
- for oi in object_index:
- working_set[int(oi)] = 0
+ for oi in object_index:
+ working_set[int(oi)] = 0
- shutil.copy(argv[1], './noinc_prune_good')
+ shutil.copy(argv[1], "./noinc_prune_good")
- common.WriteWorkingSet(working_set)
+ common.WriteWorkingSet(working_set)
- return 0
+ return 0
-if __name__ == '__main__':
- retval = Main(sys.argv)
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main(sys.argv)
+ sys.exit(retval)
diff --git a/binary_search_tool/test/switch_to_good_set_file.py b/binary_search_tool/test/switch_to_good_set_file.py
index 1cb05e0..b83cbe3 100755
--- a/binary_search_tool/test/switch_to_good_set_file.py
+++ b/binary_search_tool/test/switch_to_good_set_file.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -14,7 +14,6 @@
the set files generated by binary_search_state to do the switching.
"""
-from __future__ import print_function
import os
import sys
@@ -23,22 +22,22 @@
def Main(_):
- working_set = common.ReadWorkingSet()
+ working_set = common.ReadWorkingSet()
- if not os.path.exists(os.environ['BISECT_GOOD_SET']):
- print('Good set file does not exist!')
- return 1
+ if not os.path.exists(os.environ["BISECT_GOOD_SET"]):
+ print("Good set file does not exist!")
+ return 1
- object_index = common.ReadObjectIndex(os.environ['BISECT_GOOD_SET'])
+ object_index = common.ReadObjectIndex(os.environ["BISECT_GOOD_SET"])
- for oi in object_index:
- working_set[int(oi)] = 0
+ for oi in object_index:
+ working_set[int(oi)] = 0
- common.WriteWorkingSet(working_set)
+ common.WriteWorkingSet(working_set)
- return 0
+ return 0
-if __name__ == '__main__':
- retval = Main(sys.argv)
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main(sys.argv)
+ sys.exit(retval)
diff --git a/binary_search_tool/test/test_setup.py b/binary_search_tool/test/test_setup.py
index ecc8eb9..52486a2 100755
--- a/binary_search_tool/test/test_setup.py
+++ b/binary_search_tool/test/test_setup.py
@@ -1,24 +1,23 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Emulate running of test setup script, is_good.py should fail without this."""
-from __future__ import print_function
import sys
def Main():
- # create ./is_setup
- with open('./is_setup', 'w', encoding='utf-8'):
- pass
+ # create ./is_setup
+ with open("./is_setup", "w", encoding="utf-8"):
+ pass
- return 0
+ return 0
-if __name__ == '__main__':
- retval = Main()
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main()
+ sys.exit(retval)
diff --git a/binary_search_tool/test/test_setup_bad.py b/binary_search_tool/test/test_setup_bad.py
index cbca3c2..518a69f 100755
--- a/binary_search_tool/test/test_setup_bad.py
+++ b/binary_search_tool/test/test_setup_bad.py
@@ -1,20 +1,19 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Emulate test setup that fails (i.e. failed flash to device)"""
-from __future__ import print_function
import sys
def Main():
- return 1 ## False, flashing failure
+ return 1 ## False, flashing failure
-if __name__ == '__main__':
- retval = Main()
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main()
+ sys.exit(retval)
diff --git a/build_chromeos.py b/build_chromeos.py
index e275da1..84ee0b8 100755
--- a/build_chromeos.py
+++ b/build_chromeos.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -11,12 +11,13 @@
particular release of ChromeOS.
"""
-from __future__ import print_function
-__author__ = ('asharif@google.com (Ahmad Sharif) '
- 'llozano@google.com (Luis Lozano) '
- 'raymes@google.com (Raymes Khoury) '
- 'shenhan@google.com (Han Shen)')
+__author__ = (
+ "asharif@google.com (Ahmad Sharif) "
+ "llozano@google.com (Luis Lozano) "
+ "raymes@google.com (Raymes Khoury) "
+ "shenhan@google.com (Han Shen)"
+)
import argparse
import os
@@ -28,262 +29,349 @@
def Usage(parser, message):
- print('ERROR: %s' % message)
- parser.print_help()
- sys.exit(0)
+ print("ERROR: %s" % message)
+ parser.print_help()
+ sys.exit(0)
def Main(argv):
- """Build ChromeOS."""
- # Common initializations
- cmd_executer = command_executer.GetCommandExecuter()
+ """Build ChromeOS."""
+ # Common initializations
+ cmd_executer = command_executer.GetCommandExecuter()
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--chromeos_root',
- dest='chromeos_root',
- help='Target directory for ChromeOS installation.')
- parser.add_argument(
- '--clobber_chroot',
- dest='clobber_chroot',
- action='store_true',
- help='Delete the chroot and start fresh',
- default=False)
- parser.add_argument(
- '--clobber_board',
- dest='clobber_board',
- action='store_true',
- help='Delete the board and start fresh',
- default=False)
- parser.add_argument(
- '--rebuild',
- dest='rebuild',
- action='store_true',
- help='Rebuild all board packages except the toolchain.',
- default=False)
- parser.add_argument(
- '--cflags',
- dest='cflags',
- default='',
- help='CFLAGS for the ChromeOS packages')
- parser.add_argument(
- '--cxxflags',
- dest='cxxflags',
- default='',
- help='CXXFLAGS for the ChromeOS packages')
- parser.add_argument(
- '--ldflags',
- dest='ldflags',
- default='',
- help='LDFLAGS for the ChromeOS packages')
- parser.add_argument(
- '--board', dest='board', help='ChromeOS target board, e.g. x86-generic')
- parser.add_argument(
- '--package', dest='package', help='The package needs to be built')
- parser.add_argument(
- '--label',
- dest='label',
- help='Optional label symlink to point to build dir.')
- parser.add_argument(
- '--dev',
- dest='dev',
- default=False,
- action='store_true',
- help=('Make the final image in dev mode (eg writable, '
- 'more space on image). Defaults to False.'))
- parser.add_argument(
- '--debug',
- dest='debug',
- default=False,
- action='store_true',
- help=('Optional. Build chrome browser with "-g -O0". '
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--chromeos_root",
+ dest="chromeos_root",
+ help="Target directory for ChromeOS installation.",
+ )
+ parser.add_argument(
+ "--clobber_chroot",
+ dest="clobber_chroot",
+ action="store_true",
+ help="Delete the chroot and start fresh",
+ default=False,
+ )
+ parser.add_argument(
+ "--clobber_board",
+ dest="clobber_board",
+ action="store_true",
+ help="Delete the board and start fresh",
+ default=False,
+ )
+ parser.add_argument(
+ "--rebuild",
+ dest="rebuild",
+ action="store_true",
+ help="Rebuild all board packages except the toolchain.",
+ default=False,
+ )
+ parser.add_argument(
+ "--cflags",
+ dest="cflags",
+ default="",
+ help="CFLAGS for the ChromeOS packages",
+ )
+ parser.add_argument(
+ "--cxxflags",
+ dest="cxxflags",
+ default="",
+ help="CXXFLAGS for the ChromeOS packages",
+ )
+ parser.add_argument(
+ "--ldflags",
+ dest="ldflags",
+ default="",
+ help="LDFLAGS for the ChromeOS packages",
+ )
+ parser.add_argument(
+ "--board", dest="board", help="ChromeOS target board, e.g. x86-generic"
+ )
+ parser.add_argument(
+ "--package", dest="package", help="The package needs to be built"
+ )
+ parser.add_argument(
+ "--label",
+ dest="label",
+ help="Optional label symlink to point to build dir.",
+ )
+ parser.add_argument(
+ "--dev",
+ dest="dev",
+ default=False,
+ action="store_true",
+ help=(
+ "Make the final image in dev mode (eg writable, "
+ "more space on image). Defaults to False."
+ ),
+ )
+ parser.add_argument(
+ "--debug",
+ dest="debug",
+ default=False,
+ action="store_true",
+ help=(
+ 'Optional. Build chrome browser with "-g -O0". '
"Notice, this also turns on '--dev'. "
- 'Defaults to False.'))
- parser.add_argument(
- '--env', dest='env', default='', help='Env to pass to build_packages.')
- parser.add_argument(
- '--vanilla',
- dest='vanilla',
- default=False,
- action='store_true',
- help='Use default ChromeOS toolchain.')
- parser.add_argument(
- '--vanilla_image',
- dest='vanilla_image',
- default=False,
- action='store_true',
- help=('Use prebuild packages for building the image. '
- 'It also implies the --vanilla option is set.'))
+ "Defaults to False."
+ ),
+ )
+ parser.add_argument(
+ "--env", dest="env", default="", help="Env to pass to build_packages."
+ )
+ parser.add_argument(
+ "--vanilla",
+ dest="vanilla",
+ default=False,
+ action="store_true",
+ help="Use default ChromeOS toolchain.",
+ )
+ parser.add_argument(
+ "--vanilla_image",
+ dest="vanilla_image",
+ default=False,
+ action="store_true",
+ help=(
+ "Use prebuild packages for building the image. "
+ "It also implies the --vanilla option is set."
+ ),
+ )
- options = parser.parse_args(argv[1:])
+ options = parser.parse_args(argv[1:])
- if options.chromeos_root is None:
- Usage(parser, '--chromeos_root must be set')
- options.chromeos_root = os.path.expanduser(options.chromeos_root)
- scripts_dir = os.path.join(options.chromeos_root, 'src', 'scripts')
- if not os.path.isdir(scripts_dir):
- Usage(parser, '--chromeos_root must be set up first. Use setup_chromeos.py')
+ if options.chromeos_root is None:
+ Usage(parser, "--chromeos_root must be set")
+ options.chromeos_root = os.path.expanduser(options.chromeos_root)
+ scripts_dir = os.path.join(options.chromeos_root, "src", "scripts")
+ if not os.path.isdir(scripts_dir):
+ Usage(
+ parser,
+ "--chromeos_root must be set up first. Use setup_chromeos.py",
+ )
- if options.board is None:
- Usage(parser, '--board must be set')
+ if options.board is None:
+ Usage(parser, "--board must be set")
- if options.debug:
- options.dev = True
+ if options.debug:
+ options.dev = True
- build_packages_env = options.env
- if build_packages_env.find('EXTRA_BOARD_FLAGS=') != -1:
- logger.GetLogger().LogFatal(
- ('Passing "EXTRA_BOARD_FLAGS" in "--env" is not supported. '
- 'This flags is used internally by this script. '
- 'Contact the author for more detail.'))
+ build_packages_env = options.env
+ if build_packages_env.find("EXTRA_BOARD_FLAGS=") != -1:
+ logger.GetLogger().LogFatal(
+ (
+ 'Passing "EXTRA_BOARD_FLAGS" in "--env" is not supported. '
+ "This flags is used internally by this script. "
+ "Contact the author for more detail."
+ )
+ )
- if options.rebuild:
- build_packages_env += ' EXTRA_BOARD_FLAGS=-e'
- # EXTRA_BOARD_FLAGS=-e should clean up the object files for the chrome
- # browser but it doesn't. So do it here.
- misc.RemoveChromeBrowserObjectFiles(options.chromeos_root, options.board)
+ if options.rebuild:
+ build_packages_env += " EXTRA_BOARD_FLAGS=-e"
+ # EXTRA_BOARD_FLAGS=-e should clean up the object files for the chrome
+ # browser but it doesn't. So do it here.
+ misc.RemoveChromeBrowserObjectFiles(
+ options.chromeos_root, options.board
+ )
- # Build with afdo_use by default.
- # To change the default use --env="USE=-afdo_use".
- build_packages_env = misc.MergeEnvStringWithDict(
- build_packages_env, {'USE': 'chrome_internal afdo_use -cros-debug'})
+ # Build with afdo_use by default.
+ # To change the default use --env="USE=-afdo_use".
+ build_packages_env = misc.MergeEnvStringWithDict(
+ build_packages_env, {"USE": "chrome_internal afdo_use -cros-debug"}
+ )
- build_packages_command = misc.GetBuildPackagesCommand(
- board=options.board, usepkg=options.vanilla_image, debug=options.debug)
+ build_packages_command = misc.GetBuildPackagesCommand(
+ board=options.board, usepkg=options.vanilla_image, debug=options.debug
+ )
- if options.package:
- build_packages_command += ' {0}'.format(options.package)
+ if options.package:
+ build_packages_command += " {0}".format(options.package)
- build_image_command = misc.GetBuildImageCommand(options.board, options.dev)
+ build_image_command = misc.GetBuildImageCommand(options.board, options.dev)
- if options.vanilla or options.vanilla_image:
- command = misc.GetSetupBoardCommand(
- options.board,
- usepkg=options.vanilla_image,
- force=options.clobber_board)
- command += '; ' + build_packages_env + ' ' + build_packages_command
- command += '&& ' + build_packages_env + ' ' + build_image_command
- ret = cmd_executer.ChrootRunCommand(options.chromeos_root, command)
- return ret
+ if options.vanilla or options.vanilla_image:
+ command = misc.GetSetupBoardCommand(
+ options.board,
+ usepkg=options.vanilla_image,
+ force=options.clobber_board,
+ )
+ command += "; " + build_packages_env + " " + build_packages_command
+ command += "&& " + build_packages_env + " " + build_image_command
+ ret = cmd_executer.ChrootRunCommand(options.chromeos_root, command)
+ return ret
- # Setup board
- if not os.path.isdir(options.chromeos_root + '/chroot/build/' +
- options.board) or options.clobber_board:
- # Run build_tc.py from binary package
+ # Setup board
+ if (
+ not os.path.isdir(
+ options.chromeos_root + "/chroot/build/" + options.board
+ )
+ or options.clobber_board
+ ):
+ # Run build_tc.py from binary package
+ ret = cmd_executer.ChrootRunCommand(
+ options.chromeos_root,
+ misc.GetSetupBoardCommand(
+ options.board, force=options.clobber_board
+ ),
+ )
+ logger.GetLogger().LogFatalIf(ret, "setup_board failed")
+ else:
+ logger.GetLogger().LogOutput(
+ "Did not setup_board " "because it already exists"
+ )
+
+ if options.debug:
+ # Perform 2-step build_packages to build a debug chrome browser.
+
+ # Firstly, build everything that chromeos-chrome depends on normally.
+ if options.rebuild:
+ # Give warning about "--rebuild" and "--debug". Under this combination,
+ # only dependencies of "chromeos-chrome" get rebuilt.
+ logger.GetLogger().LogWarning(
+ '--rebuild" does not correctly re-build every package when '
+ '"--debug" is enabled. '
+ )
+
+ # Replace EXTRA_BOARD_FLAGS=-e with "-e --onlydeps"
+ build_packages_env = build_packages_env.replace(
+ "EXTRA_BOARD_FLAGS=-e", 'EXTRA_BOARD_FLAGS="-e --onlydeps"'
+ )
+ else:
+ build_packages_env += " EXTRA_BOARD_FLAGS=--onlydeps"
+
+ ret = cmd_executer.ChrootRunCommand(
+ options.chromeos_root,
+ 'CFLAGS="$(portageq-%s envvar CFLAGS) %s" '
+ 'CXXFLAGS="$(portageq-%s envvar CXXFLAGS) %s" '
+ 'LDFLAGS="$(portageq-%s envvar LDFLAGS) %s" '
+ "CHROME_ORIGIN=SERVER_SOURCE "
+ "%s "
+ "%s --skip_chroot_upgrade"
+ "chromeos-chrome"
+ % (
+ options.board,
+ options.cflags,
+ options.board,
+ options.cxxflags,
+ options.board,
+ options.ldflags,
+ build_packages_env,
+ build_packages_command,
+ ),
+ )
+
+ logger.GetLogger().LogFatalIf(
+ ret,
+ "build_packages failed while trying to build chromeos-chrome deps.",
+ )
+
+ # Secondly, build chromeos-chrome using debug mode.
+ # Replace '--onlydeps' with '--nodeps'.
+ if options.rebuild:
+ build_packages_env = build_packages_env.replace(
+ 'EXTRA_BOARD_FLAGS="-e --onlydeps"',
+ "EXTRA_BOARD_FLAGS=--nodeps",
+ )
+ else:
+ build_packages_env = build_packages_env.replace(
+ "EXTRA_BOARD_FLAGS=--onlydeps", "EXTRA_BOARD_FLAGS=--nodeps"
+ )
+ ret = cmd_executer.ChrootRunCommand(
+ options.chromeos_root,
+ 'CFLAGS="$(portageq-%s envvar CFLAGS) %s" '
+ 'CXXFLAGS="$(portageq-%s envvar CXXFLAGS) %s" '
+ 'LDFLAGS="$(portageq-%s envvar LDFLAGS) %s" '
+ "CHROME_ORIGIN=SERVER_SOURCE BUILDTYPE=Debug "
+ "%s "
+ "%s --skip_chroot_upgrade"
+ "chromeos-chrome"
+ % (
+ options.board,
+ options.cflags,
+ options.board,
+ options.cxxflags,
+ options.board,
+ options.ldflags,
+ build_packages_env,
+ build_packages_command,
+ ),
+ )
+ logger.GetLogger().LogFatalIf(
+ ret,
+ "build_packages failed while trying to build debug chromeos-chrome.",
+ )
+
+ # Now, we have built chromeos-chrome and all dependencies.
+ # Finally, remove '-e' from EXTRA_BOARD_FLAGS,
+ # otherwise, chromeos-chrome gets rebuilt.
+ build_packages_env = build_packages_env.replace(
+ "EXTRA_BOARD_FLAGS=--nodeps", ""
+ )
+
+ # Up to now, we have a debug built chromos-chrome browser.
+ # Fall through to build the rest of the world.
+
+ # Build packages
ret = cmd_executer.ChrootRunCommand(
options.chromeos_root,
- misc.GetSetupBoardCommand(options.board, force=options.clobber_board))
- logger.GetLogger().LogFatalIf(ret, 'setup_board failed')
- else:
- logger.GetLogger().LogOutput('Did not setup_board '
- 'because it already exists')
-
- if options.debug:
- # Perform 2-step build_packages to build a debug chrome browser.
-
- # Firstly, build everything that chromeos-chrome depends on normally.
- if options.rebuild:
- # Give warning about "--rebuild" and "--debug". Under this combination,
- # only dependencies of "chromeos-chrome" get rebuilt.
- logger.GetLogger().LogWarning(
- '--rebuild" does not correctly re-build every package when '
- '"--debug" is enabled. ')
-
- # Replace EXTRA_BOARD_FLAGS=-e with "-e --onlydeps"
- build_packages_env = build_packages_env.replace(
- 'EXTRA_BOARD_FLAGS=-e', 'EXTRA_BOARD_FLAGS="-e --onlydeps"')
- else:
- build_packages_env += ' EXTRA_BOARD_FLAGS=--onlydeps'
-
- ret = cmd_executer.ChrootRunCommand(
- options.chromeos_root, 'CFLAGS="$(portageq-%s envvar CFLAGS) %s" '
+ 'CFLAGS="$(portageq-%s envvar CFLAGS) %s" '
'CXXFLAGS="$(portageq-%s envvar CXXFLAGS) %s" '
'LDFLAGS="$(portageq-%s envvar LDFLAGS) %s" '
- 'CHROME_ORIGIN=SERVER_SOURCE '
- '%s '
- '%s --skip_chroot_upgrade'
- 'chromeos-chrome' % (options.board, options.cflags, options.board,
- options.cxxflags, options.board, options.ldflags,
- build_packages_env, build_packages_command))
+ "CHROME_ORIGIN=SERVER_SOURCE "
+ "%s "
+ "%s --skip_chroot_upgrade"
+ % (
+ options.board,
+ options.cflags,
+ options.board,
+ options.cxxflags,
+ options.board,
+ options.ldflags,
+ build_packages_env,
+ build_packages_command,
+ ),
+ )
- logger.GetLogger().LogFatalIf(\
- ret, 'build_packages failed while trying to build chromeos-chrome deps.')
-
- # Secondly, build chromeos-chrome using debug mode.
- # Replace '--onlydeps' with '--nodeps'.
- if options.rebuild:
- build_packages_env = build_packages_env.replace(
- 'EXTRA_BOARD_FLAGS="-e --onlydeps"', 'EXTRA_BOARD_FLAGS=--nodeps')
- else:
- build_packages_env = build_packages_env.replace(
- 'EXTRA_BOARD_FLAGS=--onlydeps', 'EXTRA_BOARD_FLAGS=--nodeps')
+ logger.GetLogger().LogFatalIf(ret, "build_packages failed")
+ if options.package:
+ return 0
+ # Build image
ret = cmd_executer.ChrootRunCommand(
- options.chromeos_root, 'CFLAGS="$(portageq-%s envvar CFLAGS) %s" '
- 'CXXFLAGS="$(portageq-%s envvar CXXFLAGS) %s" '
- 'LDFLAGS="$(portageq-%s envvar LDFLAGS) %s" '
- 'CHROME_ORIGIN=SERVER_SOURCE BUILDTYPE=Debug '
- '%s '
- '%s --skip_chroot_upgrade'
- 'chromeos-chrome' % (options.board, options.cflags, options.board,
- options.cxxflags, options.board, options.ldflags,
- build_packages_env, build_packages_command))
- logger.GetLogger().LogFatalIf(
- ret,
- 'build_packages failed while trying to build debug chromeos-chrome.')
+ options.chromeos_root, build_packages_env + " " + build_image_command
+ )
- # Now, we have built chromeos-chrome and all dependencies.
- # Finally, remove '-e' from EXTRA_BOARD_FLAGS,
- # otherwise, chromeos-chrome gets rebuilt.
- build_packages_env = build_packages_env.replace(\
- 'EXTRA_BOARD_FLAGS=--nodeps', '')
+ logger.GetLogger().LogFatalIf(ret, "build_image failed")
- # Up to now, we have a debug built chromos-chrome browser.
- # Fall through to build the rest of the world.
+ flags_file_name = "flags.txt"
+ flags_file_path = "%s/src/build/images/%s/latest/%s" % (
+ options.chromeos_root,
+ options.board,
+ flags_file_name,
+ )
+ with open(flags_file_path, "w", encoding="utf-8") as flags_file:
+ flags_file.write("CFLAGS=%s\n" % options.cflags)
+ flags_file.write("CXXFLAGS=%s\n" % options.cxxflags)
+ flags_file.write("LDFLAGS=%s\n" % options.ldflags)
- # Build packages
- ret = cmd_executer.ChrootRunCommand(
- options.chromeos_root, 'CFLAGS="$(portageq-%s envvar CFLAGS) %s" '
- 'CXXFLAGS="$(portageq-%s envvar CXXFLAGS) %s" '
- 'LDFLAGS="$(portageq-%s envvar LDFLAGS) %s" '
- 'CHROME_ORIGIN=SERVER_SOURCE '
- '%s '
- '%s --skip_chroot_upgrade' %
- (options.board, options.cflags, options.board, options.cxxflags,
- options.board, options.ldflags, build_packages_env,
- build_packages_command))
+ if options.label:
+ image_dir_path = "%s/src/build/images/%s/latest" % (
+ options.chromeos_root,
+ options.board,
+ )
+ real_image_dir_path = os.path.realpath(image_dir_path)
+ command = "ln -sf -T %s %s/%s" % (
+ os.path.basename(real_image_dir_path),
+ os.path.dirname(real_image_dir_path),
+ options.label,
+ )
- logger.GetLogger().LogFatalIf(ret, 'build_packages failed')
- if options.package:
- return 0
- # Build image
- ret = cmd_executer.ChrootRunCommand(
- options.chromeos_root, build_packages_env + ' ' + build_image_command)
+ ret = cmd_executer.RunCommand(command)
+ logger.GetLogger().LogFatalIf(
+ ret, "Failed to apply symlink label %s" % options.label
+ )
- logger.GetLogger().LogFatalIf(ret, 'build_image failed')
-
- flags_file_name = 'flags.txt'
- flags_file_path = ('%s/src/build/images/%s/latest/%s' %
- (options.chromeos_root, options.board, flags_file_name))
- with open(flags_file_path, 'w', encoding='utf-8') as flags_file:
- flags_file.write('CFLAGS=%s\n' % options.cflags)
- flags_file.write('CXXFLAGS=%s\n' % options.cxxflags)
- flags_file.write('LDFLAGS=%s\n' % options.ldflags)
-
- if options.label:
- image_dir_path = ('%s/src/build/images/%s/latest' % (options.chromeos_root,
- options.board))
- real_image_dir_path = os.path.realpath(image_dir_path)
- command = ('ln -sf -T %s %s/%s' % (os.path.basename(real_image_dir_path),
- os.path.dirname(real_image_dir_path),
- options.label))
-
- ret = cmd_executer.RunCommand(command)
- logger.GetLogger().LogFatalIf(
- ret, 'Failed to apply symlink label %s' % options.label)
-
- return ret
+ return ret
-if __name__ == '__main__':
- retval = Main(sys.argv)
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main(sys.argv)
+ sys.exit(retval)
diff --git a/build_tc.py b/build_tc.py
index 9b90f55..08f80e6 100755
--- a/build_tc.py
+++ b/build_tc.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2010 The Chromium OS Authors. All rights reserved.
+# Copyright 2010 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -9,9 +9,8 @@
This script sets up the toolchain if you give it the gcctools directory.
"""
-from __future__ import print_function
-__author__ = 'asharif@google.com (Ahmad Sharif)'
+__author__ = "asharif@google.com (Ahmad Sharif)"
import argparse
import getpass
@@ -19,336 +18,400 @@
import sys
import tempfile
-import tc_enter_chroot
from cros_utils import command_executer
from cros_utils import constants
from cros_utils import misc
+import tc_enter_chroot
class ToolchainPart(object):
- """Class to hold the toolchain pieces."""
+ """Class to hold the toolchain pieces."""
- def __init__(self,
- name,
- source_path,
- chromeos_root,
- board,
- incremental,
- build_env,
- gcc_enable_ccache=False):
- self._name = name
- self._source_path = misc.CanonicalizePath(source_path)
- self._chromeos_root = chromeos_root
- self._board = board
- self._ctarget = misc.GetCtargetFromBoard(self._board, self._chromeos_root)
- self._gcc_libs_dest = misc.GetGccLibsDestForBoard(self._board,
- self._chromeos_root)
- self.tag = '%s-%s' % (name, self._ctarget)
- self._ce = command_executer.GetCommandExecuter()
- self._mask_file = os.path.join(
- self._chromeos_root, 'chroot',
- 'etc/portage/package.mask/cross-%s' % self._ctarget)
- self._new_mask_file = None
+ def __init__(
+ self,
+ name,
+ source_path,
+ chromeos_root,
+ board,
+ incremental,
+ build_env,
+ gcc_enable_ccache=False,
+ ):
+ self._name = name
+ self._source_path = misc.CanonicalizePath(source_path)
+ self._chromeos_root = chromeos_root
+ self._board = board
+ self._ctarget = misc.GetCtargetFromBoard(
+ self._board, self._chromeos_root
+ )
+ self._gcc_libs_dest = misc.GetGccLibsDestForBoard(
+ self._board, self._chromeos_root
+ )
+ self.tag = "%s-%s" % (name, self._ctarget)
+ self._ce = command_executer.GetCommandExecuter()
+ self._mask_file = os.path.join(
+ self._chromeos_root,
+ "chroot",
+ "etc/portage/package.mask/cross-%s" % self._ctarget,
+ )
+ self._new_mask_file = None
- self._chroot_source_path = os.path.join(constants.MOUNTED_TOOLCHAIN_ROOT,
- self._name).lstrip('/')
- self._incremental = incremental
- self._build_env = build_env
- self._gcc_enable_ccache = gcc_enable_ccache
+ self._chroot_source_path = os.path.join(
+ constants.MOUNTED_TOOLCHAIN_ROOT, self._name
+ ).lstrip("/")
+ self._incremental = incremental
+ self._build_env = build_env
+ self._gcc_enable_ccache = gcc_enable_ccache
- def RunSetupBoardIfNecessary(self):
- cross_symlink = os.path.join(self._chromeos_root, 'chroot',
- 'usr/local/bin/emerge-%s' % self._board)
- if not os.path.exists(cross_symlink):
- command = 'setup_board --board=%s' % self._board
- self._ce.ChrootRunCommand(self._chromeos_root, command)
+ def RunSetupBoardIfNecessary(self):
+ cross_symlink = os.path.join(
+ self._chromeos_root,
+ "chroot",
+ "usr/local/bin/emerge-%s" % self._board,
+ )
+ if not os.path.exists(cross_symlink):
+ command = "setup_board --board=%s" % self._board
+ self._ce.ChrootRunCommand(self._chromeos_root, command)
- def Build(self):
- rv = 1
- try:
- self.UninstallTool()
- self.MoveMaskFile()
- self.MountSources(False)
- self.RemoveCompiledFile()
- rv = self.BuildTool()
- finally:
- self.UnMoveMaskFile()
- return rv
+ def Build(self):
+ rv = 1
+ try:
+ self.UninstallTool()
+ self.MoveMaskFile()
+ self.MountSources(False)
+ self.RemoveCompiledFile()
+ rv = self.BuildTool()
+ finally:
+ self.UnMoveMaskFile()
+ return rv
- def RemoveCompiledFile(self):
- compiled_file = os.path.join(self._chromeos_root, 'chroot',
- 'var/tmp/portage/cross-%s' % self._ctarget,
- '%s-9999' % self._name, '.compiled')
- command = 'rm -f %s' % compiled_file
- self._ce.RunCommand(command)
+ def RemoveCompiledFile(self):
+ compiled_file = os.path.join(
+ self._chromeos_root,
+ "chroot",
+ "var/tmp/portage/cross-%s" % self._ctarget,
+ "%s-9999" % self._name,
+ ".compiled",
+ )
+ command = "rm -f %s" % compiled_file
+ self._ce.RunCommand(command)
- def MountSources(self, unmount_source):
- mount_points = []
- mounted_source_path = os.path.join(self._chromeos_root, 'chroot',
- self._chroot_source_path)
- src_mp = tc_enter_chroot.MountPoint(self._source_path, mounted_source_path,
- getpass.getuser(), 'ro')
- mount_points.append(src_mp)
+ def MountSources(self, unmount_source):
+ mount_points = []
+ mounted_source_path = os.path.join(
+ self._chromeos_root, "chroot", self._chroot_source_path
+ )
+ src_mp = tc_enter_chroot.MountPoint(
+ self._source_path, mounted_source_path, getpass.getuser(), "ro"
+ )
+ mount_points.append(src_mp)
- build_suffix = 'build-%s' % self._ctarget
- build_dir = '%s-%s' % (self._source_path, build_suffix)
+ build_suffix = "build-%s" % self._ctarget
+ build_dir = "%s-%s" % (self._source_path, build_suffix)
- if not self._incremental and os.path.exists(build_dir):
- command = 'rm -rf %s/*' % build_dir
- self._ce.RunCommand(command)
+ if not self._incremental and os.path.exists(build_dir):
+ command = "rm -rf %s/*" % build_dir
+ self._ce.RunCommand(command)
- # Create a -build directory for the objects.
- command = 'mkdir -p %s' % build_dir
- self._ce.RunCommand(command)
+ # Create a -build directory for the objects.
+ command = "mkdir -p %s" % build_dir
+ self._ce.RunCommand(command)
- mounted_build_dir = os.path.join(
- self._chromeos_root, 'chroot',
- '%s-%s' % (self._chroot_source_path, build_suffix))
- build_mp = tc_enter_chroot.MountPoint(build_dir, mounted_build_dir,
- getpass.getuser())
- mount_points.append(build_mp)
+ mounted_build_dir = os.path.join(
+ self._chromeos_root,
+ "chroot",
+ "%s-%s" % (self._chroot_source_path, build_suffix),
+ )
+ build_mp = tc_enter_chroot.MountPoint(
+ build_dir, mounted_build_dir, getpass.getuser()
+ )
+ mount_points.append(build_mp)
- if unmount_source:
- unmount_statuses = [mp.UnMount() == 0 for mp in mount_points]
- assert all(unmount_statuses), 'Could not unmount all mount points!'
- else:
- mount_statuses = [mp.DoMount() == 0 for mp in mount_points]
+ if unmount_source:
+ unmount_statuses = [mp.UnMount() == 0 for mp in mount_points]
+ assert all(unmount_statuses), "Could not unmount all mount points!"
+ else:
+ mount_statuses = [mp.DoMount() == 0 for mp in mount_points]
- if not all(mount_statuses):
- mounted = [
- mp for mp, status in zip(mount_points, mount_statuses) if status
- ]
- unmount_statuses = [mp.UnMount() == 0 for mp in mounted]
- assert all(unmount_statuses), 'Could not unmount all mount points!'
+ if not all(mount_statuses):
+ mounted = [
+ mp
+ for mp, status in zip(mount_points, mount_statuses)
+ if status
+ ]
+ unmount_statuses = [mp.UnMount() == 0 for mp in mounted]
+ assert all(
+ unmount_statuses
+ ), "Could not unmount all mount points!"
- def UninstallTool(self):
- command = 'sudo CLEAN_DELAY=0 emerge -C cross-%s/%s' % (self._ctarget,
- self._name)
- self._ce.ChrootRunCommand(self._chromeos_root, command)
+ def UninstallTool(self):
+ command = "sudo CLEAN_DELAY=0 emerge -C cross-%s/%s" % (
+ self._ctarget,
+ self._name,
+ )
+ self._ce.ChrootRunCommand(self._chromeos_root, command)
- def BuildTool(self):
- env = self._build_env
- # FEATURES=buildpkg adds minutes of time so we disable it.
- # TODO(shenhan): keep '-sandbox' for a while for compatibility, then remove
- # it after a while.
- features = ('nostrip userpriv userfetch -usersandbox -sandbox noclean '
- '-buildpkg')
- env['FEATURES'] = features
+ def BuildTool(self):
+ env = self._build_env
+ # FEATURES=buildpkg adds minutes of time so we disable it.
+ # TODO(shenhan): keep '-sandbox' for a while for compatibility, then remove
+ # it after a while.
+ features = (
+ "nostrip userpriv userfetch -usersandbox -sandbox noclean "
+ "-buildpkg"
+ )
+ env["FEATURES"] = features
- if self._incremental:
- env['FEATURES'] += ' keepwork'
+ if self._incremental:
+ env["FEATURES"] += " keepwork"
- if 'USE' in env:
- env['USE'] += ' multislot mounted_%s' % self._name
- else:
- env['USE'] = 'multislot mounted_%s' % self._name
+ if "USE" in env:
+ env["USE"] += " multislot mounted_%s" % self._name
+ else:
+ env["USE"] = "multislot mounted_%s" % self._name
- # Disable ccache in our compilers. cache may be problematic for us.
- # It ignores compiler environments settings and it is not clear if
- # the cache hit algorithm verifies all the compiler binaries or
- # just the driver.
- if self._name == 'gcc' and not self._gcc_enable_ccache:
- env['USE'] += ' -wrapper_ccache'
+ # Disable ccache in our compilers. cache may be problematic for us.
+ # It ignores compiler environments settings and it is not clear if
+ # the cache hit algorithm verifies all the compiler binaries or
+ # just the driver.
+ if self._name == "gcc" and not self._gcc_enable_ccache:
+ env["USE"] += " -wrapper_ccache"
- env['%s_SOURCE_PATH' % self._name.upper()] = (
- os.path.join('/', self._chroot_source_path))
- env['ACCEPT_KEYWORDS'] = '~*'
- env_string = ' '.join(['%s="%s"' % var for var in env.items()])
- command = 'emerge =cross-%s/%s-9999' % (self._ctarget, self._name)
- full_command = 'sudo %s %s' % (env_string, command)
- rv = self._ce.ChrootRunCommand(self._chromeos_root, full_command)
- if rv != 0:
- return rv
- if self._name == 'gcc':
- command = ('sudo cp -r /usr/lib/gcc/%s %s' % (self._ctarget,
- self._gcc_libs_dest))
- rv = self._ce.ChrootRunCommand(self._chromeos_root, command)
- return rv
+ env["%s_SOURCE_PATH" % self._name.upper()] = os.path.join(
+ "/", self._chroot_source_path
+ )
+ env["ACCEPT_KEYWORDS"] = "~*"
+ env_string = " ".join(['%s="%s"' % var for var in env.items()])
+ command = "emerge =cross-%s/%s-9999" % (self._ctarget, self._name)
+ full_command = "sudo %s %s" % (env_string, command)
+ rv = self._ce.ChrootRunCommand(self._chromeos_root, full_command)
+ if rv != 0:
+ return rv
+ if self._name == "gcc":
+ command = "sudo cp -r /usr/lib/gcc/%s %s" % (
+ self._ctarget,
+ self._gcc_libs_dest,
+ )
+ rv = self._ce.ChrootRunCommand(self._chromeos_root, command)
+ return rv
- def MoveMaskFile(self):
- self._new_mask_file = None
- if os.path.isfile(self._mask_file):
- self._new_mask_file = tempfile.mktemp()
- command = 'sudo mv %s %s' % (self._mask_file, self._new_mask_file)
- self._ce.RunCommand(command)
+ def MoveMaskFile(self):
+ self._new_mask_file = None
+ if os.path.isfile(self._mask_file):
+ self._new_mask_file = tempfile.mktemp()
+ command = "sudo mv %s %s" % (self._mask_file, self._new_mask_file)
+ self._ce.RunCommand(command)
- def UnMoveMaskFile(self):
- if self._new_mask_file:
- command = 'sudo mv %s %s' % (self._new_mask_file, self._mask_file)
- self._ce.RunCommand(command)
+ def UnMoveMaskFile(self):
+ if self._new_mask_file:
+ command = "sudo mv %s %s" % (self._new_mask_file, self._mask_file)
+ self._ce.RunCommand(command)
def Main(argv):
- """The main function."""
- # Common initializations
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '-c',
- '--chromeos_root',
- dest='chromeos_root',
- default='../../',
- help=('ChromeOS root checkout directory'
- ' uses ../.. if none given.'))
- parser.add_argument(
- '-g',
- '--gcc_dir',
- dest='gcc_dir',
- help='The directory where gcc resides.')
- parser.add_argument(
- '--binutils_dir',
- dest='binutils_dir',
- help='The directory where binutils resides.')
- parser.add_argument(
- '-x',
- '--gdb_dir',
- dest='gdb_dir',
- help='The directory where gdb resides.')
- parser.add_argument(
- '-b',
- '--board',
- dest='board',
- default='x86-alex',
- help='The target board.')
- parser.add_argument(
- '-n',
- '--noincremental',
- dest='noincremental',
- default=False,
- action='store_true',
- help='Use FEATURES=keepwork to do incremental builds.')
- parser.add_argument(
- '--cflags',
- dest='cflags',
- default='',
- help='Build a compiler with specified CFLAGS')
- parser.add_argument(
- '--cxxflags',
- dest='cxxflags',
- default='',
- help='Build a compiler with specified CXXFLAGS')
- parser.add_argument(
- '--cflags_for_target',
- dest='cflags_for_target',
- default='',
- help='Build the target libraries with specified flags')
- parser.add_argument(
- '--cxxflags_for_target',
- dest='cxxflags_for_target',
- default='',
- help='Build the target libraries with specified flags')
- parser.add_argument(
- '--ldflags',
- dest='ldflags',
- default='',
- help='Build a compiler with specified LDFLAGS')
- parser.add_argument(
- '-d',
- '--debug',
- dest='debug',
- default=False,
- action='store_true',
- help='Build a compiler with -g3 -O0 appended to both'
- ' CFLAGS and CXXFLAGS.')
- parser.add_argument(
- '-m',
- '--mount_only',
- dest='mount_only',
- default=False,
- action='store_true',
- help='Just mount the tool directories.')
- parser.add_argument(
- '-u',
- '--unmount_only',
- dest='unmount_only',
- default=False,
- action='store_true',
- help='Just unmount the tool directories.')
- parser.add_argument(
- '--extra_use_flags',
- dest='extra_use_flags',
- default='',
- help='Extra flag for USE, to be passed to the ebuild. '
- "('multislot' and 'mounted_<tool>' are always passed.)")
- parser.add_argument(
- '--gcc_enable_ccache',
- dest='gcc_enable_ccache',
- default=False,
- action='store_true',
- help='Enable ccache for the gcc invocations')
+ """The main function."""
+ # Common initializations
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "-c",
+ "--chromeos_root",
+ dest="chromeos_root",
+ default="../../",
+ help=("ChromeOS root checkout directory" " uses ../.. if none given."),
+ )
+ parser.add_argument(
+ "-g",
+ "--gcc_dir",
+ dest="gcc_dir",
+ help="The directory where gcc resides.",
+ )
+ parser.add_argument(
+ "--binutils_dir",
+ dest="binutils_dir",
+ help="The directory where binutils resides.",
+ )
+ parser.add_argument(
+ "-x",
+ "--gdb_dir",
+ dest="gdb_dir",
+ help="The directory where gdb resides.",
+ )
+ parser.add_argument(
+ "-b",
+ "--board",
+ dest="board",
+ default="x86-alex",
+ help="The target board.",
+ )
+ parser.add_argument(
+ "-n",
+ "--noincremental",
+ dest="noincremental",
+ default=False,
+ action="store_true",
+ help="Use FEATURES=keepwork to do incremental builds.",
+ )
+ parser.add_argument(
+ "--cflags",
+ dest="cflags",
+ default="",
+ help="Build a compiler with specified CFLAGS",
+ )
+ parser.add_argument(
+ "--cxxflags",
+ dest="cxxflags",
+ default="",
+ help="Build a compiler with specified CXXFLAGS",
+ )
+ parser.add_argument(
+ "--cflags_for_target",
+ dest="cflags_for_target",
+ default="",
+ help="Build the target libraries with specified flags",
+ )
+ parser.add_argument(
+ "--cxxflags_for_target",
+ dest="cxxflags_for_target",
+ default="",
+ help="Build the target libraries with specified flags",
+ )
+ parser.add_argument(
+ "--ldflags",
+ dest="ldflags",
+ default="",
+ help="Build a compiler with specified LDFLAGS",
+ )
+ parser.add_argument(
+ "-d",
+ "--debug",
+ dest="debug",
+ default=False,
+ action="store_true",
+ help="Build a compiler with -g3 -O0 appended to both"
+ " CFLAGS and CXXFLAGS.",
+ )
+ parser.add_argument(
+ "-m",
+ "--mount_only",
+ dest="mount_only",
+ default=False,
+ action="store_true",
+ help="Just mount the tool directories.",
+ )
+ parser.add_argument(
+ "-u",
+ "--unmount_only",
+ dest="unmount_only",
+ default=False,
+ action="store_true",
+ help="Just unmount the tool directories.",
+ )
+ parser.add_argument(
+ "--extra_use_flags",
+ dest="extra_use_flags",
+ default="",
+ help="Extra flag for USE, to be passed to the ebuild. "
+ "('multislot' and 'mounted_<tool>' are always passed.)",
+ )
+ parser.add_argument(
+ "--gcc_enable_ccache",
+ dest="gcc_enable_ccache",
+ default=False,
+ action="store_true",
+ help="Enable ccache for the gcc invocations",
+ )
- options = parser.parse_args(argv)
+ options = parser.parse_args(argv)
- chromeos_root = misc.CanonicalizePath(options.chromeos_root)
- if options.gcc_dir:
- gcc_dir = misc.CanonicalizePath(options.gcc_dir)
- assert gcc_dir and os.path.isdir(gcc_dir), 'gcc_dir does not exist!'
- if options.binutils_dir:
- binutils_dir = misc.CanonicalizePath(options.binutils_dir)
- assert os.path.isdir(binutils_dir), 'binutils_dir does not exist!'
- if options.gdb_dir:
- gdb_dir = misc.CanonicalizePath(options.gdb_dir)
- assert os.path.isdir(gdb_dir), 'gdb_dir does not exist!'
- if options.unmount_only:
- options.mount_only = False
- elif options.mount_only:
- options.unmount_only = False
- build_env = {}
- if options.cflags:
- build_env['CFLAGS'] = '`portageq envvar CFLAGS` ' + options.cflags
- if options.cxxflags:
- build_env['CXXFLAGS'] = '`portageq envvar CXXFLAGS` ' + options.cxxflags
- if options.cflags_for_target:
- build_env['CFLAGS_FOR_TARGET'] = options.cflags_for_target
- if options.cxxflags_for_target:
- build_env['CXXFLAGS_FOR_TARGET'] = options.cxxflags_for_target
- if options.ldflags:
- build_env['LDFLAGS'] = options.ldflags
- if options.debug:
- debug_flags = '-g3 -O0'
- if 'CFLAGS' in build_env:
- build_env['CFLAGS'] += ' %s' % (debug_flags)
- else:
- build_env['CFLAGS'] = debug_flags
- if 'CXXFLAGS' in build_env:
- build_env['CXXFLAGS'] += ' %s' % (debug_flags)
- else:
- build_env['CXXFLAGS'] = debug_flags
- if options.extra_use_flags:
- build_env['USE'] = options.extra_use_flags
-
- # Create toolchain parts
- toolchain_parts = {}
- for board in options.board.split(','):
+ chromeos_root = misc.CanonicalizePath(options.chromeos_root)
if options.gcc_dir:
- tp = ToolchainPart('gcc', gcc_dir, chromeos_root, board,
- not options.noincremental, build_env,
- options.gcc_enable_ccache)
- toolchain_parts[tp.tag] = tp
- tp.RunSetupBoardIfNecessary()
+ gcc_dir = misc.CanonicalizePath(options.gcc_dir)
+ assert gcc_dir and os.path.isdir(gcc_dir), "gcc_dir does not exist!"
if options.binutils_dir:
- tp = ToolchainPart('binutils', binutils_dir, chromeos_root, board,
- not options.noincremental, build_env)
- toolchain_parts[tp.tag] = tp
- tp.RunSetupBoardIfNecessary()
+ binutils_dir = misc.CanonicalizePath(options.binutils_dir)
+ assert os.path.isdir(binutils_dir), "binutils_dir does not exist!"
if options.gdb_dir:
- tp = ToolchainPart('gdb', gdb_dir, chromeos_root, board,
- not options.noincremental, build_env)
- toolchain_parts[tp.tag] = tp
- tp.RunSetupBoardIfNecessary()
+ gdb_dir = misc.CanonicalizePath(options.gdb_dir)
+ assert os.path.isdir(gdb_dir), "gdb_dir does not exist!"
+ if options.unmount_only:
+ options.mount_only = False
+ elif options.mount_only:
+ options.unmount_only = False
+ build_env = {}
+ if options.cflags:
+ build_env["CFLAGS"] = "`portageq envvar CFLAGS` " + options.cflags
+ if options.cxxflags:
+ build_env["CXXFLAGS"] = "`portageq envvar CXXFLAGS` " + options.cxxflags
+ if options.cflags_for_target:
+ build_env["CFLAGS_FOR_TARGET"] = options.cflags_for_target
+ if options.cxxflags_for_target:
+ build_env["CXXFLAGS_FOR_TARGET"] = options.cxxflags_for_target
+ if options.ldflags:
+ build_env["LDFLAGS"] = options.ldflags
+ if options.debug:
+ debug_flags = "-g3 -O0"
+ if "CFLAGS" in build_env:
+ build_env["CFLAGS"] += " %s" % (debug_flags)
+ else:
+ build_env["CFLAGS"] = debug_flags
+ if "CXXFLAGS" in build_env:
+ build_env["CXXFLAGS"] += " %s" % (debug_flags)
+ else:
+ build_env["CXXFLAGS"] = debug_flags
+ if options.extra_use_flags:
+ build_env["USE"] = options.extra_use_flags
- rv = 0
- try:
- for tag in toolchain_parts:
- tp = toolchain_parts[tag]
- if options.mount_only or options.unmount_only:
- tp.MountSources(options.unmount_only)
- else:
- rv = rv + tp.Build()
- finally:
- print('Exiting...')
- return rv
+ # Create toolchain parts
+ toolchain_parts = {}
+ for board in options.board.split(","):
+ if options.gcc_dir:
+ tp = ToolchainPart(
+ "gcc",
+ gcc_dir,
+ chromeos_root,
+ board,
+ not options.noincremental,
+ build_env,
+ options.gcc_enable_ccache,
+ )
+ toolchain_parts[tp.tag] = tp
+ tp.RunSetupBoardIfNecessary()
+ if options.binutils_dir:
+ tp = ToolchainPart(
+ "binutils",
+ binutils_dir,
+ chromeos_root,
+ board,
+ not options.noincremental,
+ build_env,
+ )
+ toolchain_parts[tp.tag] = tp
+ tp.RunSetupBoardIfNecessary()
+ if options.gdb_dir:
+ tp = ToolchainPart(
+ "gdb",
+ gdb_dir,
+ chromeos_root,
+ board,
+ not options.noincremental,
+ build_env,
+ )
+ toolchain_parts[tp.tag] = tp
+ tp.RunSetupBoardIfNecessary()
+
+ rv = 0
+ try:
+ for tag in toolchain_parts:
+ tp = toolchain_parts[tag]
+ if options.mount_only or options.unmount_only:
+ tp.MountSources(options.unmount_only)
+ else:
+ rv = rv + tp.Build()
+ finally:
+ print("Exiting...")
+ return rv
-if __name__ == '__main__':
- retval = Main(sys.argv[1:])
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main(sys.argv[1:])
+ sys.exit(retval)
diff --git a/buildbot_test_llvm.py b/buildbot_test_llvm.py
index 1c7bb19..57f029c 100755
--- a/buildbot_test_llvm.py
+++ b/buildbot_test_llvm.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Copyright 2017 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -16,7 +16,6 @@
# Script to test different toolchains against ChromeOS benchmarks.
-from __future__ import print_function
import argparse
import datetime
@@ -24,19 +23,19 @@
import sys
import time
+from cros_utils import buildbot_utils
from cros_utils import command_executer
from cros_utils import logger
-from cros_utils import buildbot_utils
-CROSTC_ROOT = '/usr/local/google/crostc'
-ROLE_ACCOUNT = 'mobiletc-prebuild'
+CROSTC_ROOT = "/usr/local/google/crostc"
+ROLE_ACCOUNT = "mobiletc-prebuild"
TOOLCHAIN_DIR = os.path.dirname(os.path.realpath(__file__))
-MAIL_PROGRAM = '~/var/bin/mail-detective'
-VALIDATION_RESULT_DIR = os.path.join(CROSTC_ROOT, 'validation_result')
+MAIL_PROGRAM = "~/var/bin/mail-detective"
+VALIDATION_RESULT_DIR = os.path.join(CROSTC_ROOT, "validation_result")
START_DATE = datetime.date(2016, 1, 1)
TEST_PER_DAY = 4
-DATA_DIR = '/google/data/rw/users/mo/mobiletc-prebuild/waterfall-report-data/'
+DATA_DIR = "/google/data/rw/users/mo/mobiletc-prebuild/waterfall-report-data/"
# Information about Rotating Boards
# Board Arch Reference Platform Kernel
@@ -63,137 +62,157 @@
# winky x86_64 rambi baytrail 4.4.*
TEST_BOARD = [
- 'atlas',
- 'cave',
- 'coral',
- 'cyan',
- 'elm',
+ "atlas",
+ "cave",
+ "coral",
+ "cyan",
+ "elm",
# 'eve', tested by amd64-llvm-next-toolchain builder.
- 'gale',
- 'grunt',
- 'fizz-moblab',
+ "gale",
+ "grunt",
+ "fizz-moblab",
# 'kevin', tested by arm64-llvm-next-toolchain builder.
- 'kevin64',
- 'lakitu',
- 'nyan_kitty',
- 'octopus',
- 'sentry',
- 'tidus',
+ "kevin64",
+ "lakitu",
+ "nyan_kitty",
+ "octopus",
+ "sentry",
+ "tidus",
# 'veyron_mighty', tested by arm-llvm-next-toolchain builder.
- 'whirlwind',
- 'winky',
+ "whirlwind",
+ "winky",
]
class ToolchainVerifier(object):
- """Class for the toolchain verifier."""
+ """Class for the toolchain verifier."""
- def __init__(self, board, chromeos_root, weekday, patches, compiler):
- self._board = board
- self._chromeos_root = chromeos_root
- self._base_dir = os.getcwd()
- self._ce = command_executer.GetCommandExecuter()
- self._l = logger.GetLogger()
- self._compiler = compiler
- self._build = '%s-%s-toolchain-tryjob' % (board, compiler)
- self._patches = patches.split(',') if patches else []
- self._patches_string = '_'.join(str(p) for p in self._patches)
+ def __init__(self, board, chromeos_root, weekday, patches, compiler):
+ self._board = board
+ self._chromeos_root = chromeos_root
+ self._base_dir = os.getcwd()
+ self._ce = command_executer.GetCommandExecuter()
+ self._l = logger.GetLogger()
+ self._compiler = compiler
+ self._build = "%s-%s-toolchain-tryjob" % (board, compiler)
+ self._patches = patches.split(",") if patches else []
+ self._patches_string = "_".join(str(p) for p in self._patches)
- if not weekday:
- self._weekday = time.strftime('%a')
- else:
- self._weekday = weekday
- self._reports = os.path.join(VALIDATION_RESULT_DIR, compiler, board)
+ if not weekday:
+ self._weekday = time.strftime("%a")
+ else:
+ self._weekday = weekday
+ self._reports = os.path.join(VALIDATION_RESULT_DIR, compiler, board)
- def DoAll(self):
- """Main function inside ToolchainComparator class.
+ def DoAll(self):
+ """Main function inside ToolchainComparator class.
- Launch trybot, get image names, create crosperf experiment file, run
- crosperf, and copy images into seven-day report directories.
- """
- buildbucket_id, _ = buildbot_utils.GetTrybotImage(
- self._chromeos_root,
- self._build,
- self._patches,
- tryjob_flags=['--hwtest'],
- asynchronous=True)
+ Launch trybot, get image names, create crosperf experiment file, run
+ crosperf, and copy images into seven-day report directories.
+ """
+ buildbucket_id, _ = buildbot_utils.GetTrybotImage(
+ self._chromeos_root,
+ self._build,
+ self._patches,
+ tryjob_flags=["--hwtest"],
+ asynchronous=True,
+ )
- return buildbucket_id
+ return buildbucket_id
def WriteRotatingReportsData(results_dict, date):
- """Write data for waterfall report."""
- fname = '%d-%02d-%02d.builds' % (date.year, date.month, date.day)
- filename = os.path.join(DATA_DIR, 'rotating-builders', fname)
- with open(filename, 'w', encoding='utf-8') as out_file:
- for board in results_dict.keys():
- buildbucket_id = results_dict[board]
- out_file.write('%s,%s\n' % (buildbucket_id, board))
+ """Write data for waterfall report."""
+ fname = "%d-%02d-%02d.builds" % (date.year, date.month, date.day)
+ filename = os.path.join(DATA_DIR, "rotating-builders", fname)
+ with open(filename, "w", encoding="utf-8") as out_file:
+ for board in results_dict.keys():
+ buildbucket_id = results_dict[board]
+ out_file.write("%s,%s\n" % (buildbucket_id, board))
def Main(argv):
- """The main function."""
+ """The main function."""
- # Common initializations
- command_executer.InitCommandExecuter()
- parser = argparse.ArgumentParser()
- parser.add_argument('--chromeos_root',
- dest='chromeos_root',
- help='The chromeos root from which to run tests.')
- parser.add_argument('--weekday',
- default='',
- dest='weekday',
- help='The day of the week for which to run tests.')
- parser.add_argument('--board',
- default='',
- dest='board',
- help='The board to test.')
- parser.add_argument('--patch',
- dest='patches',
- default='',
- help='The patches to use for the testing, '
- "seprate the patch numbers with ',' "
- 'for more than one patches.')
- parser.add_argument(
- '--compiler',
- dest='compiler',
- help='Which compiler (llvm, llvm-next or gcc) to use for '
- 'testing.')
+ # Common initializations
+ command_executer.InitCommandExecuter()
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--chromeos_root",
+ dest="chromeos_root",
+ help="The chromeos root from which to run tests.",
+ )
+ parser.add_argument(
+ "--weekday",
+ default="",
+ dest="weekday",
+ help="The day of the week for which to run tests.",
+ )
+ parser.add_argument(
+ "--board", default="", dest="board", help="The board to test."
+ )
+ parser.add_argument(
+ "--patch",
+ dest="patches",
+ default="",
+ help="The patches to use for the testing, "
+ "seprate the patch numbers with ',' "
+ "for more than one patches.",
+ )
+ parser.add_argument(
+ "--compiler",
+ dest="compiler",
+ help="Which compiler (llvm, llvm-next or gcc) to use for " "testing.",
+ )
- options = parser.parse_args(argv[1:])
- if not options.chromeos_root:
- print('Please specify the ChromeOS root directory.')
- return 1
- if not options.compiler:
- print('Please specify which compiler to test (gcc, llvm, or llvm-next).')
- return 1
+ options = parser.parse_args(argv[1:])
+ if not options.chromeos_root:
+ print("Please specify the ChromeOS root directory.")
+ return 1
+ if not options.compiler:
+ print(
+ "Please specify which compiler to test (gcc, llvm, or llvm-next)."
+ )
+ return 1
- if options.board:
- fv = ToolchainVerifier(options.board, options.chromeos_root,
- options.weekday, options.patches, options.compiler)
- return fv.DoAll()
+ if options.board:
+ fv = ToolchainVerifier(
+ options.board,
+ options.chromeos_root,
+ options.weekday,
+ options.patches,
+ options.compiler,
+ )
+ return fv.DoAll()
- today = datetime.date.today()
- delta = today - START_DATE
- days = delta.days
+ today = datetime.date.today()
+ delta = today - START_DATE
+ days = delta.days
- start_board = (days * TEST_PER_DAY) % len(TEST_BOARD)
- results_dict = dict()
- for i in range(TEST_PER_DAY):
- try:
- board = TEST_BOARD[(start_board + i) % len(TEST_BOARD)]
- fv = ToolchainVerifier(board, options.chromeos_root, options.weekday,
- options.patches, options.compiler)
- buildbucket_id = fv.DoAll()
- if buildbucket_id:
- results_dict[board] = buildbucket_id
- except SystemExit:
- logfile = os.path.join(VALIDATION_RESULT_DIR, options.compiler, board)
- with open(logfile, 'w', encoding='utf-8') as f:
- f.write('Verifier got an exception, please check the log.\n')
- WriteRotatingReportsData(results_dict, today)
+ start_board = (days * TEST_PER_DAY) % len(TEST_BOARD)
+ results_dict = dict()
+ for i in range(TEST_PER_DAY):
+ try:
+ board = TEST_BOARD[(start_board + i) % len(TEST_BOARD)]
+ fv = ToolchainVerifier(
+ board,
+ options.chromeos_root,
+ options.weekday,
+ options.patches,
+ options.compiler,
+ )
+ buildbucket_id = fv.DoAll()
+ if buildbucket_id:
+ results_dict[board] = buildbucket_id
+ except SystemExit:
+ logfile = os.path.join(
+ VALIDATION_RESULT_DIR, options.compiler, board
+ )
+ with open(logfile, "w", encoding="utf-8") as f:
+ f.write("Verifier got an exception, please check the log.\n")
+ WriteRotatingReportsData(results_dict, today)
-if __name__ == '__main__':
- retval = Main(sys.argv)
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main(sys.argv)
+ sys.exit(retval)
diff --git a/buildbot_test_toolchains.py b/buildbot_test_toolchains.py
index 6c3bfef..19c31b5 100755
--- a/buildbot_test_toolchains.py
+++ b/buildbot_test_toolchains.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Copyright 2016 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -16,7 +16,6 @@
# Script to test different toolchains against ChromeOS benchmarks.
-from __future__ import print_function
import argparse
import datetime
@@ -26,142 +25,148 @@
import sys
import time
+from cros_utils import buildbot_utils
from cros_utils import command_executer
from cros_utils import logger
-from cros_utils import buildbot_utils
-# CL that uses LLVM-Next to build the images (includes chrome).
-USE_LLVM_NEXT_PATCH = '513590'
-
-CROSTC_ROOT = '/usr/local/google/crostc'
-NIGHTLY_TESTS_DIR = os.path.join(CROSTC_ROOT, 'nightly-tests')
-ROLE_ACCOUNT = 'mobiletc-prebuild'
+CROSTC_ROOT = "/usr/local/google/crostc"
+NIGHTLY_TESTS_DIR = os.path.join(CROSTC_ROOT, "nightly-tests")
+ROLE_ACCOUNT = "mobiletc-prebuild"
TOOLCHAIN_DIR = os.path.dirname(os.path.realpath(__file__))
-TMP_TOOLCHAIN_TEST = '/tmp/toolchain-tests'
-MAIL_PROGRAM = '~/var/bin/mail-detective'
-PENDING_ARCHIVES_DIR = os.path.join(CROSTC_ROOT, 'pending_archives')
-NIGHTLY_TESTS_RESULTS = os.path.join(CROSTC_ROOT, 'nightly_test_reports')
+TMP_TOOLCHAIN_TEST = "/tmp/toolchain-tests"
+MAIL_PROGRAM = "~/var/bin/mail-detective"
+PENDING_ARCHIVES_DIR = os.path.join(CROSTC_ROOT, "pending_archives")
+NIGHTLY_TESTS_RESULTS = os.path.join(CROSTC_ROOT, "nightly_test_reports")
-IMAGE_DIR = '{board}-{image_type}'
-IMAGE_VERSION_STR = r'{chrome_version}-{tip}\.{branch}\.{branch_branch}'
-IMAGE_FS = IMAGE_DIR + '/' + IMAGE_VERSION_STR
-TRYBOT_IMAGE_FS = IMAGE_FS + '-{build_id}'
+IMAGE_DIR = "{board}-{image_type}"
+IMAGE_VERSION_STR = r"{chrome_version}-{tip}\.{branch}\.{branch_branch}"
+IMAGE_FS = IMAGE_DIR + "/" + IMAGE_VERSION_STR
+TRYBOT_IMAGE_FS = IMAGE_FS + "-{build_id}"
IMAGE_RE_GROUPS = {
- 'board': r'(?P<board>\S+)',
- 'image_type': r'(?P<image_type>\S+)',
- 'chrome_version': r'(?P<chrome_version>R\d+)',
- 'tip': r'(?P<tip>\d+)',
- 'branch': r'(?P<branch>\d+)',
- 'branch_branch': r'(?P<branch_branch>\d+)',
- 'build_id': r'(?P<build_id>b\d+)'
+ "board": r"(?P<board>\S+)",
+ "image_type": r"(?P<image_type>\S+)",
+ "chrome_version": r"(?P<chrome_version>R\d+)",
+ "tip": r"(?P<tip>\d+)",
+ "branch": r"(?P<branch>\d+)",
+ "branch_branch": r"(?P<branch_branch>\d+)",
+ "build_id": r"(?P<build_id>b\d+)",
}
TRYBOT_IMAGE_RE = TRYBOT_IMAGE_FS.format(**IMAGE_RE_GROUPS)
-RECIPE_IMAGE_FS = IMAGE_FS + '-{build_id}-{buildbucket_id}'
+RECIPE_IMAGE_FS = IMAGE_FS + "-{build_id}-{buildbucket_id}"
RECIPE_IMAGE_RE_GROUPS = {
- 'board': r'(?P<board>\S+)',
- 'image_type': r'(?P<image_type>\S+)',
- 'chrome_version': r'(?P<chrome_version>R\d+)',
- 'tip': r'(?P<tip>\d+)',
- 'branch': r'(?P<branch>\d+)',
- 'branch_branch': r'(?P<branch_branch>\d+)',
- 'build_id': r'(?P<build_id>\d+)',
- 'buildbucket_id': r'(?P<buildbucket_id>\d+)'
+ "board": r"(?P<board>\S+)",
+ "image_type": r"(?P<image_type>\S+)",
+ "chrome_version": r"(?P<chrome_version>R\d+)",
+ "tip": r"(?P<tip>\d+)",
+ "branch": r"(?P<branch>\d+)",
+ "branch_branch": r"(?P<branch_branch>\d+)",
+ "build_id": r"(?P<build_id>\d+)",
+ "buildbucket_id": r"(?P<buildbucket_id>\d+)",
}
RECIPE_IMAGE_RE = RECIPE_IMAGE_FS.format(**RECIPE_IMAGE_RE_GROUPS)
-TELEMETRY_AQUARIUM_UNSUPPORTED = ['bob', 'elm', 'veyron_tiger']
+# CL that uses LLVM-Next to build the images (includes chrome).
+USE_LLVM_NEXT_PATCH = "513590"
class ToolchainComparator(object):
- """Class for doing the nightly tests work."""
+ """Class for doing the nightly tests work."""
- def __init__(self,
- board,
- remotes,
- chromeos_root,
- weekday,
- patches,
- recipe=False,
- test=False,
- noschedv2=False):
- self._board = board
- self._remotes = remotes
- self._chromeos_root = chromeos_root
- self._base_dir = os.getcwd()
- self._ce = command_executer.GetCommandExecuter()
- self._l = logger.GetLogger()
- self._build = '%s-release-tryjob' % board
- self._patches = patches.split(',') if patches else []
- self._patches_string = '_'.join(str(p) for p in self._patches)
- self._recipe = recipe
- self._test = test
- self._noschedv2 = noschedv2
+ def __init__(
+ self,
+ board,
+ remotes,
+ chromeos_root,
+ weekday,
+ patches,
+ recipe=False,
+ test=False,
+ noschedv2=False,
+ ):
+ self._board = board
+ self._remotes = remotes
+ self._chromeos_root = chromeos_root
+ self._base_dir = os.getcwd()
+ self._ce = command_executer.GetCommandExecuter()
+ self._l = logger.GetLogger()
+ self._build = "%s-release-tryjob" % board
+ self._patches = patches.split(",") if patches else []
+ self._patches_string = "_".join(str(p) for p in self._patches)
+ self._recipe = recipe
+ self._test = test
+ self._noschedv2 = noschedv2
- if not weekday:
- self._weekday = time.strftime('%a')
- else:
- self._weekday = weekday
- self._date = datetime.date.today().strftime('%Y/%m/%d')
- timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
- self._reports_dir = os.path.join(
- TMP_TOOLCHAIN_TEST if self._test else NIGHTLY_TESTS_RESULTS,
- '%s.%s' % (timestamp, board),
- )
+ if not weekday:
+ self._weekday = time.strftime("%a")
+ else:
+ self._weekday = weekday
+ self._date = datetime.date.today().strftime("%Y/%m/%d")
+ timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
+ self._reports_dir = os.path.join(
+ TMP_TOOLCHAIN_TEST if self._test else NIGHTLY_TESTS_RESULTS,
+ "%s.%s" % (timestamp, board),
+ )
- def _GetVanillaImageName(self, trybot_image):
- """Given a trybot artifact name, get latest vanilla image name.
+ def _GetVanillaImageName(self, trybot_image):
+ """Given a trybot artifact name, get latest vanilla image name.
- Args:
- trybot_image: artifact name such as
- 'daisy-release-tryjob/R40-6394.0.0-b1389'
- for recipe images, name is in this format:
- 'lulu-llvm-next-nightly/R84-13037.0.0-31011-8883172717979984032/'
+ Args:
+ trybot_image: artifact name such as
+ 'daisy-release-tryjob/R40-6394.0.0-b1389'
+ for recipe images, name is in this format:
+ 'lulu-llvm-next-nightly/R84-13037.0.0-31011-8883172717979984032/'
- Returns:
- Latest official image name, e.g. 'daisy-release/R57-9089.0.0'.
- """
- # For board names with underscores, we need to fix the trybot image name
- # to replace the hyphen (for the recipe builder) with the underscore.
- # Currently the only such board we use is 'veyron_tiger'.
- if trybot_image.find('veyron-tiger') != -1:
- trybot_image = trybot_image.replace('veyron-tiger', 'veyron_tiger')
- # We need to filter out -tryjob in the trybot_image.
- if self._recipe:
- trybot = re.sub('-llvm-next-nightly', '-release', trybot_image)
- mo = re.search(RECIPE_IMAGE_RE, trybot)
- else:
- trybot = re.sub('-tryjob', '', trybot_image)
- mo = re.search(TRYBOT_IMAGE_RE, trybot)
- assert mo
- dirname = IMAGE_DIR.replace('\\', '').format(**mo.groupdict())
- return buildbot_utils.GetLatestImage(self._chromeos_root, dirname)
+ Returns:
+ Latest official image name, e.g. 'daisy-release/R57-9089.0.0'.
+ """
+ # For board names with underscores, we need to fix the trybot image name
+ # to replace the hyphen (for the recipe builder) with the underscore.
+ # Currently the only such board we use is 'veyron_tiger'.
+ if trybot_image.find("veyron-tiger") != -1:
+ trybot_image = trybot_image.replace("veyron-tiger", "veyron_tiger")
+ # We need to filter out -tryjob in the trybot_image.
+ if self._recipe:
+ trybot = re.sub("-llvm-next-nightly", "-release", trybot_image)
+ mo = re.search(RECIPE_IMAGE_RE, trybot)
+ else:
+ trybot = re.sub("-tryjob", "", trybot_image)
+ mo = re.search(TRYBOT_IMAGE_RE, trybot)
+ assert mo
+ dirname = IMAGE_DIR.replace("\\", "").format(**mo.groupdict())
+ return buildbot_utils.GetLatestImage(self._chromeos_root, dirname)
- def _TestImages(self, trybot_image, vanilla_image):
- """Create crosperf experiment file.
+ def _TestImages(self, trybot_image, vanilla_image):
+ """Create crosperf experiment file.
- Given the names of the trybot, vanilla and non-AFDO images, create the
- appropriate crosperf experiment file and launch crosperf on it.
- """
- if self._test:
- experiment_file_dir = TMP_TOOLCHAIN_TEST
- else:
- experiment_file_dir = os.path.join(NIGHTLY_TESTS_DIR, self._weekday)
- experiment_file_name = '%s_toolchain_experiment.txt' % self._board
+ Given the names of the trybot, vanilla and non-AFDO images, create the
+ appropriate crosperf experiment file and launch crosperf on it.
+ """
+ if self._test:
+ experiment_file_dir = TMP_TOOLCHAIN_TEST
+ else:
+ experiment_file_dir = os.path.join(NIGHTLY_TESTS_DIR, self._weekday)
+ experiment_file_name = "%s_toolchain_experiment.txt" % self._board
- compiler_string = 'llvm'
- if USE_LLVM_NEXT_PATCH in self._patches_string:
- experiment_file_name = '%s_llvm_next_experiment.txt' % self._board
- compiler_string = 'llvm_next'
+ compiler_string = "llvm"
+ if USE_LLVM_NEXT_PATCH in self._patches_string:
+ experiment_file_name = "%s_llvm_next_experiment.txt" % self._board
+ compiler_string = "llvm_next"
- experiment_file = os.path.join(experiment_file_dir, experiment_file_name)
- experiment_header = """
+ experiment_file = os.path.join(
+ experiment_file_dir, experiment_file_name
+ )
+ experiment_header = """
board: %s
remote: %s
retries: 1
- """ % (self._board, self._remotes)
- experiment_tests = """
+ """ % (
+ self._board,
+ self._remotes,
+ )
+ # TODO(b/244607231): Add graphic benchmarks removed in crrev.com/c/3869851.
+ experiment_tests = """
benchmark: all_toolchain_perf {
suite: telemetry_Crosperf
iterations: 5
@@ -176,206 +181,227 @@
retries: 0
}
"""
- telemetry_aquarium_tests = """
- benchmark: rendering.desktop {
- run_local: False
- suite: telemetry_Crosperf
- test_args: --story-filter=aquarium$
- iterations: 5
- }
- benchmark: rendering.desktop {
- run_local: False
- suite: telemetry_Crosperf
- test_args: --story-filter=aquarium_20k$
- iterations: 3
- }
- """
+ with open(experiment_file, "w", encoding="utf-8") as f:
+ f.write(experiment_header)
+ f.write(experiment_tests)
- with open(experiment_file, 'w', encoding='utf-8') as f:
- f.write(experiment_header)
- f.write(experiment_tests)
-
- if self._board not in TELEMETRY_AQUARIUM_UNSUPPORTED:
- f.write(telemetry_aquarium_tests)
-
- # Now add vanilla to test file.
- official_image = """
+ # Now add vanilla to test file.
+ official_image = """
vanilla_image {
chromeos_root: %s
build: %s
compiler: llvm
}
- """ % (self._chromeos_root, vanilla_image)
- f.write(official_image)
+ """ % (
+ self._chromeos_root,
+ vanilla_image,
+ )
+ f.write(official_image)
- label_string = '%s_trybot_image' % compiler_string
+ label_string = "%s_trybot_image" % compiler_string
- # Reuse autotest files from vanilla image for trybot images
- autotest_files = os.path.join('/tmp', vanilla_image, 'autotest_files')
- experiment_image = """
+ # Reuse autotest files from vanilla image for trybot images
+ autotest_files = os.path.join(
+ "/tmp", vanilla_image, "autotest_files"
+ )
+ experiment_image = """
%s {
chromeos_root: %s
build: %s
autotest_path: %s
compiler: %s
}
- """ % (label_string, self._chromeos_root, trybot_image, autotest_files,
- compiler_string)
- f.write(experiment_image)
+ """ % (
+ label_string,
+ self._chromeos_root,
+ trybot_image,
+ autotest_files,
+ compiler_string,
+ )
+ f.write(experiment_image)
- crosperf = os.path.join(TOOLCHAIN_DIR, 'crosperf', 'crosperf')
- noschedv2_opts = '--noschedv2' if self._noschedv2 else ''
- command = ('{crosperf} --no_email={no_email} --results_dir={r_dir} '
- '--logging_level=verbose --json_report=True {noschedv2_opts} '
- '{exp_file}').format(crosperf=crosperf,
- no_email=not self._test,
- r_dir=self._reports_dir,
- noschedv2_opts=noschedv2_opts,
- exp_file=experiment_file)
+ crosperf = os.path.join(TOOLCHAIN_DIR, "crosperf", "crosperf")
+ noschedv2_opts = "--noschedv2" if self._noschedv2 else ""
+ no_email = not self._test
+ command = (
+ f"{crosperf} --no_email={no_email} "
+ f"--results_dir={self._reports_dir} --logging_level=verbose "
+ f"--json_report=True {noschedv2_opts} {experiment_file}"
+ )
- return self._ce.RunCommand(command)
+ return self._ce.RunCommand(command)
- def _SendEmail(self):
- """Find email message generated by crosperf and send it."""
- filename = os.path.join(self._reports_dir, 'msg_body.html')
- if (os.path.exists(filename)
- and os.path.exists(os.path.expanduser(MAIL_PROGRAM))):
- email_title = 'buildbot llvm test results'
- if USE_LLVM_NEXT_PATCH in self._patches_string:
- email_title = 'buildbot llvm_next test results'
- command = (
- 'cat %s | %s -s "%s, %s %s" -team -html' %
- (filename, MAIL_PROGRAM, email_title, self._board, self._date))
- self._ce.RunCommand(command)
+ def _SendEmail(self):
+ """Find email message generated by crosperf and send it."""
+ filename = os.path.join(self._reports_dir, "msg_body.html")
+ if os.path.exists(filename) and os.path.exists(
+ os.path.expanduser(MAIL_PROGRAM)
+ ):
+ email_title = "buildbot llvm test results"
+ if USE_LLVM_NEXT_PATCH in self._patches_string:
+ email_title = "buildbot llvm_next test results"
+ command = 'cat %s | %s -s "%s, %s %s" -team -html' % (
+ filename,
+ MAIL_PROGRAM,
+ email_title,
+ self._board,
+ self._date,
+ )
+ self._ce.RunCommand(command)
- def _CopyJson(self):
- # Make sure a destination directory exists.
- os.makedirs(PENDING_ARCHIVES_DIR, exist_ok=True)
- # Copy json report to pending archives directory.
- command = 'cp %s/*.json %s/.' % (self._reports_dir, PENDING_ARCHIVES_DIR)
- ret = self._ce.RunCommand(command)
- # Failing to access json report means that crosperf terminated or all tests
- # failed, raise an error.
- if ret != 0:
- raise RuntimeError(
- 'Crosperf failed to run tests, cannot copy json report!')
+ def _CopyJson(self):
+ # Make sure a destination directory exists.
+ os.makedirs(PENDING_ARCHIVES_DIR, exist_ok=True)
+ # Copy json report to pending archives directory.
+ command = "cp %s/*.json %s/." % (
+ self._reports_dir,
+ PENDING_ARCHIVES_DIR,
+ )
+ ret = self._ce.RunCommand(command)
+ # Failing to access json report means that crosperf terminated or all tests
+ # failed, raise an error.
+ if ret != 0:
+ raise RuntimeError(
+ "Crosperf failed to run tests, cannot copy json report!"
+ )
- def DoAll(self):
- """Main function inside ToolchainComparator class.
+ def DoAll(self):
+ """Main function inside ToolchainComparator class.
- Launch trybot, get image names, create crosperf experiment file, run
- crosperf, and copy images into seven-day report directories.
- """
- if self._recipe:
- print('Using recipe buckets to get latest image.')
- # crbug.com/1077313: Some boards are not consistently
- # spelled, having underscores in some places and dashes in others.
- # The image directories consistenly use dashes, so convert underscores
- # to dashes to work around this.
- trybot_image = buildbot_utils.GetLatestRecipeImage(
- self._chromeos_root,
- '%s-llvm-next-nightly' % self._board.replace('_', '-'))
- else:
- # Launch tryjob and wait to get image location.
- buildbucket_id, trybot_image = buildbot_utils.GetTrybotImage(
- self._chromeos_root,
- self._build,
- self._patches,
- tryjob_flags=['--notests'],
- build_toolchain=True)
- print('trybot_url: \
- http://cros-goldeneye/chromeos/healthmonitoring/buildDetails?buildbucketId=%s'
- % buildbucket_id)
+ Launch trybot, get image names, create crosperf experiment file, run
+ crosperf, and copy images into seven-day report directories.
+ """
+ if self._recipe:
+ print("Using recipe buckets to get latest image.")
+ # crbug.com/1077313: Some boards are not consistently
+ # spelled, having underscores in some places and dashes in others.
+            # The image directories consistently use dashes, so convert underscores
+ # to dashes to work around this.
+ trybot_image = buildbot_utils.GetLatestRecipeImage(
+ self._chromeos_root,
+ "%s-llvm-next-nightly" % self._board.replace("_", "-"),
+ )
+ else:
+ # Launch tryjob and wait to get image location.
+ buildbucket_id, trybot_image = buildbot_utils.GetTrybotImage(
+ self._chromeos_root,
+ self._build,
+ self._patches,
+ tryjob_flags=["--notests"],
+ build_toolchain=True,
+ )
+ print(
+ "trybot_url: \
+ http://cros-goldeneye/chromeos/healthmonitoring/buildDetails?buildbucketId=%s"
+ % buildbucket_id
+ )
- if not trybot_image:
- self._l.LogError('Unable to find trybot_image!')
- return 2
+ if not trybot_image:
+ self._l.LogError("Unable to find trybot_image!")
+ return 2
- vanilla_image = self._GetVanillaImageName(trybot_image)
+ vanilla_image = self._GetVanillaImageName(trybot_image)
- print('trybot_image: %s' % trybot_image)
- print('vanilla_image: %s' % vanilla_image)
+ print("trybot_image: %s" % trybot_image)
+ print("vanilla_image: %s" % vanilla_image)
- ret = self._TestImages(trybot_image, vanilla_image)
- # Always try to send report email as crosperf will generate report when
- # tests partially succeeded.
- if not self._test:
- self._SendEmail()
- self._CopyJson()
- # Non-zero ret here means crosperf tests partially failed, raise error here
- # so that toolchain summary report can catch it.
- if ret != 0:
- raise RuntimeError('Crosperf tests partially failed!')
+ ret = self._TestImages(trybot_image, vanilla_image)
+ # Always try to send report email as crosperf will generate report when
+ # tests partially succeeded.
+ if not self._test:
+ self._SendEmail()
+ self._CopyJson()
+ # Non-zero ret here means crosperf tests partially failed, raise error here
+ # so that toolchain summary report can catch it.
+ if ret != 0:
+ raise RuntimeError("Crosperf tests partially failed!")
- return 0
+ return 0
def Main(argv):
- """The main function."""
+ """The main function."""
- # Common initializations
- command_executer.InitCommandExecuter()
- parser = argparse.ArgumentParser()
- parser.add_argument('--remote',
- dest='remote',
- help='Remote machines to run tests on.')
- parser.add_argument('--board',
- dest='board',
- default='x86-zgb',
- help='The target board.')
- parser.add_argument('--chromeos_root',
- dest='chromeos_root',
- help='The chromeos root from which to run tests.')
- parser.add_argument('--weekday',
- default='',
- dest='weekday',
- help='The day of the week for which to run tests.')
- parser.add_argument('--patch',
- dest='patches',
- help='The patches to use for the testing, '
- "seprate the patch numbers with ',' "
- 'for more than one patches.')
- parser.add_argument('--noschedv2',
- dest='noschedv2',
- action='store_true',
- default=False,
- help='Pass --noschedv2 to crosperf.')
- parser.add_argument('--recipe',
- dest='recipe',
- default=True,
- help='Use images generated from recipe rather than'
- 'launching tryjob to get images.')
- parser.add_argument('--test',
- dest='test',
- default=False,
- help='Test this script on local desktop, '
- 'disabling mobiletc checking and email sending.'
- 'Artifacts stored in /tmp/toolchain-tests')
+ # Common initializations
+ command_executer.InitCommandExecuter()
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--remote", dest="remote", help="Remote machines to run tests on."
+ )
+ parser.add_argument(
+ "--board", dest="board", default="x86-zgb", help="The target board."
+ )
+ parser.add_argument(
+ "--chromeos_root",
+ dest="chromeos_root",
+ help="The chromeos root from which to run tests.",
+ )
+ parser.add_argument(
+ "--weekday",
+ default="",
+ dest="weekday",
+ help="The day of the week for which to run tests.",
+ )
+ parser.add_argument(
+ "--patch",
+ dest="patches",
+ help="The patches to use for the testing, "
+        "separate the patch numbers with ',' "
+        "for more than one patch.",
+ )
+ parser.add_argument(
+ "--noschedv2",
+ dest="noschedv2",
+ action="store_true",
+ default=False,
+ help="Pass --noschedv2 to crosperf.",
+ )
+ parser.add_argument(
+ "--recipe",
+ dest="recipe",
+ default=True,
+        help="Use images generated from recipe rather than "
+ "launching tryjob to get images.",
+ )
+ parser.add_argument(
+ "--test",
+ dest="test",
+ default=False,
+ help="Test this script on local desktop, "
+        "disabling mobiletc checking and email sending. "
+ "Artifacts stored in /tmp/toolchain-tests",
+ )
- options = parser.parse_args(argv[1:])
- if not options.board:
- print('Please give a board.')
- return 1
- if not options.remote:
- print('Please give at least one remote machine.')
- return 1
- if not options.chromeos_root:
- print('Please specify the ChromeOS root directory.')
- return 1
- if options.test:
- print('Cleaning local test directory for this script.')
- if os.path.exists(TMP_TOOLCHAIN_TEST):
- shutil.rmtree(TMP_TOOLCHAIN_TEST)
- os.mkdir(TMP_TOOLCHAIN_TEST)
+ options = parser.parse_args(argv[1:])
+ if not options.board:
+ print("Please give a board.")
+ return 1
+ if not options.remote:
+ print("Please give at least one remote machine.")
+ return 1
+ if not options.chromeos_root:
+ print("Please specify the ChromeOS root directory.")
+ return 1
+ if options.test:
+ print("Cleaning local test directory for this script.")
+ if os.path.exists(TMP_TOOLCHAIN_TEST):
+ shutil.rmtree(TMP_TOOLCHAIN_TEST)
+ os.mkdir(TMP_TOOLCHAIN_TEST)
- fc = ToolchainComparator(options.board, options.remote,
- options.chromeos_root, options.weekday,
- options.patches, options.recipe, options.test,
- options.noschedv2)
- return fc.DoAll()
+ fc = ToolchainComparator(
+ options.board,
+ options.remote,
+ options.chromeos_root,
+ options.weekday,
+ options.patches,
+ options.recipe,
+ options.test,
+ options.noschedv2,
+ )
+ return fc.DoAll()
-if __name__ == '__main__':
- retval = Main(sys.argv)
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main(sys.argv)
+ sys.exit(retval)
diff --git a/chromiumos_image_diff.py b/chromiumos_image_diff.py
index 3d54100..ed840cb 100755
--- a/chromiumos_image_diff.py
+++ b/chromiumos_image_diff.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -20,9 +20,8 @@
And this script should be executed outside chroot.
"""
-from __future__ import print_function
-__author__ = 'shenhan@google.com (Han Shen)'
+__author__ = "shenhan@google.com (Han Shen)"
import argparse
import os
@@ -30,338 +29,390 @@
import sys
import tempfile
-import image_chromeos
from cros_utils import command_executer
from cros_utils import logger
from cros_utils import misc
+import image_chromeos
class CrosImage(object):
- """A cros image object."""
+ """A cros image object."""
- def __init__(self, image, chromeos_root, no_unmount):
- self.image = image
- self.chromeos_root = chromeos_root
- self.mounted = False
- self._ce = command_executer.GetCommandExecuter()
- self.logger = logger.GetLogger()
- self.elf_files = []
- self.no_unmount = no_unmount
- self.unmount_script = ''
- self.stateful = ''
- self.rootfs = ''
+ def __init__(self, image, chromeos_root, no_unmount):
+ self.image = image
+ self.chromeos_root = chromeos_root
+ self.mounted = False
+ self._ce = command_executer.GetCommandExecuter()
+ self.logger = logger.GetLogger()
+ self.elf_files = []
+ self.no_unmount = no_unmount
+ self.unmount_script = ""
+ self.stateful = ""
+ self.rootfs = ""
- def MountImage(self, mount_basename):
- """Mount/unpack the image."""
+ def MountImage(self, mount_basename):
+ """Mount/unpack the image."""
- if mount_basename:
- self.rootfs = '/tmp/{0}.rootfs'.format(mount_basename)
- self.stateful = '/tmp/{0}.stateful'.format(mount_basename)
- self.unmount_script = '/tmp/{0}.unmount.sh'.format(mount_basename)
- else:
- self.rootfs = tempfile.mkdtemp(
- suffix='.rootfs', prefix='chromiumos_image_diff')
- ## rootfs is like /tmp/tmpxyz012.rootfs.
- match = re.match(r'^(.*)\.rootfs$', self.rootfs)
- basename = match.group(1)
- self.stateful = basename + '.stateful'
- os.mkdir(self.stateful)
- self.unmount_script = '{0}.unmount.sh'.format(basename)
+ if mount_basename:
+ self.rootfs = "/tmp/{0}.rootfs".format(mount_basename)
+ self.stateful = "/tmp/{0}.stateful".format(mount_basename)
+ self.unmount_script = "/tmp/{0}.unmount.sh".format(mount_basename)
+ else:
+ self.rootfs = tempfile.mkdtemp(
+ suffix=".rootfs", prefix="chromiumos_image_diff"
+ )
+ ## rootfs is like /tmp/tmpxyz012.rootfs.
+ match = re.match(r"^(.*)\.rootfs$", self.rootfs)
+ basename = match.group(1)
+ self.stateful = basename + ".stateful"
+ os.mkdir(self.stateful)
+ self.unmount_script = "{0}.unmount.sh".format(basename)
- self.logger.LogOutput('Mounting "{0}" onto "{1}" and "{2}"'.format(
- self.image, self.rootfs, self.stateful))
- ## First of all creating an unmount image
- self.CreateUnmountScript()
- command = image_chromeos.GetImageMountCommand(self.image, self.rootfs,
- self.stateful)
- rv = self._ce.RunCommand(command, print_to_console=True)
- self.mounted = (rv == 0)
- if not self.mounted:
- self.logger.LogError('Failed to mount "{0}" onto "{1}" and "{2}".'.format(
- self.image, self.rootfs, self.stateful))
- return self.mounted
+ self.logger.LogOutput(
+ 'Mounting "{0}" onto "{1}" and "{2}"'.format(
+ self.image, self.rootfs, self.stateful
+ )
+ )
+ ## First of all creating an unmount image
+ self.CreateUnmountScript()
+ command = image_chromeos.GetImageMountCommand(
+ self.image, self.rootfs, self.stateful
+ )
+ rv = self._ce.RunCommand(command, print_to_console=True)
+ self.mounted = rv == 0
+ if not self.mounted:
+ self.logger.LogError(
+ 'Failed to mount "{0}" onto "{1}" and "{2}".'.format(
+ self.image, self.rootfs, self.stateful
+ )
+ )
+ return self.mounted
- def CreateUnmountScript(self):
- command = ('sudo umount {r}/usr/local {r}/usr/share/oem '
- '{r}/var {r}/mnt/stateful_partition {r}; sudo umount {s} ; '
- 'rmdir {r} ; rmdir {s}\n').format(
- r=self.rootfs, s=self.stateful)
- f = open(self.unmount_script, 'w', encoding='utf-8')
- f.write(command)
- f.close()
- self._ce.RunCommand(
- 'chmod +x {}'.format(self.unmount_script), print_to_console=False)
- self.logger.LogOutput('Created an unmount script - "{0}"'.format(
- self.unmount_script))
+ def CreateUnmountScript(self):
+ command = (
+ "sudo umount {r}/usr/local {r}/usr/share/oem "
+ "{r}/var {r}/mnt/stateful_partition {r}; sudo umount {s} ; "
+ "rmdir {r} ; rmdir {s}\n"
+ ).format(r=self.rootfs, s=self.stateful)
+ f = open(self.unmount_script, "w", encoding="utf-8")
+ f.write(command)
+ f.close()
+ self._ce.RunCommand(
+ "chmod +x {}".format(self.unmount_script), print_to_console=False
+ )
+ self.logger.LogOutput(
+ 'Created an unmount script - "{0}"'.format(self.unmount_script)
+ )
- def UnmountImage(self):
- """Unmount the image and delete mount point."""
+ def UnmountImage(self):
+ """Unmount the image and delete mount point."""
- self.logger.LogOutput('Unmounting image "{0}" from "{1}" and "{2}"'.format(
- self.image, self.rootfs, self.stateful))
- if self.mounted:
- command = 'bash "{0}"'.format(self.unmount_script)
- if self.no_unmount:
- self.logger.LogOutput(('Please unmount manually - \n'
- '\t bash "{0}"'.format(self.unmount_script)))
- else:
- if self._ce.RunCommand(command, print_to_console=True) == 0:
- self._ce.RunCommand('rm {0}'.format(self.unmount_script))
- self.mounted = False
- self.rootfs = None
- self.stateful = None
- self.unmount_script = None
+ self.logger.LogOutput(
+ 'Unmounting image "{0}" from "{1}" and "{2}"'.format(
+ self.image, self.rootfs, self.stateful
+ )
+ )
+ if self.mounted:
+ command = 'bash "{0}"'.format(self.unmount_script)
+ if self.no_unmount:
+ self.logger.LogOutput(
+ (
+ "Please unmount manually - \n"
+ '\t bash "{0}"'.format(self.unmount_script)
+ )
+ )
+ else:
+ if self._ce.RunCommand(command, print_to_console=True) == 0:
+ self._ce.RunCommand("rm {0}".format(self.unmount_script))
+ self.mounted = False
+ self.rootfs = None
+ self.stateful = None
+ self.unmount_script = None
- return not self.mounted
+ return not self.mounted
- def FindElfFiles(self):
- """Find all elf files for the image.
+ def FindElfFiles(self):
+ """Find all elf files for the image.
- Returns:
- Always true
- """
+ Returns:
+ Always true
+ """
- self.logger.LogOutput('Finding all elf files in "{0}" ...'.format(
- self.rootfs))
- # Note '\;' must be prefixed by 'r'.
- command = ('find "{0}" -type f -exec '
- 'bash -c \'file -b "{{}}" | grep -q "ELF"\''
- r' \; '
- r'-exec echo "{{}}" \;').format(self.rootfs)
- self.logger.LogCmd(command)
- _, out, _ = self._ce.RunCommandWOutput(command, print_to_console=False)
- self.elf_files = out.splitlines()
- self.logger.LogOutput('Total {0} elf files found.'.format(
- len(self.elf_files)))
- return True
+ self.logger.LogOutput(
+ 'Finding all elf files in "{0}" ...'.format(self.rootfs)
+ )
+ # Note '\;' must be prefixed by 'r'.
+ command = (
+ 'find "{0}" -type f -exec '
+ 'bash -c \'file -b "{{}}" | grep -q "ELF"\''
+ r" \; "
+ r'-exec echo "{{}}" \;'
+ ).format(self.rootfs)
+ self.logger.LogCmd(command)
+ _, out, _ = self._ce.RunCommandWOutput(command, print_to_console=False)
+ self.elf_files = out.splitlines()
+ self.logger.LogOutput(
+ "Total {0} elf files found.".format(len(self.elf_files))
+ )
+ return True
class ImageComparator(object):
+    """A class that wraps comparison actions."""
+ """A class that wraps comparsion actions."""
- def __init__(self, images, diff_file):
- self.images = images
- self.logger = logger.GetLogger()
- self.diff_file = diff_file
- self.tempf1 = None
- self.tempf2 = None
+ def __init__(self, images, diff_file):
+ self.images = images
+ self.logger = logger.GetLogger()
+ self.diff_file = diff_file
+ self.tempf1 = None
+ self.tempf2 = None
- def Cleanup(self):
- if self.tempf1 and self.tempf2:
- command_executer.GetCommandExecuter().RunCommand('rm {0} {1}'.format(
- self.tempf1, self.tempf2))
- logger.GetLogger('Removed "{0}" and "{1}".'.format(
- self.tempf1, self.tempf2))
+ def Cleanup(self):
+ if self.tempf1 and self.tempf2:
+ command_executer.GetCommandExecuter().RunCommand(
+ "rm {0} {1}".format(self.tempf1, self.tempf2)
+ )
+ logger.GetLogger(
+ 'Removed "{0}" and "{1}".'.format(self.tempf1, self.tempf2)
+ )
- def CheckElfFileSetEquality(self):
- """Checking whether images have exactly number of elf files."""
+ def CheckElfFileSetEquality(self):
+        """Checking whether images have exactly the same set of elf files."""
- self.logger.LogOutput('Checking elf file equality ...')
- i1 = self.images[0]
- i2 = self.images[1]
- t1 = i1.rootfs + '/'
- elfset1 = {e.replace(t1, '') for e in i1.elf_files}
- t2 = i2.rootfs + '/'
- elfset2 = {e.replace(t2, '') for e in i2.elf_files}
- dif1 = elfset1.difference(elfset2)
- msg = None
- if dif1:
- msg = 'The following files are not in "{image}" - "{rootfs}":\n'.format(
- image=i2.image, rootfs=i2.rootfs)
- for d in dif1:
- msg += '\t' + d + '\n'
- dif2 = elfset2.difference(elfset1)
- if dif2:
- msg = 'The following files are not in "{image}" - "{rootfs}":\n'.format(
- image=i1.image, rootfs=i1.rootfs)
- for d in dif2:
- msg += '\t' + d + '\n'
- if msg:
- self.logger.LogError(msg)
- return False
- return True
+ self.logger.LogOutput("Checking elf file equality ...")
+ i1 = self.images[0]
+ i2 = self.images[1]
+ t1 = i1.rootfs + "/"
+ elfset1 = {e.replace(t1, "") for e in i1.elf_files}
+ t2 = i2.rootfs + "/"
+ elfset2 = {e.replace(t2, "") for e in i2.elf_files}
+ dif1 = elfset1.difference(elfset2)
+ msg = None
+ if dif1:
+ msg = 'The following files are not in "{image}" - "{rootfs}":\n'.format(
+ image=i2.image, rootfs=i2.rootfs
+ )
+ for d in dif1:
+ msg += "\t" + d + "\n"
+ dif2 = elfset2.difference(elfset1)
+ if dif2:
+ msg = 'The following files are not in "{image}" - "{rootfs}":\n'.format(
+ image=i1.image, rootfs=i1.rootfs
+ )
+ for d in dif2:
+ msg += "\t" + d + "\n"
+ if msg:
+ self.logger.LogError(msg)
+ return False
+ return True
- def CompareImages(self):
- """Do the comparsion work."""
+ def CompareImages(self):
+        """Do the comparison work."""
- if not self.CheckElfFileSetEquality():
- return False
+ if not self.CheckElfFileSetEquality():
+ return False
- mismatch_list = []
- match_count = 0
- i1 = self.images[0]
- i2 = self.images[1]
- self.logger.LogOutput('Start comparing {0} elf file by file ...'.format(
- len(i1.elf_files)))
- ## Note - i1.elf_files and i2.elf_files have exactly the same entries here.
+ mismatch_list = []
+ match_count = 0
+ i1 = self.images[0]
+ i2 = self.images[1]
+ self.logger.LogOutput(
+ "Start comparing {0} elf file by file ...".format(len(i1.elf_files))
+ )
+ ## Note - i1.elf_files and i2.elf_files have exactly the same entries here.
- ## Create 2 temp files to be used for all disassembed files.
- handle, self.tempf1 = tempfile.mkstemp()
- os.close(handle) # We do not need the handle
- handle, self.tempf2 = tempfile.mkstemp()
- os.close(handle)
+        ## Create 2 temp files to be used for all disassembled files.
+ handle, self.tempf1 = tempfile.mkstemp()
+ os.close(handle) # We do not need the handle
+ handle, self.tempf2 = tempfile.mkstemp()
+ os.close(handle)
- cmde = command_executer.GetCommandExecuter()
- for elf1 in i1.elf_files:
- tmp_rootfs = i1.rootfs + '/'
- f1 = elf1.replace(tmp_rootfs, '')
- full_path1 = elf1
- full_path2 = elf1.replace(i1.rootfs, i2.rootfs)
+ cmde = command_executer.GetCommandExecuter()
+ for elf1 in i1.elf_files:
+ tmp_rootfs = i1.rootfs + "/"
+ f1 = elf1.replace(tmp_rootfs, "")
+ full_path1 = elf1
+ full_path2 = elf1.replace(i1.rootfs, i2.rootfs)
- if full_path1 == full_path2:
- self.logger.LogError(
- "Error: We're comparing the SAME file - {0}".format(f1))
- continue
+ if full_path1 == full_path2:
+ self.logger.LogError(
+ "Error: We're comparing the SAME file - {0}".format(f1)
+ )
+ continue
- command = (
- 'objdump -d "{f1}" > {tempf1} ; '
- 'objdump -d "{f2}" > {tempf2} ; '
- # Remove path string inside the dissemble
- "sed -i 's!{rootfs1}!!g' {tempf1} ; "
- "sed -i 's!{rootfs2}!!g' {tempf2} ; "
- 'diff {tempf1} {tempf2} 1>/dev/null 2>&1').format(
- f1=full_path1,
- f2=full_path2,
- rootfs1=i1.rootfs,
- rootfs2=i2.rootfs,
- tempf1=self.tempf1,
- tempf2=self.tempf2)
- ret = cmde.RunCommand(command, print_to_console=False)
- if ret != 0:
- self.logger.LogOutput('*** Not match - "{0}" "{1}"'.format(
- full_path1, full_path2))
- mismatch_list.append(f1)
- if self.diff_file:
- command = ('echo "Diffs of disassemble of \"{f1}\" and \"{f2}\"" '
- '>> {diff_file} ; diff {tempf1} {tempf2} '
- '>> {diff_file}').format(
- f1=full_path1,
- f2=full_path2,
- diff_file=self.diff_file,
- tempf1=self.tempf1,
- tempf2=self.tempf2)
- cmde.RunCommand(command, print_to_console=False)
- else:
- match_count += 1
- ## End of comparing every elf files.
+ command = (
+ 'objdump -d "{f1}" > {tempf1} ; '
+ 'objdump -d "{f2}" > {tempf2} ; '
+                # Remove path string inside the disassembly
+ "sed -i 's!{rootfs1}!!g' {tempf1} ; "
+ "sed -i 's!{rootfs2}!!g' {tempf2} ; "
+ "diff {tempf1} {tempf2} 1>/dev/null 2>&1"
+ ).format(
+ f1=full_path1,
+ f2=full_path2,
+ rootfs1=i1.rootfs,
+ rootfs2=i2.rootfs,
+ tempf1=self.tempf1,
+ tempf2=self.tempf2,
+ )
+ ret = cmde.RunCommand(command, print_to_console=False)
+ if ret != 0:
+ self.logger.LogOutput(
+ '*** Not match - "{0}" "{1}"'.format(full_path1, full_path2)
+ )
+ mismatch_list.append(f1)
+ if self.diff_file:
+ command = (
+ 'echo "Diffs of disassemble of "{f1}" and "{f2}"" '
+ ">> {diff_file} ; diff {tempf1} {tempf2} "
+ ">> {diff_file}"
+ ).format(
+ f1=full_path1,
+ f2=full_path2,
+ diff_file=self.diff_file,
+ tempf1=self.tempf1,
+ tempf2=self.tempf2,
+ )
+ cmde.RunCommand(command, print_to_console=False)
+ else:
+ match_count += 1
+ ## End of comparing every elf files.
- if not mismatch_list:
- self.logger.LogOutput(
- '** COOL, ALL {0} BINARIES MATCHED!! **'.format(match_count))
- return True
+ if not mismatch_list:
+ self.logger.LogOutput(
+ "** COOL, ALL {0} BINARIES MATCHED!! **".format(match_count)
+ )
+ return True
- mismatch_str = 'Found {0} mismatch:\n'.format(len(mismatch_list))
- for b in mismatch_list:
- mismatch_str += '\t' + b + '\n'
+ mismatch_str = "Found {0} mismatch:\n".format(len(mismatch_list))
+ for b in mismatch_list:
+ mismatch_str += "\t" + b + "\n"
- self.logger.LogOutput(mismatch_str)
- return False
+ self.logger.LogOutput(mismatch_str)
+ return False
def Main(argv):
- """The main function."""
+ """The main function."""
- command_executer.InitCommandExecuter()
- images = []
+ command_executer.InitCommandExecuter()
+ images = []
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--no_unmount',
- action='store_true',
- dest='no_unmount',
- default=False,
- help='Do not unmount after finish, this is useful for debugging.')
- parser.add_argument(
- '--chromeos_root',
- dest='chromeos_root',
- default=None,
- action='store',
- help=('[Optional] Specify a chromeos tree instead of '
- 'deducing it from image path so that we can compare '
- '2 images that are downloaded.'))
- parser.add_argument(
- '--mount_basename',
- dest='mount_basename',
- default=None,
- action='store',
- help=('Specify a meaningful name for the mount point. With this being '
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--no_unmount",
+ action="store_true",
+ dest="no_unmount",
+ default=False,
+ help="Do not unmount after finish, this is useful for debugging.",
+ )
+ parser.add_argument(
+ "--chromeos_root",
+ dest="chromeos_root",
+ default=None,
+ action="store",
+ help=(
+ "[Optional] Specify a chromeos tree instead of "
+ "deducing it from image path so that we can compare "
+ "2 images that are downloaded."
+ ),
+ )
+ parser.add_argument(
+ "--mount_basename",
+ dest="mount_basename",
+ default=None,
+ action="store",
+ help=(
+ "Specify a meaningful name for the mount point. With this being "
'set, the mount points would be "/tmp/mount_basename.x.rootfs" '
- ' and "/tmp/mount_basename.x.stateful". (x is 1 or 2).'))
- parser.add_argument(
- '--diff_file',
- dest='diff_file',
- default=None,
- help='Dumping all the diffs (if any) to the diff file')
- parser.add_argument(
- '--image1',
- dest='image1',
- default=None,
- required=True,
- help=('Image 1 file name.'))
- parser.add_argument(
- '--image2',
- dest='image2',
- default=None,
- required=True,
- help=('Image 2 file name.'))
- options = parser.parse_args(argv[1:])
+ ' and "/tmp/mount_basename.x.stateful". (x is 1 or 2).'
+ ),
+ )
+ parser.add_argument(
+ "--diff_file",
+ dest="diff_file",
+ default=None,
+ help="Dumping all the diffs (if any) to the diff file",
+ )
+ parser.add_argument(
+ "--image1",
+ dest="image1",
+ default=None,
+ required=True,
+ help=("Image 1 file name."),
+ )
+ parser.add_argument(
+ "--image2",
+ dest="image2",
+ default=None,
+ required=True,
+ help=("Image 2 file name."),
+ )
+ options = parser.parse_args(argv[1:])
- if options.mount_basename and options.mount_basename.find('/') >= 0:
- logger.GetLogger().LogError(
- '"--mount_basename" must be a name, not a path.')
- parser.print_help()
- return 1
-
- result = False
- image_comparator = None
- try:
- for i, image_path in enumerate([options.image1, options.image2], start=1):
- image_path = os.path.realpath(image_path)
- if not os.path.isfile(image_path):
- logger.GetLogger().LogError('"{0}" is not a file.'.format(image_path))
- return 1
-
- chromeos_root = None
- if options.chromeos_root:
- chromeos_root = options.chromeos_root
- else:
- ## Deduce chromeos root from image
- t = image_path
- while t != '/':
- if misc.IsChromeOsTree(t):
- break
- t = os.path.dirname(t)
- if misc.IsChromeOsTree(t):
- chromeos_root = t
-
- if not chromeos_root:
+ if options.mount_basename and options.mount_basename.find("/") >= 0:
logger.GetLogger().LogError(
- 'Please provide a valid chromeos root via --chromeos_root')
+ '"--mount_basename" must be a name, not a path.'
+ )
+ parser.print_help()
return 1
- image = CrosImage(image_path, chromeos_root, options.no_unmount)
+ result = False
+ image_comparator = None
+ try:
+ for i, image_path in enumerate(
+ [options.image1, options.image2], start=1
+ ):
+ image_path = os.path.realpath(image_path)
+ if not os.path.isfile(image_path):
+ logger.GetLogger().LogError(
+ '"{0}" is not a file.'.format(image_path)
+ )
+ return 1
- if options.mount_basename:
- mount_basename = '{basename}.{index}'.format(
- basename=options.mount_basename, index=i)
- else:
- mount_basename = None
+ chromeos_root = None
+ if options.chromeos_root:
+ chromeos_root = options.chromeos_root
+ else:
+ ## Deduce chromeos root from image
+ t = image_path
+ while t != "/":
+ if misc.IsChromeOsTree(t):
+ break
+ t = os.path.dirname(t)
+ if misc.IsChromeOsTree(t):
+ chromeos_root = t
- if image.MountImage(mount_basename):
- images.append(image)
- image.FindElfFiles()
+ if not chromeos_root:
+ logger.GetLogger().LogError(
+ "Please provide a valid chromeos root via --chromeos_root"
+ )
+ return 1
- if len(images) == 2:
- image_comparator = ImageComparator(images, options.diff_file)
- result = image_comparator.CompareImages()
- finally:
- for image in images:
- image.UnmountImage()
- if image_comparator:
- image_comparator.Cleanup()
+ image = CrosImage(image_path, chromeos_root, options.no_unmount)
- return 0 if result else 1
+ if options.mount_basename:
+ mount_basename = "{basename}.{index}".format(
+ basename=options.mount_basename, index=i
+ )
+ else:
+ mount_basename = None
+
+ if image.MountImage(mount_basename):
+ images.append(image)
+ image.FindElfFiles()
+
+ if len(images) == 2:
+ image_comparator = ImageComparator(images, options.diff_file)
+ result = image_comparator.CompareImages()
+ finally:
+ for image in images:
+ image.UnmountImage()
+ if image_comparator:
+ image_comparator.Cleanup()
+
+ return 0 if result else 1
-if __name__ == '__main__':
- Main(sys.argv)
+if __name__ == "__main__":
+ Main(sys.argv)
diff --git a/compiler_wrapper/README.md b/compiler_wrapper/README.md
index 12ae313..bb63798 100644
--- a/compiler_wrapper/README.md
+++ b/compiler_wrapper/README.md
@@ -7,7 +7,7 @@
- build: builds the actual go binary, assuming it is executed
from the folder created by `bundle.py`.
-This allows to copy the sources to a Chrome OS / Android
+This allows to copy the sources to a ChromeOS / Android
package, including the build script, and then
build from there without a dependency on toolchain-utils
itself.
@@ -24,7 +24,7 @@
Then perform the tests, e.g. build with the new compiler.
-## Updating the Wrapper for Chrome OS
+## Updating the Wrapper for ChromeOS
To update the wrapper for everyone, the new wrapper configuration must be copied
into chromiumos-overlay, and new revisions of the gcc and llvm ebuilds must be
@@ -73,3 +73,31 @@
`/usr/bin/clang_host_wrapper`
- Gcc host wrapper:
`/usr/x86_64-pc-linux-gnu/gcc-bin/10.2.0/host_wrapper`
+
+## Using the compiler wrapper to crash arbitrary compilations
+
+When Clang crashes, its output can be extremely useful. Often, it will provide
+the user with a stack trace, and messages like:
+
+```
+clang-15: unable to execute command: Illegal instruction
+clang-15: note: diagnostic msg: /tmp/clang_crash_diagnostics/foo-5420d2.c
+clang-15: note: diagnostic msg: /tmp/clang_crash_diagnostics/foo-5420d2.sh
+```
+
+Where the artifacts at `/tmp/clang_crash_diagnostics/foo-*` are a full,
+self-contained reproducer of the inputs that caused the crash in question.
+Often, such a reproducer is very valuable to have even for cases where a crash
+_doesn't_ happen (e.g., maybe Clang is now emitting an error where it used to
+not do so, and we want to bisect upstream LLVM with that info). Normally,
+collecting and crafting such a reproducer is a multi-step process, and can be
+error-prone; compile commands may rely on env vars, they may be done within
+`chroot`s, they may rely on being executed in a particular directory, they may
+rely on intermediate state, etc.
+
+Because of the usefulness of these crash reports, our wrapper supports crashing
+Clang even on files that ordinarily don't cause Clang to crash. For various
+reasons (b/236736327), this support currently requires rebuilding and
+redeploying the wrapper in order to work. That said, this could be a valuable
+tool for devs interested in creating a self-contained reproducer without having
+to manually reproduce the environment in which a particular build was performed.
diff --git a/compiler_wrapper/android_config_test.go b/compiler_wrapper/android_config_test.go
index c61490f..6c62c35 100644
--- a/compiler_wrapper/android_config_test.go
+++ b/compiler_wrapper/android_config_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/bisect_flag.go b/compiler_wrapper/bisect_flag.go
index adfa8b0..2dc8daf 100644
--- a/compiler_wrapper/bisect_flag.go
+++ b/compiler_wrapper/bisect_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/bisect_flag_test.go b/compiler_wrapper/bisect_flag_test.go
index cc203a0..2071a5b 100644
--- a/compiler_wrapper/bisect_flag_test.go
+++ b/compiler_wrapper/bisect_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/build.py b/compiler_wrapper/build.py
index f98b254..930c2cf 100755
--- a/compiler_wrapper/build.py
+++ b/compiler_wrapper/build.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Build script that builds a binary from a bundle."""
-from __future__ import print_function
import argparse
import os.path
@@ -16,87 +15,114 @@
def parse_args():
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--config',
- required=True,
- choices=['cros.hardened', 'cros.nonhardened', 'cros.host', 'android'])
- parser.add_argument('--use_ccache', required=True, choices=['true', 'false'])
- parser.add_argument(
- '--use_llvm_next', required=True, choices=['true', 'false'])
- parser.add_argument('--output_file', required=True, type=str)
- parser.add_argument(
- '--static',
- choices=['true', 'false'],
- help='If true, produce a static wrapper. Autodetects a good value if '
- 'unspecified.')
- args = parser.parse_args()
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--config",
+ required=True,
+ choices=["cros.hardened", "cros.nonhardened", "cros.host", "android"],
+ )
+ parser.add_argument(
+ "--use_ccache", required=True, choices=["true", "false"]
+ )
+ parser.add_argument(
+ "--version_suffix",
+ help="A string appended to the computed version of the wrapper. This "
+ "is appeneded directly without any delimiter.",
+ )
+ parser.add_argument(
+ "--use_llvm_next", required=True, choices=["true", "false"]
+ )
+ parser.add_argument("--output_file", required=True, type=str)
+ parser.add_argument(
+ "--static",
+ choices=["true", "false"],
+ help="If true, produce a static wrapper. Autodetects a good value if "
+ "unspecified.",
+ )
+ args = parser.parse_args()
- if args.static is None:
- args.static = 'cros' not in args.config
- else:
- args.static = args.static == 'true'
+ if args.static is None:
+ args.static = "cros" not in args.config
+ else:
+ args.static = args.static == "true"
- return args
+ return args
def calc_go_args(args, version, build_dir):
- ldFlags = [
- '-X',
- 'main.ConfigName=' + args.config,
- '-X',
- 'main.UseCCache=' + args.use_ccache,
- '-X',
- 'main.UseLlvmNext=' + args.use_llvm_next,
- '-X',
- 'main.Version=' + version,
- ]
+ # These seem unnecessary, and might lead to breakages with Go's ldflag
+ # parsing. Don't allow them.
+ if "'" in version:
+ raise ValueError("`version` should not contain single quotes")
- # If the wrapper is intended for Chrome OS, we need to use libc's exec.
- extra_args = []
- if not args.static:
- extra_args += ['-tags', 'libc_exec']
+ ldFlags = [
+ "-X",
+ "main.ConfigName=" + args.config,
+ "-X",
+ "main.UseCCache=" + args.use_ccache,
+ "-X",
+ "main.UseLlvmNext=" + args.use_llvm_next,
+ "-X",
+ # Quote this, as `version` may have spaces in it.
+ "'main.Version=" + version + "'",
+ ]
- if args.config == 'android':
- # If android_llvm_next_flags.go DNE, we'll get an obscure "no
- # llvmNextFlags" build error; complaining here is clearer.
- if not os.path.exists(
- os.path.join(build_dir, 'android_llvm_next_flags.go')):
- sys.exit('In order to build the Android wrapper, you must have a local '
- 'android_llvm_next_flags.go file; please see '
- 'cros_llvm_next_flags.go.')
- extra_args += ['-tags', 'android_llvm_next_flags']
+ # If the wrapper is intended for ChromeOS, we need to use libc's exec.
+ extra_args = []
+ if not args.static:
+ extra_args += ["-tags", "libc_exec"]
- return [
- 'go', 'build', '-o',
- os.path.abspath(args.output_file), '-ldflags', ' '.join(ldFlags)
- ] + extra_args
+ if args.config == "android":
+ # If android_llvm_next_flags.go DNE, we'll get an obscure "no
+ # llvmNextFlags" build error; complaining here is clearer.
+ if not os.path.exists(
+ os.path.join(build_dir, "android_llvm_next_flags.go")
+ ):
+ sys.exit(
+ "In order to build the Android wrapper, you must have a local "
+ "android_llvm_next_flags.go file; please see "
+ "cros_llvm_next_flags.go."
+ )
+ extra_args += ["-tags", "android_llvm_next_flags"]
+
+ return [
+ "go",
+ "build",
+ "-o",
+ os.path.abspath(args.output_file),
+ "-ldflags",
+ " ".join(ldFlags),
+ ] + extra_args
def read_version(build_dir):
- version_path = os.path.join(build_dir, 'VERSION')
- if os.path.exists(version_path):
- with open(version_path, 'r') as r:
- return r.read()
+ version_path = os.path.join(build_dir, "VERSION")
+ if os.path.exists(version_path):
+ with open(version_path, "r") as r:
+ return r.read()
- last_commit_msg = subprocess.check_output(
- ['git', '-C', build_dir, 'log', '-1', '--pretty=%B'], encoding='utf-8')
- # Use last found change id to support reverts as well.
- change_ids = re.findall(r'Change-Id: (\w+)', last_commit_msg)
- if not change_ids:
- sys.exit("Couldn't find Change-Id in last commit message.")
- return change_ids[-1]
+ last_commit_msg = subprocess.check_output(
+ ["git", "-C", build_dir, "log", "-1", "--pretty=%B"], encoding="utf-8"
+ )
+ # Use last found change id to support reverts as well.
+ change_ids = re.findall(r"Change-Id: (\w+)", last_commit_msg)
+ if not change_ids:
+ sys.exit("Couldn't find Change-Id in last commit message.")
+ return change_ids[-1]
def main():
- args = parse_args()
- build_dir = os.path.dirname(__file__)
- version = read_version(build_dir)
- # Note: Go does not support using absolute package names.
- # So we run go inside the directory of the the build file.
- sys.exit(
- subprocess.call(calc_go_args(args, version, build_dir), cwd=build_dir))
+ args = parse_args()
+ build_dir = os.path.dirname(__file__)
+ version = read_version(build_dir)
+ if args.version_suffix:
+ version += args.version_suffix
+ # Note: Go does not support using absolute package names.
+ # So we run go inside the directory of the the build file.
+ sys.exit(
+ subprocess.call(calc_go_args(args, version, build_dir), cwd=build_dir)
+ )
-if __name__ == '__main__':
- main()
+if __name__ == "__main__":
+ main()
diff --git a/compiler_wrapper/bundle.README b/compiler_wrapper/bundle.README
index 10a28ee..1ffaedd 100644
--- a/compiler_wrapper/bundle.README
+++ b/compiler_wrapper/bundle.README
@@ -1,4 +1,4 @@
-Copyright 2019 The Chromium OS Authors. All rights reserved.
+Copyright 2019 The ChromiumOS Authors
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
diff --git a/compiler_wrapper/bundle.py b/compiler_wrapper/bundle.py
index 6df8214..90386c8 100755
--- a/compiler_wrapper/bundle.py
+++ b/compiler_wrapper/bundle.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Build script that copies the go sources to a build destination."""
-from __future__ import print_function
import argparse
import os.path
@@ -17,67 +16,78 @@
def parse_args():
- parser = argparse.ArgumentParser()
- default_output_dir = os.path.normpath(
- os.path.join(
- os.path.dirname(os.path.realpath(__file__)),
- '../../chromiumos-overlay/sys-devel/llvm/files/compiler_wrapper'))
- parser.add_argument(
- '--output_dir',
- default=default_output_dir,
- help='Output directory to place bundled files (default: %(default)s)')
- parser.add_argument(
- '--create',
- action='store_true',
- help='Create output_dir if it does not already exist')
- return parser.parse_args()
+ parser = argparse.ArgumentParser()
+ default_output_dir = os.path.normpath(
+ os.path.join(
+ os.path.dirname(os.path.realpath(__file__)),
+ "../../chromiumos-overlay/sys-devel/llvm/files/compiler_wrapper",
+ )
+ )
+ parser.add_argument(
+ "--output_dir",
+ default=default_output_dir,
+ help="Output directory to place bundled files (default: %(default)s)",
+ )
+ parser.add_argument(
+ "--create",
+ action="store_true",
+ help="Create output_dir if it does not already exist",
+ )
+ return parser.parse_args()
def copy_files(input_dir, output_dir):
- for filename in os.listdir(input_dir):
- if ((filename.endswith('.go') and not filename.endswith('_test.go')) or
- filename in ('build.py', 'go.mod')):
- shutil.copy(
- os.path.join(input_dir, filename), os.path.join(output_dir, filename))
+ for filename in os.listdir(input_dir):
+ if (
+ filename.endswith(".go") and not filename.endswith("_test.go")
+ ) or filename in ("build.py", "go.mod"):
+ shutil.copy(
+ os.path.join(input_dir, filename),
+ os.path.join(output_dir, filename),
+ )
def read_change_id(input_dir):
- last_commit_msg = subprocess.check_output(
- ['git', '-C', input_dir, 'log', '-1', '--pretty=%B'], encoding='utf-8')
- # Use last found change id to support reverts as well.
- change_ids = re.findall(r'Change-Id: (\w+)', last_commit_msg)
- if not change_ids:
- sys.exit("Couldn't find Change-Id in last commit message.")
- return change_ids[-1]
+ last_commit_msg = subprocess.check_output(
+ ["git", "-C", input_dir, "log", "-1", "--pretty=%B"], encoding="utf-8"
+ )
+ # Use last found change id to support reverts as well.
+ change_ids = re.findall(r"Change-Id: (\w+)", last_commit_msg)
+ if not change_ids:
+ sys.exit("Couldn't find Change-Id in last commit message.")
+ return change_ids[-1]
def write_readme(input_dir, output_dir, change_id):
- with open(
- os.path.join(input_dir, 'bundle.README'), 'r', encoding='utf-8') as r:
- with open(os.path.join(output_dir, 'README'), 'w', encoding='utf-8') as w:
- content = r.read()
- w.write(content.format(change_id=change_id))
+ with open(
+ os.path.join(input_dir, "bundle.README"), "r", encoding="utf-8"
+ ) as r:
+ with open(
+ os.path.join(output_dir, "README"), "w", encoding="utf-8"
+ ) as w:
+ content = r.read()
+ w.write(content.format(change_id=change_id))
def write_version(output_dir, change_id):
- with open(os.path.join(output_dir, 'VERSION'), 'w', encoding='utf-8') as w:
- w.write(change_id)
+ with open(os.path.join(output_dir, "VERSION"), "w", encoding="utf-8") as w:
+ w.write(change_id)
def main():
- args = parse_args()
- input_dir = os.path.dirname(__file__)
- change_id = read_change_id(input_dir)
- if not args.create:
- assert os.path.exists(
- args.output_dir
- ), f'Specified output directory ({args.output_dir}) does not exist'
- shutil.rmtree(args.output_dir, ignore_errors=True)
- os.makedirs(args.output_dir)
- copy_files(input_dir, args.output_dir)
- write_readme(input_dir, args.output_dir, change_id)
- write_version(args.output_dir, change_id)
+ args = parse_args()
+ input_dir = os.path.dirname(__file__)
+ change_id = read_change_id(input_dir)
+ if not args.create:
+ assert os.path.exists(
+ args.output_dir
+ ), f"Specified output directory ({args.output_dir}) does not exist"
+ shutil.rmtree(args.output_dir, ignore_errors=True)
+ os.makedirs(args.output_dir)
+ copy_files(input_dir, args.output_dir)
+ write_readme(input_dir, args.output_dir, change_id)
+ write_version(args.output_dir, change_id)
-if __name__ == '__main__':
- main()
+if __name__ == "__main__":
+ main()
diff --git a/compiler_wrapper/ccache_flag.go b/compiler_wrapper/ccache_flag.go
index 02fb43a..0371f10 100644
--- a/compiler_wrapper/ccache_flag.go
+++ b/compiler_wrapper/ccache_flag.go
@@ -1,9 +1,14 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package main
+func isInConfigureStage(env env) bool {
+ val, present := env.getenv("EBUILD_PHASE")
+ return present && val == "configure"
+}
+
func processCCacheFlag(builder *commandBuilder) {
// We should be able to share the objects across compilers as
// the pre-processed output will differ. This allows boards
@@ -22,7 +27,7 @@
// Disable ccache during portage's src_configure phase. Using ccache here is generally a
// waste of time, since these files are very small. Experimentally, this speeds up
// configuring by ~13%.
- if val, present := builder.env.getenv("EBUILD_PHASE"); present && val == "configure" {
+ if isInConfigureStage(builder.env) {
useCCache = false
}
diff --git a/compiler_wrapper/ccache_flag_test.go b/compiler_wrapper/ccache_flag_test.go
index d6eeb92..330d1a1 100644
--- a/compiler_wrapper/ccache_flag_test.go
+++ b/compiler_wrapper/ccache_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/clang_flags.go b/compiler_wrapper/clang_flags.go
index e25ed74..1c45935 100644
--- a/compiler_wrapper/clang_flags.go
+++ b/compiler_wrapper/clang_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/clang_flags_test.go b/compiler_wrapper/clang_flags_test.go
index 23aed7e..08e8a8d 100644
--- a/compiler_wrapper/clang_flags_test.go
+++ b/compiler_wrapper/clang_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/clang_syntax_flag.go b/compiler_wrapper/clang_syntax_flag.go
index 53240c7..4d5bd4d 100644
--- a/compiler_wrapper/clang_syntax_flag.go
+++ b/compiler_wrapper/clang_syntax_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/clang_syntax_flag_test.go b/compiler_wrapper/clang_syntax_flag_test.go
index 8ee9c22..728168c 100644
--- a/compiler_wrapper/clang_syntax_flag_test.go
+++ b/compiler_wrapper/clang_syntax_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/clang_tidy_flag.go b/compiler_wrapper/clang_tidy_flag.go
index 01387fd..b19976d 100644
--- a/compiler_wrapper/clang_tidy_flag.go
+++ b/compiler_wrapper/clang_tidy_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -114,9 +114,7 @@
fixesFilePath := f.Name() + ".yaml"
fixesMetadataPath := f.Name() + ".json"
- // FIXME(gbiv): Remove `-checks=*` when testing is complete; we should defer to .clang-tidy
- // files, which are both more expressive and more approachable than `-checks=*`.
- extraTidyFlags = append(extraTidyFlags, "-checks=*", "--export-fixes="+fixesFilePath)
+ extraTidyFlags = append(extraTidyFlags, "--export-fixes="+fixesFilePath, "--header-filter=.*")
clangTidyCmd, err := calcClangTidyInvocation(env, clangCmd, cSrcFile, extraTidyFlags...)
if err != nil {
return fmt.Errorf("calculating tidy invocation: %v", err)
diff --git a/compiler_wrapper/clang_tidy_flag_test.go b/compiler_wrapper/clang_tidy_flag_test.go
index 4293bb2..73dec25 100644
--- a/compiler_wrapper/clang_tidy_flag_test.go
+++ b/compiler_wrapper/clang_tidy_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/command.go b/compiler_wrapper/command.go
index eb040b2..e2a5176 100644
--- a/compiler_wrapper/command.go
+++ b/compiler_wrapper/command.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -76,7 +76,7 @@
cmdCtx.Stderr = env.stderr()
if err := cmdCtx.Start(); err != nil {
- return newErrorwithSourceLocf("exec error: %v", err)
+ return fmt.Errorf("exec error: %w", err)
}
err := cmdCtx.Wait()
if ctx.Err() == nil {
@@ -265,6 +265,21 @@
builder.args = newArgs
}
+// Allows to filter arg pairs, useful for eg when having adjacent unsupported args
+// like "-Wl,-z -Wl,defs"
+func (builder *commandBuilder) filterArgPairs(keepPair func(arg1, arg2 builderArg) bool) {
+ newArgs := builder.args[:0]
+ for i := 0; i < len(builder.args); i++ {
+ if i == len(builder.args)-1 || keepPair(builder.args[i], builder.args[i+1]) {
+ newArgs = append(newArgs, builder.args[i])
+ } else {
+ // skip builder.args[i]) as well as next item
+ i++
+ }
+ }
+ builder.args = newArgs
+}
+
func (builder *commandBuilder) updateEnv(updates ...string) {
builder.envUpdates = append(builder.envUpdates, updates...)
}
diff --git a/compiler_wrapper/command_test.go b/compiler_wrapper/command_test.go
index 18d05a9..031872c 100644
--- a/compiler_wrapper/command_test.go
+++ b/compiler_wrapper/command_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/compile_with_fallback.go b/compiler_wrapper/compile_with_fallback.go
index 8b4b5b4..d0b6a16 100644
--- a/compiler_wrapper/compile_with_fallback.go
+++ b/compiler_wrapper/compile_with_fallback.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/compile_with_fallback_test.go b/compiler_wrapper/compile_with_fallback_test.go
index f9da441..67530a2 100644
--- a/compiler_wrapper/compile_with_fallback_test.go
+++ b/compiler_wrapper/compile_with_fallback_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/compiler_wrapper.go b/compiler_wrapper/compiler_wrapper.go
index 986eaba..dcaada9 100644
--- a/compiler_wrapper/compiler_wrapper.go
+++ b/compiler_wrapper/compiler_wrapper.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -151,6 +151,7 @@
}
} else {
cSrcFile, tidyFlags, tidyMode := processClangTidyFlags(mainBuilder)
+ cSrcFile, iwyuFlags, iwyuMode := processIWYUFlags(mainBuilder)
if mainBuilder.target.compilerType == clangType {
err := prepareClangCommand(mainBuilder)
if err != nil {
@@ -176,6 +177,20 @@
return 0, err
}
}
+
+ if iwyuMode != iwyuModeNone {
+ if iwyuMode == iwyuModeError {
+ panic(fmt.Sprintf("Unknown IWYU mode"))
+ }
+
+ allowCCache = false
+ clangCmdWithoutRemoteBuildAndCCache := mainBuilder.build()
+ err := runIWYU(env, clangCmdWithoutRemoteBuildAndCCache, cSrcFile, iwyuFlags)
+ if err != nil {
+ return 0, err
+ }
+ }
+
if remoteBuildUsed, err = processRemoteBuildAndCCacheFlags(allowCCache, mainBuilder); err != nil {
return 0, err
}
@@ -201,6 +216,12 @@
}
}
+ // If builds matching some heuristic should crash, crash them. Since this is purely a
+ // debugging tool, don't offer any nice features with it (e.g., rusage, ...).
+ if shouldUseCrashBuildsHeuristic && mainBuilder.target.compilerType == clangType {
+ return buildWithAutocrash(env, cfg, compilerCmd)
+ }
+
bisectStage := getBisectStage(env)
if rusageEnabled {
@@ -354,7 +375,6 @@
builder.addPreUserArgs(builder.cfg.commonFlags...)
if !builder.cfg.isHostWrapper {
processLibGCCFlags(builder)
- processPieFlags(builder)
processThumbCodeFlags(builder)
processStackProtectorFlags(builder)
processX86Flags(builder)
diff --git a/compiler_wrapper/compiler_wrapper_test.go b/compiler_wrapper/compiler_wrapper_test.go
index 74fe3f5..a560c9c 100644
--- a/compiler_wrapper/compiler_wrapper_test.go
+++ b/compiler_wrapper/compiler_wrapper_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/config.go b/compiler_wrapper/config.go
index 6c28287..6c3fcf5 100644
--- a/compiler_wrapper/config.go
+++ b/compiler_wrapper/config.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -34,7 +34,7 @@
triciumNitsDir string
// Directory to store crash artifacts in.
crashArtifactsDir string
- // Version. Only used for printing via -print-cmd.
+ // Version. Only exposed via -print-config.
version string
}
@@ -83,13 +83,13 @@
cfg := config{}
switch configName {
case "cros.hardened":
- cfg = *crosHardenedConfig
+ cfg = crosHardenedConfig
case "cros.nonhardened":
- cfg = *crosNonHardenedConfig
+ cfg = crosNonHardenedConfig
case "cros.host":
- cfg = *crosHostConfig
+ cfg = crosHostConfig
case "android":
- cfg = *androidConfig
+ cfg = androidConfig
default:
return nil, newErrorwithSourceLocf("unknown config name: %s", configName)
}
@@ -103,9 +103,42 @@
return &cfg, nil
}
+func crosCommonClangFlags() []string {
+ // Temporarily disable tautological-*-compare chromium:778316.
+ // Temporarily add no-unknown-warning-option to deal with old clang versions.
+ // Temporarily disable Wdeprecated-declarations. b/193860318
+ // b/230345382: Temporarily disable Wimplicit-function-declaration.
+ // b/231987783: Temporarily disable Wimplicit-int.
+ return []string{
+ "-Qunused-arguments",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ }
+}
+
+func crosCommonClangPostFlags() []string {
+ // Temporarily disable Wdeprecated-copy. b/191479033
+ return []string{
+ "-Wno-compound-token-split-by-space",
+ "-Wno-deprecated-copy",
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
+ }
+}
+
// Full hardening.
// Temporarily disable function splitting because of chromium:434751.
-var crosHardenedConfig = &config{
+var crosHardenedConfig = config{
clangRootRelPath: "../..",
gccRootRelPath: "../../../../..",
// Pass "-fcommon" till the packages are fixed to work with new clang/gcc
@@ -113,8 +146,6 @@
commonFlags: []string{
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
},
@@ -123,46 +154,27 @@
"-Wno-unused-local-typedefs",
"-Wno-maybe-uninitialized",
},
- // Temporarily disable tautological-*-compare chromium:778316.
- // Temporarily add no-unknown-warning-option to deal with old clang versions.
// Temporarily disable Wsection since kernel gets a bunch of these. chromium:778867
// Disable "-faddrsig" since it produces object files that strip doesn't understand, chromium:915742.
// crbug.com/1103065: -grecord-gcc-switches pollutes the Goma cache;
// removed that flag for now.
- // Temporarily disable Wdeprecated-declarations. b/193860318
-
- clangFlags: []string{
- "-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-unknown-warning-option",
+ clangFlags: append(
+ crosCommonClangFlags(),
+ "--unwindlib=libunwind",
"-Wno-section",
+ "-fno-addrsig",
"-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
- "-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
- },
-
- // Temporarily disable Wdeprecated-copy. b/191479033
- clangPostFlags: []string{
- "-Wno-implicit-int-float-conversion",
- "-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
- "-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable",
- },
+ "-ftrivial-auto-var-init=zero",
+ "-enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang",
+ ),
+ clangPostFlags: crosCommonClangPostFlags(),
newWarningsDir: "/tmp/fatal_clang_warnings",
triciumNitsDir: "/tmp/linting_output/clang-tidy",
crashArtifactsDir: "/tmp/clang_crash_diagnostics",
}
// Flags to be added to non-hardened toolchain.
-var crosNonHardenedConfig = &config{
+var crosNonHardenedConfig = config{
clangRootRelPath: "../..",
gccRootRelPath: "../../../../..",
commonFlags: []string{},
@@ -172,39 +184,19 @@
"-Wno-deprecated-declarations",
"-Wtrampolines",
},
- // Temporarily disable tautological-*-compare chromium:778316.
- // Temporarily add no-unknown-warning-option to deal with old clang versions.
// Temporarily disable Wsection since kernel gets a bunch of these. chromium:778867
- // Temporarily disable Wdeprecated-declarations. b/193860318
- clangFlags: []string{
- "-Qunused-arguments",
- "-fdebug-default-version=5",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-unknown-warning-option",
+ clangFlags: append(
+ crosCommonClangFlags(),
"-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
- "-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
- },
-
- // Temporarily disable Wdeprecated-copy. b/191479033
- clangPostFlags: []string{
- "-Wno-implicit-int-float-conversion",
- "-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
- "-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable",
- },
+ ),
+ clangPostFlags: crosCommonClangPostFlags(),
newWarningsDir: "/tmp/fatal_clang_warnings",
triciumNitsDir: "/tmp/linting_output/clang-tidy",
crashArtifactsDir: "/tmp/clang_crash_diagnostics",
}
// Flags to be added to host toolchain.
-var crosHostConfig = &config{
+var crosHostConfig = config{
isHostWrapper: true,
clangRootRelPath: "../..",
gccRootRelPath: "../..",
@@ -218,41 +210,22 @@
"-Wno-unused-local-typedefs",
"-Wno-deprecated-declarations",
},
- // Temporarily disable tautological-*-compare chromium:778316.
- // Temporarily add no-unknown-warning-option to deal with old clang versions.
// crbug.com/1103065: -grecord-gcc-switches pollutes the Goma cache;
// removed that flag for now.
- // Temporarily disable Wdeprecated-declarations. b/193860318
- clangFlags: []string{
- "-Qunused-arguments",
+ clangFlags: append(
+ crosCommonClangFlags(),
+ "-Wno-unused-local-typedefs",
"-fno-addrsig",
"-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
- },
-
+ ),
// Temporarily disable Wdeprecated-copy. b/191479033
- clangPostFlags: []string{
- "-Wno-implicit-int-float-conversion",
- "-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
- "-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable",
- },
+ clangPostFlags: crosCommonClangPostFlags(),
newWarningsDir: "/tmp/fatal_clang_warnings",
triciumNitsDir: "/tmp/linting_output/clang-tidy",
crashArtifactsDir: "/tmp/clang_crash_diagnostics",
}
-var androidConfig = &config{
+var androidConfig = config{
isHostWrapper: false,
isAndroidWrapper: true,
gccRootRelPath: "./",
diff --git a/compiler_wrapper/config_test.go b/compiler_wrapper/config_test.go
index 86a7892..0e6b44c 100644
--- a/compiler_wrapper/config_test.go
+++ b/compiler_wrapper/config_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -119,7 +119,7 @@
func isSysrootHardened(cfg *config) bool {
for _, arg := range cfg.commonFlags {
- if arg == "-pie" {
+ if arg == "-D_FORTIFY_SOURCE=2" {
return true
}
}
diff --git a/compiler_wrapper/crash_builds.go b/compiler_wrapper/crash_builds.go
new file mode 100644
index 0000000..76a5412
--- /dev/null
+++ b/compiler_wrapper/crash_builds.go
@@ -0,0 +1,154 @@
+// Copyright 2022 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "regexp"
+)
+
+// ** HEY YOU, PERSON READING THIS! **
+//
+// Are you a dev who wants to make this work locally? Awesome! Please note that this **only** works
+// for Clang. If that's OK, here's a checklist for you:
+// [ ] Set `shouldUseCrashBuildsHeuristic = true` below.
+// [ ] If you want this heuristic to operate during `src_configure` (rare), also set
+// `allowAutoCrashInConfigure` to true.
+// [ ] Modify `shouldAutocrashPostExec` to return `true` when the compiler's output/flags match what
+// you want to crash on, and `false` otherwise.
+// [ ] Run `./install_compiler_wrapper.sh` to install the updated wrapper.
+// [ ] Run whatever command reproduces the error.
+//
+// If you need to make changes to your heuristic, repeat the above steps starting at
+// `./install_compiler_wrapper.sh` until things seem to do what you want.
+const (
+ // Set this to true to use autocrashing logic.
+ shouldUseCrashBuildsHeuristic = false
+ // Set this to true to allow `shouldAutocrashPostExec` to check+crash configure steps.
+ allowAutoCrashInConfigure = false
+)
+
+// shouldAutocrashPostExec returns true if we should automatically crash the compiler. This is
+// called after the compiler is run. If it returns true, we'll re-execute the compiler with the bit
+// of extra code necessary to crash it.
+func shouldAutocrashPostExec(env env, cfg *config, originalCmd *command, runInfo compilerExecInfo) bool {
+ // ** TODO, DEAR READER: ** Fill this in. Below are a few `if false {` blocks that should
+ // work for common use-cases. You're encouraged to change them to `if true {` if they suit
+ // your needs.
+
+ // Return true if `error: some error message` is contained in the run's stderr.
+ if false {
+ return bytes.Contains(runInfo.stderr, []byte("error: some error message"))
+ }
+
+ // Return true if `foo.c:${line_number}: error: some error message` appears in the run's
+ // stderr. Otherwise, return false.
+ if false {
+ r := regexp.MustCompile(`foo\.c:\d+: error: some error message`)
+ return r.Match(runInfo.stderr)
+ }
+
+ // Return true if there's a `-fjust-give-up` flag in the compiler's invocation.
+ if false {
+ for _, flag := range originalCmd.Args {
+ if flag == "-fjust-give-up" {
+ return true
+ }
+ }
+
+ return false
+ }
+
+ panic("Please fill in `shouldAutocrashPostExec` with meaningful logic.")
+}
+
+type compilerExecInfo struct {
+ exitCode int
+ stdout, stderr []byte
+}
+
+// ** Below here are implementation details. If all you want is autocrashing behavior, you don't
+// need to keep reading. **
+const (
+ autocrashProgramLine = "\n#pragma clang __debug parser_crash"
+)
+
+type buildWithAutocrashPredicates struct {
+ allowInConfigure bool
+ shouldAutocrash func(env, *config, *command, compilerExecInfo) bool
+}
+
+func buildWithAutocrash(env env, cfg *config, originalCmd *command) (exitCode int, err error) {
+ return buildWithAutocrashImpl(env, cfg, originalCmd, buildWithAutocrashPredicates{
+ allowInConfigure: allowAutoCrashInConfigure,
+ shouldAutocrash: shouldAutocrashPostExec,
+ })
+}
+
+func buildWithAutocrashImpl(env env, cfg *config, originalCmd *command, preds buildWithAutocrashPredicates) (exitCode int, err error) {
+ stdinBuffer := (*bytes.Buffer)(nil)
+ subprocStdin := io.Reader(nil)
+ invocationUsesStdinAsAFile := needStdinTee(originalCmd)
+ if invocationUsesStdinAsAFile {
+ stdinBuffer = &bytes.Buffer{}
+ if _, err := stdinBuffer.ReadFrom(env.stdin()); err != nil {
+ return 0, wrapErrorwithSourceLocf(err, "prebuffering stdin")
+ }
+ subprocStdin = stdinBuffer
+ } else {
+ subprocStdin = env.stdin()
+ }
+
+ stdoutBuffer := &bytes.Buffer{}
+ stderrBuffer := &bytes.Buffer{}
+ exitCode, err = wrapSubprocessErrorWithSourceLoc(originalCmd,
+ env.run(originalCmd, subprocStdin, stdoutBuffer, stderrBuffer))
+ if err != nil {
+ return 0, err
+ }
+
+ autocrashAllowed := preds.allowInConfigure || !isInConfigureStage(env)
+ crash := autocrashAllowed && preds.shouldAutocrash(env, cfg, originalCmd, compilerExecInfo{
+ exitCode: exitCode,
+ stdout: stdoutBuffer.Bytes(),
+ stderr: stderrBuffer.Bytes(),
+ })
+ if !crash {
+ stdoutBuffer.WriteTo(env.stdout())
+ stderrBuffer.WriteTo(env.stderr())
+ return exitCode, nil
+ }
+
+ fmt.Fprintln(env.stderr(), "** Autocrash requested; crashing the compiler...**")
+
+ // `stdinBuffer == nil` implies that `-` wasn't used as a flag. If `-` isn't used as a
+ // flag, clang will ignore stdin. We want to write our #pragma to stdin, since we can't
+ // reasonably modify the files we're currently compiling.
+ if stdinBuffer == nil {
+ newArgs := []string{}
+ // Clang can't handle `-o ${target}` when handed multiple input files. Since
+ // we expect to crash before emitting anything, remove `-o ${file}` entirely.
+ for i, e := 0, len(originalCmd.Args); i < e; i++ {
+ a := originalCmd.Args[i]
+ if a == "-o" {
+ // Skip the -o here, then skip the following arg in the loop header.
+ i++
+ } else {
+ newArgs = append(newArgs, a)
+ }
+ }
+ // And now add args that instruct clang to read from stdin. In this case, we also
+ // need to tell Clang what language the file is written in; C is as good as anything
+ // for this.
+ originalCmd.Args = append(newArgs, "-x", "c", "-")
+ stdinBuffer = &bytes.Buffer{}
+ }
+
+ stdinBuffer.WriteString(autocrashProgramLine)
+ return wrapSubprocessErrorWithSourceLoc(originalCmd,
+ env.run(originalCmd, stdinBuffer, env.stdout(), env.stderr()))
+}
diff --git a/compiler_wrapper/crash_builds_test.go b/compiler_wrapper/crash_builds_test.go
new file mode 100644
index 0000000..3d33d93
--- /dev/null
+++ b/compiler_wrapper/crash_builds_test.go
@@ -0,0 +1,260 @@
+// Copyright 2022 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "io"
+ "strings"
+ "testing"
+)
+
+func TestBuildWithAutoCrashDoesNothingIfCrashIsNotRequested(t *testing.T) {
+ withTestContext(t, func(ctx *testContext) {
+ neverAutoCrash := buildWithAutocrashPredicates{
+ allowInConfigure: true,
+ shouldAutocrash: func(env, *config, *command, compilerExecInfo) bool {
+ return false
+ },
+ }
+
+ exitCode, err := buildWithAutocrashImpl(ctx, ctx.cfg, ctx.newCommand(clangX86_64, mainCc), neverAutoCrash)
+ if err != nil {
+ t.Fatalf("unexpectedly failed with %v", err)
+ }
+ ctx.must(exitCode)
+ if ctx.cmdCount != 1 {
+ t.Errorf("expected 1 call. Got: %d", ctx.cmdCount)
+ }
+ })
+}
+
+func TestBuildWithAutoCrashSkipsAutocrashLogicIfInConfigureAndConfigureChecksDisabled(t *testing.T) {
+ withTestContext(t, func(ctx *testContext) {
+ alwaysAutocrash := buildWithAutocrashPredicates{
+ allowInConfigure: false,
+ shouldAutocrash: func(env, *config, *command, compilerExecInfo) bool {
+ return true
+ },
+ }
+
+ ctx.env = append(ctx.env, "EBUILD_PHASE=configure")
+ exitCode, err := buildWithAutocrashImpl(ctx, ctx.cfg, ctx.newCommand(clangX86_64, mainCc), alwaysAutocrash)
+ if err != nil {
+ t.Fatalf("unexpectedly failed with %v", err)
+ }
+ ctx.must(exitCode)
+ if ctx.cmdCount != 1 {
+ t.Errorf("expected 1 call. Got: %d", ctx.cmdCount)
+ }
+ })
+}
+
+func TestBuildWithAutoCrashRerunsIfPredicateRequestsCrash(t *testing.T) {
+ withTestContext(t, func(ctx *testContext) {
+ autocrashPostCmd := buildWithAutocrashPredicates{
+ allowInConfigure: true,
+ shouldAutocrash: func(env, *config, *command, compilerExecInfo) bool {
+ return true
+ },
+ }
+
+ ctx.cmdMock = func(cmd *command, stdin io.Reader, stdout io.Writer, stderr io.Writer) error {
+ hasDash := false
+ for _, arg := range cmd.Args {
+ if arg == "-" {
+ hasDash = true
+ break
+ }
+ }
+
+ switch ctx.cmdCount {
+ case 1:
+ if hasDash {
+ t.Error("Got `-` on command 1; didn't want that.")
+ }
+ return nil
+ case 2:
+ if !hasDash {
+ t.Error("Didn't get `-` on command 2; wanted that.")
+ } else {
+ input := stdin.(*bytes.Buffer)
+ if s := input.String(); !strings.Contains(s, autocrashProgramLine) {
+ t.Errorf("Input was %q; expected %q to be in it", s, autocrashProgramLine)
+ }
+ }
+ return nil
+ default:
+ t.Fatalf("Unexpected command count: %d", ctx.cmdCount)
+ panic("Unreachable")
+ }
+ }
+
+ exitCode, err := buildWithAutocrashImpl(ctx, ctx.cfg, ctx.newCommand(clangX86_64, mainCc), autocrashPostCmd)
+ if err != nil {
+ t.Fatalf("unexpectedly failed with %v", err)
+ }
+ ctx.must(exitCode)
+
+ if ctx.cmdCount != 2 {
+ t.Errorf("expected 2 calls. Got: %d", ctx.cmdCount)
+ }
+ })
+}
+
+func TestBuildWithAutoCrashAddsDashAndWritesToStdinIfInputFileIsNotStdin(t *testing.T) {
+ withTestContext(t, func(ctx *testContext) {
+ autocrashPostCmd := buildWithAutocrashPredicates{
+ allowInConfigure: true,
+ shouldAutocrash: func(env, *config, *command, compilerExecInfo) bool {
+ return true
+ },
+ }
+
+ ctx.cmdMock = func(cmd *command, stdin io.Reader, stdout io.Writer, stderr io.Writer) error {
+ numDashes := 0
+ for _, arg := range cmd.Args {
+ if arg == "-" {
+ numDashes++
+ }
+ }
+
+ switch ctx.cmdCount {
+ case 1:
+ if numDashes != 0 {
+ t.Errorf("Got %d dashes on command 1; want 0", numDashes)
+ }
+ return nil
+ case 2:
+ if numDashes != 1 {
+ t.Errorf("Got %d dashes on command 2; want 1", numDashes)
+ }
+
+ input := stdin.(*bytes.Buffer).String()
+ stdinHasAutocrashLine := strings.Contains(input, autocrashProgramLine)
+ if !stdinHasAutocrashLine {
+ t.Error("Got no autocrash line on the second command; wanted that")
+ }
+ return nil
+ default:
+ t.Fatalf("Unexpected command count: %d", ctx.cmdCount)
+ panic("Unreachable")
+ }
+ }
+
+ exitCode, err := buildWithAutocrashImpl(ctx, ctx.cfg, ctx.newCommand(clangX86_64, mainCc), autocrashPostCmd)
+ if err != nil {
+ t.Fatalf("unexpectedly failed with %v", err)
+ }
+ ctx.must(exitCode)
+
+ if ctx.cmdCount != 2 {
+ t.Errorf("expected 2 calls. Got: %d", ctx.cmdCount)
+ }
+ })
+}
+
+func TestBuildWithAutoCrashAppendsToStdinIfStdinIsTheOnlyInputFile(t *testing.T) {
+ withTestContext(t, func(ctx *testContext) {
+ autocrashPostCmd := buildWithAutocrashPredicates{
+ allowInConfigure: true,
+ shouldAutocrash: func(env, *config, *command, compilerExecInfo) bool {
+ return true
+ },
+ }
+
+ ctx.cmdMock = func(cmd *command, stdin io.Reader, stdout io.Writer, stderr io.Writer) error {
+ numDashes := 0
+ for _, arg := range cmd.Args {
+ if arg == "-" {
+ numDashes++
+ }
+ }
+
+ if numDashes != 1 {
+ t.Errorf("Got %d dashes on command %d (args: %#v); want 1", numDashes, ctx.cmdCount, cmd.Args)
+ }
+
+ input := stdin.(*bytes.Buffer).String()
+ stdinHasAutocrashLine := strings.Contains(input, autocrashProgramLine)
+
+ switch ctx.cmdCount {
+ case 1:
+ if stdinHasAutocrashLine {
+ t.Error("Got autocrash line on the first command; did not want that")
+ }
+ return nil
+ case 2:
+ if !stdinHasAutocrashLine {
+ t.Error("Got no autocrash line on the second command; wanted that")
+ }
+ return nil
+ default:
+ t.Fatalf("Unexpected command count: %d", ctx.cmdCount)
+ panic("Unreachable")
+ }
+ }
+
+ exitCode, err := buildWithAutocrashImpl(ctx, ctx.cfg, ctx.newCommand(clangX86_64, "-x", "c", "-"), autocrashPostCmd)
+ if err != nil {
+ t.Fatalf("unexpectedly failed with %v", err)
+ }
+ ctx.must(exitCode)
+
+ if ctx.cmdCount != 2 {
+ t.Errorf("expected 2 calls. Got: %d", ctx.cmdCount)
+ }
+ })
+}
+
+func TestCrashBuildFiltersObjectFileOptionOnCrashes(t *testing.T) {
+ withTestContext(t, func(ctx *testContext) {
+ autocrashPostCmd := buildWithAutocrashPredicates{
+ allowInConfigure: true,
+ shouldAutocrash: func(env, *config, *command, compilerExecInfo) bool {
+ return true
+ },
+ }
+
+ const outputFileName = "/path/to/foo.o"
+
+ ctx.cmdMock = func(cmd *command, stdin io.Reader, stdout io.Writer, stderr io.Writer) error {
+ cmdOutputArg := (*string)(nil)
+ for i, e := range cmd.Args {
+ if e == "-o" {
+ // Assume something follows. If not, we'll crash and the
+ // test will fail.
+ cmdOutputArg = &cmd.Args[i+1]
+ }
+ }
+
+ switch ctx.cmdCount {
+ case 1:
+ if cmdOutputArg == nil || *cmdOutputArg != outputFileName {
+ t.Errorf("Got command args %q; want `-o %q` in them", cmd.Args, outputFileName)
+ }
+ return nil
+ case 2:
+ if cmdOutputArg != nil {
+ t.Errorf("Got command args %q; want no mention of `-o %q` in them", cmd.Args, outputFileName)
+ }
+ return nil
+ default:
+ t.Fatalf("Unexpected command count: %d", ctx.cmdCount)
+ panic("Unreachable")
+ }
+ }
+
+ exitCode, err := buildWithAutocrashImpl(ctx, ctx.cfg, ctx.newCommand(clangX86_64, "-o", outputFileName, mainCc), autocrashPostCmd)
+ if err != nil {
+ t.Fatalf("unexpectedly failed with %v", err)
+ }
+ ctx.must(exitCode)
+
+ if ctx.cmdCount != 2 {
+ t.Errorf("expected 2 calls. Got: %d", ctx.cmdCount)
+ }
+ })
+}
diff --git a/compiler_wrapper/cros_hardened_config_test.go b/compiler_wrapper/cros_hardened_config_test.go
index 337b27f..80a261c 100644
--- a/compiler_wrapper/cros_hardened_config_test.go
+++ b/compiler_wrapper/cros_hardened_config_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/cros_host_config_test.go b/compiler_wrapper/cros_host_config_test.go
index 4f3b5cb..4eb9027 100644
--- a/compiler_wrapper/cros_host_config_test.go
+++ b/compiler_wrapper/cros_host_config_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/cros_llvm_next_flags.go b/compiler_wrapper/cros_llvm_next_flags.go
index 870e288..4b21ad3 100644
--- a/compiler_wrapper/cros_llvm_next_flags.go
+++ b/compiler_wrapper/cros_llvm_next_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Chromium OS Authors. All rights reserved.
+// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -7,7 +7,7 @@
package main
-// This file defines extra flags for llvm-next testing for Chrome OS. Importantly, these flags don't
+// This file defines extra flags for llvm-next testing for ChromeOS. Importantly, these flags don't
// apply to Android's llvm-next wrapper. Android's toolchain-utils copy has a
// `android_llvm_next_flags.go` file downstream that defines its llvm-next arguments. As you can
// probably infer, `android_llvm_next_flags.go` is only compiled if the `android_llvm_next_flags`
diff --git a/compiler_wrapper/cros_nonhardened_config_test.go b/compiler_wrapper/cros_nonhardened_config_test.go
index 3d413fb..9428254 100644
--- a/compiler_wrapper/cros_nonhardened_config_test.go
+++ b/compiler_wrapper/cros_nonhardened_config_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/disable_werror_flag.go b/compiler_wrapper/disable_werror_flag.go
index cb770b7..1707767 100644
--- a/compiler_wrapper/disable_werror_flag.go
+++ b/compiler_wrapper/disable_werror_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/disable_werror_flag_test.go b/compiler_wrapper/disable_werror_flag_test.go
index 592c35b..d3be921 100644
--- a/compiler_wrapper/disable_werror_flag_test.go
+++ b/compiler_wrapper/disable_werror_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/env.go b/compiler_wrapper/env.go
index c8f6ceb..6b25d96 100644
--- a/compiler_wrapper/env.go
+++ b/compiler_wrapper/env.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/env_test.go b/compiler_wrapper/env_test.go
index b5bf65a..6b00a8b 100644
--- a/compiler_wrapper/env_test.go
+++ b/compiler_wrapper/env_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/errors.go b/compiler_wrapper/errors.go
index 18e0fac..30a9ffd 100644
--- a/compiler_wrapper/errors.go
+++ b/compiler_wrapper/errors.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/errors_test.go b/compiler_wrapper/errors_test.go
index 957fae3..096ae37 100644
--- a/compiler_wrapper/errors_test.go
+++ b/compiler_wrapper/errors_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/gcc_flags.go b/compiler_wrapper/gcc_flags.go
index 2c553e6..01f7a3c 100644
--- a/compiler_wrapper/gcc_flags.go
+++ b/compiler_wrapper/gcc_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/gcc_flags_test.go b/compiler_wrapper/gcc_flags_test.go
index adf7201..45ad866 100644
--- a/compiler_wrapper/gcc_flags_test.go
+++ b/compiler_wrapper/gcc_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/go_exec.go b/compiler_wrapper/go_exec.go
index 2f2e5ad..8833505 100644
--- a/compiler_wrapper/go_exec.go
+++ b/compiler_wrapper/go_exec.go
@@ -1,7 +1,8 @@
-// Copyright 2020 The Chromium OS Authors. All rights reserved.
+// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//go:build !libc_exec
// +build !libc_exec
package main
diff --git a/compiler_wrapper/goldenutil_test.go b/compiler_wrapper/goldenutil_test.go
index 2b391d7..16e2b7e 100644
--- a/compiler_wrapper/goldenutil_test.go
+++ b/compiler_wrapper/goldenutil_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/install_compiler_wrapper.sh b/compiler_wrapper/install_compiler_wrapper.sh
index 3a5b741..8145908 100755
--- a/compiler_wrapper/install_compiler_wrapper.sh
+++ b/compiler_wrapper/install_compiler_wrapper.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -11,18 +11,41 @@
exit 1
fi
set -e
+
+# Use a unique value here, since folks doing wrapper dev _likely_ want builds
+# to always be redone.
+version_suffix="manually_installed_wrapper_at_unix_$(date +%s.%6N)"
+echo "Using toolchain hash: ${version_suffix}"
cd "$(dirname "$(readlink -m "$0")")"
+
+build_py() {
+ ./build.py --version_suffix="${version_suffix}" "$@"
+}
+
echo "Updated files:"
# Update the host wrapper
-./build.py --config=cros.host --use_ccache=false --use_llvm_next=false --output_file=./clang_host_wrapper
+build_py \
+ --config=cros.host \
+ --use_ccache=false \
+ --use_llvm_next=false \
+ --output_file=./clang_host_wrapper
sudo mv ./clang_host_wrapper /usr/bin/clang_host_wrapper
echo "/usr/bin/clang_host_wrapper"
sudo cp ../binary_search_tool/bisect_driver.py /usr/bin
echo "/usr/bin/clang_host_wrapper/bisect_driver.py"
# Update the target wrappers
-./build.py --config=cros.hardened --use_ccache=false --use_llvm_next=false --output_file=./sysroot_wrapper.hardened.noccache
-./build.py --config=cros.hardened --use_ccache=true --use_llvm_next=false --output_file=./sysroot_wrapper.hardened.ccache
+build_py \
+ --config=cros.hardened \
+ --use_ccache=false \
+ --use_llvm_next=false \
+ --output_file=./sysroot_wrapper.hardened.noccache
+build_py \
+ --config=cros.hardened \
+ --use_ccache=true \
+ --use_llvm_next=false \
+ --output_file=./sysroot_wrapper.hardened.ccache
+
# Update clang target wrappers.
sudo cp ./sysroot_wrapper.hardened.noccache ./sysroot_wrapper.hardened.ccache /usr/bin
echo "Updated clang wrapper /usr/bin/sysroot_wrapper.hardened.noccache"
@@ -30,7 +53,7 @@
# Update GCC target wrappers.
for GCC in cross-x86_64-cros-linux-gnu/gcc cross-armv7a-cros-linux-gnueabihf/gcc cross-aarch64-cros-linux-gnu/gcc; do
- if ! FILES="$(equery f ${GCC})"; then
+ if ! FILES="$(equery f "${GCC}")"; then
if [[ $(equery l "${GCC}" 2>&1 | wc -c) -eq 0 ]]; then
echo "no ${GCC} package found; skipping" >&2
continue
diff --git a/compiler_wrapper/iwyu_flag.go b/compiler_wrapper/iwyu_flag.go
new file mode 100644
index 0000000..d13d114
--- /dev/null
+++ b/compiler_wrapper/iwyu_flag.go
@@ -0,0 +1,156 @@
+// Copyright 2022 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "path/filepath"
+ "strings"
+)
+
+type useIWYUMode int
+
+const iwyuCrashSubstring = "PLEASE submit a bug report"
+
+const (
+ iwyuModeNone useIWYUMode = iota
+ iwyuModeAll
+ iwyuModeError
+)
+
+var srcFileSuffixes = []string{
+ ".c",
+ ".cc",
+ ".cpp",
+ ".C",
+ ".cxx",
+ ".c++",
+}
+
+func findWithIWYUFlag(args []builderArg) (string, []builderArg) {
+ for i := range args {
+ if args[i].value == "--with-iwyu" {
+ args = append(args[:i], args[i+1:]...)
+ return "1", args
+ }
+ }
+ return "", args
+}
+
+func processIWYUFlags(builder *commandBuilder) (cSrcFile string, iwyuFlags []string, mode useIWYUMode) {
+ builder.transformArgs(func(arg builderArg) string {
+ const prefix = "-iwyu-flag="
+ if !strings.HasPrefix(arg.value, prefix) {
+ return arg.value
+ }
+
+ iwyuFlags = append(iwyuFlags, arg.value[len(prefix):])
+ return ""
+ })
+
+ cSrcFile = ""
+ lastArg := ""
+ for _, arg := range builder.args {
+ if lastArg != "-o" {
+ for _, suffix := range srcFileSuffixes {
+ if strings.HasSuffix(arg.value, suffix) {
+ cSrcFile = arg.value
+ break
+ }
+ }
+ }
+ lastArg = arg.value
+ }
+
+ if cSrcFile == "" {
+ return "", iwyuFlags, iwyuModeNone
+ }
+
+ withIWYU, _ := builder.env.getenv("WITH_IWYU")
+ if withIWYU == "" {
+ withIWYU, builder.args = findWithIWYUFlag(builder.args)
+ if withIWYU == "" {
+ return cSrcFile, iwyuFlags, iwyuModeNone
+ }
+ }
+
+ if withIWYU != "1" {
+ return cSrcFile, iwyuFlags, iwyuModeError
+ }
+
+ return cSrcFile, iwyuFlags, iwyuModeAll
+}
+
+func calcIWYUInvocation(env env, clangCmd *command, cSrcFile string, iwyuFlags ...string) (*command, error) {
+ resourceDir, err := getClangResourceDir(env, clangCmd.Path)
+ if err != nil {
+ return nil, err
+ }
+
+ iwyuPath := filepath.Join(filepath.Dir(clangCmd.Path), "include-what-you-use")
+ args := append([]string{}, iwyuFlags...)
+ args = append(args, "-resource-dir="+resourceDir)
+ args = append(args, clangCmd.Args...)
+
+ for i := 0; i < len(args); i++ {
+ for j := 0; j < len(srcFileSuffixes); j++ {
+ if strings.HasSuffix(args[i], srcFileSuffixes[j]) {
+ args = append(args[:i], args[i+1:]...)
+ break
+ }
+ }
+ }
+ args = append(args, cSrcFile)
+
+ return &command{
+ Path: iwyuPath,
+ Args: args,
+ EnvUpdates: clangCmd.EnvUpdates,
+ }, nil
+}
+
+func runIWYU(env env, clangCmd *command, cSrcFile string, extraIWYUFlags []string) error {
+ extraIWYUFlags = append(extraIWYUFlags, "-Xiwyu", "--mapping_file=/usr/share/include-what-you-use/libcxx.imp", "-Xiwyu", "--no_fwd_decls")
+ iwyuCmd, err := calcIWYUInvocation(env, clangCmd, cSrcFile, extraIWYUFlags...)
+ if err != nil {
+ return fmt.Errorf("calculating include-what-you-use invocation: %v", err)
+ }
+
+ // Note: We pass nil as stdin as we checked before that the compiler
+ // was invoked with a source file argument.
+ var stderr bytes.Buffer
+ stderrWriter := bufio.NewWriter(&stderr)
+ exitCode, err := wrapSubprocessErrorWithSourceLoc(iwyuCmd,
+ env.run(iwyuCmd, nil, nil, stderrWriter))
+ stderrMessage := stderr.String()
+ fmt.Fprintln(env.stderr(), stderrMessage)
+
+ if err == nil && exitCode != 0 {
+ // Note: We continue on purpose when include-what-you-use fails
+ // to maintain compatibility with the previous wrapper.
+ fmt.Fprintln(env.stderr(), "include-what-you-use failed")
+ }
+
+ var path strings.Builder
+ path.WriteString(strings.TrimSuffix(iwyuCmd.Path, "include-what-you-use"))
+ path.WriteString("fix_includes.py")
+ fixIncludesCmd := &command{
+ Path: path.String(),
+ Args: []string{"--nocomment"},
+ EnvUpdates: clangCmd.EnvUpdates,
+ }
+
+ exitCode, err = wrapSubprocessErrorWithSourceLoc(fixIncludesCmd,
+ env.run(fixIncludesCmd, strings.NewReader(stderrMessage), env.stdout(), env.stderr()))
+ if err == nil && exitCode != 0 {
+ // Note: We continue on purpose when include-what-you-use fails
+ // to maintain compatibility with the previous wrapper.
+ fmt.Fprint(env.stderr(), "include-what-you-use failed")
+ }
+
+ return err
+}
diff --git a/compiler_wrapper/iwyu_flag_test.go b/compiler_wrapper/iwyu_flag_test.go
new file mode 100644
index 0000000..7613594
--- /dev/null
+++ b/compiler_wrapper/iwyu_flag_test.go
@@ -0,0 +1,135 @@
+// Copyright 2022 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package main
+
+import (
+ "errors"
+ "io"
+ "strings"
+ "testing"
+)
+
+func TestIWYUArgOrder(t *testing.T) {
+ withIWYUTestContext(t, func(ctx *testContext) {
+ ctx.cmdMock = func(cmd *command, stdin io.Reader, stdout io.Writer, stderr io.Writer) error {
+ if ctx.cmdCount == 2 {
+ if err := verifyArgOrder(cmd, "-checks=.*", mainCc, "--", "-resource-dir=.*", mainCc, "--some_arg"); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ ctx.must(callCompiler(ctx, ctx.cfg,
+ ctx.newCommand(clangX86_64, mainCc, "--some_arg")))
+ if ctx.cmdCount < 2 {
+ t.Error("expected multiple calls.")
+ }
+ })
+}
+
+func TestIgnoreNonZeroExitCodeFromIWYU(t *testing.T) {
+ withIWYUTestContext(t, func(ctx *testContext) {
+ ctx.cmdMock = func(cmd *command, stdin io.Reader, stdout io.Writer, stderr io.Writer) error {
+ if ctx.cmdCount == 2 {
+ return newExitCodeError(23)
+ }
+ return nil
+ }
+ ctx.must(callCompiler(ctx, ctx.cfg,
+ ctx.newCommand(clangX86_64, mainCc)))
+ stderr := ctx.stderrString()
+ if err := verifyNonInternalError(stderr, "include-what-you-use failed"); err != nil {
+ t.Error(err)
+ }
+ })
+}
+
+func TestReportGeneralErrorsFromIWYU(t *testing.T) {
+ withIWYUTestContext(t, func(ctx *testContext) {
+ ctx.cmdMock = func(cmd *command, stdin io.Reader, stdout io.Writer, stderr io.Writer) error {
+ if ctx.cmdCount > 1 {
+ return errors.New("someerror")
+ }
+ return nil
+ }
+ stderr := ctx.mustFail(callCompiler(ctx, ctx.cfg,
+ ctx.newCommand(clangX86_64, mainCc)))
+ if err := verifyInternalError(stderr); err != nil {
+ t.Fatal(err)
+ }
+ if !strings.Contains(stderr, "someerror") {
+ t.Errorf("unexpected error. Got: %s", stderr)
+ }
+ })
+}
+
+func TestUseIWYUBasedOnFileExtension(t *testing.T) {
+ withIWYUTestContext(t, func(ctx *testContext) {
+ testData := []struct {
+ args []string
+ iwyu bool
+ }{
+ {[]string{"main.cc"}, true},
+ {[]string{"main.cc"}, true},
+ {[]string{"main.C"}, true},
+ {[]string{"main.cxx"}, true},
+ {[]string{"main.c++"}, true},
+ {[]string{"main.xy"}, false},
+ {[]string{"-o", "main.cc"}, false},
+ {[]string{}, false},
+ }
+ for _, tt := range testData {
+ ctx.cmdCount = 0
+ ctx.must(callCompiler(ctx, ctx.cfg,
+ ctx.newCommand(clangX86_64, tt.args...)))
+ if ctx.cmdCount > 1 && !tt.iwyu {
+ t.Errorf("expected a call to iwyu but got none for args %s", tt.args)
+ }
+ if ctx.cmdCount == 1 && tt.iwyu {
+ t.Errorf("expected no call to iwyu but got one for args %s", tt.args)
+ }
+ }
+ })
+}
+
+func TestIWYUFiltersIWYUFlags(t *testing.T) {
+ withIWYUTestContext(t, func(ctx *testContext) {
+ addedFlag := "--some_iwyu_flag=flag"
+ ctx.cmdMock = func(cmd *command, stdin io.Reader, stdout io.Writer, stderr io.Writer) error {
+ switch ctx.cmdCount {
+ case 1:
+ if err := verifyPath(cmd, "usr/bin/clang"); err != nil {
+ t.Error(err)
+ } else if err := verifyArgCount(cmd, 0, addedFlag); err != nil {
+ t.Error(err)
+ }
+ return nil
+ case 2:
+ if err := verifyPath(cmd, "usr/bin/include-what-you-use"); err != nil {
+ t.Error(err)
+ } else if verifyArgCount(cmd, 1, addedFlag); err != nil {
+ t.Error(err)
+ }
+ return nil
+ default:
+ return nil
+ }
+ }
+ cmd := ctx.must(callCompiler(ctx, ctx.cfg, ctx.newCommand(clangX86_64, mainCc, "-iwyu-flag="+addedFlag)))
+ if ctx.cmdCount < 2 {
+ t.Errorf("expected multiple calls.")
+ }
+ if err := verifyPath(cmd, "usr/bin/clang"); err != nil {
+ t.Error(err)
+ }
+ })
+}
+
+func withIWYUTestContext(t *testing.T, work func(ctx *testContext)) {
+ withTestContext(t, func(ctx *testContext) {
+ ctx.env = []string{"WITH_IWYU=1"}
+ work(ctx)
+ })
+}
diff --git a/compiler_wrapper/kernel_bug.go b/compiler_wrapper/kernel_bug.go
index 55817cb..857dae0 100644
--- a/compiler_wrapper/kernel_bug.go
+++ b/compiler_wrapper/kernel_bug.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Chromium OS Authors. All rights reserved.
+// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package main
diff --git a/compiler_wrapper/kernel_bug_test.go b/compiler_wrapper/kernel_bug_test.go
index 3c7bccf..3b36384 100644
--- a/compiler_wrapper/kernel_bug_test.go
+++ b/compiler_wrapper/kernel_bug_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Chromium OS Authors. All rights reserved.
+// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package main
diff --git a/compiler_wrapper/libc_exec.go b/compiler_wrapper/libc_exec.go
index a7a561b..e4bcad8 100644
--- a/compiler_wrapper/libc_exec.go
+++ b/compiler_wrapper/libc_exec.go
@@ -1,7 +1,8 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//go:build libc_exec
// +build libc_exec
package main
diff --git a/compiler_wrapper/libgcc_flags.go b/compiler_wrapper/libgcc_flags.go
index 72fa838..a87223e 100644
--- a/compiler_wrapper/libgcc_flags.go
+++ b/compiler_wrapper/libgcc_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Chromium OS Authors. All rights reserved.
+// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/libgcc_flags_test.go b/compiler_wrapper/libgcc_flags_test.go
index 717c0e5..d30bf73 100644
--- a/compiler_wrapper/libgcc_flags_test.go
+++ b/compiler_wrapper/libgcc_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Chromium OS Authors. All rights reserved.
+// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/main.go b/compiler_wrapper/main.go
index 046cf5a..8cfa436 100644
--- a/compiler_wrapper/main.go
+++ b/compiler_wrapper/main.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/pie_flags.go b/compiler_wrapper/pie_flags.go
deleted file mode 100644
index 9675f6e..0000000
--- a/compiler_wrapper/pie_flags.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package main
-
-func processPieFlags(builder *commandBuilder) {
- fpieMap := map[string]bool{"-D__KERNEL__": true, "-fPIC": true, "-fPIE": true, "-fno-PIC": true, "-fno-PIE": true,
- "-fno-pic": true, "-fno-pie": true, "-fpic": true, "-fpie": true, "-nopie": true,
- "-nostartfiles": true, "-nostdlib": true, "-pie": true, "-static": true}
-
- pieMap := map[string]bool{"-D__KERNEL__": true, "-A": true, "-fno-PIC": true, "-fno-PIE": true, "-fno-pic": true, "-fno-pie": true,
- "-nopie": true, "-nostartfiles": true, "-nostdlib": true, "-pie": true, "-r": true, "--shared": true,
- "-shared": true, "-static": true}
-
- pie := false
- fpie := false
- if builder.target.abi != "eabi" {
- for _, arg := range builder.args {
- if arg.fromUser {
- if fpieMap[arg.value] {
- fpie = true
- }
- if pieMap[arg.value] {
- pie = true
- }
- }
- }
- }
- builder.transformArgs(func(arg builderArg) string {
- // Remove -nopie as it is a non-standard flag.
- if arg.value == "-nopie" {
- return ""
- }
- if fpie && !arg.fromUser && arg.value == "-fPIE" {
- return ""
- }
- if pie && !arg.fromUser && arg.value == "-pie" {
- return ""
- }
- return arg.value
- })
-}
diff --git a/compiler_wrapper/pie_flags_test.go b/compiler_wrapper/pie_flags_test.go
deleted file mode 100644
index 77a0fc8..0000000
--- a/compiler_wrapper/pie_flags_test.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package main
-
-import (
- "testing"
-)
-
-func TestAddPieFlags(t *testing.T) {
- withTestContext(t, func(ctx *testContext) {
- initPieConfig(ctx.cfg)
- cmd := ctx.must(callCompiler(ctx, ctx.cfg,
- ctx.newCommand(gccX86_64, mainCc)))
- if err := verifyArgOrder(cmd, "-pie", mainCc); err != nil {
- t.Error(err)
- }
- if err := verifyArgOrder(cmd, "-fPIE", mainCc); err != nil {
- t.Error(err)
- }
- })
-}
-
-func TestOmitPieFlagsWhenNoPieArgGiven(t *testing.T) {
- withTestContext(t, func(ctx *testContext) {
- initPieConfig(ctx.cfg)
- cmd := ctx.must(callCompiler(ctx, ctx.cfg,
- ctx.newCommand(gccX86_64, "-nopie", mainCc)))
- if err := verifyArgCount(cmd, 0, "-nopie"); err != nil {
- t.Error(err)
- }
- if err := verifyArgCount(cmd, 0, "-pie"); err != nil {
- t.Error(err)
- }
- if err := verifyArgCount(cmd, 0, "-fPIE"); err != nil {
- t.Error(err)
- }
-
- cmd = ctx.must(callCompiler(ctx, ctx.cfg,
- ctx.newCommand(gccX86_64, "-fno-pie", mainCc)))
- if err := verifyArgCount(cmd, 0, "-pie"); err != nil {
- t.Error(err)
- }
- if err := verifyArgCount(cmd, 0, "-fPIE"); err != nil {
- t.Error(err)
- }
- })
-}
-
-func TestOmitPieFlagsWhenKernelDefined(t *testing.T) {
- withTestContext(t, func(ctx *testContext) {
- initPieConfig(ctx.cfg)
- cmd := ctx.must(callCompiler(ctx, ctx.cfg,
- ctx.newCommand(gccX86_64, "-D__KERNEL__", mainCc)))
- if err := verifyArgCount(cmd, 0, "-pie"); err != nil {
- t.Error(err)
- }
- if err := verifyArgCount(cmd, 0, "-fPIE"); err != nil {
- t.Error(err)
- }
- })
-}
-
-func TestAddPieFlagsForEabiEvenIfNoPieGiven(t *testing.T) {
- withTestContext(t, func(ctx *testContext) {
- initPieConfig(ctx.cfg)
- cmd := ctx.must(callCompiler(ctx, ctx.cfg,
- ctx.newCommand(gccX86_64Eabi, "-nopie", mainCc)))
- if err := verifyArgCount(cmd, 0, "-nopie"); err != nil {
- t.Error(err)
- }
- if err := verifyArgCount(cmd, 1, "-pie"); err != nil {
- t.Error(err)
- }
- if err := verifyArgCount(cmd, 1, "-fPIE"); err != nil {
- t.Error(err)
- }
- })
-}
-
-func initPieConfig(cfg *config) {
- cfg.commonFlags = []string{"-fPIE", "-pie"}
-}
diff --git a/compiler_wrapper/print_cmdline_flag.go b/compiler_wrapper/print_cmdline_flag.go
index e2092ed..9c744c3 100644
--- a/compiler_wrapper/print_cmdline_flag.go
+++ b/compiler_wrapper/print_cmdline_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/print_cmdline_flag_test.go b/compiler_wrapper/print_cmdline_flag_test.go
index 8f6fc22..54bd70c 100644
--- a/compiler_wrapper/print_cmdline_flag_test.go
+++ b/compiler_wrapper/print_cmdline_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/print_config_flag.go b/compiler_wrapper/print_config_flag.go
index 9ab9f6b..37e5407 100644
--- a/compiler_wrapper/print_config_flag.go
+++ b/compiler_wrapper/print_config_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/print_config_flag_test.go b/compiler_wrapper/print_config_flag_test.go
index 63451ed..1984723 100644
--- a/compiler_wrapper/print_config_flag_test.go
+++ b/compiler_wrapper/print_config_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/remote_build_flag_test.go b/compiler_wrapper/remote_build_flag_test.go
index 4a89417..23a22e1 100644
--- a/compiler_wrapper/remote_build_flag_test.go
+++ b/compiler_wrapper/remote_build_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/remote_build_flags.go b/compiler_wrapper/remote_build_flags.go
index fc26c93..7a5a765 100644
--- a/compiler_wrapper/remote_build_flags.go
+++ b/compiler_wrapper/remote_build_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/reset_compiler_wrapper.sh b/compiler_wrapper/reset_compiler_wrapper.sh
index 523e972..3206199 100755
--- a/compiler_wrapper/reset_compiler_wrapper.sh
+++ b/compiler_wrapper/reset_compiler_wrapper.sh
@@ -1,6 +1,6 @@
#!/bin/bash -eux
#
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/compiler_wrapper/rusage_flag.go b/compiler_wrapper/rusage_flag.go
index 6346960..ed59b11 100644
--- a/compiler_wrapper/rusage_flag.go
+++ b/compiler_wrapper/rusage_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/rusage_flag_test.go b/compiler_wrapper/rusage_flag_test.go
index 439cfd1..6702166 100644
--- a/compiler_wrapper/rusage_flag_test.go
+++ b/compiler_wrapper/rusage_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/sanitizer_flags.go b/compiler_wrapper/sanitizer_flags.go
index da0a64b..58312cc 100644
--- a/compiler_wrapper/sanitizer_flags.go
+++ b/compiler_wrapper/sanitizer_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -8,34 +8,79 @@
"strings"
)
+// Returns whether the flag turns on 'invasive' sanitizers. These are sanitizers incompatible with
+// things like FORTIFY, since they require meaningful runtime support, intercept libc calls, etc.
+func isInvasiveSanitizerFlag(flag string) bool {
+ // There are a few valid spellings here:
+ // -fsanitize=${sanitizer_list}, which enables the given sanitizers
+ // -fsanitize-trap=${sanitizer_list}, which specifies sanitizer behavior _if_ these
+ // sanitizers are already enabled.
+ // -fsanitize-recover=${sanitizer_list}, which also specifies sanitizer behavior _if_
+ // these sanitizers are already enabled.
+ // -fsanitize-ignorelist=/path/to/file, which designates a config file for sanitizers.
+ //
+ // All we care about is the first one, since that's what actually enables sanitizers. Clang
+ // does not accept a `-fsanitize ${sanitizer_list}` spelling of this flag.
+ fsanitize := "-fsanitize="
+ if !strings.HasPrefix(flag, fsanitize) {
+ return false
+ }
+
+ sanitizers := flag[len(fsanitize):]
+ if sanitizers == "" {
+ return false
+ }
+
+ for _, sanitizer := range strings.Split(sanitizers, ",") {
+ // Keep an allowlist of sanitizers known to not cause issues.
+ switch sanitizer {
+ case "alignment", "array-bounds", "bool", "bounds", "builtin", "enum",
+ "float-cast-overflow", "integer-divide-by-zero", "local-bounds",
+ "nullability", "nullability-arg", "nullability-assign",
+ "nullability-return", "null", "return", "returns-nonnull-attribute",
+ "shift-base", "shift-exponent", "shift", "unreachable", "vla-bound":
+ // These sanitizers are lightweight. Ignore them.
+ default:
+ return true
+ }
+ }
+ return false
+}
+
func processSanitizerFlags(builder *commandBuilder) {
hasSanitizeFlags := false
+ // TODO: This doesn't take -fno-sanitize flags into account. This doesn't seem to be an
+ // issue in practice.
for _, arg := range builder.args {
- // TODO: This should probably be -fsanitize= to not match on
- // e.g. -fsanitize-blocklist
- if arg.fromUser {
- if strings.HasPrefix(arg.value, "-fsanitize") {
- hasSanitizeFlags = true
- }
+ if arg.fromUser && isInvasiveSanitizerFlag(arg.value) {
+ hasSanitizeFlags = true
+ break
}
}
- if hasSanitizeFlags {
- // Flags not supported by sanitizers (ASan etc.)
- unsupportedSanitizerFlags := map[string]bool{
- "-D_FORTIFY_SOURCE=1": true,
- "-D_FORTIFY_SOURCE=2": true,
- "-Wl,--no-undefined": true,
- "-Wl,-z,defs": true,
- }
- builder.transformArgs(func(arg builderArg) string {
- // TODO: This is a bug in the old wrapper to not filter
- // non user args for gcc. Fix this once we don't compare to the old wrapper anymore.
- if (builder.target.compilerType != gccType || arg.fromUser) &&
- unsupportedSanitizerFlags[arg.value] {
- return ""
- }
- return arg.value
- })
+ if !hasSanitizeFlags {
+ return
}
+
+ // Flags not supported by sanitizers (ASan etc.)
+ unsupportedSanitizerFlags := map[string]bool{
+ "-D_FORTIFY_SOURCE=1": true,
+ "-D_FORTIFY_SOURCE=2": true,
+ "-Wl,--no-undefined": true,
+ "-Wl,-z,defs": true,
+ }
+
+ builder.transformArgs(func(arg builderArg) string {
+ // TODO: This is a bug in the old wrapper to not filter
+ // non user args for gcc. Fix this once we don't compare to the old wrapper anymore.
+ if (builder.target.compilerType != gccType || arg.fromUser) &&
+ unsupportedSanitizerFlags[arg.value] {
+ return ""
+ }
+ return arg.value
+ })
+
+ builder.filterArgPairs(func(arg1, arg2 builderArg) bool {
+ return !(arg1.value == "-Wl,-z" && arg2.value == "-Wl,defs")
+ })
}
diff --git a/compiler_wrapper/sanitizer_flags_test.go b/compiler_wrapper/sanitizer_flags_test.go
index a401d58..b4b1fd8 100644
--- a/compiler_wrapper/sanitizer_flags_test.go
+++ b/compiler_wrapper/sanitizer_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -8,6 +8,28 @@
"testing"
)
+func TestFortifyIsKeptIfSanitizerIsTrivial(t *testing.T) {
+ withTestContext(t, func(ctx *testContext) {
+ cmd := ctx.must(callCompiler(ctx, ctx.cfg,
+ ctx.newCommand(gccX86_64, "-fsanitize=return", "-D_FORTIFY_SOURCE=1", mainCc)))
+ if err := verifyArgCount(cmd, 1, "-D_FORTIFY_SOURCE=1"); err != nil {
+ t.Error(err)
+ }
+
+ cmd = ctx.must(callCompiler(ctx, ctx.cfg,
+ ctx.newCommand(gccX86_64, "-fsanitize=return,address", "-D_FORTIFY_SOURCE=1", mainCc)))
+ if err := verifyArgCount(cmd, 0, "-D_FORTIFY_SOURCE=1"); err != nil {
+ t.Error(err)
+ }
+
+ cmd = ctx.must(callCompiler(ctx, ctx.cfg,
+ ctx.newCommand(gccX86_64, "-fsanitize=address,return", "-D_FORTIFY_SOURCE=1", mainCc)))
+ if err := verifyArgCount(cmd, 0, "-D_FORTIFY_SOURCE=1"); err != nil {
+ t.Error(err)
+ }
+ })
+}
+
func TestFilterUnsupportedSanitizerFlagsIfSanitizeGiven(t *testing.T) {
withTestContext(t, func(ctx *testContext) {
cmd := ctx.must(callCompiler(ctx, ctx.cfg,
@@ -23,6 +45,15 @@
}
cmd = ctx.must(callCompiler(ctx, ctx.cfg,
+ ctx.newCommand(gccX86_64, "-fsanitize=kernel-address", "-Wl,-z -Wl,defs", mainCc)))
+ if err := verifyArgCount(cmd, 0, "-Wl,-z"); err != nil {
+ t.Error(err)
+ }
+ if err := verifyArgCount(cmd, 0, "-Wl,defs"); err != nil {
+ t.Error(err)
+ }
+
+ cmd = ctx.must(callCompiler(ctx, ctx.cfg,
ctx.newCommand(gccX86_64, "-fsanitize=kernel-address", "-D_FORTIFY_SOURCE=1", mainCc)))
if err := verifyArgCount(cmd, 0, "-D_FORTIFY_SOURCE=1"); err != nil {
t.Error(err)
@@ -75,6 +106,15 @@
}
cmd = ctx.must(callCompiler(ctx, ctx.cfg,
+ ctx.newCommand(gccX86_64, "-Wl,-z", "-Wl,defs", mainCc)))
+ if err := verifyArgCount(cmd, 1, "-Wl,-z"); err != nil {
+ t.Error(err)
+ }
+ if err := verifyArgCount(cmd, 1, "-Wl,defs"); err != nil {
+ t.Error(err)
+ }
+
+ cmd = ctx.must(callCompiler(ctx, ctx.cfg,
ctx.newCommand(gccX86_64, "-D_FORTIFY_SOURCE=1", mainCc)))
if err := verifyArgCount(cmd, 1, "-D_FORTIFY_SOURCE=1"); err != nil {
t.Error(err)
diff --git a/compiler_wrapper/stackprotector_flags.go b/compiler_wrapper/stackprotector_flags.go
index 2460572..0e620b5 100644
--- a/compiler_wrapper/stackprotector_flags.go
+++ b/compiler_wrapper/stackprotector_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/stackprotector_flags_test.go b/compiler_wrapper/stackprotector_flags_test.go
index a875757..c13862a 100644
--- a/compiler_wrapper/stackprotector_flags_test.go
+++ b/compiler_wrapper/stackprotector_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/sysroot_flag.go b/compiler_wrapper/sysroot_flag.go
index e0583b2..597153a 100644
--- a/compiler_wrapper/sysroot_flag.go
+++ b/compiler_wrapper/sysroot_flag.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/sysroot_flag_test.go b/compiler_wrapper/sysroot_flag_test.go
index b05a627..9fea684 100644
--- a/compiler_wrapper/sysroot_flag_test.go
+++ b/compiler_wrapper/sysroot_flag_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json b/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json
index e237c7c..6993499 100644
--- a/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json
+++ b/compiler_wrapper/testdata/cros_clang_host_golden/bisect.json
@@ -25,26 +25,28 @@
"/tmp/sysroot_bisect",
"/tmp/stable/clang",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
],
"env_updates": [
"PYTHONPATH=/somepath/test_binary"
@@ -80,26 +82,28 @@
"someBisectDir",
"/tmp/stable/clang",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
],
"env_updates": [
"PYTHONPATH=/somepath/test_binary"
@@ -138,26 +142,28 @@
"someBisectDir",
"/tmp/stable/clang",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
],
"env_updates": [
"PYTHONPATH=/somepath/test_binary"
diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json
index 07fccc6..2665403 100644
--- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json
+++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_ftrapv_maincc_target_specific.json
@@ -16,26 +16,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -58,26 +60,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -100,26 +104,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -142,26 +148,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -184,26 +192,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -226,26 +236,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -268,26 +280,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -310,26 +324,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -352,26 +368,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json
index a221605..b151051 100644
--- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json
+++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_host_wrapper.json
@@ -15,26 +15,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json
index 2130d52..396bb95 100644
--- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json
+++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_maincc_target_specific.json
@@ -15,26 +15,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -56,26 +58,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -97,26 +101,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -138,26 +144,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -179,26 +187,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -220,26 +230,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -261,26 +273,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -302,26 +316,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -343,26 +359,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json
index 43ae728..f32a704 100644
--- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json
+++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_path.json
@@ -15,26 +15,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -59,26 +61,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
},
"stdout": "somemessage",
@@ -103,26 +107,28 @@
"path": "/tmp/stable/clang++",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -147,26 +153,28 @@
"path": "somepath/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -198,28 +206,30 @@
"path": "/somedir/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-resource-dir=someResourceDir",
"--gcc-toolchain=/usr",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -254,28 +264,30 @@
"path": "/somedir/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-resource-dir=someResourceDir",
"--gcc-toolchain=/usr",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -310,28 +322,30 @@
"path": "/somedir/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-resource-dir=someResourceDir",
"--gcc-toolchain=/usr",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
},
"stdout": "somemessage",
@@ -356,26 +370,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -397,26 +413,28 @@
"path": "/tmp/stable/a/b/c/d/e/f/g/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -438,26 +456,28 @@
"path": "/tmp/stable/a/b/c/d/e/f/g/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -479,26 +499,28 @@
"path": "/tmp/stable/somedir/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -523,26 +545,28 @@
"path": "/tmp/stable/pathenv/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json
index b8b28cd..8df5109 100644
--- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json
+++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_sanitizer_args.json
@@ -17,27 +17,29 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fsanitize=kernel-address",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -61,27 +63,29 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fsanitize=kernel-address",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -105,27 +109,29 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fsanitize=kernel-address",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -149,27 +155,29 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fsanitize=kernel-address",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -192,27 +200,29 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fsanitize=fuzzer",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -236,28 +246,30 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fsanitize=address",
"-fprofile-instr-generate",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -280,27 +292,29 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fsanitize=address",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -323,27 +337,29 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fprofile-instr-generate",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json
index 7c4afd3..7c296af 100644
--- a/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json
+++ b/compiler_wrapper/testdata/cros_clang_host_golden/clang_specific_args.json
@@ -25,18 +25,20 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-mno-movbe",
@@ -48,11 +50,11 @@
"-Woverride-init",
"-Wunsafe-loop-optimizations",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -75,27 +77,29 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-Wno-#warnings",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -118,27 +122,29 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-Wno-error=uninitialized",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -161,27 +167,29 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-someflag",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json b/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json
index f678ba6..c1cf050 100644
--- a/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json
+++ b/compiler_wrapper/testdata/cros_clang_host_golden/clangtidy.json
@@ -31,26 +31,28 @@
"--",
"-resource-dir=someResourceDir",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
},
@@ -59,26 +61,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -117,26 +121,28 @@
"--",
"-resource-dir=someResourceDir",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
},
@@ -145,26 +151,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -205,26 +213,28 @@
"--",
"-resource-dir=someResourceDir",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
},
"stdout": "somemessage",
@@ -236,26 +246,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -297,26 +309,28 @@
"--",
"-resource-dir=someResourceDir",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
},
@@ -325,26 +339,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
},
"stdout": "somemessage",
diff --git a/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json
index f5a7771..ad290b4 100644
--- a/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json
+++ b/compiler_wrapper/testdata/cros_clang_host_golden/force_disable_werror.json
@@ -18,26 +18,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
}
}
@@ -62,26 +64,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
},
"stderr": "-Werror originalerror",
@@ -92,26 +96,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-Wno-error",
"-Wno-error=poison-system-directories"
]
@@ -140,26 +146,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
- "-Wno-unused-but-set-variable"
+ "-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation"
]
},
"stderr": "-Werror originalerror",
@@ -170,26 +178,28 @@
"path": "/tmp/stable/clang",
"args": [
"-Qunused-arguments",
- "-fno-addrsig",
- "-fuse-ld=lld",
- "-fdebug-default-version=5",
- "-Wno-unused-local-typedefs",
- "-Wno-tautological-constant-compare",
- "-Wno-tautological-unsigned-enum-zero-compare",
- "-Wno-final-dtor-non-final-class",
"-Werror=poison-system-directories",
- "-Wno-unknown-warning-option",
- "-fexperimental-new-pass-manager",
"-Wno-compound-token-split-by-macro",
"-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
+ "-Wno-tautological-constant-compare",
+ "-Wno-tautological-unsigned-enum-zero-compare",
+ "-Wno-unknown-warning-option",
+ "-fdebug-default-version=5",
+ "-fexperimental-new-pass-manager",
+ "-Wno-unused-local-typedefs",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"main.cc",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-Wno-error",
"-Wno-error=poison-system-directories"
]
diff --git a/compiler_wrapper/testdata/cros_hardened_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_golden/bisect.json
index 05aea31..8bd823e 100644
--- a/compiler_wrapper/testdata/cros_hardened_golden/bisect.json
+++ b/compiler_wrapper/testdata/cros_hardened_golden/bisect.json
@@ -27,35 +27,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -100,35 +100,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -176,35 +176,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json
index 2b04d0c..56f78ef 100644
--- a/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json
+++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_ftrapv_maincc_target_specific.json
@@ -18,24 +18,24 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -43,11 +43,11 @@
"-ftrapv",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -81,24 +81,24 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-eabi",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -106,11 +106,11 @@
"-ftrapv",
"main.cc",
"-L/usr/x86_64-cros-eabi/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -144,24 +144,24 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-win-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -169,11 +169,11 @@
"-ftrapv",
"main.cc",
"-L/usr/x86_64-cros-win-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -207,24 +207,24 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv7m-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-static-libgcc",
"-mthumb",
@@ -232,11 +232,11 @@
"-ftrapv",
"main.cc",
"-L/usr/armv7m-cros-linux-gnu/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv7m-cros-linux-gnu"
@@ -269,24 +269,24 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv7m-cros-eabi",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -294,11 +294,11 @@
"-ftrapv",
"main.cc",
"-L/usr/armv7m-cros-eabi/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv7m-cros-eabi"
@@ -331,24 +331,24 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv7m-cros-win-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-static-libgcc",
"-mthumb",
@@ -356,11 +356,11 @@
"-ftrapv",
"main.cc",
"-L/usr/armv7m-cros-win-gnu/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv7m-cros-win-gnu"
@@ -393,24 +393,24 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv8m-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-static-libgcc",
"-mthumb",
@@ -418,11 +418,11 @@
"-ftrapv",
"main.cc",
"-L/usr/armv8m-cros-linux-gnu/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv8m-cros-linux-gnu"
@@ -455,24 +455,24 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv8m-cros-eabi",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -480,11 +480,11 @@
"-ftrapv",
"main.cc",
"-L/usr/armv8m-cros-eabi/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv8m-cros-eabi"
@@ -517,24 +517,24 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv8m-cros-win-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-static-libgcc",
"-mthumb",
@@ -542,11 +542,11 @@
"-ftrapv",
"main.cc",
"-L/usr/armv8m-cros-win-gnu/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv8m-cros-win-gnu"
diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json
index ea83417..db2f59a 100644
--- a/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json
+++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_maincc_target_specific.json
@@ -17,35 +17,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -78,35 +78,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-eabi",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-eabi-",
"main.cc",
"-L/usr/x86_64-cros-eabi/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -139,35 +139,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-win-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-win-gnu-",
"main.cc",
"-L/usr/x86_64-cros-win-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -200,35 +200,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv7m-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-static-libgcc",
"-mthumb",
"--prefix=../../bin/armv7m-cros-linux-gnu-",
"main.cc",
"-L/usr/armv7m-cros-linux-gnu/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv7m-cros-linux-gnu"
@@ -260,35 +260,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv7m-cros-eabi",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/armv7m-cros-eabi-",
"main.cc",
"-L/usr/armv7m-cros-eabi/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv7m-cros-eabi"
@@ -320,35 +320,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv7m-cros-win-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-static-libgcc",
"-mthumb",
"--prefix=../../bin/armv7m-cros-win-gnu-",
"main.cc",
"-L/usr/armv7m-cros-win-gnu/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv7m-cros-win-gnu"
@@ -380,35 +380,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv8m-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-static-libgcc",
"-mthumb",
"--prefix=../../bin/armv8m-cros-linux-gnu-",
"main.cc",
"-L/usr/armv8m-cros-linux-gnu/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv8m-cros-linux-gnu"
@@ -440,35 +440,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv8m-cros-eabi",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/armv8m-cros-eabi-",
"main.cc",
"-L/usr/armv8m-cros-eabi/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv8m-cros-eabi"
@@ -500,35 +500,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv8m-cros-win-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-static-libgcc",
"-mthumb",
"--prefix=../../bin/armv8m-cros-win-gnu-",
"main.cc",
"-L/usr/armv8m-cros-win-gnu/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv8m-cros-win-gnu"
diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json
index 52d4184..186a16e 100644
--- a/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json
+++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_path.json
@@ -17,35 +17,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -81,35 +81,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -145,35 +145,35 @@
"../../usr/bin/clang++",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -209,35 +209,35 @@
"somepath/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -280,24 +280,24 @@
"/somedir/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -306,11 +306,11 @@
"--gcc-toolchain=/usr",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -356,24 +356,24 @@
"/somedir/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -382,11 +382,11 @@
"--gcc-toolchain=/usr",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -427,24 +427,24 @@
"/somedir/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -453,11 +453,11 @@
"--gcc-toolchain=/usr",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -493,35 +493,35 @@
"/usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -554,35 +554,35 @@
"a/b/c/d/e/usr/bin/clang",
"--sysroot=/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-Ba/b/c/d/e/bin",
"-target",
@@ -615,35 +615,35 @@
"a/b/c/d/e/usr/bin/clang",
"--sysroot=/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-Ba/b/c/d/e/bin",
"-target",
@@ -676,35 +676,35 @@
"../usr/bin/clang",
"--sysroot=/tmp/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../bin",
"-target",
@@ -740,35 +740,35 @@
"/tmp/usr/bin/clang",
"--sysroot=/tmp/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json
index b71a880..6a38e84 100644
--- a/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json
+++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_sanitizer_args.json
@@ -19,35 +19,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"-fsanitize=kernel-address",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -82,35 +82,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"-fsanitize=kernel-address",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -145,35 +145,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"-fsanitize=kernel-address",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -208,35 +208,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"-fsanitize=kernel-address",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -270,35 +270,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"-fsanitize=fuzzer",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -333,24 +333,24 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
@@ -358,11 +358,11 @@
"-fprofile-instr-generate",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -396,35 +396,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"-fsanitize=address",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -458,24 +458,24 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -483,11 +483,11 @@
"-fprofile-instr-generate",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json
index d10e6cd..ec91216 100644
--- a/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json
+++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_specific_args.json
@@ -27,24 +27,24 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -59,11 +59,11 @@
"-Wunsafe-loop-optimizations",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -97,24 +97,24 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -122,11 +122,11 @@
"-Wno-#warnings",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -160,24 +160,24 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -185,11 +185,11 @@
"-Wno-error=uninitialized",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -223,24 +223,24 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -248,11 +248,11 @@
"-someflag",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json b/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json
index 4220918..0ad97b7 100644
--- a/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json
+++ b/compiler_wrapper/testdata/cros_hardened_golden/clang_sysroot_wrapper_common.json
@@ -21,8 +21,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -55,35 +53,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -119,35 +117,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -176,19 +174,21 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
@@ -196,13 +196,14 @@
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
+ "-nopie",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -236,19 +237,21 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-D_FORTIFY_SOURCE=2",
@@ -259,11 +262,11 @@
"-D__KERNEL__",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -297,19 +300,21 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv7a-cros-linux-gnueabihf",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-D_FORTIFY_SOURCE=2",
@@ -320,11 +325,11 @@
"-D__KERNEL__",
"main.cc",
"-L/usr/armv7a-cros-linux-gnueabihf/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv7a-cros-linux-gnueabihf"
@@ -356,24 +361,24 @@
"args": [
"../../usr/bin/clang",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -381,11 +386,11 @@
"--sysroot=xyz",
"main.cc",
"-Lxyz/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json
index ea1363e..f743894 100644
--- a/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json
+++ b/compiler_wrapper/testdata/cros_hardened_golden/clangtidy.json
@@ -32,35 +32,35 @@
"-resource-dir=someResourceDir",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -74,35 +74,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -146,35 +146,35 @@
"-resource-dir=someResourceDir",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -189,35 +189,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -263,35 +263,35 @@
"-resource-dir=someResourceDir",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -309,35 +309,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -384,35 +384,35 @@
"-resource-dir=someResourceDir",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -427,35 +427,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json
index 4df8157..9cd7261 100644
--- a/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json
+++ b/compiler_wrapper/testdata/cros_hardened_golden/force_disable_werror.json
@@ -20,35 +20,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -84,35 +84,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -134,35 +134,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -202,35 +202,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -252,35 +252,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json
index e2037e2..d9b532f 100644
--- a/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json
+++ b/compiler_wrapper/testdata/cros_hardened_golden/gcc_clang_syntax.json
@@ -17,35 +17,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -66,8 +66,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -105,35 +103,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -154,8 +152,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -188,35 +184,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -252,35 +248,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -301,8 +297,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
diff --git a/compiler_wrapper/testdata/cros_hardened_golden/gcc_maincc_target_specific.json b/compiler_wrapper/testdata/cros_hardened_golden/gcc_maincc_target_specific.json
index 0cc3d8a..63b7da1 100644
--- a/compiler_wrapper/testdata/cros_hardened_golden/gcc_maincc_target_specific.json
+++ b/compiler_wrapper/testdata/cros_hardened_golden/gcc_maincc_target_specific.json
@@ -21,8 +21,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -60,8 +58,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -99,8 +95,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -138,8 +132,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-static-libgcc",
"-mthumb",
@@ -176,8 +168,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -214,8 +204,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-static-libgcc",
"-mthumb",
@@ -252,8 +240,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-static-libgcc",
"-mthumb",
@@ -290,8 +276,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -328,8 +312,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-static-libgcc",
"-mthumb",
diff --git a/compiler_wrapper/testdata/cros_hardened_golden/gcc_path.json b/compiler_wrapper/testdata/cros_hardened_golden/gcc_path.json
index 5b129d7..6e06ce1 100644
--- a/compiler_wrapper/testdata/cros_hardened_golden/gcc_path.json
+++ b/compiler_wrapper/testdata/cros_hardened_golden/gcc_path.json
@@ -21,8 +21,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -63,8 +61,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -105,8 +101,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -144,8 +138,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -183,8 +175,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -225,8 +215,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
diff --git a/compiler_wrapper/testdata/cros_hardened_golden/gcc_sanitizer_args.json b/compiler_wrapper/testdata/cros_hardened_golden/gcc_sanitizer_args.json
index a61ead8..9aaf9c2 100644
--- a/compiler_wrapper/testdata/cros_hardened_golden/gcc_sanitizer_args.json
+++ b/compiler_wrapper/testdata/cros_hardened_golden/gcc_sanitizer_args.json
@@ -23,8 +23,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -65,8 +63,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -107,8 +103,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -149,8 +143,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -190,8 +182,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -232,8 +222,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -274,8 +262,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -315,8 +301,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
diff --git a/compiler_wrapper/testdata/cros_hardened_golden/gcc_specific_args.json b/compiler_wrapper/testdata/cros_hardened_golden/gcc_specific_args.json
index f7b2000..4ee90cb 100644
--- a/compiler_wrapper/testdata/cros_hardened_golden/gcc_specific_args.json
+++ b/compiler_wrapper/testdata/cros_hardened_golden/gcc_specific_args.json
@@ -22,8 +22,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -63,8 +61,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -104,8 +100,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
diff --git a/compiler_wrapper/testdata/cros_hardened_golden/gcc_sysroot_wrapper_common.json b/compiler_wrapper/testdata/cros_hardened_golden/gcc_sysroot_wrapper_common.json
index 299b46b..9397333 100644
--- a/compiler_wrapper/testdata/cros_hardened_golden/gcc_sysroot_wrapper_common.json
+++ b/compiler_wrapper/testdata/cros_hardened_golden/gcc_sysroot_wrapper_common.json
@@ -21,8 +21,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -59,8 +57,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -101,8 +97,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -140,6 +134,7 @@
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
+ "-nopie",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
"-mno-movbe"
@@ -251,8 +246,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json
index 05aea31..8bd823e 100644
--- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json
+++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/bisect.json
@@ -27,35 +27,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -100,35 +100,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -176,35 +176,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json
index 52d4184..186a16e 100644
--- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json
+++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clang_path.json
@@ -17,35 +17,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -81,35 +81,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -145,35 +145,35 @@
"../../usr/bin/clang++",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -209,35 +209,35 @@
"somepath/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -280,24 +280,24 @@
"/somedir/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -306,11 +306,11 @@
"--gcc-toolchain=/usr",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -356,24 +356,24 @@
"/somedir/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -382,11 +382,11 @@
"--gcc-toolchain=/usr",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -427,24 +427,24 @@
"/somedir/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -453,11 +453,11 @@
"--gcc-toolchain=/usr",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -493,35 +493,35 @@
"/usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -554,35 +554,35 @@
"a/b/c/d/e/usr/bin/clang",
"--sysroot=/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-Ba/b/c/d/e/bin",
"-target",
@@ -615,35 +615,35 @@
"a/b/c/d/e/usr/bin/clang",
"--sysroot=/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-Ba/b/c/d/e/bin",
"-target",
@@ -676,35 +676,35 @@
"../usr/bin/clang",
"--sysroot=/tmp/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../bin",
"-target",
@@ -740,35 +740,35 @@
"/tmp/usr/bin/clang",
"--sysroot=/tmp/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json
index ea1363e..f743894 100644
--- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json
+++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/clangtidy.json
@@ -32,35 +32,35 @@
"-resource-dir=someResourceDir",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -74,35 +74,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -146,35 +146,35 @@
"-resource-dir=someResourceDir",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -189,35 +189,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -263,35 +263,35 @@
"-resource-dir=someResourceDir",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -309,35 +309,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -384,35 +384,35 @@
"-resource-dir=someResourceDir",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -427,35 +427,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json
index 4df8157..9cd7261 100644
--- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json
+++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/force_disable_werror.json
@@ -20,35 +20,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -84,35 +84,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -134,35 +134,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -202,35 +202,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -252,35 +252,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json
index e2037e2..d9b532f 100644
--- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json
+++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_clang_syntax.json
@@ -17,35 +17,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -66,8 +66,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -105,35 +103,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -154,8 +152,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -188,35 +184,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -252,35 +248,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -301,8 +297,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
diff --git a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_path.json b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_path.json
index 5b129d7..6e06ce1 100644
--- a/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_path.json
+++ b/compiler_wrapper/testdata/cros_hardened_llvmnext_golden/gcc_path.json
@@ -21,8 +21,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -63,8 +61,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -105,8 +101,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -144,8 +138,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -183,8 +175,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -225,8 +215,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json
index a676dc6..da522b9 100644
--- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json
+++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/bisect.json
@@ -26,35 +26,35 @@
"/usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -95,35 +95,35 @@
"/usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -167,35 +167,35 @@
"/usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json
index cc6d1ad..287833c 100644
--- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json
+++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clang_path.json
@@ -16,35 +16,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -74,35 +74,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -132,35 +132,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -190,35 +190,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -255,24 +255,24 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -281,11 +281,11 @@
"--gcc-toolchain=/usr",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -326,24 +326,24 @@
"/somedir/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -352,11 +352,11 @@
"--gcc-toolchain=/usr",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -396,24 +396,24 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -422,11 +422,11 @@
"--gcc-toolchain=/usr",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -456,35 +456,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -511,35 +511,35 @@
"args": [
"--sysroot=/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-Ba/b/c/d/e/bin",
"-target",
@@ -566,35 +566,35 @@
"args": [
"--sysroot=/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-Ba/b/c/d/e/bin",
"-target",
@@ -621,35 +621,35 @@
"args": [
"--sysroot=/tmp/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../bin",
"-target",
@@ -679,35 +679,35 @@
"args": [
"--sysroot=/tmp/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json
index ea1363e..f743894 100644
--- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json
+++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/clangtidy.json
@@ -32,35 +32,35 @@
"-resource-dir=someResourceDir",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -74,35 +74,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -146,35 +146,35 @@
"-resource-dir=someResourceDir",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -189,35 +189,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -263,35 +263,35 @@
"-resource-dir=someResourceDir",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -309,35 +309,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -384,35 +384,35 @@
"-resource-dir=someResourceDir",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -427,35 +427,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json
index 2c34edb..54943fb 100644
--- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json
+++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/force_disable_werror.json
@@ -19,35 +19,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -77,35 +77,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -121,35 +121,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -183,35 +183,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -227,35 +227,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json
index 368eb85..1704cd1 100644
--- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json
+++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_clang_syntax.json
@@ -17,35 +17,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -65,8 +65,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -100,35 +98,35 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -149,8 +147,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -183,35 +179,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -247,35 +243,35 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fno-addrsig",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-fuse-ld=lld",
- "--unwindlib=libgcc",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "--unwindlib=libunwind",
+ "-Wno-section",
+ "-fno-addrsig",
+ "-fuse-ld=lld",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -295,8 +291,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
diff --git a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_path.json b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_path.json
index 9393c4b..fb87737 100644
--- a/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_path.json
+++ b/compiler_wrapper/testdata/cros_hardened_noccache_golden/gcc_path.json
@@ -20,8 +20,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -57,8 +55,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -94,8 +90,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -128,8 +122,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -162,8 +154,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
@@ -199,8 +189,6 @@
"-Wno-maybe-uninitialized",
"-fcommon",
"-fstack-protector-strong",
- "-fPIE",
- "-pie",
"-D_FORTIFY_SOURCE=2",
"-fno-omit-frame-pointer",
"-static-libgcc",
diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json b/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json
index 6c46fee..96fd88c 100644
--- a/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json
+++ b/compiler_wrapper/testdata/cros_nonhardened_golden/bisect.json
@@ -27,26 +27,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -91,26 +93,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -158,26 +162,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json
index 86960cc..35f90b1 100644
--- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json
+++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_ftrapv_maincc_target_specific.json
@@ -18,27 +18,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"-ftrapv",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -72,27 +74,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-eabi",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-eabi-",
"-ftrapv",
"main.cc",
"-L/usr/x86_64-cros-eabi/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -126,27 +130,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-win-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-win-gnu-",
"-ftrapv",
"main.cc",
"-L/usr/x86_64-cros-win-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -180,16 +186,18 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv7m-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"-mthumb",
@@ -197,11 +205,11 @@
"-ftrapv",
"main.cc",
"-L/usr/armv7m-cros-linux-gnu/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv7m-cros-linux-gnu"
@@ -234,27 +242,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv7m-cros-eabi",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/armv7m-cros-eabi-",
"-ftrapv",
"main.cc",
"-L/usr/armv7m-cros-eabi/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv7m-cros-eabi"
@@ -287,16 +297,18 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv7m-cros-win-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"-mthumb",
@@ -304,11 +316,11 @@
"-ftrapv",
"main.cc",
"-L/usr/armv7m-cros-win-gnu/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv7m-cros-win-gnu"
@@ -341,16 +353,18 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv8m-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"-mthumb",
@@ -358,11 +372,11 @@
"-ftrapv",
"main.cc",
"-L/usr/armv8m-cros-linux-gnu/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv8m-cros-linux-gnu"
@@ -395,27 +409,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv8m-cros-eabi",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/armv8m-cros-eabi-",
"-ftrapv",
"main.cc",
"-L/usr/armv8m-cros-eabi/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv8m-cros-eabi"
@@ -448,16 +464,18 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv8m-cros-win-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"-mthumb",
@@ -465,11 +483,11 @@
"-ftrapv",
"main.cc",
"-L/usr/armv8m-cros-win-gnu/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv8m-cros-win-gnu"
diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json
index 69af166..e2479e9 100644
--- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json
+++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_maincc_target_specific.json
@@ -17,26 +17,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -69,26 +71,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-eabi",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-eabi-",
"main.cc",
"-L/usr/x86_64-cros-eabi/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -121,26 +125,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-win-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-win-gnu-",
"main.cc",
"-L/usr/x86_64-cros-win-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -173,27 +179,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv7m-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"-mthumb",
"--prefix=../../bin/armv7m-cros-linux-gnu-",
"main.cc",
"-L/usr/armv7m-cros-linux-gnu/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv7m-cros-linux-gnu"
@@ -225,26 +233,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv7m-cros-eabi",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/armv7m-cros-eabi-",
"main.cc",
"-L/usr/armv7m-cros-eabi/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv7m-cros-eabi"
@@ -276,27 +286,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv7m-cros-win-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"-mthumb",
"--prefix=../../bin/armv7m-cros-win-gnu-",
"main.cc",
"-L/usr/armv7m-cros-win-gnu/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv7m-cros-win-gnu"
@@ -328,27 +340,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv8m-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"-mthumb",
"--prefix=../../bin/armv8m-cros-linux-gnu-",
"main.cc",
"-L/usr/armv8m-cros-linux-gnu/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv8m-cros-linux-gnu"
@@ -380,26 +394,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv8m-cros-eabi",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/armv8m-cros-eabi-",
"main.cc",
"-L/usr/armv8m-cros-eabi/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv8m-cros-eabi"
@@ -431,27 +447,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv8m-cros-win-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"-mthumb",
"--prefix=../../bin/armv8m-cros-win-gnu-",
"main.cc",
"-L/usr/armv8m-cros-win-gnu/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv8m-cros-win-gnu"
diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json
index afc7951..11c566b 100644
--- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json
+++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_path.json
@@ -17,26 +17,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -72,26 +74,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -127,26 +131,28 @@
"../../usr/bin/clang++",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -182,26 +188,28 @@
"somepath/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -244,16 +252,18 @@
"/somedir/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
@@ -261,11 +271,11 @@
"--gcc-toolchain=/usr",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -311,16 +321,18 @@
"/somedir/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
@@ -328,11 +340,11 @@
"--gcc-toolchain=/usr",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -373,16 +385,18 @@
"/somedir/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
@@ -390,11 +404,11 @@
"--gcc-toolchain=/usr",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -430,26 +444,28 @@
"/usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -482,26 +498,28 @@
"a/b/c/d/e/usr/bin/clang",
"--sysroot=/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-Ba/b/c/d/e/bin",
"-target",
@@ -534,26 +552,28 @@
"a/b/c/d/e/usr/bin/clang",
"--sysroot=/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=a/b/c/d/e/bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/tmp/stable/a/b/c/d/e/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-Ba/b/c/d/e/bin",
"-target",
@@ -586,26 +606,28 @@
"../usr/bin/clang",
"--sysroot=/tmp/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../bin",
"-target",
@@ -641,26 +663,28 @@
"/tmp/usr/bin/clang",
"--sysroot=/tmp/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/tmp/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json
index 3e140d5..fe0df74 100644
--- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json
+++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sanitizer_args.json
@@ -19,27 +19,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"-fsanitize=kernel-address",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -74,27 +76,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"-fsanitize=kernel-address",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -129,27 +133,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"-fsanitize=kernel-address",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -184,27 +190,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"-fsanitize=kernel-address",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -238,27 +246,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"-fsanitize=fuzzer",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -293,16 +303,18 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
@@ -310,11 +322,11 @@
"-fprofile-instr-generate",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -348,27 +360,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"-fsanitize=address",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -402,27 +416,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"-fprofile-instr-generate",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json
index fbe038e..b744e8c 100644
--- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json
+++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_specific_args.json
@@ -27,16 +27,18 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
@@ -50,11 +52,11 @@
"-Wunsafe-loop-optimizations",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -88,27 +90,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"-Wno-#warnings",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -142,27 +146,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"-Wno-error=uninitialized",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -196,27 +202,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"-someflag",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json
index f2ccadb..ed3a6ef 100644
--- a/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json
+++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clang_sysroot_wrapper_common.json
@@ -50,26 +50,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -105,26 +107,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -153,26 +157,29 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
+ "-nopie",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -206,16 +213,18 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"-fno-stack-protector",
@@ -223,11 +232,11 @@
"-D__KERNEL__",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -261,16 +270,18 @@
"../../usr/bin/clang",
"--sysroot=/usr/armv7a-cros-linux-gnueabihf",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"-mthumb",
@@ -279,11 +290,11 @@
"-D__KERNEL__",
"main.cc",
"-L/usr/armv7a-cros-linux-gnueabihf/usr/lib",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-B../../bin",
"-target",
"armv7a-cros-linux-gnueabihf"
@@ -315,27 +326,29 @@
"args": [
"../../usr/bin/clang",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"--sysroot=xyz",
"main.cc",
"-Lxyz/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json b/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json
index dc641c9..830abee 100644
--- a/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json
+++ b/compiler_wrapper/testdata/cros_nonhardened_golden/clangtidy.json
@@ -32,26 +32,28 @@
"-resource-dir=someResourceDir",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -65,26 +67,28 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -128,26 +132,28 @@
"-resource-dir=someResourceDir",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -162,26 +168,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -227,26 +235,28 @@
"-resource-dir=someResourceDir",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -264,26 +274,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -330,26 +342,28 @@
"-resource-dir=someResourceDir",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -364,26 +378,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json b/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json
index 54b994c..07c2090 100644
--- a/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json
+++ b/compiler_wrapper/testdata/cros_nonhardened_golden/force_disable_werror.json
@@ -20,26 +20,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -75,26 +77,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -116,26 +120,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -175,26 +181,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -216,26 +224,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json b/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json
index 5234715..9dd5687 100644
--- a/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json
+++ b/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_clang_syntax.json
@@ -17,26 +17,28 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -91,26 +93,28 @@
"../../usr/bin/clang",
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -160,26 +164,28 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
@@ -215,26 +221,28 @@
"args": [
"--sysroot=/usr/x86_64-cros-linux-gnu",
"-Qunused-arguments",
- "-fdebug-default-version=5",
+ "-Werror=poison-system-directories",
+ "-Wno-compound-token-split-by-macro",
+ "-Wno-deprecated-declarations",
+ "-Wno-error=implicit-function-declaration",
+ "-Wno-error=implicit-int",
+ "-Wno-final-dtor-non-final-class",
"-Wno-tautological-constant-compare",
"-Wno-tautological-unsigned-enum-zero-compare",
"-Wno-unknown-warning-option",
- "-Wno-section",
- "-Wno-final-dtor-non-final-class",
- "-Werror=poison-system-directories",
+ "-fdebug-default-version=5",
"-fexperimental-new-pass-manager",
- "-Wno-compound-token-split-by-macro",
- "-Wno-deprecated-declarations",
+ "-Wno-section",
"-fcrash-diagnostics-dir=/tmp/stable/clang_crash_diagnostics",
"-static-libgcc",
"--prefix=../../bin/x86_64-cros-linux-gnu-",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
- "-Wno-implicit-int-float-conversion",
"-Wno-compound-token-split-by-space",
- "-Wno-string-concatenation",
"-Wno-deprecated-copy",
"-Wno-unused-but-set-variable",
+ "-Wno-implicit-int-float-conversion",
+ "-Wno-string-concatenation",
"-mno-movbe",
"-B../../bin",
"-target",
diff --git a/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_sysroot_wrapper_common.json b/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_sysroot_wrapper_common.json
index ba2f292..25411f2 100644
--- a/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_sysroot_wrapper_common.json
+++ b/compiler_wrapper/testdata/cros_nonhardened_golden/gcc_sysroot_wrapper_common.json
@@ -122,6 +122,7 @@
"-Wno-deprecated-declarations",
"-Wtrampolines",
"-static-libgcc",
+ "-nopie",
"main.cc",
"-L/usr/x86_64-cros-linux-gnu/usr/lib64",
"-mno-movbe"
diff --git a/compiler_wrapper/testutil_test.go b/compiler_wrapper/testutil_test.go
index 035f237..463e34a 100644
--- a/compiler_wrapper/testutil_test.go
+++ b/compiler_wrapper/testutil_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/thumb_flags.go b/compiler_wrapper/thumb_flags.go
index 0edaf4f..3e641d3 100644
--- a/compiler_wrapper/thumb_flags.go
+++ b/compiler_wrapper/thumb_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/thumb_flags_test.go b/compiler_wrapper/thumb_flags_test.go
index 2e8f7e6..24985bc 100644
--- a/compiler_wrapper/thumb_flags_test.go
+++ b/compiler_wrapper/thumb_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/unsupported_flags.go b/compiler_wrapper/unsupported_flags.go
index 48fee2f..5a0dcee 100644
--- a/compiler_wrapper/unsupported_flags.go
+++ b/compiler_wrapper/unsupported_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/unsupported_flags_test.go b/compiler_wrapper/unsupported_flags_test.go
index a32eb52..17ff13a 100644
--- a/compiler_wrapper/unsupported_flags_test.go
+++ b/compiler_wrapper/unsupported_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/x64_flags.go b/compiler_wrapper/x64_flags.go
index 40505cf..2e4a0af 100644
--- a/compiler_wrapper/x64_flags.go
+++ b/compiler_wrapper/x64_flags.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/compiler_wrapper/x64_flags_test.go b/compiler_wrapper/x64_flags_test.go
index fd93728..ce5caeb 100644
--- a/compiler_wrapper/x64_flags_test.go
+++ b/compiler_wrapper/x64_flags_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/crate_ebuild_help.py b/crate_ebuild_help.py
new file mode 100755
index 0000000..c66b989
--- /dev/null
+++ b/crate_ebuild_help.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python3
+# Copyright 2022 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Help creating a Rust ebuild with CRATES.
+
+This script is meant to help someone creating a Rust ebuild of the type
+currently used by sys-apps/ripgrep and sys-apps/rust-analyzer.
+
+In these ebuilds, the CRATES variable is used to list all dependencies, rather
+than creating an ebuild for each dependency. This style of ebuild can be used
+for a crate which is only intended for use in the chromiumos SDK, and which has
+many dependencies which otherwise won't be used.
+
+To create such an ebuild, there are essentially two tasks that must be done:
+
+1. Determine all transitive dependent crates and version and list them in the
+CRATES variable. Ignore crates that are already included in the main crate's
+repository.
+
+2. Find which dependent crates are not already on a chromeos mirror, retrieve
+them from crates.io, and upload them to `gs://chromeos-localmirror/distfiles`.
+
+This script parses the crate's lockfile to list transitive dependent crates,
+and either lists crates to be uploaded or actually uploads them.
+
+Of course these can be done manually instead. If you choose to do these steps
+manually, I recommend *not* using the `cargo download` tool, and instead obtain
+dependent crates at
+`https://crates.io/api/v1/crates/{crate_name}/{crate_version}/download`.
+
+Example usage:
+
+ # Here we instruct the script to ignore crateA and crateB, presumably
+ # because they are already included in the same repository as some-crate.
+ # This will not actually upload any crates to `gs`.
+ python3 crate_ebuild_help.py --lockfile some-crate/Cargo.lock \
+ --ignore crateA --ignore crateB --dry-run
+
+ # Similar to the above, but here we'll actually carry out the uploads.
+ python3 crate_ebuild_help.py --lockfile some-crate/Cargo.lock \
+ --ignore crateA --ignore crateB
+
+See the ebuild files for ripgrep or rust-analyzer for other details.
+"""
+
+import argparse
+import concurrent.futures
+from pathlib import Path
+import subprocess
+import tempfile
+from typing import List, Tuple
+import urllib.request
+
+# Python 3.11 has `tomllib`, so maybe eventually we can switch to that.
+import toml
+
+
+def run(args: List[str]) -> bool:
+    """Run *args* as a subprocess; return True iff it exited with status 0.
+
+    Output is captured (and discarded) so gsutil chatter doesn't reach the
+    terminal.
+    """
+    completed = subprocess.run(args, capture_output=True, check=False)
+    return completed.returncode == 0
+
+
+def run_check(args: List[str]) -> None:
+    """Run *args* with output captured; raise CalledProcessError on failure."""
+    subprocess.run(args, capture_output=True, check=True)
+
+
+def gs_address_exists(address: str) -> bool:
+    """Return True iff `gsutil.py ls` finds an object at *address*.
+
+    `ls` exits non-zero when the object is absent, which run() maps to False.
+    """
+    listed = run(["gsutil.py", "ls", address])
+    return listed
+
+
+def crate_already_uploaded(crate_name: str, crate_version: str) -> bool:
+    """Return True if this crate's archive is already on a ChromeOS mirror.
+
+    Checks both the localmirror and the gentoo mirror; either location
+    satisfies the ebuild's SRC_URI lookup.
+    """
+    filename = f"{crate_name}-{crate_version}.crate"
+    # Interpolate `filename` into both URLs; previously the f-strings had no
+    # placeholder, so a fixed (nonexistent) path was probed and every crate
+    # looked un-uploaded.
+    return gs_address_exists(
+        f"gs://chromeos-localmirror/distfiles/{filename}"
+    ) or gs_address_exists(f"gs://chromeos-mirror/gentoo/distfiles/{filename}")
+
+
+def download_crate(crate_name: str, crate_version: str, localpath: Path):
+    """Fetch the .crate archive for this name/version from crates.io."""
+    # This is the stable download endpoint; prefer it over third-party
+    # tooling such as `cargo download` (see the module docstring).
+    url = (
+        "https://crates.io/api/v1/crates/"
+        f"{crate_name}/{crate_version}/download"
+    )
+    urllib.request.urlretrieve(url, localpath)
+
+
+def upload_crate(crate_name: str, crate_version: str, localpath: Path):
+    """Upload a downloaded .crate file to the ChromeOS localmirror."""
+    destination = (
+        "gs://chromeos-localmirror/distfiles/"
+        f"{crate_name}-{crate_version}.crate"
+    )
+    # -n: no-clobber (never overwrite an existing mirror object);
+    # -a public-read: mirrored distfiles must be world-readable.
+    run_check(
+        [
+            "gsutil.py",
+            "cp",
+            "-n",
+            "-a",
+            "public-read",
+            str(localpath),
+            destination,
+        ]
+    )
+
+
+def main():
+    """Parse arguments, list dependent crates, and upload the missing ones.
+
+    Reads the Cargo.lock given by --lockfile, prints every transitive
+    dependency not listed via --ignore, then (unless --dry-run) downloads
+    each crate not already mirrored and uploads it to the localmirror.
+    """
+    parser = argparse.ArgumentParser(
+        description="Help prepare a Rust crate for an ebuild."
+    )
+    parser.add_argument(
+        "--lockfile",
+        type=str,
+        required=True,
+        help="Path to the lockfile of the crate in question.",
+    )
+    parser.add_argument(
+        "--ignore",
+        type=str,
+        action="append",
+        required=False,
+        default=[],
+        help="Ignore the crate by this name (may be used multiple times).",
+    )
+    parser.add_argument(
+        "--dry-run",
+        action="store_true",
+        help="Don't actually download/upload crates, just print their names.",
+    )
+    ns = parser.parse_args()
+
+    to_ignore = set(ns.ignore)
+
+    toml_contents = toml.load(ns.lockfile)
+    packages = toml_contents["package"]
+
+    # Cargo.lock lists every transitive dependency; drop the ignored ones
+    # (typically crates living in the main crate's own repository).
+    crates = [
+        (pkg["name"], pkg["version"])
+        for pkg in packages
+        if pkg["name"] not in to_ignore
+    ]
+    crates.sort()
+
+    print("Dependent crates:")
+    for name, version in crates:
+        print(f"{name}-{version}")
+    print()
+
+    if ns.dry_run:
+        print("Crates that would be uploaded (skipping ones already uploaded):")
+    else:
+        print("Uploading crates (skipping ones already uploaded):")
+
+    def maybe_upload(crate: Tuple[str, str]) -> str:
+        """Upload one crate if needed; return "name-version", or "" if skipped."""
+        name, version = crate
+        if crate_already_uploaded(name, version):
+            return ""
+        if not ns.dry_run:
+            with tempfile.TemporaryDirectory() as temp_dir:
+                # TemporaryDirectory's context manager yields the directory
+                # path as a plain str (it has no .name attribute), so build
+                # the Path from temp_dir directly; `temp_dir.name` raised
+                # AttributeError on the first real upload.
+                path = Path(temp_dir, f"{name}-{version}.crate")
+                download_crate(name, version, path)
+                upload_crate(name, version, path)
+        return f"{name}-{version}"
+
+    # Simple benchmarking on my machine with rust-analyzer's Cargo.lock, using
+    # the --dry-run option, gives a wall time of 277 seconds with max_workers=1
+    # and 70 seconds with max_workers=4.
+    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
+        crates_len = len(crates)
+        for i, s in enumerate(executor.map(maybe_upload, crates)):
+            if s:
+                j = i + 1
+                print(f"[{j}/{crates_len}] {s}")
+    print()
+
+
+# Entry-point guard: lets the module be imported (e.g. for testing) without
+# triggering argument parsing or uploads.
+if __name__ == "__main__":
+    main()
diff --git a/cros_utils/__init__.py b/cros_utils/__init__.py
index 4c4e555..d365cb0 100644
--- a/cros_utils/__init__.py
+++ b/cros_utils/__init__.py
@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/cros_utils/bugs.py b/cros_utils/bugs.py
index 88fb767..43e0e55 100755
--- a/cros_utils/bugs.py
+++ b/cros_utils/bugs.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -12,93 +12,103 @@
import os
from typing import Any, Dict, List, Optional
-X20_PATH = '/google/data/rw/teams/c-compiler-chrome/prod_bugs'
+
+X20_PATH = "/google/data/rw/teams/c-compiler-chrome/prod_bugs"
class WellKnownComponents(enum.IntEnum):
- """A listing of "well-known" components recognized by our infra."""
- CrOSToolchainPublic = -1
- CrOSToolchainPrivate = -2
+ """A listing of "well-known" components recognized by our infra."""
+
+ CrOSToolchainPublic = -1
+ CrOSToolchainPrivate = -2
def _WriteBugJSONFile(object_type: str, json_object: Dict[str, Any]):
- """Writes a JSON file to X20_PATH with the given bug-ish object."""
- final_object = {
- 'type': object_type,
- 'value': json_object,
- }
+ """Writes a JSON file to X20_PATH with the given bug-ish object."""
+ final_object = {
+ "type": object_type,
+ "value": json_object,
+ }
- # The name of this has two parts:
- # - An easily sortable time, to provide uniqueness and let our service send
- # things in the order they were put into the outbox.
- # - 64 bits of entropy, so two racing bug writes don't clobber the same file.
- now = datetime.datetime.utcnow().isoformat('T', 'seconds') + 'Z'
- entropy = base64.urlsafe_b64encode(os.getrandom(8))
- entropy_str = entropy.rstrip(b'=').decode('utf-8')
- file_path = os.path.join(X20_PATH, f'{now}_{entropy_str}.json')
+ # The name of this has two parts:
+ # - An easily sortable time, to provide uniqueness and let our service send
+ # things in the order they were put into the outbox.
+ # - 64 bits of entropy, so two racing bug writes don't clobber the same file.
+ now = datetime.datetime.utcnow().isoformat("T", "seconds") + "Z"
+ entropy = base64.urlsafe_b64encode(os.getrandom(8))
+ entropy_str = entropy.rstrip(b"=").decode("utf-8")
+ file_path = os.path.join(X20_PATH, f"{now}_{entropy_str}.json")
- temp_path = file_path + '.in_progress'
- try:
- with open(temp_path, 'w') as f:
- json.dump(final_object, f)
- os.rename(temp_path, file_path)
- except:
- os.remove(temp_path)
- raise
- return file_path
+ temp_path = file_path + ".in_progress"
+ try:
+ with open(temp_path, "w") as f:
+ json.dump(final_object, f)
+ os.rename(temp_path, file_path)
+ except:
+ os.remove(temp_path)
+ raise
+ return file_path
def AppendToExistingBug(bug_id: int, body: str):
- """Sends a reply to an existing bug."""
- _WriteBugJSONFile('AppendToExistingBugRequest', {
- 'body': body,
- 'bug_id': bug_id,
- })
+ """Sends a reply to an existing bug."""
+ _WriteBugJSONFile(
+ "AppendToExistingBugRequest",
+ {
+ "body": body,
+ "bug_id": bug_id,
+ },
+ )
-def CreateNewBug(component_id: int,
- title: str,
- body: str,
- assignee: Optional[str] = None,
- cc: Optional[List[str]] = None):
- """Sends a request to create a new bug.
+def CreateNewBug(
+ component_id: int,
+ title: str,
+ body: str,
+ assignee: Optional[str] = None,
+ cc: Optional[List[str]] = None,
+):
+ """Sends a request to create a new bug.
- Args:
- component_id: The component ID to add. Anything from WellKnownComponents
- also works.
- title: Title of the bug. Must be nonempty.
- body: Body of the bug. Must be nonempty.
- assignee: Assignee of the bug. Must be either an email address, or a
- "well-known" assignee (detective, mage).
- cc: A list of emails to add to the CC list. Must either be an email
- address, or a "well-known" individual (detective, mage).
- """
- obj = {
- 'component_id': component_id,
- 'subject': title,
- 'body': body,
- }
+ Args:
+ component_id: The component ID to add. Anything from WellKnownComponents
+ also works.
+ title: Title of the bug. Must be nonempty.
+ body: Body of the bug. Must be nonempty.
+ assignee: Assignee of the bug. Must be either an email address, or a
+ "well-known" assignee (detective, mage).
+ cc: A list of emails to add to the CC list. Must either be an email
+ address, or a "well-known" individual (detective, mage).
+ """
+ obj = {
+ "component_id": component_id,
+ "subject": title,
+ "body": body,
+ }
- if assignee:
- obj['assignee'] = assignee
+ if assignee:
+ obj["assignee"] = assignee
- if cc:
- obj['cc'] = cc
+ if cc:
+ obj["cc"] = cc
- _WriteBugJSONFile('FileNewBugRequest', obj)
+ _WriteBugJSONFile("FileNewBugRequest", obj)
def SendCronjobLog(cronjob_name: str, failed: bool, message: str):
- """Sends the record of a cronjob to our bug infra.
+ """Sends the record of a cronjob to our bug infra.
- cronjob_name: The name of the cronjob. Expected to remain consistent over
- time.
- failed: Whether the job failed or not.
- message: Any seemingly relevant context. This is pasted verbatim in a bug, if
- the cronjob infra deems it worthy.
- """
- _WriteBugJSONFile('ChrotomationCronjobUpdate', {
- 'name': cronjob_name,
- 'message': message,
- 'failed': failed,
- })
+ cronjob_name: The name of the cronjob. Expected to remain consistent over
+ time.
+ failed: Whether the job failed or not.
+ message: Any seemingly relevant context. This is pasted verbatim in a bug, if
+ the cronjob infra deems it worthy.
+ """
+ _WriteBugJSONFile(
+ "ChrotomationCronjobUpdate",
+ {
+ "name": cronjob_name,
+ "message": message,
+ "failed": failed,
+ },
+ )
diff --git a/cros_utils/bugs_test.py b/cros_utils/bugs_test.py
index 03dee64..5a07dbd 100755
--- a/cros_utils/bugs_test.py
+++ b/cros_utils/bugs_test.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -17,108 +17,115 @@
class Tests(unittest.TestCase):
- """Tests for the bugs module."""
- def testWritingJSONFileSeemsToWork(self):
- """Tests JSON file writing."""
- old_x20_path = bugs.X20_PATH
+ """Tests for the bugs module."""
- def restore_x20_path():
- bugs.X20_PATH = old_x20_path
+ def testWritingJSONFileSeemsToWork(self):
+ """Tests JSON file writing."""
+ old_x20_path = bugs.X20_PATH
- self.addCleanup(restore_x20_path)
+ def restore_x20_path():
+ bugs.X20_PATH = old_x20_path
- with tempfile.TemporaryDirectory() as tempdir:
- bugs.X20_PATH = tempdir
- file_path = bugs._WriteBugJSONFile(
- 'ObjectType', {
- 'foo': 'bar',
- 'baz': bugs.WellKnownComponents.CrOSToolchainPublic,
- })
+ self.addCleanup(restore_x20_path)
- self.assertTrue(file_path.startswith(tempdir),
- f'Expected {file_path} to start with {tempdir}')
-
- with open(file_path) as f:
- self.assertEqual(
- json.load(f),
- {
- 'type': 'ObjectType',
- 'value': {
- 'foo': 'bar',
- 'baz': int(bugs.WellKnownComponents.CrOSToolchainPublic),
+ with tempfile.TemporaryDirectory() as tempdir:
+ bugs.X20_PATH = tempdir
+ file_path = bugs._WriteBugJSONFile(
+ "ObjectType",
+ {
+ "foo": "bar",
+ "baz": bugs.WellKnownComponents.CrOSToolchainPublic,
},
+ )
+
+ self.assertTrue(
+ file_path.startswith(tempdir),
+ f"Expected {file_path} to start with {tempdir}",
+ )
+
+ with open(file_path) as f:
+ self.assertEqual(
+ json.load(f),
+ {
+ "type": "ObjectType",
+ "value": {
+ "foo": "bar",
+ "baz": int(
+ bugs.WellKnownComponents.CrOSToolchainPublic
+ ),
+ },
+ },
+ )
+
+ @patch("bugs._WriteBugJSONFile")
+ def testAppendingToBugsSeemsToWork(self, mock_write_json_file):
+ """Tests AppendToExistingBug."""
+ bugs.AppendToExistingBug(1234, "hello, world!")
+ mock_write_json_file.assert_called_once_with(
+ "AppendToExistingBugRequest",
+ {
+ "body": "hello, world!",
+ "bug_id": 1234,
},
)
- @patch('bugs._WriteBugJSONFile')
- def testAppendingToBugsSeemsToWork(self, mock_write_json_file):
- """Tests AppendToExistingBug."""
- bugs.AppendToExistingBug(1234, 'hello, world!')
- mock_write_json_file.assert_called_once_with(
- 'AppendToExistingBugRequest',
- {
- 'body': 'hello, world!',
- 'bug_id': 1234,
- },
- )
+ @patch("bugs._WriteBugJSONFile")
+ def testBugCreationSeemsToWork(self, mock_write_json_file):
+ """Tests CreateNewBug."""
+ test_case_additions = (
+ {},
+ {
+ "component_id": bugs.WellKnownComponents.CrOSToolchainPublic,
+ },
+ {
+ "assignee": "foo@gbiv.com",
+ "cc": ["bar@baz.com"],
+ },
+ )
- @patch('bugs._WriteBugJSONFile')
- def testBugCreationSeemsToWork(self, mock_write_json_file):
- """Tests CreateNewBug."""
- test_case_additions = (
- {},
- {
- 'component_id': bugs.WellKnownComponents.CrOSToolchainPublic,
- },
- {
- 'assignee': 'foo@gbiv.com',
- 'cc': ['bar@baz.com'],
- },
- )
+ for additions in test_case_additions:
+ test_case = {
+ "component_id": 123,
+ "title": "foo",
+ "body": "bar",
+ **additions,
+ }
- for additions in test_case_additions:
- test_case = {
- 'component_id': 123,
- 'title': 'foo',
- 'body': 'bar',
- **additions,
- }
+ bugs.CreateNewBug(**test_case)
- bugs.CreateNewBug(**test_case)
+ expected_output = {
+ "component_id": test_case["component_id"],
+ "subject": test_case["title"],
+ "body": test_case["body"],
+ }
- expected_output = {
- 'component_id': test_case['component_id'],
- 'subject': test_case['title'],
- 'body': test_case['body'],
- }
+ assignee = test_case.get("assignee")
+ if assignee:
+ expected_output["assignee"] = assignee
- assignee = test_case.get('assignee')
- if assignee:
- expected_output['assignee'] = assignee
+ cc = test_case.get("cc")
+ if cc:
+ expected_output["cc"] = cc
- cc = test_case.get('cc')
- if cc:
- expected_output['cc'] = cc
+ mock_write_json_file.assert_called_once_with(
+ "FileNewBugRequest",
+ expected_output,
+ )
+ mock_write_json_file.reset_mock()
- mock_write_json_file.assert_called_once_with(
- 'FileNewBugRequest',
- expected_output,
- )
- mock_write_json_file.reset_mock()
-
- @patch('bugs._WriteBugJSONFile')
- def testCronjobLogSendingSeemsToWork(self, mock_write_json_file):
- """Tests SendCronjobLog."""
- bugs.SendCronjobLog('my_name', False, 'hello, world!')
- mock_write_json_file.assert_called_once_with(
- 'ChrotomationCronjobUpdate',
- {
- 'name': 'my_name',
- 'message': 'hello, world!',
- 'failed': False,
- },
- )
+ @patch("bugs._WriteBugJSONFile")
+ def testCronjobLogSendingSeemsToWork(self, mock_write_json_file):
+ """Tests SendCronjobLog."""
+ bugs.SendCronjobLog("my_name", False, "hello, world!")
+ mock_write_json_file.assert_called_once_with(
+ "ChrotomationCronjobUpdate",
+ {
+ "name": "my_name",
+ "message": "hello, world!",
+ "failed": False,
+ },
+ )
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/cros_utils/buildbot_utils.py b/cros_utils/buildbot_utils.py
index b600c6a..8f0ce5e 100644
--- a/cros_utils/buildbot_utils.py
+++ b/cros_utils/buildbot_utils.py
@@ -1,12 +1,10 @@
# -*- coding: utf-8 -*-
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Copyright 2017 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities for launching and accessing ChromeOS buildbots."""
-from __future__ import division
-from __future__ import print_function
import ast
import json
@@ -17,6 +15,7 @@
from cros_utils import command_executer
from cros_utils import logger
+
INITIAL_SLEEP_TIME = 7200 # 2 hours; wait time before polling buildbot.
SLEEP_TIME = 600 # 10 minutes; time between polling of buildbot.
@@ -26,267 +25,283 @@
class BuildbotTimeout(Exception):
- """Exception to throw when a buildbot operation timesout."""
+ """Exception to throw when a buildbot operation timesout."""
def RunCommandInPath(path, cmd):
- ce = command_executer.GetCommandExecuter()
- cwd = os.getcwd()
- os.chdir(path)
- status, stdout, stderr = ce.RunCommandWOutput(cmd, print_to_console=False)
- os.chdir(cwd)
- return status, stdout, stderr
+ ce = command_executer.GetCommandExecuter()
+ cwd = os.getcwd()
+ os.chdir(path)
+ status, stdout, stderr = ce.RunCommandWOutput(cmd, print_to_console=False)
+ os.chdir(cwd)
+ return status, stdout, stderr
def PeekTrybotImage(chromeos_root, buildbucket_id):
- """Get the artifact URL of a given tryjob.
+ """Get the artifact URL of a given tryjob.
- Args:
- buildbucket_id: buildbucket-id
- chromeos_root: root dir of chrome os checkout
+ Args:
+ buildbucket_id: buildbucket-id
+ chromeos_root: root dir of chrome os checkout
- Returns:
- (status, url) where status can be 'pass', 'fail', 'running',
- and url looks like:
- gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-b20789
- """
- command = ('cros buildresult --report json --buildbucket-id %s' %
- buildbucket_id)
- rc, out, _ = RunCommandInPath(chromeos_root, command)
+ Returns:
+ (status, url) where status can be 'pass', 'fail', 'running',
+ and url looks like:
+ gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-b20789
+ """
+ command = (
+ "cros buildresult --report json --buildbucket-id %s" % buildbucket_id
+ )
+ rc, out, _ = RunCommandInPath(chromeos_root, command)
- # Current implementation of cros buildresult returns fail when a job is still
- # running.
- if rc != 0:
- return ('running', None)
+ # Current implementation of cros buildresult returns fail when a job is still
+ # running.
+ if rc != 0:
+ return ("running", None)
- results = json.loads(out)[buildbucket_id]
+ results = json.loads(out)[buildbucket_id]
- # Handle the case where the tryjob failed to launch correctly.
- if results['artifacts_url'] is None:
- return (results['status'], '')
+ # Handle the case where the tryjob failed to launch correctly.
+ if results["artifacts_url"] is None:
+ return (results["status"], "")
- return (results['status'], results['artifacts_url'].rstrip('/'))
+ return (results["status"], results["artifacts_url"].rstrip("/"))
def ParseTryjobBuildbucketId(msg):
- """Find the buildbucket-id in the messages from `cros tryjob`.
+ """Find the buildbucket-id in the messages from `cros tryjob`.
- Args:
- msg: messages from `cros tryjob`
+ Args:
+ msg: messages from `cros tryjob`
- Returns:
- buildbucket-id, which will be passed to `cros buildresult`
- """
- output_list = ast.literal_eval(msg)
- output_dict = output_list[0]
- if 'buildbucket_id' in output_dict:
- return output_dict['buildbucket_id']
- return None
+ Returns:
+ buildbucket-id, which will be passed to `cros buildresult`
+ """
+ output_list = ast.literal_eval(msg)
+ output_dict = output_list[0]
+ if "buildbucket_id" in output_dict:
+ return output_dict["buildbucket_id"]
+ return None
-def SubmitTryjob(chromeos_root,
- buildbot_name,
- patch_list,
- tryjob_flags=None,
- build_toolchain=False):
- """Calls `cros tryjob ...`
+def SubmitTryjob(
+ chromeos_root,
+ buildbot_name,
+ patch_list,
+ tryjob_flags=None,
+ build_toolchain=False,
+):
+ """Calls `cros tryjob ...`
- Args:
- chromeos_root: the path to the ChromeOS root, needed for finding chromite
- and launching the buildbot.
- buildbot_name: the name of the buildbot queue, such as lumpy-release or
- daisy-paladin.
- patch_list: a python list of the patches, if any, for the buildbot to use.
- tryjob_flags: See cros tryjob --help for available options.
- build_toolchain: builds and uses the latest toolchain, rather than the
- prebuilt one in SDK.
+ Args:
+ chromeos_root: the path to the ChromeOS root, needed for finding chromite
+ and launching the buildbot.
+ buildbot_name: the name of the buildbot queue, such as lumpy-release or
+ daisy-paladin.
+ patch_list: a python list of the patches, if any, for the buildbot to use.
+ tryjob_flags: See cros tryjob --help for available options.
+ build_toolchain: builds and uses the latest toolchain, rather than the
+ prebuilt one in SDK.
- Returns:
- buildbucket id
- """
- patch_arg = ''
- if patch_list:
- for p in patch_list:
- patch_arg = patch_arg + ' -g ' + repr(p)
- if not tryjob_flags:
- tryjob_flags = []
- if build_toolchain:
- tryjob_flags.append('--latest-toolchain')
- tryjob_flags = ' '.join(tryjob_flags)
+ Returns:
+ buildbucket id
+ """
+ patch_arg = ""
+ if patch_list:
+ for p in patch_list:
+ patch_arg = patch_arg + " -g " + repr(p)
+ if not tryjob_flags:
+ tryjob_flags = []
+ if build_toolchain:
+ tryjob_flags.append("--latest-toolchain")
+ tryjob_flags = " ".join(tryjob_flags)
- # Launch buildbot with appropriate flags.
- build = buildbot_name
- command = ('cros_sdk -- cros tryjob --yes --json --nochromesdk %s %s %s' %
- (tryjob_flags, patch_arg, build))
- print('CMD: %s' % command)
- _, out, _ = RunCommandInPath(chromeos_root, command)
- buildbucket_id = ParseTryjobBuildbucketId(out)
- print('buildbucket_id: %s' % repr(buildbucket_id))
- if not buildbucket_id:
- logger.GetLogger().LogFatal('Error occurred while launching trybot job: '
- '%s' % command)
- return buildbucket_id
-
-
-def GetTrybotImage(chromeos_root,
- buildbot_name,
- patch_list,
- tryjob_flags=None,
- build_toolchain=False,
- asynchronous=False):
- """Launch buildbot and get resulting trybot artifact name.
-
- This function launches a buildbot with the appropriate flags to
- build the test ChromeOS image, with the current ToT mobile compiler. It
- checks every 10 minutes to see if the trybot has finished. When the trybot
- has finished, it parses the resulting report logs to find the trybot
- artifact (if one was created), and returns that artifact name.
-
- Args:
- chromeos_root: the path to the ChromeOS root, needed for finding chromite
- and launching the buildbot.
- buildbot_name: the name of the buildbot queue, such as lumpy-release or
- daisy-paladin.
- patch_list: a python list of the patches, if any, for the buildbot to use.
- tryjob_flags: See cros tryjob --help for available options.
- build_toolchain: builds and uses the latest toolchain, rather than the
- prebuilt one in SDK.
- asynchronous: don't wait for artifacts; just return the buildbucket id
-
- Returns:
- (buildbucket id, partial image url) e.g.
- (8952271933586980528, trybot-elm-release-tryjob/R67-10480.0.0-b2373596)
- """
- buildbucket_id = SubmitTryjob(chromeos_root, buildbot_name, patch_list,
- tryjob_flags, build_toolchain)
- if asynchronous:
- return buildbucket_id, ' '
-
- # The trybot generally takes more than 2 hours to finish.
- # Wait two hours before polling the status.
- time.sleep(INITIAL_SLEEP_TIME)
- elapsed = INITIAL_SLEEP_TIME
- status = 'running'
- image = ''
- while True:
- status, image = PeekTrybotImage(chromeos_root, buildbucket_id)
- if status == 'running':
- if elapsed > TIME_OUT:
+ # Launch buildbot with appropriate flags.
+ build = buildbot_name
+ command = "cros_sdk -- cros tryjob --yes --json --nochromesdk %s %s %s" % (
+ tryjob_flags,
+ patch_arg,
+ build,
+ )
+ print("CMD: %s" % command)
+ _, out, _ = RunCommandInPath(chromeos_root, command)
+ buildbucket_id = ParseTryjobBuildbucketId(out)
+ print("buildbucket_id: %s" % repr(buildbucket_id))
+ if not buildbucket_id:
logger.GetLogger().LogFatal(
- 'Unable to get build result for target %s.' % buildbot_name)
- else:
- wait_msg = 'Unable to find build result; job may be running.'
- logger.GetLogger().LogOutput(wait_msg)
- logger.GetLogger().LogOutput('{0} minutes elapsed.'.format(elapsed / 60))
- logger.GetLogger().LogOutput('Sleeping {0} seconds.'.format(SLEEP_TIME))
- time.sleep(SLEEP_TIME)
- elapsed += SLEEP_TIME
+ "Error occurred while launching trybot job: " "%s" % command
+ )
+ return buildbucket_id
+
+
+def GetTrybotImage(
+ chromeos_root,
+ buildbot_name,
+ patch_list,
+ tryjob_flags=None,
+ build_toolchain=False,
+ asynchronous=False,
+):
+ """Launch buildbot and get resulting trybot artifact name.
+
+ This function launches a buildbot with the appropriate flags to
+ build the test ChromeOS image, with the current ToT mobile compiler. It
+ checks every 10 minutes to see if the trybot has finished. When the trybot
+ has finished, it parses the resulting report logs to find the trybot
+ artifact (if one was created), and returns that artifact name.
+
+ Args:
+ chromeos_root: the path to the ChromeOS root, needed for finding chromite
+ and launching the buildbot.
+ buildbot_name: the name of the buildbot queue, such as lumpy-release or
+ daisy-paladin.
+ patch_list: a python list of the patches, if any, for the buildbot to use.
+ tryjob_flags: See cros tryjob --help for available options.
+ build_toolchain: builds and uses the latest toolchain, rather than the
+ prebuilt one in SDK.
+ asynchronous: don't wait for artifacts; just return the buildbucket id
+
+ Returns:
+ (buildbucket id, partial image url) e.g.
+ (8952271933586980528, trybot-elm-release-tryjob/R67-10480.0.0-b2373596)
+ """
+ buildbucket_id = SubmitTryjob(
+ chromeos_root, buildbot_name, patch_list, tryjob_flags, build_toolchain
+ )
+ if asynchronous:
+ return buildbucket_id, " "
+
+ # The trybot generally takes more than 2 hours to finish.
+ # Wait two hours before polling the status.
+ time.sleep(INITIAL_SLEEP_TIME)
+ elapsed = INITIAL_SLEEP_TIME
+ status = "running"
+ image = ""
+ while True:
+ status, image = PeekTrybotImage(chromeos_root, buildbucket_id)
+ if status == "running":
+ if elapsed > TIME_OUT:
+ logger.GetLogger().LogFatal(
+ "Unable to get build result for target %s." % buildbot_name
+ )
+ else:
+ wait_msg = "Unable to find build result; job may be running."
+ logger.GetLogger().LogOutput(wait_msg)
+ logger.GetLogger().LogOutput(f"{elapsed / 60} minutes elapsed.")
+ logger.GetLogger().LogOutput(f"Sleeping {SLEEP_TIME} seconds.")
+ time.sleep(SLEEP_TIME)
+ elapsed += SLEEP_TIME
+ else:
+ break
+
+ if not buildbot_name.endswith("-toolchain") and status == "fail":
+ # For rotating testers, we don't care about their status
+ # result, because if any HWTest failed it will be non-zero.
+ #
+ # The nightly performance tests do not run HWTests, so if
+ # their status is non-zero, we do care. In this case
+ # non-zero means the image itself probably did not build.
+ image = ""
+
+ if not image:
+ logger.GetLogger().LogError(
+ "Trybot job (buildbucket id: %s) failed with"
+ "status %s; no trybot image generated. " % (buildbucket_id, status)
+ )
else:
- break
+ # Convert full gs path to what crosperf expects. For example, convert
+ # gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-b20789
+ # to
+ # trybot-elm-release-tryjob/R67-10468.0.0-b20789
+ image = "/".join(image.split("/")[-2:])
- if not buildbot_name.endswith('-toolchain') and status == 'fail':
- # For rotating testers, we don't care about their status
- # result, because if any HWTest failed it will be non-zero.
- #
- # The nightly performance tests do not run HWTests, so if
- # their status is non-zero, we do care. In this case
- # non-zero means the image itself probably did not build.
- image = ''
-
- if not image:
- logger.GetLogger().LogError('Trybot job (buildbucket id: %s) failed with'
- 'status %s; no trybot image generated. ' %
- (buildbucket_id, status))
- else:
- # Convert full gs path to what crosperf expects. For example, convert
- # gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-b20789
- # to
- # trybot-elm-release-tryjob/R67-10468.0.0-b20789
- image = '/'.join(image.split('/')[-2:])
-
- logger.GetLogger().LogOutput("image is '%s'" % image)
- logger.GetLogger().LogOutput('status is %s' % status)
- return buildbucket_id, image
+ logger.GetLogger().LogOutput("image is '%s'" % image)
+ logger.GetLogger().LogOutput("status is %s" % status)
+ return buildbucket_id, image
def DoesImageExist(chromeos_root, build):
- """Check if the image for the given build exists."""
+ """Check if the image for the given build exists."""
- ce = command_executer.GetCommandExecuter()
- command = ('gsutil ls gs://chromeos-image-archive/%s'
- '/chromiumos_test_image.tar.xz' % (build))
- ret = ce.ChrootRunCommand(chromeos_root, command, print_to_console=False)
- return not ret
+ ce = command_executer.GetCommandExecuter()
+ command = (
+ "gsutil ls gs://chromeos-image-archive/%s"
+ "/chromiumos_test_image.tar.xz" % (build)
+ )
+ ret = ce.ChrootRunCommand(chromeos_root, command, print_to_console=False)
+ return not ret
def WaitForImage(chromeos_root, build):
- """Wait for an image to be ready."""
+ """Wait for an image to be ready."""
- elapsed_time = 0
- while elapsed_time < TIME_OUT:
- if DoesImageExist(chromeos_root, build):
- return
- logger.GetLogger().LogOutput('Image %s not ready, waiting for 10 minutes' %
- build)
- time.sleep(SLEEP_TIME)
- elapsed_time += SLEEP_TIME
+ elapsed_time = 0
+ while elapsed_time < TIME_OUT:
+ if DoesImageExist(chromeos_root, build):
+ return
+ logger.GetLogger().LogOutput(
+ "Image %s not ready, waiting for 10 minutes" % build
+ )
+ time.sleep(SLEEP_TIME)
+ elapsed_time += SLEEP_TIME
- logger.GetLogger().LogOutput('Image %s not found, waited for %d hours' %
- (build, (TIME_OUT / 3600)))
- raise BuildbotTimeout('Timeout while waiting for image %s' % build)
+ logger.GetLogger().LogOutput(
+ "Image %s not found, waited for %d hours" % (build, (TIME_OUT / 3600))
+ )
+ raise BuildbotTimeout("Timeout while waiting for image %s" % build)
def GetLatestImage(chromeos_root, path):
- """Get latest image"""
+ """Get latest image"""
- fmt = re.compile(r'R([0-9]+)-([0-9]+).([0-9]+).([0-9]+)')
+ fmt = re.compile(r"R([0-9]+)-([0-9]+).([0-9]+).([0-9]+)")
- ce = command_executer.GetCommandExecuter()
- command = ('gsutil ls gs://chromeos-image-archive/%s' % path)
- ret, out, _ = ce.ChrootRunCommandWOutput(
- chromeos_root, command, print_to_console=False)
- if ret != 0:
- raise RuntimeError('Failed to list buckets with command: %s.' % command)
- candidates = [l.split('/')[-2] for l in out.split()]
- candidates = [fmt.match(c) for c in candidates]
- candidates = [[int(r) for r in m.group(1, 2, 3, 4)] for m in candidates if m]
- candidates.sort(reverse=True)
- for c in candidates:
- build = '%s/R%d-%d.%d.%d' % (path, c[0], c[1], c[2], c[3])
- # Denylist "R79-12384.0.0" image released by mistake.
- # TODO(crbug.com/992242): Remove the filter by 2019-09-05.
- if c == [79, 12384, 0, 0]:
- continue
-
- if DoesImageExist(chromeos_root, build):
- return build
+ ce = command_executer.GetCommandExecuter()
+ command = "gsutil ls gs://chromeos-image-archive/%s" % path
+ ret, out, _ = ce.ChrootRunCommandWOutput(
+ chromeos_root, command, print_to_console=False
+ )
+ if ret != 0:
+ raise RuntimeError("Failed to list buckets with command: %s." % command)
+ candidates = [l.split("/")[-2] for l in out.split()]
+ candidates = [fmt.match(c) for c in candidates]
+ candidates = [
+ [int(r) for r in m.group(1, 2, 3, 4)] for m in candidates if m
+ ]
+ candidates.sort(reverse=True)
+ for c in candidates:
+ build = "%s/R%d-%d.%d.%d" % (path, c[0], c[1], c[2], c[3])
+ if DoesImageExist(chromeos_root, build):
+ return build
def GetLatestRecipeImage(chromeos_root, path):
- """Get latest nightly test image from recipe bucket.
+ """Get latest nightly test image from recipe bucket.
- Image location example:
- $ARCHIVE/lulu-llvm-next-nightly/R84-13037.0.0-31011-8883172717979984032
- """
+ Image location example:
+ $ARCHIVE/lulu-llvm-next-nightly/R84-13037.0.0-31011-8883172717979984032
+ """
- fmt = re.compile(r'R([0-9]+)-([0-9]+).([0-9]+).([0-9]+)-([0-9]+)')
+ fmt = re.compile(r"R([0-9]+)-([0-9]+).([0-9]+).([0-9]+)-([0-9]+)")
- ce = command_executer.GetCommandExecuter()
- command = ('gsutil ls gs://chromeos-image-archive/%s' % path)
- ret, out, _ = ce.ChrootRunCommandWOutput(
- chromeos_root, command, print_to_console=False)
- if ret != 0:
- raise RuntimeError('Failed to list buckets with command: %s.' % command)
- candidates = [l.split('/')[-2] for l in out.split()]
- candidates = [(fmt.match(c), c) for c in candidates]
- candidates = [([int(r)
- for r in m[0].group(1, 2, 3, 4, 5)], m[1])
- for m in candidates
- if m]
- candidates.sort(key=lambda x: x[0], reverse=True)
- # Try to get ony last two days of images since nightly tests are run once
- # another day.
- for c in candidates[:2]:
- build = '%s/%s' % (path, c[1])
- if DoesImageExist(chromeos_root, build):
- return build
+ ce = command_executer.GetCommandExecuter()
+ command = "gsutil ls gs://chromeos-image-archive/%s" % path
+ ret, out, _ = ce.ChrootRunCommandWOutput(
+ chromeos_root, command, print_to_console=False
+ )
+ if ret != 0:
+ raise RuntimeError("Failed to list buckets with command: %s." % command)
+ candidates = [l.split("/")[-2] for l in out.split()]
+ candidates = [(fmt.match(c), c) for c in candidates]
+ candidates = [
+ ([int(r) for r in m[0].group(1, 2, 3, 4, 5)], m[1])
+ for m in candidates
+ if m
+ ]
+ candidates.sort(key=lambda x: x[0], reverse=True)
+    # Try to get only last two days of images since nightly tests are run
+    # every other day.
+ for c in candidates[:2]:
+ build = "%s/%s" % (path, c[1])
+ if DoesImageExist(chromeos_root, build):
+ return build
diff --git a/cros_utils/buildbot_utils_unittest.py b/cros_utils/buildbot_utils_unittest.py
index c615c95..2c9585b 100755
--- a/cros_utils/buildbot_utils_unittest.py
+++ b/cros_utils/buildbot_utils_unittest.py
@@ -1,16 +1,14 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for buildbot_utils.py."""
-from __future__ import print_function
import time
-
import unittest
from unittest.mock import patch
@@ -19,160 +17,226 @@
class TrybotTest(unittest.TestCase):
- """Test for CommandExecuter class."""
+ """Test for CommandExecuter class."""
- tryjob_out = (
- '[{"buildbucket_id": "8952721143823688176", "build_config": '
- '"cave-llvm-toolchain-tryjob", "url": '
- # pylint: disable=line-too-long
- '"http://cros-goldeneye/chromeos/healthmonitoring/buildDetails?buildbucketId=8952721143823688176"}]'
- )
+ tryjob_out = (
+ '[{"buildbucket_id": "8952721143823688176", "build_config": '
+ '"cave-llvm-toolchain-tryjob", "url": '
+ # pylint: disable=line-too-long
+ '"http://cros-goldeneye/chromeos/healthmonitoring/buildDetails?buildbucketId=8952721143823688176"}]'
+ )
- GSUTILS_LS = '\n'.join([
- 'gs://chromeos-image-archive/{0}/R78-12421.0.0/',
- 'gs://chromeos-image-archive/{0}/R78-12422.0.0/',
- 'gs://chromeos-image-archive/{0}/R78-12423.0.0/',
- ])
+ GSUTILS_LS = "\n".join(
+ [
+ "gs://chromeos-image-archive/{0}/R78-12421.0.0/",
+ "gs://chromeos-image-archive/{0}/R78-12422.0.0/",
+ "gs://chromeos-image-archive/{0}/R78-12423.0.0/",
+ ]
+ )
- GSUTILS_LS_RECIPE = '\n'.join([
- 'gs://chromeos-image-archive/{0}/R83-12995.0.0-30031-8885075268947031/',
- 'gs://chromeos-image-archive/{0}/R83-13003.0.0-30196-8884755532184725/',
- 'gs://chromeos-image-archive/{0}/R83-13003.0.0-30218-8884712858556419/',
- ])
+ GSUTILS_LS_RECIPE = "\n".join(
+ [
+ "gs://chromeos-image-archive/{0}/R83-12995.0.0-30031-8885075268947031/",
+ "gs://chromeos-image-archive/{0}/R83-13003.0.0-30196-8884755532184725/",
+ "gs://chromeos-image-archive/{0}/R83-13003.0.0-30218-8884712858556419/",
+ ]
+ )
- buildresult_out = (
- '{"8952721143823688176": {"status": "pass", "artifacts_url":'
- '"gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-'
- 'b20789"}}')
+ buildresult_out = (
+ '{"8952721143823688176": {"status": "pass", "artifacts_url":'
+ '"gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-'
+ 'b20789"}}'
+ )
- buildbucket_id = '8952721143823688176'
- counter_1 = 10
+ buildbucket_id = "8952721143823688176"
+ counter_1 = 10
- def testGetTrybotImage(self):
- with patch.object(buildbot_utils, 'SubmitTryjob') as mock_submit:
- with patch.object(buildbot_utils, 'PeekTrybotImage') as mock_peek:
- with patch.object(time, 'sleep', return_value=None):
+ def testGetTrybotImage(self):
+ with patch.object(buildbot_utils, "SubmitTryjob") as mock_submit:
+ with patch.object(buildbot_utils, "PeekTrybotImage") as mock_peek:
+ with patch.object(time, "sleep", return_value=None):
- def peek(_chromeos_root, _buildbucket_id):
- self.counter_1 -= 1
- if self.counter_1 >= 0:
- return ('running', '')
- return ('pass',
- 'gs://chromeos-image-archive/trybot-elm-release-tryjob/'
- 'R67-10468.0.0-b20789')
+ def peek(_chromeos_root, _buildbucket_id):
+ self.counter_1 -= 1
+ if self.counter_1 >= 0:
+ return ("running", "")
+ return (
+ "pass",
+ "gs://chromeos-image-archive/trybot-elm-release-tryjob/"
+ "R67-10468.0.0-b20789",
+ )
- mock_peek.side_effect = peek
- mock_submit.return_value = self.buildbucket_id
+ mock_peek.side_effect = peek
+ mock_submit.return_value = self.buildbucket_id
- # sync
- buildbucket_id, image = buildbot_utils.GetTrybotImage(
- '/tmp', 'falco-release-tryjob', [])
- self.assertEqual(buildbucket_id, self.buildbucket_id)
- self.assertEqual('trybot-elm-release-tryjob/'
- 'R67-10468.0.0-b20789', image)
+ # sync
+ buildbucket_id, image = buildbot_utils.GetTrybotImage(
+ "/tmp", "falco-release-tryjob", []
+ )
+ self.assertEqual(buildbucket_id, self.buildbucket_id)
+ self.assertEqual(
+ "trybot-elm-release-tryjob/" "R67-10468.0.0-b20789",
+ image,
+ )
- # async
- buildbucket_id, image = buildbot_utils.GetTrybotImage(
- '/tmp', 'falco-release-tryjob', [], asynchronous=True)
- self.assertEqual(buildbucket_id, self.buildbucket_id)
- self.assertEqual(' ', image)
+ # async
+ buildbucket_id, image = buildbot_utils.GetTrybotImage(
+ "/tmp", "falco-release-tryjob", [], asynchronous=True
+ )
+ self.assertEqual(buildbucket_id, self.buildbucket_id)
+ self.assertEqual(" ", image)
- def testSubmitTryjob(self):
- with patch.object(command_executer.CommandExecuter,
- 'RunCommandWOutput') as mocked_run:
- mocked_run.return_value = (0, self.tryjob_out, '')
- buildbucket_id = buildbot_utils.SubmitTryjob('/', 'falco-release-tryjob',
- [], [])
- self.assertEqual(buildbucket_id, self.buildbucket_id)
+ def testSubmitTryjob(self):
+ with patch.object(
+ command_executer.CommandExecuter, "RunCommandWOutput"
+ ) as mocked_run:
+ mocked_run.return_value = (0, self.tryjob_out, "")
+ buildbucket_id = buildbot_utils.SubmitTryjob(
+ "/", "falco-release-tryjob", [], []
+ )
+ self.assertEqual(buildbucket_id, self.buildbucket_id)
- def testPeekTrybotImage(self):
- with patch.object(command_executer.CommandExecuter,
- 'RunCommandWOutput') as mocked_run:
- # pass
- mocked_run.return_value = (0, self.buildresult_out, '')
- status, image = buildbot_utils.PeekTrybotImage('/', self.buildbucket_id)
- self.assertEqual('pass', status)
- self.assertEqual(
- 'gs://chromeos-image-archive/trybot-elm-release-tryjob/'
- 'R67-10468.0.0-b20789', image)
+ def testPeekTrybotImage(self):
+ with patch.object(
+ command_executer.CommandExecuter, "RunCommandWOutput"
+ ) as mocked_run:
+ # pass
+ mocked_run.return_value = (0, self.buildresult_out, "")
+ status, image = buildbot_utils.PeekTrybotImage(
+ "/", self.buildbucket_id
+ )
+ self.assertEqual("pass", status)
+ self.assertEqual(
+ "gs://chromeos-image-archive/trybot-elm-release-tryjob/"
+ "R67-10468.0.0-b20789",
+ image,
+ )
- # running
- mocked_run.return_value = (1, '', '')
- status, image = buildbot_utils.PeekTrybotImage('/', self.buildbucket_id)
- self.assertEqual('running', status)
- self.assertEqual(None, image)
+ # running
+ mocked_run.return_value = (1, "", "")
+ status, image = buildbot_utils.PeekTrybotImage(
+ "/", self.buildbucket_id
+ )
+ self.assertEqual("running", status)
+ self.assertEqual(None, image)
- # fail
- buildresult_fail = self.buildresult_out.replace('\"pass\"', '\"fail\"')
- mocked_run.return_value = (0, buildresult_fail, '')
- status, image = buildbot_utils.PeekTrybotImage('/', self.buildbucket_id)
- self.assertEqual('fail', status)
- self.assertEqual(
- 'gs://chromeos-image-archive/trybot-elm-release-tryjob/'
- 'R67-10468.0.0-b20789', image)
+ # fail
+ buildresult_fail = self.buildresult_out.replace('"pass"', '"fail"')
+ mocked_run.return_value = (0, buildresult_fail, "")
+ status, image = buildbot_utils.PeekTrybotImage(
+ "/", self.buildbucket_id
+ )
+ self.assertEqual("fail", status)
+ self.assertEqual(
+ "gs://chromeos-image-archive/trybot-elm-release-tryjob/"
+ "R67-10468.0.0-b20789",
+ image,
+ )
- def testParseTryjobBuildbucketId(self):
- buildbucket_id = buildbot_utils.ParseTryjobBuildbucketId(self.tryjob_out)
- self.assertEqual(buildbucket_id, self.buildbucket_id)
+ def testParseTryjobBuildbucketId(self):
+ buildbucket_id = buildbot_utils.ParseTryjobBuildbucketId(
+ self.tryjob_out
+ )
+ self.assertEqual(buildbucket_id, self.buildbucket_id)
- def testGetLatestImageValid(self):
- with patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput') as mocked_run:
- with patch.object(buildbot_utils, 'DoesImageExist') as mocked_imageexist:
- IMAGE_DIR = 'lulu-release'
- mocked_run.return_value = (0, self.GSUTILS_LS.format(IMAGE_DIR), '')
- mocked_imageexist.return_value = True
- image = buildbot_utils.GetLatestImage('', IMAGE_DIR)
- self.assertEqual(image, '{0}/R78-12423.0.0'.format(IMAGE_DIR))
+ def testGetLatestImageValid(self):
+ with patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ ) as mocked_run:
+ with patch.object(
+ buildbot_utils, "DoesImageExist"
+ ) as mocked_imageexist:
+ IMAGE_DIR = "lulu-release"
+ mocked_run.return_value = (
+ 0,
+ self.GSUTILS_LS.format(IMAGE_DIR),
+ "",
+ )
+ mocked_imageexist.return_value = True
+ image = buildbot_utils.GetLatestImage("", IMAGE_DIR)
+ self.assertEqual(image, "{0}/R78-12423.0.0".format(IMAGE_DIR))
- def testGetLatestImageInvalid(self):
- with patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput') as mocked_run:
- with patch.object(buildbot_utils, 'DoesImageExist') as mocked_imageexist:
- IMAGE_DIR = 'kefka-release'
- mocked_run.return_value = (0, self.GSUTILS_LS.format(IMAGE_DIR), '')
- mocked_imageexist.return_value = False
- image = buildbot_utils.GetLatestImage('', IMAGE_DIR)
- self.assertIsNone(image)
+ def testGetLatestImageInvalid(self):
+ with patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ ) as mocked_run:
+ with patch.object(
+ buildbot_utils, "DoesImageExist"
+ ) as mocked_imageexist:
+ IMAGE_DIR = "kefka-release"
+ mocked_run.return_value = (
+ 0,
+ self.GSUTILS_LS.format(IMAGE_DIR),
+ "",
+ )
+ mocked_imageexist.return_value = False
+ image = buildbot_utils.GetLatestImage("", IMAGE_DIR)
+ self.assertIsNone(image)
- def testGetLatestRecipeImageValid(self):
- with patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput') as mocked_run:
- with patch.object(buildbot_utils, 'DoesImageExist') as mocked_imageexist:
- IMAGE_DIR = 'lulu-llvm-next-nightly'
- mocked_run.return_value = (0, self.GSUTILS_LS_RECIPE.format(IMAGE_DIR),
- '')
- mocked_imageexist.return_value = True
- image = buildbot_utils.GetLatestRecipeImage('', IMAGE_DIR)
- self.assertEqual(
- image, '{0}/R83-13003.0.0-30218-8884712858556419'.format(IMAGE_DIR))
+ def testGetLatestRecipeImageValid(self):
+ with patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ ) as mocked_run:
+ with patch.object(
+ buildbot_utils, "DoesImageExist"
+ ) as mocked_imageexist:
+ IMAGE_DIR = "lulu-llvm-next-nightly"
+ mocked_run.return_value = (
+ 0,
+ self.GSUTILS_LS_RECIPE.format(IMAGE_DIR),
+ "",
+ )
+ mocked_imageexist.return_value = True
+ image = buildbot_utils.GetLatestRecipeImage("", IMAGE_DIR)
+ self.assertEqual(
+ image,
+ "{0}/R83-13003.0.0-30218-8884712858556419".format(
+ IMAGE_DIR
+ ),
+ )
- def testGetLatestRecipeImageInvalid(self):
- with patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput') as mocked_run:
- with patch.object(buildbot_utils, 'DoesImageExist') as mocked_imageexist:
- IMAGE_DIR = 'kefka-llvm-next-nightly'
- mocked_run.return_value = (0, self.GSUTILS_LS_RECIPE.format(IMAGE_DIR),
- '')
- mocked_imageexist.return_value = False
- image = buildbot_utils.GetLatestRecipeImage('', IMAGE_DIR)
- self.assertIsNone(image)
+ def testGetLatestRecipeImageInvalid(self):
+ with patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ ) as mocked_run:
+ with patch.object(
+ buildbot_utils, "DoesImageExist"
+ ) as mocked_imageexist:
+ IMAGE_DIR = "kefka-llvm-next-nightly"
+ mocked_run.return_value = (
+ 0,
+ self.GSUTILS_LS_RECIPE.format(IMAGE_DIR),
+ "",
+ )
+ mocked_imageexist.return_value = False
+ image = buildbot_utils.GetLatestRecipeImage("", IMAGE_DIR)
+ self.assertIsNone(image)
- def testGetLatestRecipeImageTwodays(self):
- with patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput') as mocked_run:
- with patch.object(buildbot_utils, 'DoesImageExist') as mocked_imageexist:
- IMAGE_DIR = 'lulu-llvm-next-nightly'
- mocked_run.return_value = (0, self.GSUTILS_LS_RECIPE.format(IMAGE_DIR),
- '')
- mocked_imageexist.side_effect = [False, False, True]
- image = buildbot_utils.GetLatestRecipeImage('', IMAGE_DIR)
- self.assertIsNone(image)
- mocked_imageexist.side_effect = [False, True, True]
- image = buildbot_utils.GetLatestRecipeImage('', IMAGE_DIR)
- self.assertEqual(
- image, '{0}/R83-13003.0.0-30196-8884755532184725'.format(IMAGE_DIR))
+ def testGetLatestRecipeImageTwodays(self):
+ with patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ ) as mocked_run:
+ with patch.object(
+ buildbot_utils, "DoesImageExist"
+ ) as mocked_imageexist:
+ IMAGE_DIR = "lulu-llvm-next-nightly"
+ mocked_run.return_value = (
+ 0,
+ self.GSUTILS_LS_RECIPE.format(IMAGE_DIR),
+ "",
+ )
+ mocked_imageexist.side_effect = [False, False, True]
+ image = buildbot_utils.GetLatestRecipeImage("", IMAGE_DIR)
+ self.assertIsNone(image)
+ mocked_imageexist.side_effect = [False, True, True]
+ image = buildbot_utils.GetLatestRecipeImage("", IMAGE_DIR)
+ self.assertEqual(
+ image,
+ "{0}/R83-13003.0.0-30196-8884755532184725".format(
+ IMAGE_DIR
+ ),
+ )
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/cros_utils/command_executer.py b/cros_utils/command_executer.py
index cc0f337..573bb2d 100755
--- a/cros_utils/command_executer.py
+++ b/cros_utils/command_executer.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities to run commands in outside/inside chroot and on the board."""
-from __future__ import print_function
import getpass
import os
@@ -20,699 +19,775 @@
from cros_utils import logger
+
mock_default = False
-CHROMEOS_SCRIPTS_DIR = '/mnt/host/source/src/scripts'
-LOG_LEVEL = ('none', 'quiet', 'average', 'verbose')
+CHROMEOS_SCRIPTS_DIR = "/mnt/host/source/src/scripts"
+LOG_LEVEL = ("none", "quiet", "average", "verbose")
def InitCommandExecuter(mock=False):
- # pylint: disable=global-statement
- global mock_default
- # Whether to default to a mock command executer or not
- mock_default = mock
+ # pylint: disable=global-statement
+ global mock_default
+ # Whether to default to a mock command executer or not
+ mock_default = mock
-def GetCommandExecuter(logger_to_set=None, mock=False, log_level='verbose'):
- # If the default is a mock executer, always return one.
- if mock_default or mock:
- return MockCommandExecuter(log_level, logger_to_set)
- else:
- return CommandExecuter(log_level, logger_to_set)
+def GetCommandExecuter(logger_to_set=None, mock=False, log_level="verbose"):
+ # If the default is a mock executer, always return one.
+ if mock_default or mock:
+ return MockCommandExecuter(log_level, logger_to_set)
+ else:
+ return CommandExecuter(log_level, logger_to_set)
class CommandExecuter(object):
- """Provides several methods to execute commands on several environments."""
+ """Provides several methods to execute commands on several environments."""
- def __init__(self, log_level, logger_to_set=None):
- self.log_level = log_level
- if log_level == 'none':
- self.logger = None
- else:
- if logger_to_set is not None:
- self.logger = logger_to_set
- else:
- self.logger = logger.GetLogger()
+ def __init__(self, log_level, logger_to_set=None):
+ self.log_level = log_level
+ if log_level == "none":
+ self.logger = None
+ else:
+ if logger_to_set is not None:
+ self.logger = logger_to_set
+ else:
+ self.logger = logger.GetLogger()
- def GetLogLevel(self):
- return self.log_level
+ def GetLogLevel(self):
+ return self.log_level
- def SetLogLevel(self, log_level):
- self.log_level = log_level
+ def SetLogLevel(self, log_level):
+ self.log_level = log_level
- def RunCommandGeneric(self,
- cmd,
- return_output=False,
- machine=None,
- username=None,
- command_terminator=None,
- command_timeout=None,
- terminated_timeout=10,
- print_to_console=True,
- env=None,
- except_handler=lambda p, e: None):
- """Run a command.
+ def RunCommandGeneric(
+ self,
+ cmd,
+ return_output=False,
+ machine=None,
+ username=None,
+ command_terminator=None,
+ command_timeout=None,
+ terminated_timeout=10,
+ print_to_console=True,
+ env=None,
+ except_handler=lambda p, e: None,
+ ):
+ """Run a command.
- Returns triplet (returncode, stdout, stderr).
- """
+ Returns triplet (returncode, stdout, stderr).
+ """
- cmd = str(cmd)
+ cmd = str(cmd)
- if self.log_level == 'quiet':
- print_to_console = False
+ if self.log_level == "quiet":
+ print_to_console = False
- if self.log_level == 'verbose':
- self.logger.LogCmd(cmd, machine, username, print_to_console)
- elif self.logger:
- self.logger.LogCmdToFileOnly(cmd, machine, username)
- if command_terminator and command_terminator.IsTerminated():
- if self.logger:
- self.logger.LogError('Command was terminated!', print_to_console)
- return (1, '', '')
-
- if machine is not None:
- user = ''
- if username is not None:
- user = username + '@'
- cmd = "ssh -t -t %s%s -- '%s'" % (user, machine, cmd)
-
- # We use setsid so that the child will have a different session id
- # and we can easily kill the process group. This is also important
- # because the child will be disassociated from the parent terminal.
- # In this way the child cannot mess the parent's terminal.
- p = None
- try:
- # pylint: disable=bad-option-value, subprocess-popen-preexec-fn
- p = subprocess.Popen(cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=True,
- preexec_fn=os.setsid,
- executable='/bin/bash',
- env=env)
-
- full_stdout = ''
- full_stderr = ''
-
- # Pull output from pipes, send it to file/stdout/string
- out = err = None
- pipes = [p.stdout, p.stderr]
-
- my_poll = select.poll()
- my_poll.register(p.stdout, select.POLLIN)
- my_poll.register(p.stderr, select.POLLIN)
-
- terminated_time = None
- started_time = time.time()
-
- while pipes:
+ if self.log_level == "verbose":
+ self.logger.LogCmd(cmd, machine, username, print_to_console)
+ elif self.logger:
+ self.logger.LogCmdToFileOnly(cmd, machine, username)
if command_terminator and command_terminator.IsTerminated():
- os.killpg(os.getpgid(p.pid), signal.SIGTERM)
- if self.logger:
- self.logger.LogError(
- 'Command received termination request. '
- 'Killed child process group.', print_to_console)
- break
+ if self.logger:
+ self.logger.LogError(
+ "Command was terminated!", print_to_console
+ )
+ return (1, "", "")
- l = my_poll.poll(100)
- for (fd, _) in l:
- if fd == p.stdout.fileno():
- out = os.read(p.stdout.fileno(), 16384).decode('utf8')
+ if machine is not None:
+ user = ""
+ if username is not None:
+ user = username + "@"
+ cmd = "ssh -t -t %s%s -- '%s'" % (user, machine, cmd)
+
+ # We use setsid so that the child will have a different session id
+ # and we can easily kill the process group. This is also important
+ # because the child will be disassociated from the parent terminal.
+ # In this way the child cannot mess the parent's terminal.
+ p = None
+ try:
+ # pylint: disable=bad-option-value, subprocess-popen-preexec-fn
+ p = subprocess.Popen(
+ cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True,
+ preexec_fn=os.setsid,
+ executable="/bin/bash",
+ env=env,
+ )
+
+ full_stdout = ""
+ full_stderr = ""
+
+ # Pull output from pipes, send it to file/stdout/string
+ out = err = None
+ pipes = [p.stdout, p.stderr]
+
+ my_poll = select.poll()
+ my_poll.register(p.stdout, select.POLLIN)
+ my_poll.register(p.stderr, select.POLLIN)
+
+ terminated_time = None
+ started_time = time.time()
+
+ while pipes:
+ if command_terminator and command_terminator.IsTerminated():
+ os.killpg(os.getpgid(p.pid), signal.SIGTERM)
+ if self.logger:
+ self.logger.LogError(
+ "Command received termination request. "
+ "Killed child process group.",
+ print_to_console,
+ )
+ break
+
+ l = my_poll.poll(100)
+ for (fd, _) in l:
+ if fd == p.stdout.fileno():
+ out = os.read(p.stdout.fileno(), 16384).decode("utf8")
+ if return_output:
+ full_stdout += out
+ if self.logger:
+ self.logger.LogCommandOutput(out, print_to_console)
+ if out == "":
+ pipes.remove(p.stdout)
+ my_poll.unregister(p.stdout)
+ if fd == p.stderr.fileno():
+ err = os.read(p.stderr.fileno(), 16384).decode("utf8")
+ if return_output:
+ full_stderr += err
+ if self.logger:
+ self.logger.LogCommandError(err, print_to_console)
+ if err == "":
+ pipes.remove(p.stderr)
+ my_poll.unregister(p.stderr)
+
+ if p.poll() is not None:
+ if terminated_time is None:
+ terminated_time = time.time()
+ elif (
+ terminated_timeout is not None
+ and time.time() - terminated_time > terminated_timeout
+ ):
+ if self.logger:
+ self.logger.LogWarning(
+ "Timeout of %s seconds reached since "
+ "process termination." % terminated_timeout,
+ print_to_console,
+ )
+ break
+
+ if (
+ command_timeout is not None
+ and time.time() - started_time > command_timeout
+ ):
+ os.killpg(os.getpgid(p.pid), signal.SIGTERM)
+ if self.logger:
+ self.logger.LogWarning(
+                        "Timeout of %s seconds reached since process "
+                        "started. Killed child process group."
+ % command_timeout,
+ print_to_console,
+ )
+ break
+
+ if out == err == "":
+ break
+
+ p.wait()
if return_output:
- full_stdout += out
+ return (p.returncode, full_stdout, full_stderr)
+ return (p.returncode, "", "")
+ except BaseException as err:
+ except_handler(p, err)
+ raise
+
+ def RunCommand(self, *args, **kwargs):
+ """Run a command.
+
+ Takes the same arguments as RunCommandGeneric except for return_output.
+ Returns a single value returncode.
+ """
+ # Make sure that args does not overwrite 'return_output'
+ assert len(args) <= 1
+ assert "return_output" not in kwargs
+ kwargs["return_output"] = False
+ return self.RunCommandGeneric(*args, **kwargs)[0]
+
+ def RunCommandWExceptionCleanup(self, *args, **kwargs):
+ """Run a command and kill process if exception is thrown.
+
+ Takes the same arguments as RunCommandGeneric except for except_handler.
+ Returns same as RunCommandGeneric.
+ """
+
+ def KillProc(proc, _):
+ if proc:
+ os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
+
+ # Make sure that args does not overwrite 'except_handler'
+ assert len(args) <= 8
+ assert "except_handler" not in kwargs
+ kwargs["except_handler"] = KillProc
+ return self.RunCommandGeneric(*args, **kwargs)
+
+ def RunCommandWOutput(self, *args, **kwargs):
+ """Run a command.
+
+ Takes the same arguments as RunCommandGeneric except for return_output.
+ Returns a triplet (returncode, stdout, stderr).
+ """
+ # Make sure that args does not overwrite 'return_output'
+ assert len(args) <= 1
+ assert "return_output" not in kwargs
+ kwargs["return_output"] = True
+ return self.RunCommandGeneric(*args, **kwargs)
+
+ def RemoteAccessInitCommand(self, chromeos_root, machine, port=None):
+ command = ""
+ command += "\nset -- --remote=" + machine
+ if port:
+ command += " --ssh_port=" + port
+ command += "\n. " + chromeos_root + "/src/scripts/common.sh"
+ command += "\n. " + chromeos_root + "/src/scripts/remote_access.sh"
+ command += "\nTMP=$(mktemp -d)"
+ command += '\nFLAGS "$@" || exit 1'
+ command += "\nremote_access_init"
+ return command
+
+ def WriteToTempShFile(self, contents):
+ with tempfile.NamedTemporaryFile(
+ "w",
+ encoding="utf-8",
+ delete=False,
+ prefix=os.uname()[1],
+ suffix=".sh",
+ ) as f:
+ f.write("#!/bin/bash\n")
+ f.write(contents)
+ f.flush()
+ return f.name
+
+ def CrosLearnBoard(self, chromeos_root, machine):
+ command = self.RemoteAccessInitCommand(chromeos_root, machine)
+ command += "\nlearn_board"
+ command += "\necho ${FLAGS_board}"
+ retval, output, _ = self.RunCommandWOutput(command)
+ if self.logger:
+ self.logger.LogFatalIf(retval, "learn_board command failed")
+ elif retval:
+ sys.exit(1)
+ return output.split()[-1]
+
+ def CrosRunCommandGeneric(
+ self,
+ cmd,
+ return_output=False,
+ machine=None,
+ command_terminator=None,
+ chromeos_root=None,
+ command_timeout=None,
+ terminated_timeout=10,
+ print_to_console=True,
+ ):
+ """Run a command on a ChromeOS box.
+
+ Returns triplet (returncode, stdout, stderr).
+ """
+
+ if self.log_level != "verbose":
+ print_to_console = False
+
+ if self.logger:
+ self.logger.LogCmd(cmd, print_to_console=print_to_console)
+ self.logger.LogFatalIf(not machine, "No machine provided!")
+ self.logger.LogFatalIf(
+ not chromeos_root, "chromeos_root not given!"
+ )
+ else:
+ if not chromeos_root or not machine:
+ sys.exit(1)
+ chromeos_root = os.path.expanduser(chromeos_root)
+
+ port = None
+ if ":" in machine:
+ machine, port = machine.split(":")
+ # Write all commands to a file.
+ command_file = self.WriteToTempShFile(cmd)
+ retval = self.CopyFiles(
+ command_file,
+ command_file,
+ dest_machine=machine,
+ dest_port=port,
+ command_terminator=command_terminator,
+ chromeos_root=chromeos_root,
+ dest_cros=True,
+ recursive=False,
+ print_to_console=print_to_console,
+ )
+ if retval:
if self.logger:
- self.logger.LogCommandOutput(out, print_to_console)
- if out == '':
- pipes.remove(p.stdout)
- my_poll.unregister(p.stdout)
- if fd == p.stderr.fileno():
- err = os.read(p.stderr.fileno(), 16384).decode('utf8')
- if return_output:
- full_stderr += err
+ self.logger.LogError(
+ "Could not run remote command on machine."
+ " Is the machine up?"
+ )
+ return (retval, "", "")
+
+ command = self.RemoteAccessInitCommand(chromeos_root, machine, port)
+ command += "\nremote_sh bash %s" % command_file
+ command += '\nl_retval=$?; echo "$REMOTE_OUT"; exit $l_retval'
+ retval = self.RunCommandGeneric(
+ command,
+ return_output,
+ command_terminator=command_terminator,
+ command_timeout=command_timeout,
+ terminated_timeout=terminated_timeout,
+ print_to_console=print_to_console,
+ )
+ if return_output:
+ connect_signature = (
+ "Initiating first contact with remote host\n"
+ + "Connection OK\n"
+ )
+ connect_signature_re = re.compile(connect_signature)
+ modded_retval = list(retval)
+ modded_retval[1] = connect_signature_re.sub("", retval[1])
+ return modded_retval
+ return retval
+
+ def CrosRunCommand(self, *args, **kwargs):
+ """Run a command on a ChromeOS box.
+
+ Takes the same arguments as CrosRunCommandGeneric except for return_output.
+ Returns a single value returncode.
+ """
+ # Make sure that args does not overwrite 'return_output'
+ assert len(args) <= 1
+ assert "return_output" not in kwargs
+ kwargs["return_output"] = False
+ return self.CrosRunCommandGeneric(*args, **kwargs)[0]
+
+ def CrosRunCommandWOutput(self, *args, **kwargs):
+ """Run a command on a ChromeOS box.
+
+ Takes the same arguments as CrosRunCommandGeneric except for return_output.
+ Returns a triplet (returncode, stdout, stderr).
+ """
+ # Make sure that args does not overwrite 'return_output'
+ assert len(args) <= 1
+ assert "return_output" not in kwargs
+ kwargs["return_output"] = True
+ return self.CrosRunCommandGeneric(*args, **kwargs)
+
+ def ChrootRunCommandGeneric(
+ self,
+ chromeos_root,
+ command,
+ return_output=False,
+ command_terminator=None,
+ command_timeout=None,
+ terminated_timeout=10,
+ print_to_console=True,
+ cros_sdk_options="",
+ env=None,
+ ):
+ """Runs a command within the chroot.
+
+ Returns triplet (returncode, stdout, stderr).
+ """
+
+ if self.log_level != "verbose":
+ print_to_console = False
+
+ if self.logger:
+ self.logger.LogCmd(command, print_to_console=print_to_console)
+
+ with tempfile.NamedTemporaryFile(
+ "w",
+ encoding="utf-8",
+ delete=False,
+ dir=os.path.join(chromeos_root, "src/scripts"),
+ suffix=".sh",
+ prefix="in_chroot_cmd",
+ ) as f:
+ f.write("#!/bin/bash\n")
+ f.write(command)
+ f.write("\n")
+ f.flush()
+
+ command_file = f.name
+ os.chmod(command_file, 0o777)
+
+ # if return_output is set, run a test command first to make sure that
+ # the chroot already exists. We want the final returned output to skip
+ # the output from chroot creation steps.
+ if return_output:
+ ret = self.RunCommand(
+ "cd %s; cros_sdk %s -- true"
+ % (chromeos_root, cros_sdk_options),
+ env=env,
+ # Give this command a long time to execute; it might involve setting
+ # the chroot up, or running fstrim on its image file. Both of these
+ # operations can take well over the timeout default of 10 seconds.
+ terminated_timeout=5 * 60,
+ )
+ if ret:
+ return (ret, "", "")
+
+ # Run command_file inside the chroot, making sure that any "~" is expanded
+ # by the shell inside the chroot, not outside.
+ command = "cd %s; cros_sdk %s -- bash -c '%s/%s'" % (
+ chromeos_root,
+ cros_sdk_options,
+ CHROMEOS_SCRIPTS_DIR,
+ os.path.basename(command_file),
+ )
+ ret = self.RunCommandGeneric(
+ command,
+ return_output,
+ command_terminator=command_terminator,
+ command_timeout=command_timeout,
+ terminated_timeout=terminated_timeout,
+ print_to_console=print_to_console,
+ env=env,
+ )
+ os.remove(command_file)
+ return ret
+
+ def ChrootRunCommand(self, *args, **kwargs):
+ """Runs a command within the chroot.
+
+ Takes the same arguments as ChrootRunCommandGeneric except for
+ return_output.
+ Returns a single value returncode.
+ """
+ # Make sure that args does not overwrite 'return_output'
+ assert len(args) <= 2
+ assert "return_output" not in kwargs
+ kwargs["return_output"] = False
+ return self.ChrootRunCommandGeneric(*args, **kwargs)[0]
+
+ def ChrootRunCommandWOutput(self, *args, **kwargs):
+ """Runs a command within the chroot.
+
+ Takes the same arguments as ChrootRunCommandGeneric except for
+ return_output.
+ Returns a triplet (returncode, stdout, stderr).
+ """
+ # Make sure that args does not overwrite 'return_output'
+ assert len(args) <= 2
+ assert "return_output" not in kwargs
+ kwargs["return_output"] = True
+ return self.ChrootRunCommandGeneric(*args, **kwargs)
+
+ def RunCommands(
+ self, cmdlist, machine=None, username=None, command_terminator=None
+ ):
+ cmd = " ;\n".join(cmdlist)
+ return self.RunCommand(
+ cmd,
+ machine=machine,
+ username=username,
+ command_terminator=command_terminator,
+ )
+
+ def CopyFiles(
+ self,
+ src,
+ dest,
+ src_machine=None,
+ src_port=None,
+ dest_machine=None,
+ dest_port=None,
+ src_user=None,
+ dest_user=None,
+ recursive=True,
+ command_terminator=None,
+ chromeos_root=None,
+ src_cros=False,
+ dest_cros=False,
+ print_to_console=True,
+ ):
+ src = os.path.expanduser(src)
+ dest = os.path.expanduser(dest)
+
+ if recursive:
+ src = src + "/"
+ dest = dest + "/"
+
+ if src_cros or dest_cros:
if self.logger:
- self.logger.LogCommandError(err, print_to_console)
- if err == '':
- pipes.remove(p.stderr)
- my_poll.unregister(p.stderr)
+ self.logger.LogFatalIf(
+ src_cros == dest_cros,
+ "Only one of src_cros and desc_cros can " "be True.",
+ )
+ self.logger.LogFatalIf(
+ not chromeos_root, "chromeos_root not given!"
+ )
+ elif src_cros == dest_cros or not chromeos_root:
+ sys.exit(1)
+ if src_cros:
+ cros_machine = src_machine
+ cros_port = src_port
+ host_machine = dest_machine
+ host_user = dest_user
+ else:
+ cros_machine = dest_machine
+ cros_port = dest_port
+ host_machine = src_machine
+ host_user = src_user
- if p.poll() is not None:
- if terminated_time is None:
- terminated_time = time.time()
- elif (terminated_timeout is not None
- and time.time() - terminated_time > terminated_timeout):
- if self.logger:
- self.logger.LogWarning(
- 'Timeout of %s seconds reached since '
- 'process termination.' % terminated_timeout,
- print_to_console)
- break
+ command = self.RemoteAccessInitCommand(
+ chromeos_root, cros_machine, cros_port
+ )
+ ssh_command = (
+ "ssh -o StrictHostKeyChecking=no"
+ + " -o UserKnownHostsFile=$(mktemp)"
+ + " -i $TMP_PRIVATE_KEY"
+ )
+ if cros_port:
+ ssh_command += " -p %s" % cros_port
+ rsync_prefix = '\nrsync -r -e "%s" ' % ssh_command
+ if dest_cros:
+ command += rsync_prefix + "%s root@%s:%s" % (
+ src,
+ cros_machine,
+ dest,
+ )
+ else:
+ command += rsync_prefix + "root@%s:%s %s" % (
+ cros_machine,
+ src,
+ dest,
+ )
- if (command_timeout is not None
- and time.time() - started_time > command_timeout):
- os.killpg(os.getpgid(p.pid), signal.SIGTERM)
- if self.logger:
- self.logger.LogWarning(
- 'Timeout of %s seconds reached since process'
- 'started. Killed child process group.' % command_timeout,
- print_to_console)
- break
+ return self.RunCommand(
+ command,
+ machine=host_machine,
+ username=host_user,
+ command_terminator=command_terminator,
+ print_to_console=print_to_console,
+ )
- if out == err == '':
- break
-
- p.wait()
- if return_output:
- return (p.returncode, full_stdout, full_stderr)
- return (p.returncode, '', '')
- except BaseException as err:
- except_handler(p, err)
- raise
-
- def RunCommand(self, *args, **kwargs):
- """Run a command.
-
- Takes the same arguments as RunCommandGeneric except for return_output.
- Returns a single value returncode.
- """
- # Make sure that args does not overwrite 'return_output'
- assert len(args) <= 1
- assert 'return_output' not in kwargs
- kwargs['return_output'] = False
- return self.RunCommandGeneric(*args, **kwargs)[0]
-
- def RunCommandWExceptionCleanup(self, *args, **kwargs):
- """Run a command and kill process if exception is thrown.
-
- Takes the same arguments as RunCommandGeneric except for except_handler.
- Returns same as RunCommandGeneric.
- """
-
- def KillProc(proc, _):
- if proc:
- os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
-
- # Make sure that args does not overwrite 'except_handler'
- assert len(args) <= 8
- assert 'except_handler' not in kwargs
- kwargs['except_handler'] = KillProc
- return self.RunCommandGeneric(*args, **kwargs)
-
- def RunCommandWOutput(self, *args, **kwargs):
- """Run a command.
-
- Takes the same arguments as RunCommandGeneric except for return_output.
- Returns a triplet (returncode, stdout, stderr).
- """
- # Make sure that args does not overwrite 'return_output'
- assert len(args) <= 1
- assert 'return_output' not in kwargs
- kwargs['return_output'] = True
- return self.RunCommandGeneric(*args, **kwargs)
-
- def RemoteAccessInitCommand(self, chromeos_root, machine, port=None):
- command = ''
- command += '\nset -- --remote=' + machine
- if port:
- command += ' --ssh_port=' + port
- command += '\n. ' + chromeos_root + '/src/scripts/common.sh'
- command += '\n. ' + chromeos_root + '/src/scripts/remote_access.sh'
- command += '\nTMP=$(mktemp -d)'
- command += '\nFLAGS "$@" || exit 1'
- command += '\nremote_access_init'
- return command
-
- def WriteToTempShFile(self, contents):
- with tempfile.NamedTemporaryFile('w',
- encoding='utf-8',
- delete=False,
- prefix=os.uname()[1],
- suffix='.sh') as f:
- f.write('#!/bin/bash\n')
- f.write(contents)
- f.flush()
- return f.name
-
- def CrosLearnBoard(self, chromeos_root, machine):
- command = self.RemoteAccessInitCommand(chromeos_root, machine)
- command += '\nlearn_board'
- command += '\necho ${FLAGS_board}'
- retval, output, _ = self.RunCommandWOutput(command)
- if self.logger:
- self.logger.LogFatalIf(retval, 'learn_board command failed')
- elif retval:
- sys.exit(1)
- return output.split()[-1]
-
- def CrosRunCommandGeneric(self,
- cmd,
- return_output=False,
- machine=None,
- command_terminator=None,
- chromeos_root=None,
- command_timeout=None,
- terminated_timeout=10,
- print_to_console=True):
- """Run a command on a ChromeOS box.
-
- Returns triplet (returncode, stdout, stderr).
- """
-
- if self.log_level != 'verbose':
- print_to_console = False
-
- if self.logger:
- self.logger.LogCmd(cmd, print_to_console=print_to_console)
- self.logger.LogFatalIf(not machine, 'No machine provided!')
- self.logger.LogFatalIf(not chromeos_root, 'chromeos_root not given!')
- else:
- if not chromeos_root or not machine:
- sys.exit(1)
- chromeos_root = os.path.expanduser(chromeos_root)
-
- port = None
- if ':' in machine:
- machine, port = machine.split(':')
- # Write all commands to a file.
- command_file = self.WriteToTempShFile(cmd)
- retval = self.CopyFiles(command_file,
- command_file,
- dest_machine=machine,
- dest_port=port,
- command_terminator=command_terminator,
- chromeos_root=chromeos_root,
- dest_cros=True,
- recursive=False,
- print_to_console=print_to_console)
- if retval:
- if self.logger:
- self.logger.LogError('Could not run remote command on machine.'
- ' Is the machine up?')
- return (retval, '', '')
-
- command = self.RemoteAccessInitCommand(chromeos_root, machine, port)
- command += '\nremote_sh bash %s' % command_file
- command += '\nl_retval=$?; echo "$REMOTE_OUT"; exit $l_retval'
- retval = self.RunCommandGeneric(command,
- return_output,
- command_terminator=command_terminator,
- command_timeout=command_timeout,
- terminated_timeout=terminated_timeout,
- print_to_console=print_to_console)
- if return_output:
- connect_signature = ('Initiating first contact with remote host\n' +
- 'Connection OK\n')
- connect_signature_re = re.compile(connect_signature)
- modded_retval = list(retval)
- modded_retval[1] = connect_signature_re.sub('', retval[1])
- return modded_retval
- return retval
-
- def CrosRunCommand(self, *args, **kwargs):
- """Run a command on a ChromeOS box.
-
- Takes the same arguments as CrosRunCommandGeneric except for return_output.
- Returns a single value returncode.
- """
- # Make sure that args does not overwrite 'return_output'
- assert len(args) <= 1
- assert 'return_output' not in kwargs
- kwargs['return_output'] = False
- return self.CrosRunCommandGeneric(*args, **kwargs)[0]
-
- def CrosRunCommandWOutput(self, *args, **kwargs):
- """Run a command on a ChromeOS box.
-
- Takes the same arguments as CrosRunCommandGeneric except for return_output.
- Returns a triplet (returncode, stdout, stderr).
- """
- # Make sure that args does not overwrite 'return_output'
- assert len(args) <= 1
- assert 'return_output' not in kwargs
- kwargs['return_output'] = True
- return self.CrosRunCommandGeneric(*args, **kwargs)
-
- def ChrootRunCommandGeneric(self,
- chromeos_root,
- command,
- return_output=False,
- command_terminator=None,
- command_timeout=None,
- terminated_timeout=10,
- print_to_console=True,
- cros_sdk_options='',
- env=None):
- """Runs a command within the chroot.
-
- Returns triplet (returncode, stdout, stderr).
- """
-
- if self.log_level != 'verbose':
- print_to_console = False
-
- if self.logger:
- self.logger.LogCmd(command, print_to_console=print_to_console)
-
- with tempfile.NamedTemporaryFile('w',
- encoding='utf-8',
- delete=False,
- dir=os.path.join(chromeos_root,
- 'src/scripts'),
- suffix='.sh',
- prefix='in_chroot_cmd') as f:
- f.write('#!/bin/bash\n')
- f.write(command)
- f.write('\n')
- f.flush()
-
- command_file = f.name
- os.chmod(command_file, 0o777)
-
- # if return_output is set, run a test command first to make sure that
- # the chroot already exists. We want the final returned output to skip
- # the output from chroot creation steps.
- if return_output:
- ret = self.RunCommand(
- 'cd %s; cros_sdk %s -- true' % (chromeos_root, cros_sdk_options),
- env=env,
- # Give this command a long time to execute; it might involve setting
- # the chroot up, or running fstrim on its image file. Both of these
- # operations can take well over the timeout default of 10 seconds.
- terminated_timeout=5 * 60)
- if ret:
- return (ret, '', '')
-
- # Run command_file inside the chroot, making sure that any "~" is expanded
- # by the shell inside the chroot, not outside.
- command = ("cd %s; cros_sdk %s -- bash -c '%s/%s'" %
- (chromeos_root, cros_sdk_options, CHROMEOS_SCRIPTS_DIR,
- os.path.basename(command_file)))
- ret = self.RunCommandGeneric(command,
- return_output,
- command_terminator=command_terminator,
- command_timeout=command_timeout,
- terminated_timeout=terminated_timeout,
- print_to_console=print_to_console,
- env=env)
- os.remove(command_file)
- return ret
-
- def ChrootRunCommand(self, *args, **kwargs):
- """Runs a command within the chroot.
-
- Takes the same arguments as ChrootRunCommandGeneric except for
- return_output.
- Returns a single value returncode.
- """
- # Make sure that args does not overwrite 'return_output'
- assert len(args) <= 2
- assert 'return_output' not in kwargs
- kwargs['return_output'] = False
- return self.ChrootRunCommandGeneric(*args, **kwargs)[0]
-
- def ChrootRunCommandWOutput(self, *args, **kwargs):
- """Runs a command within the chroot.
-
- Takes the same arguments as ChrootRunCommandGeneric except for
- return_output.
- Returns a triplet (returncode, stdout, stderr).
- """
- # Make sure that args does not overwrite 'return_output'
- assert len(args) <= 2
- assert 'return_output' not in kwargs
- kwargs['return_output'] = True
- return self.ChrootRunCommandGeneric(*args, **kwargs)
-
- def RunCommands(self,
- cmdlist,
- machine=None,
- username=None,
- command_terminator=None):
- cmd = ' ;\n'.join(cmdlist)
- return self.RunCommand(cmd,
- machine=machine,
- username=username,
- command_terminator=command_terminator)
-
- def CopyFiles(self,
+ if dest_machine == src_machine:
+ command = "rsync -a %s %s" % (src, dest)
+ else:
+ if src_machine is None:
+ src_machine = os.uname()[1]
+ src_user = getpass.getuser()
+ command = "rsync -a %s@%s:%s %s" % (
+ src_user,
+ src_machine,
src,
dest,
- src_machine=None,
- src_port=None,
- dest_machine=None,
- dest_port=None,
- src_user=None,
- dest_user=None,
- recursive=True,
- command_terminator=None,
- chromeos_root=None,
- src_cros=False,
- dest_cros=False,
- print_to_console=True):
- src = os.path.expanduser(src)
- dest = os.path.expanduser(dest)
+ )
+ return self.RunCommand(
+ command,
+ machine=dest_machine,
+ username=dest_user,
+ command_terminator=command_terminator,
+ print_to_console=print_to_console,
+ )
- if recursive:
- src = src + '/'
- dest = dest + '/'
+ def RunCommand2(
+ self,
+ cmd,
+ cwd=None,
+ line_consumer=None,
+ timeout=None,
+ shell=True,
+ join_stderr=True,
+ env=None,
+ except_handler=lambda p, e: None,
+ ):
+ """Run the command with an extra feature line_consumer.
- if src_cros or dest_cros:
- if self.logger:
- self.logger.LogFatalIf(
- src_cros == dest_cros, 'Only one of src_cros and desc_cros can '
- 'be True.')
- self.logger.LogFatalIf(not chromeos_root, 'chromeos_root not given!')
- elif src_cros == dest_cros or not chromeos_root:
- sys.exit(1)
- if src_cros:
- cros_machine = src_machine
- cros_port = src_port
- host_machine = dest_machine
- host_user = dest_user
- else:
- cros_machine = dest_machine
- cros_port = dest_port
- host_machine = src_machine
- host_user = src_user
+ This version allow developers to provide a line_consumer which will be
+ fed execution output lines.
- command = self.RemoteAccessInitCommand(chromeos_root, cros_machine,
- cros_port)
- ssh_command = ('ssh -o StrictHostKeyChecking=no' +
- ' -o UserKnownHostsFile=$(mktemp)' +
- ' -i $TMP_PRIVATE_KEY')
- if cros_port:
- ssh_command += ' -p %s' % cros_port
- rsync_prefix = '\nrsync -r -e "%s" ' % ssh_command
- if dest_cros:
- command += rsync_prefix + '%s root@%s:%s' % (src, cros_machine, dest)
- else:
- command += rsync_prefix + 'root@%s:%s %s' % (cros_machine, src, dest)
+ A line_consumer is a callback, which is given a chance to run for each
+ line the execution outputs (either to stdout or stderr). The
+ line_consumer must accept one and exactly one dict argument, the dict
+ argument has these items -
+ 'line' - The line output by the binary. Notice, this string includes
+ the trailing '\n'.
+ 'output' - Whether this is a stdout or stderr output, values are either
+ 'stdout' or 'stderr'. When join_stderr is True, this value
+ will always be 'output'.
+ 'pobject' - The object used to control execution, for example, call
+ pobject.kill().
- return self.RunCommand(command,
- machine=host_machine,
- username=host_user,
- command_terminator=command_terminator,
- print_to_console=print_to_console)
+ Note: As this is written, the stdin for the process executed is
+ not associated with the stdin of the caller of this routine.
- if dest_machine == src_machine:
- command = 'rsync -a %s %s' % (src, dest)
- else:
- if src_machine is None:
- src_machine = os.uname()[1]
- src_user = getpass.getuser()
- command = 'rsync -a %s@%s:%s %s' % (src_user, src_machine, src, dest)
- return self.RunCommand(command,
- machine=dest_machine,
- username=dest_user,
- command_terminator=command_terminator,
- print_to_console=print_to_console)
+ Args:
+ cmd: Command in a single string.
+ cwd: Working directory for execution.
+ line_consumer: A function that will ba called by this function. See above
+ for details.
+ timeout: terminate command after this timeout.
+ shell: Whether to use a shell for execution.
+ join_stderr: Whether join stderr to stdout stream.
+ env: Execution environment.
+ except_handler: Callback for when exception is thrown during command
+ execution. Passed process object and exception.
- def RunCommand2(self,
- cmd,
- cwd=None,
- line_consumer=None,
- timeout=None,
- shell=True,
- join_stderr=True,
- env=None,
- except_handler=lambda p, e: None):
- """Run the command with an extra feature line_consumer.
+ Returns:
+ Execution return code.
- This version allow developers to provide a line_consumer which will be
- fed execution output lines.
+ Raises:
+ child_exception: if fails to start the command process (missing
+ permission, no such file, etc)
+ """
- A line_consumer is a callback, which is given a chance to run for each
- line the execution outputs (either to stdout or stderr). The
- line_consumer must accept one and exactly one dict argument, the dict
- argument has these items -
- 'line' - The line output by the binary. Notice, this string includes
- the trailing '\n'.
- 'output' - Whether this is a stdout or stderr output, values are either
- 'stdout' or 'stderr'. When join_stderr is True, this value
- will always be 'output'.
- 'pobject' - The object used to control execution, for example, call
- pobject.kill().
+ class StreamHandler(object):
+ """Internal utility class."""
- Note: As this is written, the stdin for the process executed is
- not associated with the stdin of the caller of this routine.
+ def __init__(self, pobject, fd, name, line_consumer):
+ self._pobject = pobject
+ self._fd = fd
+ self._name = name
+ self._buf = ""
+ self._line_consumer = line_consumer
- Args:
- cmd: Command in a single string.
- cwd: Working directory for execution.
- line_consumer: A function that will ba called by this function. See above
- for details.
- timeout: terminate command after this timeout.
- shell: Whether to use a shell for execution.
- join_stderr: Whether join stderr to stdout stream.
- env: Execution environment.
- except_handler: Callback for when exception is thrown during command
- execution. Passed process object and exception.
+ def read_and_notify_line(self):
+ t = os.read(fd, 1024)
+ self._buf = self._buf + t
+ self.notify_line()
- Returns:
- Execution return code.
+ def notify_line(self):
+ p = self._buf.find("\n")
+ while p >= 0:
+ self._line_consumer(
+ line=self._buf[: p + 1],
+ output=self._name,
+ pobject=self._pobject,
+ )
+ if p < len(self._buf) - 1:
+ self._buf = self._buf[p + 1 :]
+ p = self._buf.find("\n")
+ else:
+ self._buf = ""
+ p = -1
+ break
- Raises:
- child_exception: if fails to start the command process (missing
- permission, no such file, etc)
- """
+ def notify_eos(self):
+ # Notify end of stream. The last line may not end with a '\n'.
+ if self._buf != "":
+ self._line_consumer(
+ line=self._buf, output=self._name, pobject=self._pobject
+ )
+ self._buf = ""
- class StreamHandler(object):
- """Internal utility class."""
+ if self.log_level == "verbose":
+ self.logger.LogCmd(cmd)
+ elif self.logger:
+ self.logger.LogCmdToFileOnly(cmd)
- def __init__(self, pobject, fd, name, line_consumer):
- self._pobject = pobject
- self._fd = fd
- self._name = name
- self._buf = ''
- self._line_consumer = line_consumer
+ # We use setsid so that the child will have a different session id
+ # and we can easily kill the process group. This is also important
+ # because the child will be disassociated from the parent terminal.
+ # In this way the child cannot mess the parent's terminal.
+ pobject = None
+ try:
+ # pylint: disable=bad-option-value, subprocess-popen-preexec-fn
+ pobject = subprocess.Popen(
+ cmd,
+ cwd=cwd,
+ bufsize=1024,
+ env=env,
+ shell=shell,
+ universal_newlines=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT if join_stderr else subprocess.PIPE,
+ preexec_fn=os.setsid,
+ )
- def read_and_notify_line(self):
- t = os.read(fd, 1024)
- self._buf = self._buf + t
- self.notify_line()
+ # We provide a default line_consumer
+ if line_consumer is None:
+ line_consumer = lambda **d: None
+ start_time = time.time()
+ poll = select.poll()
+ outfd = pobject.stdout.fileno()
+ poll.register(outfd, select.POLLIN | select.POLLPRI)
+ handlermap = {
+ outfd: StreamHandler(pobject, outfd, "stdout", line_consumer)
+ }
+ if not join_stderr:
+ errfd = pobject.stderr.fileno()
+ poll.register(errfd, select.POLLIN | select.POLLPRI)
+ handlermap[errfd] = StreamHandler(
+ pobject, errfd, "stderr", line_consumer
+ )
+ while handlermap:
+ readables = poll.poll(300)
+ for (fd, evt) in readables:
+ handler = handlermap[fd]
+ if evt & (select.POLLPRI | select.POLLIN):
+ handler.read_and_notify_line()
+ elif evt & (
+ select.POLLHUP | select.POLLERR | select.POLLNVAL
+ ):
+ handler.notify_eos()
+ poll.unregister(fd)
+ del handlermap[fd]
- def notify_line(self):
- p = self._buf.find('\n')
- while p >= 0:
- self._line_consumer(line=self._buf[:p + 1],
- output=self._name,
- pobject=self._pobject)
- if p < len(self._buf) - 1:
- self._buf = self._buf[p + 1:]
- p = self._buf.find('\n')
- else:
- self._buf = ''
- p = -1
- break
+ if timeout is not None and (time.time() - start_time > timeout):
+ os.killpg(os.getpgid(pobject.pid), signal.SIGTERM)
- def notify_eos(self):
- # Notify end of stream. The last line may not end with a '\n'.
- if self._buf != '':
- self._line_consumer(line=self._buf,
- output=self._name,
- pobject=self._pobject)
- self._buf = ''
-
- if self.log_level == 'verbose':
- self.logger.LogCmd(cmd)
- elif self.logger:
- self.logger.LogCmdToFileOnly(cmd)
-
- # We use setsid so that the child will have a different session id
- # and we can easily kill the process group. This is also important
- # because the child will be disassociated from the parent terminal.
- # In this way the child cannot mess the parent's terminal.
- pobject = None
- try:
- # pylint: disable=bad-option-value, subprocess-popen-preexec-fn
- pobject = subprocess.Popen(
- cmd,
- cwd=cwd,
- bufsize=1024,
- env=env,
- shell=shell,
- universal_newlines=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT if join_stderr else subprocess.PIPE,
- preexec_fn=os.setsid)
-
- # We provide a default line_consumer
- if line_consumer is None:
- line_consumer = lambda **d: None
- start_time = time.time()
- poll = select.poll()
- outfd = pobject.stdout.fileno()
- poll.register(outfd, select.POLLIN | select.POLLPRI)
- handlermap = {
- outfd: StreamHandler(pobject, outfd, 'stdout', line_consumer)
- }
- if not join_stderr:
- errfd = pobject.stderr.fileno()
- poll.register(errfd, select.POLLIN | select.POLLPRI)
- handlermap[errfd] = StreamHandler(pobject, errfd, 'stderr',
- line_consumer)
- while handlermap:
- readables = poll.poll(300)
- for (fd, evt) in readables:
- handler = handlermap[fd]
- if evt & (select.POLLPRI | select.POLLIN):
- handler.read_and_notify_line()
- elif evt & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
- handler.notify_eos()
- poll.unregister(fd)
- del handlermap[fd]
-
- if timeout is not None and (time.time() - start_time > timeout):
- os.killpg(os.getpgid(pobject.pid), signal.SIGTERM)
-
- return pobject.wait()
- except BaseException as err:
- except_handler(pobject, err)
- raise
+ return pobject.wait()
+ except BaseException as err:
+ except_handler(pobject, err)
+ raise
class MockCommandExecuter(CommandExecuter):
- """Mock class for class CommandExecuter."""
+ """Mock class for class CommandExecuter."""
- def RunCommandGeneric(self,
- cmd,
- return_output=False,
- machine=None,
- username=None,
- command_terminator=None,
- command_timeout=None,
- terminated_timeout=10,
- print_to_console=True,
- env=None,
- except_handler=lambda p, e: None):
- assert not command_timeout
- cmd = str(cmd)
- if machine is None:
- machine = 'localhost'
- if username is None:
- username = 'current'
- logger.GetLogger().LogCmd('(Mock) ' + cmd, machine, username,
- print_to_console)
- return (0, '', '')
+ def RunCommandGeneric(
+ self,
+ cmd,
+ return_output=False,
+ machine=None,
+ username=None,
+ command_terminator=None,
+ command_timeout=None,
+ terminated_timeout=10,
+ print_to_console=True,
+ env=None,
+ except_handler=lambda p, e: None,
+ ):
+ assert not command_timeout
+ cmd = str(cmd)
+ if machine is None:
+ machine = "localhost"
+ if username is None:
+ username = "current"
+ logger.GetLogger().LogCmd(
+ "(Mock) " + cmd, machine, username, print_to_console
+ )
+ return (0, "", "")
- def RunCommand(self, *args, **kwargs):
- assert 'return_output' not in kwargs
- kwargs['return_output'] = False
- return self.RunCommandGeneric(*args, **kwargs)[0]
+ def RunCommand(self, *args, **kwargs):
+ assert "return_output" not in kwargs
+ kwargs["return_output"] = False
+ return self.RunCommandGeneric(*args, **kwargs)[0]
- def RunCommandWOutput(self, *args, **kwargs):
- assert 'return_output' not in kwargs
- kwargs['return_output'] = True
- return self.RunCommandGeneric(*args, **kwargs)
+ def RunCommandWOutput(self, *args, **kwargs):
+ assert "return_output" not in kwargs
+ kwargs["return_output"] = True
+ return self.RunCommandGeneric(*args, **kwargs)
class CommandTerminator(object):
- """Object to request termination of a command in execution."""
+ """Object to request termination of a command in execution."""
- def __init__(self):
- self.terminated = False
+ def __init__(self):
+ self.terminated = False
- def Terminate(self):
- self.terminated = True
+ def Terminate(self):
+ self.terminated = True
- def IsTerminated(self):
- return self.terminated
+ def IsTerminated(self):
+ return self.terminated
diff --git a/cros_utils/command_executer_timeout_test.py b/cros_utils/command_executer_timeout_test.py
index 1c9c74c..3af9bd3 100755
--- a/cros_utils/command_executer_timeout_test.py
+++ b/cros_utils/command_executer_timeout_test.py
@@ -1,15 +1,14 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Timeout test for command_executer."""
-from __future__ import print_function
-__author__ = 'asharif@google.com (Ahmad Sharif)'
+__author__ = "asharif@google.com (Ahmad Sharif)"
import argparse
import sys
@@ -18,20 +17,20 @@
def Usage(parser, message):
- print('ERROR: %s' % message)
- parser.print_help()
- sys.exit(0)
+ print("ERROR: %s" % message)
+ parser.print_help()
+ sys.exit(0)
def Main(argv):
- parser = argparse.ArgumentParser()
- _ = parser.parse_args(argv)
+ parser = argparse.ArgumentParser()
+ _ = parser.parse_args(argv)
- command = 'sleep 1000'
- ce = command_executer.GetCommandExecuter()
- ce.RunCommand(command, command_timeout=1)
- return 0
+ command = "sleep 1000"
+ ce = command_executer.GetCommandExecuter()
+ ce.RunCommand(command, command_timeout=1)
+ return 0
-if __name__ == '__main__':
- Main(sys.argv[1:])
+if __name__ == "__main__":
+ Main(sys.argv[1:])
diff --git a/cros_utils/command_executer_unittest.py b/cros_utils/command_executer_unittest.py
index 22331ae..7cd46a7 100755
--- a/cros_utils/command_executer_unittest.py
+++ b/cros_utils/command_executer_unittest.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for command_executer.py."""
-from __future__ import print_function
import time
import unittest
@@ -15,18 +14,20 @@
class CommandExecuterTest(unittest.TestCase):
- """Test for CommandExecuter class."""
+ """Test for CommandExecuter class."""
- def testTimeout(self):
- timeout = 1
- logging_level = 'average'
- ce = command_executer.CommandExecuter(logging_level)
- start = time.time()
- command = 'sleep 20'
- ce.RunCommand(command, command_timeout=timeout, terminated_timeout=timeout)
- end = time.time()
- self.assertTrue(round(end - start) == timeout)
+ def testTimeout(self):
+ timeout = 1
+ logging_level = "average"
+ ce = command_executer.CommandExecuter(logging_level)
+ start = time.time()
+ command = "sleep 20"
+ ce.RunCommand(
+ command, command_timeout=timeout, terminated_timeout=timeout
+ )
+ end = time.time()
+ self.assertTrue(round(end - start) == timeout)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/cros_utils/constants.py b/cros_utils/constants.py
index b12175b..47c1668 100644
--- a/cros_utils/constants.py
+++ b/cros_utils/constants.py
@@ -1,14 +1,14 @@
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generic constants used accross modules.
"""
-__author__ = 'shenhan@google.com (Han Shen)'
+__author__ = "shenhan@google.com (Han Shen)"
-MOUNTED_TOOLCHAIN_ROOT = '/usr/local/toolchain_root'
+MOUNTED_TOOLCHAIN_ROOT = "/usr/local/toolchain_root"
# Root directory for night testing run.
-CROSTC_WORKSPACE = '/usr/local/google/crostc'
+CROSTC_WORKSPACE = "/usr/local/google/crostc"
diff --git a/cros_utils/device_setup_utils.py b/cros_utils/device_setup_utils.py
index 61dbba2..443c647 100644
--- a/cros_utils/device_setup_utils.py
+++ b/cros_utils/device_setup_utils.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -9,529 +9,607 @@
This script provides utils to set device specs.
"""
-from __future__ import division
-from __future__ import print_function
-__author__ = 'zhizhouy@google.com (Zhizhou Yang)'
-
-import re
-import time
+__author__ = "zhizhouy@google.com (Zhizhou Yang)"
from contextlib import contextmanager
+import re
+import time
from cros_utils import command_executer
class DutWrapper(object):
- """Wrap DUT parameters inside."""
+ """Wrap DUT parameters inside."""
- def __init__(self,
- chromeos_root,
- remote,
- log_level='verbose',
- logger=None,
- ce=None,
- dut_config=None):
- self.chromeos_root = chromeos_root
- self.remote = remote
- self.log_level = log_level
- self.logger = logger
- self.ce = ce or command_executer.GetCommandExecuter(log_level=log_level)
- self.dut_config = dut_config
+ def __init__(
+ self,
+ chromeos_root,
+ remote,
+ log_level="verbose",
+ logger=None,
+ ce=None,
+ dut_config=None,
+ ):
+ self.chromeos_root = chromeos_root
+ self.remote = remote
+ self.log_level = log_level
+ self.logger = logger
+ self.ce = ce or command_executer.GetCommandExecuter(log_level=log_level)
+ self.dut_config = dut_config
- def RunCommandOnDut(self, command, ignore_status=False):
- """Helper function to run command on DUT."""
- ret, msg, err_msg = self.ce.CrosRunCommandWOutput(
- command, machine=self.remote, chromeos_root=self.chromeos_root)
+ def RunCommandOnDut(self, command, ignore_status=False):
+ """Helper function to run command on DUT."""
+ ret, msg, err_msg = self.ce.CrosRunCommandWOutput(
+ command, machine=self.remote, chromeos_root=self.chromeos_root
+ )
- if ret:
- err_msg = ('Command execution on DUT %s failed.\n'
- 'Failing command: %s\n'
- 'returned %d\n'
- 'Error message: %s' % (self.remote, command, ret, err_msg))
- if ignore_status:
- self.logger.LogError(err_msg +
- '\n(Failure is considered non-fatal. Continue.)')
- else:
- self.logger.LogFatal(err_msg)
-
- return ret, msg, err_msg
-
- def DisableASLR(self):
- """Disable ASLR on DUT."""
- disable_aslr = ('set -e; '
- 'if [[ -e /proc/sys/kernel/randomize_va_space ]]; then '
- ' echo 0 > /proc/sys/kernel/randomize_va_space; '
- 'fi')
- if self.log_level == 'average':
- self.logger.LogOutput('Disable ASLR.')
- self.RunCommandOnDut(disable_aslr)
-
- def SetCpuGovernor(self, governor, ignore_status=False):
- """Setup CPU Governor on DUT."""
- set_gov_cmd = (
- 'for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do '
- # Skip writing scaling_governor if cpu is offline.
- ' [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} '
- ' && continue; '
- ' cd $f; '
- ' if [[ -e scaling_governor ]]; then '
- ' echo %s > scaling_governor; fi; '
- 'done; ')
- if self.log_level == 'average':
- self.logger.LogOutput('Setup CPU Governor: %s.' % governor)
- ret, _, _ = self.RunCommandOnDut(
- set_gov_cmd % governor, ignore_status=ignore_status)
- return ret
-
- def DisableTurbo(self):
- """Disable Turbo on DUT."""
- dis_turbo_cmd = (
- 'if [[ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]]; then '
- ' if grep -q 0 /sys/devices/system/cpu/intel_pstate/no_turbo; then '
- ' echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; '
- ' fi; '
- 'fi; ')
- if self.log_level == 'average':
- self.logger.LogOutput('Disable Turbo.')
- self.RunCommandOnDut(dis_turbo_cmd)
-
- def SetupCpuUsage(self):
- """Setup CPU usage.
-
- Based on self.dut_config['cpu_usage'] configure CPU cores
- utilization.
- """
-
- if (self.dut_config['cpu_usage'] == 'big_only' or
- self.dut_config['cpu_usage'] == 'little_only'):
- _, arch, _ = self.RunCommandOnDut('uname -m')
-
- if arch.lower().startswith('arm') or arch.lower().startswith('aarch64'):
- self.SetupArmCores()
-
- def SetupArmCores(self):
- """Setup ARM big/little cores."""
-
- # CPU implemeters/part numbers of big/LITTLE CPU.
- # Format: dict(CPU implementer: set(CPU part numbers))
- LITTLE_CORES = {
- '0x41': {
- '0xd01', # Cortex A32
- '0xd03', # Cortex A53
- '0xd04', # Cortex A35
- '0xd05', # Cortex A55
- },
- }
- BIG_CORES = {
- '0x41': {
- '0xd07', # Cortex A57
- '0xd08', # Cortex A72
- '0xd09', # Cortex A73
- '0xd0a', # Cortex A75
- '0xd0b', # Cortex A76
- },
- }
-
- # Values of CPU Implementer and CPU part number are exposed by cpuinfo.
- # Format:
- # =================
- # processor : 0
- # model name : ARMv8 Processor rev 4 (v8l)
- # BogoMIPS : 48.00
- # Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4
- # CPU implementer : 0x41
- # CPU architecture: 8
- # CPU variant : 0x0
- # CPU part : 0xd03
- # CPU revision : 4
-
- _, cpuinfo, _ = self.RunCommandOnDut('cat /proc/cpuinfo')
-
- # List of all CPU cores: 0, 1, ..
- proc_matches = re.findall(r'^processor\s*: (\d+)$', cpuinfo, re.MULTILINE)
- # List of all corresponding CPU implementers
- impl_matches = re.findall(r'^CPU implementer\s*: (0x[\da-f]+)$', cpuinfo,
- re.MULTILINE)
- # List of all corresponding CPU part numbers
- part_matches = re.findall(r'^CPU part\s*: (0x[\da-f]+)$', cpuinfo,
- re.MULTILINE)
- assert len(proc_matches) == len(impl_matches)
- assert len(part_matches) == len(impl_matches)
-
- all_cores = set(proc_matches)
- dut_big_cores = {
- core
- for core, impl, part in zip(proc_matches, impl_matches, part_matches)
- if impl in BIG_CORES and part in BIG_CORES[impl]
- }
- dut_lit_cores = {
- core
- for core, impl, part in zip(proc_matches, impl_matches, part_matches)
- if impl in LITTLE_CORES and part in LITTLE_CORES[impl]
- }
-
- if self.dut_config['cpu_usage'] == 'big_only':
- cores_to_enable = dut_big_cores
- cores_to_disable = all_cores - dut_big_cores
- elif self.dut_config['cpu_usage'] == 'little_only':
- cores_to_enable = dut_lit_cores
- cores_to_disable = all_cores - dut_lit_cores
- else:
- self.logger.LogError(
- 'cpu_usage=%s is not supported on ARM.\n'
- 'Ignore ARM CPU setup and continue.' % self.dut_config['cpu_usage'])
- return
-
- if cores_to_enable:
- cmd_enable_cores = ('echo 1 | tee /sys/devices/system/cpu/cpu{%s}/online'
- % ','.join(sorted(cores_to_enable)))
-
- cmd_disable_cores = ''
- if cores_to_disable:
- cmd_disable_cores = (
- 'echo 0 | tee /sys/devices/system/cpu/cpu{%s}/online' % ','.join(
- sorted(cores_to_disable)))
-
- self.RunCommandOnDut('; '.join([cmd_enable_cores, cmd_disable_cores]))
- else:
- # If there are no cores enabled by dut_config then configuration
- # is invalid for current platform and should be ignored.
- self.logger.LogError(
- '"cpu_usage" is invalid for targeted platform.\n'
- 'dut_config[cpu_usage]=%s\n'
- 'dut big cores: %s\n'
- 'dut little cores: %s\n'
- 'Ignore ARM CPU setup and continue.' % (self.dut_config['cpu_usage'],
- dut_big_cores, dut_lit_cores))
-
- def GetCpuOnline(self):
- """Get online status of CPU cores.
-
- Return dict of {int(cpu_num): <0|1>}.
- """
- get_cpu_online_cmd = ('paste -d" "'
- ' <(ls /sys/devices/system/cpu/cpu*/online)'
- ' <(cat /sys/devices/system/cpu/cpu*/online)')
- _, online_output_str, _ = self.RunCommandOnDut(get_cpu_online_cmd)
-
- # Here is the output we expect to see:
- # -----------------
- # /sys/devices/system/cpu/cpu0/online 0
- # /sys/devices/system/cpu/cpu1/online 1
-
- cpu_online = {}
- cpu_online_match = re.compile(r'^[/\S]+/cpu(\d+)/[/\S]+\s+(\d+)$')
- for line in online_output_str.splitlines():
- match = cpu_online_match.match(line)
- if match:
- cpu = int(match.group(1))
- status = int(match.group(2))
- cpu_online[cpu] = status
- # At least one CPU has to be online.
- assert cpu_online
-
- return cpu_online
-
- def SetupCpuFreq(self, online_cores):
- """Setup CPU frequency.
-
- Based on self.dut_config['cpu_freq_pct'] setup frequency of online CPU cores
- to a supported value which is less or equal to (freq_pct * max_freq / 100)
- limited by min_freq.
-
- NOTE: scaling_available_frequencies support is required.
- Otherwise the function has no effect.
- """
- freq_percent = self.dut_config['cpu_freq_pct']
- list_all_avail_freq_cmd = ('ls /sys/devices/system/cpu/cpu{%s}/cpufreq/'
- 'scaling_available_frequencies')
- # Ignore error to support general usage of frequency setup.
- # Not all platforms support scaling_available_frequencies.
- ret, all_avail_freq_str, _ = self.RunCommandOnDut(
- list_all_avail_freq_cmd % ','.join(str(core) for core in online_cores),
- ignore_status=True)
- if ret or not all_avail_freq_str:
- # No scalable frequencies available for the core.
- return ret
- for avail_freq_path in all_avail_freq_str.split():
- # Get available freq from every scaling_available_frequency path.
- # Error is considered fatal in self.RunCommandOnDut().
- _, avail_freq_str, _ = self.RunCommandOnDut('cat ' + avail_freq_path)
- assert avail_freq_str
-
- all_avail_freq = sorted(
- int(freq_str) for freq_str in avail_freq_str.split())
- min_freq = all_avail_freq[0]
- max_freq = all_avail_freq[-1]
- # Calculate the frequency we are targeting.
- target_freq = round(max_freq * freq_percent / 100)
- # More likely it's not in the list of supported frequencies
- # and our goal is to find the one which is less or equal.
- # Default is min and we will try to maximize it.
- avail_ngt_target = min_freq
- # Find the largest not greater than the target.
- for next_largest in reversed(all_avail_freq):
- if next_largest <= target_freq:
- avail_ngt_target = next_largest
- break
-
- max_freq_path = avail_freq_path.replace('scaling_available_frequencies',
- 'scaling_max_freq')
- min_freq_path = avail_freq_path.replace('scaling_available_frequencies',
- 'scaling_min_freq')
- # With default ignore_status=False we expect 0 status or Fatal error.
- self.RunCommandOnDut('echo %s | tee %s %s' %
- (avail_ngt_target, max_freq_path, min_freq_path))
-
- def WaitCooldown(self):
- """Wait for DUT to cool down to certain temperature."""
- waittime = 0
- timeout_in_sec = int(self.dut_config['cooldown_time']) * 60
- # Temperature from sensors come in uCelsius units.
- temp_in_ucels = int(self.dut_config['cooldown_temp']) * 1000
- sleep_interval = 30
-
- # Wait until any of two events occurs:
- # 1. CPU cools down to a specified temperature.
- # 2. Timeout cooldown_time expires.
- # For the case when targeted temperature is not reached within specified
- # timeout the benchmark is going to start with higher initial CPU temp.
- # In the worst case it may affect test results but at the same time we
- # guarantee the upper bound of waiting time.
- # TODO(denik): Report (or highlight) "high" CPU temperature in test results.
- # "high" should be calculated based on empirical data per platform.
- # Based on such reports we can adjust CPU configuration or
- # cooldown limits accordingly.
- while waittime < timeout_in_sec:
- _, temp_output, _ = self.RunCommandOnDut(
- 'cat /sys/class/thermal/thermal_zone*/temp', ignore_status=True)
- if any(int(temp) > temp_in_ucels for temp in temp_output.split()):
- time.sleep(sleep_interval)
- waittime += sleep_interval
- else:
- # Exit the loop when:
- # 1. Reported temp numbers from all thermal sensors do not exceed
- # 'cooldown_temp' or
- # 2. No data from the sensors.
- break
-
- self.logger.LogOutput('Cooldown wait time: %.1f min' % (waittime / 60))
- return waittime
-
- def DecreaseWaitTime(self):
- """Change the ten seconds wait time for pagecycler to two seconds."""
- FILE = '/usr/local/telemetry/src/tools/perf/page_sets/page_cycler_story.py'
- ret = self.RunCommandOnDut('ls ' + FILE)
-
- if not ret:
- sed_command = 'sed -i "s/_TTI_WAIT_TIME = 10/_TTI_WAIT_TIME = 2/g" '
- self.RunCommandOnDut(sed_command + FILE)
-
- def StopUI(self):
- """Stop UI on DUT."""
- # Added "ignore_status" for the case when crosperf stops ui service which
- # was already stopped. Command is going to fail with 1.
- self.RunCommandOnDut('stop ui', ignore_status=True)
-
- def StartUI(self):
- """Start UI on DUT."""
- # Similar to StopUI, `start ui` fails if the service is already started.
- self.RunCommandOnDut('start ui', ignore_status=True)
-
- def KerncmdUpdateNeeded(self, intel_pstate):
- """Check whether kernel cmdline update is needed.
-
- Args:
- intel_pstate: kernel command line argument (active, passive, no_hwp)
-
- Returns:
- True if update is needed.
- """
-
- good = 0
-
- # Check that dut platform supports hwp
- cmd = "grep -q '^flags.*hwp' /proc/cpuinfo"
- ret_code, _, _ = self.RunCommandOnDut(cmd, ignore_status=True)
- if ret_code != good:
- # Intel hwp is not supported, update is not needed.
- return False
-
- kern_cmdline_cmd = 'grep -q "intel_pstate=%s" /proc/cmdline' % intel_pstate
- ret_code, _, _ = self.RunCommandOnDut(kern_cmdline_cmd, ignore_status=True)
- self.logger.LogOutput('grep /proc/cmdline returned %d' % ret_code)
- if (intel_pstate and ret_code == good or
- not intel_pstate and ret_code != good):
- # No need to updated cmdline if:
- # 1. We are setting intel_pstate and we found it is already set.
- # 2. Not using intel_pstate and it is not in cmdline.
- return False
-
- # Otherwise we need to update intel_pstate.
- return True
-
- def UpdateKerncmdIntelPstate(self, intel_pstate):
- """Update kernel command line.
-
- Args:
- intel_pstate: kernel command line argument (active, passive, no_hwp)
- """
-
- good = 0
-
- # First phase is to remove rootfs verification to allow cmdline change.
- remove_verif_cmd = ' '.join([
- '/usr/share/vboot/bin/make_dev_ssd.sh',
- '--remove_rootfs_verification',
- '--partition %d',
- ])
- # Command for partition 2.
- verif_part2_failed, _, _ = self.RunCommandOnDut(
- remove_verif_cmd % 2, ignore_status=True)
- # Command for partition 4
- # Some machines in the lab use partition 4 to boot from,
- # so cmdline should be update for both partitions.
- verif_part4_failed, _, _ = self.RunCommandOnDut(
- remove_verif_cmd % 4, ignore_status=True)
- if verif_part2_failed or verif_part4_failed:
- self.logger.LogFatal(
- 'ERROR. Failed to update kernel cmdline on partition %d.\n'
- 'Remove verification failed with status %d' %
- (2 if verif_part2_failed else 4, verif_part2_failed or
- verif_part4_failed))
-
- self.RunCommandOnDut('reboot && exit')
- # Give enough time for dut to complete reboot
- # TODO(denik): Replace with the function checking machine availability.
- time.sleep(30)
-
- # Second phase to update intel_pstate in kernel cmdline.
- kern_cmdline = '\n'.join([
- 'tmpfile=$(mktemp)',
- 'partnumb=%d',
- 'pstate=%s',
- # Store kernel cmdline in a temp file.
- '/usr/share/vboot/bin/make_dev_ssd.sh --partition ${partnumb}'
- ' --save_config ${tmpfile}',
- # Remove intel_pstate argument if present.
- "sed -i -r 's/ intel_pstate=[A-Za-z_]+//g' ${tmpfile}.${partnumb}",
- # Insert intel_pstate with a new value if it is set.
- '[[ -n ${pstate} ]] &&'
- ' sed -i -e \"s/ *$/ intel_pstate=${pstate}/\" ${tmpfile}.${partnumb}',
- # Save the change in kernel cmdline.
- # After completion we have to reboot.
- '/usr/share/vboot/bin/make_dev_ssd.sh --partition ${partnumb}'
- ' --set_config ${tmpfile}'
- ])
- kern_part2_cmdline_cmd = kern_cmdline % (2, intel_pstate)
- self.logger.LogOutput(
- 'Command to change kernel command line: %s' % kern_part2_cmdline_cmd)
- upd_part2_failed, _, _ = self.RunCommandOnDut(
- kern_part2_cmdline_cmd, ignore_status=True)
- # Again here we are updating cmdline for partition 4
- # in addition to partition 2. Without this some machines
- # in the lab might fail.
- kern_part4_cmdline_cmd = kern_cmdline % (4, intel_pstate)
- self.logger.LogOutput(
- 'Command to change kernel command line: %s' % kern_part4_cmdline_cmd)
- upd_part4_failed, _, _ = self.RunCommandOnDut(
- kern_part4_cmdline_cmd, ignore_status=True)
- if upd_part2_failed or upd_part4_failed:
- self.logger.LogFatal(
- 'ERROR. Failed to update kernel cmdline on partition %d.\n'
- 'intel_pstate update failed with status %d' %
- (2 if upd_part2_failed else 4, upd_part2_failed or upd_part4_failed))
-
- self.RunCommandOnDut('reboot && exit')
- # Wait 30s after reboot.
- time.sleep(30)
-
- # Verification phase.
- # Check that cmdline was updated.
- # Throw an exception if not.
- kern_cmdline_cmd = 'grep -q "intel_pstate=%s" /proc/cmdline' % intel_pstate
- ret_code, _, _ = self.RunCommandOnDut(kern_cmdline_cmd, ignore_status=True)
- if (intel_pstate and ret_code != good or
- not intel_pstate and ret_code == good):
- # Kernel cmdline doesn't match input intel_pstate.
- self.logger.LogFatal(
- 'ERROR. Failed to update kernel cmdline. '
- 'Final verification failed with status %d' % ret_code)
-
- self.logger.LogOutput('Kernel cmdline updated successfully.')
-
- @contextmanager
- def PauseUI(self):
- """Stop UI before and Start UI after the context block.
-
- Context manager will make sure UI is always resumed at the end.
- """
- self.StopUI()
- try:
- yield
-
- finally:
- self.StartUI()
-
- def SetupDevice(self):
- """Setup device to get it ready for testing.
-
- @Returns Wait time of cool down for this benchmark run.
- """
- self.logger.LogOutput('Update kernel cmdline if necessary and reboot')
- intel_pstate = self.dut_config['intel_pstate']
- if intel_pstate and self.KerncmdUpdateNeeded(intel_pstate):
- self.UpdateKerncmdIntelPstate(intel_pstate)
-
- wait_time = 0
- # Pause UI while configuring the DUT.
- # This will accelerate setup (waiting for cooldown has x10 drop)
- # and help to reset a Chrome state left after the previous test.
- with self.PauseUI():
- # Unless the user turns on ASLR in the flag, we first disable ASLR
- # before running the benchmarks
- if not self.dut_config['enable_aslr']:
- self.DisableASLR()
-
- # CPU usage setup comes first where we enable/disable cores.
- self.SetupCpuUsage()
- cpu_online_status = self.GetCpuOnline()
- # List of online cores of type int (core number).
- online_cores = [
- core for core, status in cpu_online_status.items() if status
- ]
- if self.dut_config['cooldown_time']:
- # Setup power conservative mode for effective cool down.
- # Set ignore status since powersave may no be available
- # on all platforms and we are going to handle it.
- ret = self.SetCpuGovernor('powersave', ignore_status=True)
if ret:
- # "powersave" is not available, use "ondemand".
- # Still not a fatal error if it fails.
- ret = self.SetCpuGovernor('ondemand', ignore_status=True)
- # TODO(denik): Run comparison test for 'powersave' and 'ondemand'
- # on scarlet and kevin64.
- # We might have to consider reducing freq manually to the min
- # if it helps to reduce waiting time.
- wait_time = self.WaitCooldown()
+ err_msg = (
+ "Command execution on DUT %s failed.\n"
+ "Failing command: %s\n"
+ "returned %d\n"
+ "Error message: %s" % (self.remote, command, ret, err_msg)
+ )
+ if ignore_status:
+ self.logger.LogError(
+ err_msg + "\n(Failure is considered non-fatal. Continue.)"
+ )
+ else:
+ self.logger.LogFatal(err_msg)
- # Setup CPU governor for the benchmark run.
- # It overwrites the previous governor settings.
- governor = self.dut_config['governor']
- # FIXME(denik): Pass online cores to governor setup.
- self.SetCpuGovernor(governor)
+ return ret, msg, err_msg
- # Disable Turbo and Setup CPU freq should ALWAYS proceed governor setup
- # since governor may change:
- # - frequency;
- # - turbo/boost.
- self.DisableTurbo()
- self.SetupCpuFreq(online_cores)
+ def DisableASLR(self):
+ """Disable ASLR on DUT."""
+ disable_aslr = (
+ "set -e; "
+ "if [[ -e /proc/sys/kernel/randomize_va_space ]]; then "
+ " echo 0 > /proc/sys/kernel/randomize_va_space; "
+ "fi"
+ )
+ if self.log_level == "average":
+ self.logger.LogOutput("Disable ASLR.")
+ self.RunCommandOnDut(disable_aslr)
- self.DecreaseWaitTime()
- # FIXME(denik): Currently we are not recovering the previous cpufreq
- # settings since we do reboot/setup every time anyway.
- # But it may change in the future and then we have to recover the
- # settings.
- return wait_time
+ def SetCpuGovernor(self, governor, ignore_status=False):
+ """Setup CPU Governor on DUT."""
+ set_gov_cmd = (
+ "for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do "
+ # Skip writing scaling_governor if cpu is offline.
+ " [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} "
+ " && continue; "
+ " cd $f; "
+ " if [[ -e scaling_governor ]]; then "
+ " echo %s > scaling_governor; fi; "
+ "done; "
+ )
+ if self.log_level == "average":
+ self.logger.LogOutput("Setup CPU Governor: %s." % governor)
+ ret, _, _ = self.RunCommandOnDut(
+ set_gov_cmd % governor, ignore_status=ignore_status
+ )
+ return ret
+
+ def DisableTurbo(self):
+ """Disable Turbo on DUT."""
+ dis_turbo_cmd = (
+ "if [[ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]]; then "
+ " if grep -q 0 /sys/devices/system/cpu/intel_pstate/no_turbo; then "
+ " echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; "
+ " fi; "
+ "fi; "
+ )
+ if self.log_level == "average":
+ self.logger.LogOutput("Disable Turbo.")
+ self.RunCommandOnDut(dis_turbo_cmd)
+
+ def SetupCpuUsage(self):
+ """Setup CPU usage.
+
+ Based on self.dut_config['cpu_usage'] configure CPU cores
+ utilization.
+ """
+
+ if (
+ self.dut_config["cpu_usage"] == "big_only"
+ or self.dut_config["cpu_usage"] == "little_only"
+ ):
+ _, arch, _ = self.RunCommandOnDut("uname -m")
+
+ if arch.lower().startswith("arm") or arch.lower().startswith(
+ "aarch64"
+ ):
+ self.SetupArmCores()
+
+ def SetupArmCores(self):
+ """Setup ARM big/little cores."""
+
+ # CPU implemeters/part numbers of big/LITTLE CPU.
+ # Format: dict(CPU implementer: set(CPU part numbers))
+ LITTLE_CORES = {
+ "0x41": {
+ "0xd01", # Cortex A32
+ "0xd03", # Cortex A53
+ "0xd04", # Cortex A35
+ "0xd05", # Cortex A55
+ },
+ }
+ BIG_CORES = {
+ "0x41": {
+ "0xd07", # Cortex A57
+ "0xd08", # Cortex A72
+ "0xd09", # Cortex A73
+ "0xd0a", # Cortex A75
+ "0xd0b", # Cortex A76
+ },
+ }
+
+ # Values of CPU Implementer and CPU part number are exposed by cpuinfo.
+ # Format:
+ # =================
+ # processor : 0
+ # model name : ARMv8 Processor rev 4 (v8l)
+ # BogoMIPS : 48.00
+ # Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4
+ # CPU implementer : 0x41
+ # CPU architecture: 8
+ # CPU variant : 0x0
+ # CPU part : 0xd03
+ # CPU revision : 4
+
+ _, cpuinfo, _ = self.RunCommandOnDut("cat /proc/cpuinfo")
+
+ # List of all CPU cores: 0, 1, ..
+ proc_matches = re.findall(
+ r"^processor\s*: (\d+)$", cpuinfo, re.MULTILINE
+ )
+ # List of all corresponding CPU implementers
+ impl_matches = re.findall(
+ r"^CPU implementer\s*: (0x[\da-f]+)$", cpuinfo, re.MULTILINE
+ )
+ # List of all corresponding CPU part numbers
+ part_matches = re.findall(
+ r"^CPU part\s*: (0x[\da-f]+)$", cpuinfo, re.MULTILINE
+ )
+ assert len(proc_matches) == len(impl_matches)
+ assert len(part_matches) == len(impl_matches)
+
+ all_cores = set(proc_matches)
+ dut_big_cores = {
+ core
+ for core, impl, part in zip(
+ proc_matches, impl_matches, part_matches
+ )
+ if impl in BIG_CORES and part in BIG_CORES[impl]
+ }
+ dut_lit_cores = {
+ core
+ for core, impl, part in zip(
+ proc_matches, impl_matches, part_matches
+ )
+ if impl in LITTLE_CORES and part in LITTLE_CORES[impl]
+ }
+
+ if self.dut_config["cpu_usage"] == "big_only":
+ cores_to_enable = dut_big_cores
+ cores_to_disable = all_cores - dut_big_cores
+ elif self.dut_config["cpu_usage"] == "little_only":
+ cores_to_enable = dut_lit_cores
+ cores_to_disable = all_cores - dut_lit_cores
+ else:
+ self.logger.LogError(
+ "cpu_usage=%s is not supported on ARM.\n"
+ "Ignore ARM CPU setup and continue."
+ % self.dut_config["cpu_usage"]
+ )
+ return
+
+ if cores_to_enable:
+ cmd_enable_cores = (
+ "echo 1 | tee /sys/devices/system/cpu/cpu{%s}/online"
+ % ",".join(sorted(cores_to_enable))
+ )
+
+ cmd_disable_cores = ""
+ if cores_to_disable:
+ cmd_disable_cores = (
+ "echo 0 | tee /sys/devices/system/cpu/cpu{%s}/online"
+ % ",".join(sorted(cores_to_disable))
+ )
+
+ self.RunCommandOnDut(
+ "; ".join([cmd_enable_cores, cmd_disable_cores])
+ )
+ else:
+ # If there are no cores enabled by dut_config then configuration
+ # is invalid for current platform and should be ignored.
+ self.logger.LogError(
+ '"cpu_usage" is invalid for targeted platform.\n'
+ "dut_config[cpu_usage]=%s\n"
+ "dut big cores: %s\n"
+ "dut little cores: %s\n"
+ "Ignore ARM CPU setup and continue."
+ % (self.dut_config["cpu_usage"], dut_big_cores, dut_lit_cores)
+ )
+
+ def GetCpuOnline(self):
+ """Get online status of CPU cores.
+
+ Return dict of {int(cpu_num): <0|1>}.
+ """
+ get_cpu_online_cmd = (
+ 'paste -d" "'
+ " <(ls /sys/devices/system/cpu/cpu*/online)"
+ " <(cat /sys/devices/system/cpu/cpu*/online)"
+ )
+ _, online_output_str, _ = self.RunCommandOnDut(get_cpu_online_cmd)
+
+ # Here is the output we expect to see:
+ # -----------------
+ # /sys/devices/system/cpu/cpu0/online 0
+ # /sys/devices/system/cpu/cpu1/online 1
+
+ cpu_online = {}
+ cpu_online_match = re.compile(r"^[/\S]+/cpu(\d+)/[/\S]+\s+(\d+)$")
+ for line in online_output_str.splitlines():
+ match = cpu_online_match.match(line)
+ if match:
+ cpu = int(match.group(1))
+ status = int(match.group(2))
+ cpu_online[cpu] = status
+ # At least one CPU has to be online.
+ assert cpu_online
+
+ return cpu_online
+
+ def SetupCpuFreq(self, online_cores):
+ """Setup CPU frequency.
+
+ Based on self.dut_config['cpu_freq_pct'] setup frequency of online CPU cores
+ to a supported value which is less or equal to (freq_pct * max_freq / 100)
+ limited by min_freq.
+
+ NOTE: scaling_available_frequencies support is required.
+ Otherwise the function has no effect.
+ """
+ freq_percent = self.dut_config["cpu_freq_pct"]
+ list_all_avail_freq_cmd = (
+ "ls /sys/devices/system/cpu/cpu{%s}/cpufreq/"
+ "scaling_available_frequencies"
+ )
+ # Ignore error to support general usage of frequency setup.
+ # Not all platforms support scaling_available_frequencies.
+ ret, all_avail_freq_str, _ = self.RunCommandOnDut(
+ list_all_avail_freq_cmd
+ % ",".join(str(core) for core in online_cores),
+ ignore_status=True,
+ )
+ if ret or not all_avail_freq_str:
+ # No scalable frequencies available for the core.
+ return ret
+ for avail_freq_path in all_avail_freq_str.split():
+ # Get available freq from every scaling_available_frequency path.
+ # Error is considered fatal in self.RunCommandOnDut().
+ _, avail_freq_str, _ = self.RunCommandOnDut(
+ "cat " + avail_freq_path
+ )
+ assert avail_freq_str
+
+ all_avail_freq = sorted(
+ int(freq_str) for freq_str in avail_freq_str.split()
+ )
+ min_freq = all_avail_freq[0]
+ max_freq = all_avail_freq[-1]
+ # Calculate the frequency we are targeting.
+ target_freq = round(max_freq * freq_percent / 100)
+ # More likely it's not in the list of supported frequencies
+ # and our goal is to find the one which is less or equal.
+ # Default is min and we will try to maximize it.
+ avail_ngt_target = min_freq
+ # Find the largest not greater than the target.
+ for next_largest in reversed(all_avail_freq):
+ if next_largest <= target_freq:
+ avail_ngt_target = next_largest
+ break
+
+ max_freq_path = avail_freq_path.replace(
+ "scaling_available_frequencies", "scaling_max_freq"
+ )
+ min_freq_path = avail_freq_path.replace(
+ "scaling_available_frequencies", "scaling_min_freq"
+ )
+ # With default ignore_status=False we expect 0 status or Fatal error.
+ self.RunCommandOnDut(
+ "echo %s | tee %s %s"
+ % (avail_ngt_target, max_freq_path, min_freq_path)
+ )
+
+ def WaitCooldown(self):
+ """Wait for DUT to cool down to certain temperature."""
+ waittime = 0
+ timeout_in_sec = int(self.dut_config["cooldown_time"]) * 60
+ # Temperature from sensors come in uCelsius units.
+ temp_in_ucels = int(self.dut_config["cooldown_temp"]) * 1000
+ sleep_interval = 30
+
+ # Wait until any of two events occurs:
+ # 1. CPU cools down to a specified temperature.
+ # 2. Timeout cooldown_time expires.
+ # For the case when targeted temperature is not reached within specified
+ # timeout the benchmark is going to start with higher initial CPU temp.
+ # In the worst case it may affect test results but at the same time we
+ # guarantee the upper bound of waiting time.
+ # TODO(denik): Report (or highlight) "high" CPU temperature in test results.
+ # "high" should be calculated based on empirical data per platform.
+ # Based on such reports we can adjust CPU configuration or
+ # cooldown limits accordingly.
+ while waittime < timeout_in_sec:
+ _, temp_output, _ = self.RunCommandOnDut(
+ "cat /sys/class/thermal/thermal_zone*/temp", ignore_status=True
+ )
+ if any(int(temp) > temp_in_ucels for temp in temp_output.split()):
+ time.sleep(sleep_interval)
+ waittime += sleep_interval
+ else:
+ # Exit the loop when:
+ # 1. Reported temp numbers from all thermal sensors do not exceed
+ # 'cooldown_temp' or
+ # 2. No data from the sensors.
+ break
+
+ self.logger.LogOutput("Cooldown wait time: %.1f min" % (waittime / 60))
+ return waittime
+
+ def DecreaseWaitTime(self):
+ """Change the ten seconds wait time for pagecycler to two seconds."""
+ FILE = (
+ "/usr/local/telemetry/src/tools/perf/page_sets/page_cycler_story.py"
+ )
+ ret = self.RunCommandOnDut("ls " + FILE)
+
+ if not ret:
+ sed_command = 'sed -i "s/_TTI_WAIT_TIME = 10/_TTI_WAIT_TIME = 2/g" '
+ self.RunCommandOnDut(sed_command + FILE)
+
+ def StopUI(self):
+ """Stop UI on DUT."""
+ # Added "ignore_status" for the case when crosperf stops ui service which
+ # was already stopped. Command is going to fail with 1.
+ self.RunCommandOnDut("stop ui", ignore_status=True)
+
+ def StartUI(self):
+ """Start UI on DUT."""
+ # Similar to StopUI, `start ui` fails if the service is already started.
+ self.RunCommandOnDut("start ui", ignore_status=True)
+
+ def KerncmdUpdateNeeded(self, intel_pstate):
+ """Check whether kernel cmdline update is needed.
+
+ Args:
+ intel_pstate: kernel command line argument (active, passive, no_hwp)
+
+ Returns:
+ True if update is needed.
+ """
+
+ good = 0
+
+ # Check that dut platform supports hwp
+ cmd = "grep -q '^flags.*hwp' /proc/cpuinfo"
+ ret_code, _, _ = self.RunCommandOnDut(cmd, ignore_status=True)
+ if ret_code != good:
+ # Intel hwp is not supported, update is not needed.
+ return False
+
+ kern_cmdline_cmd = (
+ 'grep -q "intel_pstate=%s" /proc/cmdline' % intel_pstate
+ )
+ ret_code, _, _ = self.RunCommandOnDut(
+ kern_cmdline_cmd, ignore_status=True
+ )
+ self.logger.LogOutput("grep /proc/cmdline returned %d" % ret_code)
+ if (
+ intel_pstate
+ and ret_code == good
+ or not intel_pstate
+ and ret_code != good
+ ):
+ # No need to updated cmdline if:
+ # 1. We are setting intel_pstate and we found it is already set.
+ # 2. Not using intel_pstate and it is not in cmdline.
+ return False
+
+ # Otherwise we need to update intel_pstate.
+ return True
+
+ def UpdateKerncmdIntelPstate(self, intel_pstate):
+ """Update kernel command line.
+
+ Args:
+ intel_pstate: kernel command line argument (active, passive, no_hwp)
+ """
+
+ good = 0
+
+ # First phase is to remove rootfs verification to allow cmdline change.
+ remove_verif_cmd = " ".join(
+ [
+ "/usr/share/vboot/bin/make_dev_ssd.sh",
+ "--remove_rootfs_verification",
+ "--partition %d",
+ ]
+ )
+ # Command for partition 2.
+ verif_part2_failed, _, _ = self.RunCommandOnDut(
+ remove_verif_cmd % 2, ignore_status=True
+ )
+ # Command for partition 4
+ # Some machines in the lab use partition 4 to boot from,
+ # so cmdline should be update for both partitions.
+ verif_part4_failed, _, _ = self.RunCommandOnDut(
+ remove_verif_cmd % 4, ignore_status=True
+ )
+ if verif_part2_failed or verif_part4_failed:
+ self.logger.LogFatal(
+ "ERROR. Failed to update kernel cmdline on partition %d.\n"
+ "Remove verification failed with status %d"
+ % (
+ 2 if verif_part2_failed else 4,
+ verif_part2_failed or verif_part4_failed,
+ )
+ )
+
+ self.RunCommandOnDut("reboot && exit")
+ # Give enough time for dut to complete reboot
+ # TODO(denik): Replace with the function checking machine availability.
+ time.sleep(30)
+
+ # Second phase to update intel_pstate in kernel cmdline.
+ kern_cmdline = "\n".join(
+ [
+ "tmpfile=$(mktemp)",
+ "partnumb=%d",
+ "pstate=%s",
+ # Store kernel cmdline in a temp file.
+ "/usr/share/vboot/bin/make_dev_ssd.sh --partition ${partnumb}"
+ " --save_config ${tmpfile}",
+ # Remove intel_pstate argument if present.
+ "sed -i -r 's/ intel_pstate=[A-Za-z_]+//g' ${tmpfile}.${partnumb}",
+ # Insert intel_pstate with a new value if it is set.
+ "[[ -n ${pstate} ]] &&"
+ ' sed -i -e "s/ *$/ intel_pstate=${pstate}/" ${tmpfile}.${partnumb}',
+ # Save the change in kernel cmdline.
+ # After completion we have to reboot.
+ "/usr/share/vboot/bin/make_dev_ssd.sh --partition ${partnumb}"
+ " --set_config ${tmpfile}",
+ ]
+ )
+ kern_part2_cmdline_cmd = kern_cmdline % (2, intel_pstate)
+ self.logger.LogOutput(
+ "Command to change kernel command line: %s" % kern_part2_cmdline_cmd
+ )
+ upd_part2_failed, _, _ = self.RunCommandOnDut(
+ kern_part2_cmdline_cmd, ignore_status=True
+ )
+ # Again here we are updating cmdline for partition 4
+ # in addition to partition 2. Without this some machines
+ # in the lab might fail.
+ kern_part4_cmdline_cmd = kern_cmdline % (4, intel_pstate)
+ self.logger.LogOutput(
+ "Command to change kernel command line: %s" % kern_part4_cmdline_cmd
+ )
+ upd_part4_failed, _, _ = self.RunCommandOnDut(
+ kern_part4_cmdline_cmd, ignore_status=True
+ )
+ if upd_part2_failed or upd_part4_failed:
+ self.logger.LogFatal(
+ "ERROR. Failed to update kernel cmdline on partition %d.\n"
+ "intel_pstate update failed with status %d"
+ % (
+ 2 if upd_part2_failed else 4,
+ upd_part2_failed or upd_part4_failed,
+ )
+ )
+
+ self.RunCommandOnDut("reboot && exit")
+ # Wait 30s after reboot.
+ time.sleep(30)
+
+ # Verification phase.
+ # Check that cmdline was updated.
+ # Throw an exception if not.
+ kern_cmdline_cmd = (
+ 'grep -q "intel_pstate=%s" /proc/cmdline' % intel_pstate
+ )
+ ret_code, _, _ = self.RunCommandOnDut(
+ kern_cmdline_cmd, ignore_status=True
+ )
+ if (
+ intel_pstate
+ and ret_code != good
+ or not intel_pstate
+ and ret_code == good
+ ):
+ # Kernel cmdline doesn't match input intel_pstate.
+ self.logger.LogFatal(
+ "ERROR. Failed to update kernel cmdline. "
+ "Final verification failed with status %d" % ret_code
+ )
+
+ self.logger.LogOutput("Kernel cmdline updated successfully.")
+
+ @contextmanager
+ def PauseUI(self):
+ """Stop UI before and Start UI after the context block.
+
+ Context manager will make sure UI is always resumed at the end.
+ """
+ self.StopUI()
+ try:
+ yield
+
+ finally:
+ self.StartUI()
+
+ def SetupDevice(self):
+ """Setup device to get it ready for testing.
+
+ Returns: Wait time of cool down for this benchmark run.
+ """
+ self.logger.LogOutput("Update kernel cmdline if necessary and reboot")
+ intel_pstate = self.dut_config["intel_pstate"]
+ if intel_pstate and self.KerncmdUpdateNeeded(intel_pstate):
+ self.UpdateKerncmdIntelPstate(intel_pstate)
+
+ wait_time = 0
+ # Pause UI while configuring the DUT.
+ # This will accelerate setup (cooldown wait time drops ~10x)
+ # and help to reset a Chrome state left after the previous test.
+ with self.PauseUI():
+ # Unless the user turns on ASLR in the flag, we first disable ASLR
+ # before running the benchmarks
+ if not self.dut_config["enable_aslr"]:
+ self.DisableASLR()
+
+ # CPU usage setup comes first where we enable/disable cores.
+ self.SetupCpuUsage()
+ cpu_online_status = self.GetCpuOnline()
+ # List of online cores of type int (core number).
+ online_cores = [
+ core for core, status in cpu_online_status.items() if status
+ ]
+ if self.dut_config["cooldown_time"]:
+ # Setup power conservative mode for effective cool down.
+ # Set ignore status since powersave may not be available
+ # on all platforms and we are going to handle it.
+ ret = self.SetCpuGovernor("powersave", ignore_status=True)
+ if ret:
+ # "powersave" is not available, use "ondemand".
+ # Still not a fatal error if it fails.
+ ret = self.SetCpuGovernor("ondemand", ignore_status=True)
+ # TODO(denik): Run comparison test for 'powersave' and 'ondemand'
+ # on scarlet and kevin64.
+ # We might have to consider reducing freq manually to the min
+ # if it helps to reduce waiting time.
+ wait_time = self.WaitCooldown()
+
+ # Setup CPU governor for the benchmark run.
+ # It overwrites the previous governor settings.
+ governor = self.dut_config["governor"]
+ # FIXME(denik): Pass online cores to governor setup.
+ self.SetCpuGovernor(governor)
+
+ # Disable Turbo and Setup CPU freq should ALWAYS follow governor setup
+ # since governor may change:
+ # - frequency;
+ # - turbo/boost.
+ self.DisableTurbo()
+ self.SetupCpuFreq(online_cores)
+
+ self.DecreaseWaitTime()
+ # FIXME(denik): Currently we are not recovering the previous cpufreq
+ # settings since we do reboot/setup every time anyway.
+ # But it may change in the future and then we have to recover the
+ # settings.
+ return wait_time
diff --git a/cros_utils/device_setup_utils_unittest.py b/cros_utils/device_setup_utils_unittest.py
index 12a7081..d7339e2 100755
--- a/cros_utils/device_setup_utils_unittest.py
+++ b/cros_utils/device_setup_utils_unittest.py
@@ -1,16 +1,14 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for device_setup_utils."""
-from __future__ import print_function
import time
-
import unittest
from unittest import mock
@@ -18,6 +16,7 @@
from cros_utils import logger
from cros_utils.device_setup_utils import DutWrapper
+
BIG_LITTLE_CPUINFO = """processor : 0
model name : ARMv8 Processor rev 4 (v8l)
BogoMIPS : 48.00
@@ -94,609 +93,713 @@
class DutWrapperTest(unittest.TestCase):
- """Class of DutWrapper test."""
- real_logger = logger.GetLogger()
+ """Class of DutWrapper test."""
- mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
- mock_logger = mock.Mock(spec=logger.Logger)
+ real_logger = logger.GetLogger()
- def __init__(self, *args, **kwargs):
- super(DutWrapperTest, self).__init__(*args, **kwargs)
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+ mock_logger = mock.Mock(spec=logger.Logger)
- def setUp(self):
- self.dw = DutWrapper(
- '/tmp/chromeos',
- 'lumpy.cros2',
- log_level='verbose',
- logger=self.mock_logger,
- ce=self.mock_cmd_exec,
- dut_config={})
+ def __init__(self, *args, **kwargs):
+ super(DutWrapperTest, self).__init__(*args, **kwargs)
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
- def test_run_command_on_dut(self, mock_cros_runcmd):
- self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
- mock_cros_runcmd.return_value = (0, '', '')
- mock_cros_runcmd.assert_not_called()
- self.dw.RunCommandOnDut('run command;')
- mock_cros_runcmd.assert_called_once_with(
- 'run command;', chromeos_root='/tmp/chromeos', machine='lumpy.cros2')
+ def setUp(self):
+ self.dw = DutWrapper(
+ "/tmp/chromeos",
+ "lumpy.cros2",
+ log_level="verbose",
+ logger=self.mock_logger,
+ ce=self.mock_cmd_exec,
+ dut_config={},
+ )
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
- def test_dut_wrapper_fatal_error(self, mock_cros_runcmd):
- self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
- # Command returns error 1.
- mock_cros_runcmd.return_value = (1, '', 'Error!')
- mock_cros_runcmd.assert_not_called()
- self.dw.RunCommandOnDut('run command;')
- mock_cros_runcmd.assert_called_once_with(
- 'run command;', chromeos_root='/tmp/chromeos', machine='lumpy.cros2')
- # Error status causes log fatal.
- self.assertEqual(
- self.mock_logger.method_calls[-1],
- mock.call.LogFatal('Command execution on DUT lumpy.cros2 failed.\n'
- 'Failing command: run command;\nreturned 1\n'
- 'Error message: Error!'))
+ @mock.patch.object(
+ command_executer.CommandExecuter, "CrosRunCommandWOutput"
+ )
+ def test_run_command_on_dut(self, mock_cros_runcmd):
+ self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
+ mock_cros_runcmd.return_value = (0, "", "")
+ mock_cros_runcmd.assert_not_called()
+ self.dw.RunCommandOnDut("run command;")
+ mock_cros_runcmd.assert_called_once_with(
+ "run command;", chromeos_root="/tmp/chromeos", machine="lumpy.cros2"
+ )
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
- def test_dut_wrapper_ignore_error(self, mock_cros_runcmd):
- self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
- # Command returns error 1.
- mock_cros_runcmd.return_value = (1, '', 'Error!')
- self.dw.RunCommandOnDut('run command;', ignore_status=True)
- mock_cros_runcmd.assert_called_once_with(
- 'run command;', chromeos_root='/tmp/chromeos', machine='lumpy.cros2')
- # Error status is not fatal. LogError records the error message.
- self.assertEqual(
- self.mock_logger.method_calls[-1],
- mock.call.LogError('Command execution on DUT lumpy.cros2 failed.\n'
- 'Failing command: run command;\nreturned 1\n'
- 'Error message: Error!\n'
- '(Failure is considered non-fatal. Continue.)'))
+ @mock.patch.object(
+ command_executer.CommandExecuter, "CrosRunCommandWOutput"
+ )
+ def test_dut_wrapper_fatal_error(self, mock_cros_runcmd):
+ self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
+ # Command returns error 1.
+ mock_cros_runcmd.return_value = (1, "", "Error!")
+ mock_cros_runcmd.assert_not_called()
+ self.dw.RunCommandOnDut("run command;")
+ mock_cros_runcmd.assert_called_once_with(
+ "run command;", chromeos_root="/tmp/chromeos", machine="lumpy.cros2"
+ )
+ # Error status causes log fatal.
+ self.assertEqual(
+ self.mock_logger.method_calls[-1],
+ mock.call.LogFatal(
+ "Command execution on DUT lumpy.cros2 failed.\n"
+ "Failing command: run command;\nreturned 1\n"
+ "Error message: Error!"
+ ),
+ )
- def test_disable_aslr(self):
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '', ''))
- self.dw.DisableASLR()
- # pyformat: disable
- set_cpu_cmd = ('set -e; '
- 'if [[ -e /proc/sys/kernel/randomize_va_space ]]; then '
- ' echo 0 > /proc/sys/kernel/randomize_va_space; '
- 'fi')
- self.dw.RunCommandOnDut.assert_called_once_with(set_cpu_cmd)
+ @mock.patch.object(
+ command_executer.CommandExecuter, "CrosRunCommandWOutput"
+ )
+ def test_dut_wrapper_ignore_error(self, mock_cros_runcmd):
+ self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
+ # Command returns error 1.
+ mock_cros_runcmd.return_value = (1, "", "Error!")
+ self.dw.RunCommandOnDut("run command;", ignore_status=True)
+ mock_cros_runcmd.assert_called_once_with(
+ "run command;", chromeos_root="/tmp/chromeos", machine="lumpy.cros2"
+ )
+ # Error status is not fatal. LogError records the error message.
+ self.assertEqual(
+ self.mock_logger.method_calls[-1],
+ mock.call.LogError(
+ "Command execution on DUT lumpy.cros2 failed.\n"
+ "Failing command: run command;\nreturned 1\n"
+ "Error message: Error!\n"
+ "(Failure is considered non-fatal. Continue.)"
+ ),
+ )
- def test_set_cpu_governor(self):
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '', ''))
- self.dw.SetCpuGovernor('new_governor', ignore_status=False)
- set_cpu_cmd = (
- 'for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do '
- # Skip writing scaling_governor if cpu is offline.
- ' [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} '
- ' && continue; '
- ' cd $f; '
- ' if [[ -e scaling_governor ]]; then '
- ' echo %s > scaling_governor; fi; '
- 'done; ')
- self.dw.RunCommandOnDut.assert_called_once_with(
- set_cpu_cmd % 'new_governor', ignore_status=False)
+ def test_disable_aslr(self):
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "", ""))
+ self.dw.DisableASLR()
+ # pyformat: disable
+ set_cpu_cmd = (
+ "set -e; "
+ "if [[ -e /proc/sys/kernel/randomize_va_space ]]; then "
+ " echo 0 > /proc/sys/kernel/randomize_va_space; "
+ "fi"
+ )
+ self.dw.RunCommandOnDut.assert_called_once_with(set_cpu_cmd)
- def test_set_cpu_governor_propagate_error(self):
- self.dw.RunCommandOnDut = mock.Mock(return_value=(1, '', 'Error.'))
- self.dw.SetCpuGovernor('non-exist_governor')
- set_cpu_cmd = (
- 'for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do '
- # Skip writing scaling_governor if cpu is not online.
- ' [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} '
- ' && continue; '
- ' cd $f; '
- ' if [[ -e scaling_governor ]]; then '
- ' echo %s > scaling_governor; fi; '
- 'done; ')
- # By default error status is fatal.
- self.dw.RunCommandOnDut.assert_called_once_with(
- set_cpu_cmd % 'non-exist_governor', ignore_status=False)
+ def test_set_cpu_governor(self):
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "", ""))
+ self.dw.SetCpuGovernor("new_governor", ignore_status=False)
+ set_cpu_cmd = (
+ "for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do "
+ # Skip writing scaling_governor if cpu is offline.
+ " [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} "
+ " && continue; "
+ " cd $f; "
+ " if [[ -e scaling_governor ]]; then "
+ " echo %s > scaling_governor; fi; "
+ "done; "
+ )
+ self.dw.RunCommandOnDut.assert_called_once_with(
+ set_cpu_cmd % "new_governor", ignore_status=False
+ )
- def test_set_cpu_governor_ignore_status(self):
- self.dw.RunCommandOnDut = mock.Mock(return_value=(1, '', 'Error.'))
- ret_code = self.dw.SetCpuGovernor('non-exist_governor', ignore_status=True)
- set_cpu_cmd = (
- 'for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do '
- # Skip writing scaling_governor if cpu is not online.
- ' [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} '
- ' && continue; '
- ' cd $f; '
- ' if [[ -e scaling_governor ]]; then '
- ' echo %s > scaling_governor; fi; '
- 'done; ')
- self.dw.RunCommandOnDut.assert_called_once_with(
- set_cpu_cmd % 'non-exist_governor', ignore_status=True)
- self.assertEqual(ret_code, 1)
+ def test_set_cpu_governor_propagate_error(self):
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(1, "", "Error."))
+ self.dw.SetCpuGovernor("non-exist_governor")
+ set_cpu_cmd = (
+ "for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do "
+ # Skip writing scaling_governor if cpu is not online.
+ " [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} "
+ " && continue; "
+ " cd $f; "
+ " if [[ -e scaling_governor ]]; then "
+ " echo %s > scaling_governor; fi; "
+ "done; "
+ )
+ # By default error status is fatal.
+ self.dw.RunCommandOnDut.assert_called_once_with(
+ set_cpu_cmd % "non-exist_governor", ignore_status=False
+ )
- def test_disable_turbo(self):
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '', ''))
- self.dw.DisableTurbo()
- set_cpu_cmd = (
- # Disable Turbo in Intel pstate driver
- 'if [[ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]]; then '
- ' if grep -q 0 /sys/devices/system/cpu/intel_pstate/no_turbo; then '
- ' echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; '
- ' fi; '
- 'fi; ')
- self.dw.RunCommandOnDut.assert_called_once_with(set_cpu_cmd)
+ def test_set_cpu_governor_ignore_status(self):
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(1, "", "Error."))
+ ret_code = self.dw.SetCpuGovernor(
+ "non-exist_governor", ignore_status=True
+ )
+ set_cpu_cmd = (
+ "for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do "
+ # Skip writing scaling_governor if cpu is not online.
+ " [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} "
+ " && continue; "
+ " cd $f; "
+ " if [[ -e scaling_governor ]]; then "
+ " echo %s > scaling_governor; fi; "
+ "done; "
+ )
+ self.dw.RunCommandOnDut.assert_called_once_with(
+ set_cpu_cmd % "non-exist_governor", ignore_status=True
+ )
+ self.assertEqual(ret_code, 1)
- def test_get_cpu_online_two(self):
- """Test one digit CPU #."""
- self.dw.RunCommandOnDut = mock.Mock(
- return_value=(0, '/sys/devices/system/cpu/cpu0/online 0\n'
- '/sys/devices/system/cpu/cpu1/online 1\n', ''))
- cpu_online = self.dw.GetCpuOnline()
- self.assertEqual(cpu_online, {0: 0, 1: 1})
+ def test_disable_turbo(self):
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "", ""))
+ self.dw.DisableTurbo()
+ set_cpu_cmd = (
+ # Disable Turbo in Intel pstate driver
+ "if [[ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]]; then "
+ " if grep -q 0 /sys/devices/system/cpu/intel_pstate/no_turbo; then "
+ " echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; "
+ " fi; "
+ "fi; "
+ )
+ self.dw.RunCommandOnDut.assert_called_once_with(set_cpu_cmd)
- def test_get_cpu_online_twelve(self):
- """Test two digit CPU #."""
- self.dw.RunCommandOnDut = mock.Mock(
- return_value=(0, '/sys/devices/system/cpu/cpu0/online 1\n'
- '/sys/devices/system/cpu/cpu1/online 0\n'
- '/sys/devices/system/cpu/cpu10/online 1\n'
- '/sys/devices/system/cpu/cpu11/online 1\n'
- '/sys/devices/system/cpu/cpu2/online 1\n'
- '/sys/devices/system/cpu/cpu3/online 0\n'
- '/sys/devices/system/cpu/cpu4/online 1\n'
- '/sys/devices/system/cpu/cpu5/online 0\n'
- '/sys/devices/system/cpu/cpu6/online 1\n'
- '/sys/devices/system/cpu/cpu7/online 0\n'
- '/sys/devices/system/cpu/cpu8/online 1\n'
- '/sys/devices/system/cpu/cpu9/online 0\n', ''))
- cpu_online = self.dw.GetCpuOnline()
- self.assertEqual(cpu_online, {
- 0: 1,
- 1: 0,
- 2: 1,
- 3: 0,
- 4: 1,
- 5: 0,
- 6: 1,
- 7: 0,
- 8: 1,
- 9: 0,
- 10: 1,
- 11: 1
- })
+ def test_get_cpu_online_two(self):
+ """Test one digit CPU #."""
+ self.dw.RunCommandOnDut = mock.Mock(
+ return_value=(
+ 0,
+ "/sys/devices/system/cpu/cpu0/online 0\n"
+ "/sys/devices/system/cpu/cpu1/online 1\n",
+ "",
+ )
+ )
+ cpu_online = self.dw.GetCpuOnline()
+ self.assertEqual(cpu_online, {0: 0, 1: 1})
- def test_get_cpu_online_no_output(self):
- """Test error case, no output."""
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '', ''))
- with self.assertRaises(AssertionError):
- self.dw.GetCpuOnline()
+ def test_get_cpu_online_twelve(self):
+ """Test two digit CPU #."""
+ self.dw.RunCommandOnDut = mock.Mock(
+ return_value=(
+ 0,
+ "/sys/devices/system/cpu/cpu0/online 1\n"
+ "/sys/devices/system/cpu/cpu1/online 0\n"
+ "/sys/devices/system/cpu/cpu10/online 1\n"
+ "/sys/devices/system/cpu/cpu11/online 1\n"
+ "/sys/devices/system/cpu/cpu2/online 1\n"
+ "/sys/devices/system/cpu/cpu3/online 0\n"
+ "/sys/devices/system/cpu/cpu4/online 1\n"
+ "/sys/devices/system/cpu/cpu5/online 0\n"
+ "/sys/devices/system/cpu/cpu6/online 1\n"
+ "/sys/devices/system/cpu/cpu7/online 0\n"
+ "/sys/devices/system/cpu/cpu8/online 1\n"
+ "/sys/devices/system/cpu/cpu9/online 0\n",
+ "",
+ )
+ )
+ cpu_online = self.dw.GetCpuOnline()
+ self.assertEqual(
+ cpu_online,
+ {
+ 0: 1,
+ 1: 0,
+ 2: 1,
+ 3: 0,
+ 4: 1,
+ 5: 0,
+ 6: 1,
+ 7: 0,
+ 8: 1,
+ 9: 0,
+ 10: 1,
+ 11: 1,
+ },
+ )
- def test_get_cpu_online_command_error(self):
- """Test error case, command error."""
- self.dw.RunCommandOnDut = mock.Mock(side_effect=AssertionError)
- with self.assertRaises(AssertionError):
- self.dw.GetCpuOnline()
+ def test_get_cpu_online_no_output(self):
+ """Test error case, no output."""
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "", ""))
+ with self.assertRaises(AssertionError):
+ self.dw.GetCpuOnline()
- @mock.patch.object(DutWrapper, 'SetupArmCores')
- def test_setup_cpu_usage_little_on_arm(self, mock_setup_arm):
- self.dw.SetupArmCores = mock_setup_arm
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, 'armv7l', ''))
- self.dw.dut_config['cpu_usage'] = 'little_only'
- self.dw.SetupCpuUsage()
- self.dw.SetupArmCores.assert_called_once_with()
+ def test_get_cpu_online_command_error(self):
+ """Test error case, command error."""
+ self.dw.RunCommandOnDut = mock.Mock(side_effect=AssertionError)
+ with self.assertRaises(AssertionError):
+ self.dw.GetCpuOnline()
- @mock.patch.object(DutWrapper, 'SetupArmCores')
- def test_setup_cpu_usage_big_on_aarch64(self, mock_setup_arm):
- self.dw.SetupArmCores = mock_setup_arm
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, 'aarch64', ''))
- self.dw.dut_config['cpu_usage'] = 'big_only'
- self.dw.SetupCpuUsage()
- self.dw.SetupArmCores.assert_called_once_with()
+ @mock.patch.object(DutWrapper, "SetupArmCores")
+ def test_setup_cpu_usage_little_on_arm(self, mock_setup_arm):
+ self.dw.SetupArmCores = mock_setup_arm
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "armv7l", ""))
+ self.dw.dut_config["cpu_usage"] = "little_only"
+ self.dw.SetupCpuUsage()
+ self.dw.SetupArmCores.assert_called_once_with()
- @mock.patch.object(DutWrapper, 'SetupArmCores')
- def test_setup_cpu_usage_big_on_intel(self, mock_setup_arm):
- self.dw.SetupArmCores = mock_setup_arm
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, 'x86_64', ''))
- self.dw.dut_config['cpu_usage'] = 'big_only'
- self.dw.SetupCpuUsage()
- # Check that SetupArmCores not called with invalid setup.
- self.dw.SetupArmCores.assert_not_called()
+ @mock.patch.object(DutWrapper, "SetupArmCores")
+ def test_setup_cpu_usage_big_on_aarch64(self, mock_setup_arm):
+ self.dw.SetupArmCores = mock_setup_arm
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "aarch64", ""))
+ self.dw.dut_config["cpu_usage"] = "big_only"
+ self.dw.SetupCpuUsage()
+ self.dw.SetupArmCores.assert_called_once_with()
- @mock.patch.object(DutWrapper, 'SetupArmCores')
- def test_setup_cpu_usage_all_on_intel(self, mock_setup_arm):
- self.dw.SetupArmCores = mock_setup_arm
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, 'x86_64', ''))
- self.dw.dut_config['cpu_usage'] = 'all'
- self.dw.SetupCpuUsage()
- # Check that SetupArmCores not called in general case.
- self.dw.SetupArmCores.assert_not_called()
+ @mock.patch.object(DutWrapper, "SetupArmCores")
+ def test_setup_cpu_usage_big_on_intel(self, mock_setup_arm):
+ self.dw.SetupArmCores = mock_setup_arm
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "x86_64", ""))
+ self.dw.dut_config["cpu_usage"] = "big_only"
+ self.dw.SetupCpuUsage()
+ # Check that SetupArmCores not called with invalid setup.
+ self.dw.SetupArmCores.assert_not_called()
- def test_setup_arm_cores_big_on_big_little(self):
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0, BIG_LITTLE_CPUINFO, ''),
- (0, '', ''),
- ])
- self.dw.dut_config['cpu_usage'] = 'big_only'
- self.dw.SetupArmCores()
- self.dw.RunCommandOnDut.assert_called_with(
- 'echo 1 | tee /sys/devices/system/cpu/cpu{2}/online; '
- 'echo 0 | tee /sys/devices/system/cpu/cpu{0,1}/online')
+ @mock.patch.object(DutWrapper, "SetupArmCores")
+ def test_setup_cpu_usage_all_on_intel(self, mock_setup_arm):
+ self.dw.SetupArmCores = mock_setup_arm
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "x86_64", ""))
+ self.dw.dut_config["cpu_usage"] = "all"
+ self.dw.SetupCpuUsage()
+ # Check that SetupArmCores not called in general case.
+ self.dw.SetupArmCores.assert_not_called()
- def test_setup_arm_cores_little_on_big_little(self):
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0, BIG_LITTLE_CPUINFO, ''),
- (0, '', ''),
- ])
- self.dw.dut_config['cpu_usage'] = 'little_only'
- self.dw.SetupArmCores()
- self.dw.RunCommandOnDut.assert_called_with(
- 'echo 1 | tee /sys/devices/system/cpu/cpu{0,1}/online; '
- 'echo 0 | tee /sys/devices/system/cpu/cpu{2}/online')
+ def test_setup_arm_cores_big_on_big_little(self):
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (0, BIG_LITTLE_CPUINFO, ""),
+ (0, "", ""),
+ ]
+ )
+ self.dw.dut_config["cpu_usage"] = "big_only"
+ self.dw.SetupArmCores()
+ self.dw.RunCommandOnDut.assert_called_with(
+ "echo 1 | tee /sys/devices/system/cpu/cpu{2}/online; "
+ "echo 0 | tee /sys/devices/system/cpu/cpu{0,1}/online"
+ )
- def test_setup_arm_cores_invalid_config(self):
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0, LITTLE_ONLY_CPUINFO, ''),
- (0, '', ''),
- ])
- self.dw.dut_config['cpu_usage'] = 'big_only'
- self.dw.SetupArmCores()
- # Check that setup command is not sent when trying
- # to use 'big_only' on a platform with all little cores.
- self.dw.RunCommandOnDut.assert_called_once_with('cat /proc/cpuinfo')
+ def test_setup_arm_cores_little_on_big_little(self):
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (0, BIG_LITTLE_CPUINFO, ""),
+ (0, "", ""),
+ ]
+ )
+ self.dw.dut_config["cpu_usage"] = "little_only"
+ self.dw.SetupArmCores()
+ self.dw.RunCommandOnDut.assert_called_with(
+ "echo 1 | tee /sys/devices/system/cpu/cpu{0,1}/online; "
+ "echo 0 | tee /sys/devices/system/cpu/cpu{2}/online"
+ )
- def test_setup_arm_cores_not_big_little(self):
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0, NOT_BIG_LITTLE_CPUINFO, ''),
- (0, '', ''),
- ])
- self.dw.dut_config['cpu_usage'] = 'big_only'
- self.dw.SetupArmCores()
- # Check that setup command is not sent when trying
- # to use 'big_only' on a platform w/o support of big/little.
- self.dw.RunCommandOnDut.assert_called_once_with('cat /proc/cpuinfo')
+ def test_setup_arm_cores_invalid_config(self):
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (0, LITTLE_ONLY_CPUINFO, ""),
+ (0, "", ""),
+ ]
+ )
+ self.dw.dut_config["cpu_usage"] = "big_only"
+ self.dw.SetupArmCores()
+ # Check that setup command is not sent when trying
+ # to use 'big_only' on a platform with all little cores.
+ self.dw.RunCommandOnDut.assert_called_once_with("cat /proc/cpuinfo")
- def test_setup_arm_cores_unsupported_cpu_usage(self):
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0, BIG_LITTLE_CPUINFO, ''),
- (0, '', ''),
- ])
- self.dw.dut_config['cpu_usage'] = 'exclusive_cores'
- self.dw.SetupArmCores()
- # Check that setup command is not sent when trying to use
- # 'exclusive_cores' on ARM CPU setup.
- self.dw.RunCommandOnDut.assert_called_once_with('cat /proc/cpuinfo')
+ def test_setup_arm_cores_not_big_little(self):
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (0, NOT_BIG_LITTLE_CPUINFO, ""),
+ (0, "", ""),
+ ]
+ )
+ self.dw.dut_config["cpu_usage"] = "big_only"
+ self.dw.SetupArmCores()
+ # Check that setup command is not sent when trying
+ # to use 'big_only' on a platform w/o support of big/little.
+ self.dw.RunCommandOnDut.assert_called_once_with("cat /proc/cpuinfo")
- def test_setup_cpu_freq_single_full(self):
- online = [0]
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0,
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n',
- ''),
- (0, '1 2 3 4 5 6 7 8 9 10', ''),
- (0, '', ''),
- ])
- self.dw.dut_config['cpu_freq_pct'] = 100
- self.dw.SetupCpuFreq(online)
- self.assertGreaterEqual(self.dw.RunCommandOnDut.call_count, 3)
- self.assertEqual(
- self.dw.RunCommandOnDut.call_args,
- mock.call('echo 10 | tee '
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq '
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq'))
+ def test_setup_arm_cores_unsupported_cpu_usage(self):
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (0, BIG_LITTLE_CPUINFO, ""),
+ (0, "", ""),
+ ]
+ )
+ self.dw.dut_config["cpu_usage"] = "exclusive_cores"
+ self.dw.SetupArmCores()
+ # Check that setup command is not sent when trying to use
+ # 'exclusive_cores' on ARM CPU setup.
+ self.dw.RunCommandOnDut.assert_called_once_with("cat /proc/cpuinfo")
- def test_setup_cpu_freq_middle(self):
- online = [0]
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0,
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n',
- ''),
- (0, '1 2 3 4 5 6 7 8 9 10', ''),
- (0, '', ''),
- ])
- self.dw.dut_config['cpu_freq_pct'] = 60
- self.dw.SetupCpuFreq(online)
- self.assertGreaterEqual(self.dw.RunCommandOnDut.call_count, 2)
- self.assertEqual(
- self.dw.RunCommandOnDut.call_args,
- mock.call('echo 6 | tee '
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq '
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq'))
+ def test_setup_cpu_freq_single_full(self):
+ online = [0]
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (
+ 0,
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n",
+ "",
+ ),
+ (0, "1 2 3 4 5 6 7 8 9 10", ""),
+ (0, "", ""),
+ ]
+ )
+ self.dw.dut_config["cpu_freq_pct"] = 100
+ self.dw.SetupCpuFreq(online)
+ self.assertGreaterEqual(self.dw.RunCommandOnDut.call_count, 3)
+ self.assertEqual(
+ self.dw.RunCommandOnDut.call_args,
+ mock.call(
+ "echo 10 | tee "
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq "
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq"
+ ),
+ )
- def test_setup_cpu_freq_lowest(self):
- online = [0]
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0,
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n',
- ''),
- (0, '1 2 3 4 5 6 7 8 9 10', ''),
- (0, '', ''),
- ])
- self.dw.dut_config['cpu_freq_pct'] = 0
- self.dw.SetupCpuFreq(online)
- self.assertGreaterEqual(self.dw.RunCommandOnDut.call_count, 2)
- self.assertEqual(
- self.dw.RunCommandOnDut.call_args,
- mock.call('echo 1 | tee '
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq '
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq'))
+ def test_setup_cpu_freq_middle(self):
+ online = [0]
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (
+ 0,
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n",
+ "",
+ ),
+ (0, "1 2 3 4 5 6 7 8 9 10", ""),
+ (0, "", ""),
+ ]
+ )
+ self.dw.dut_config["cpu_freq_pct"] = 60
+ self.dw.SetupCpuFreq(online)
+ self.assertGreaterEqual(self.dw.RunCommandOnDut.call_count, 2)
+ self.assertEqual(
+ self.dw.RunCommandOnDut.call_args,
+ mock.call(
+ "echo 6 | tee "
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq "
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq"
+ ),
+ )
- def test_setup_cpu_freq_multiple_middle(self):
- online = [0, 1]
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0,
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n'
- '/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_frequencies\n',
- ''),
- (0, '1 2 3 4 5 6 7 8 9 10', ''),
- (0, '', ''),
- (0, '1 4 6 8 10 12 14 16 18 20', ''),
- (0, '', ''),
- ])
- self.dw.dut_config['cpu_freq_pct'] = 70
- self.dw.SetupCpuFreq(online)
- self.assertEqual(self.dw.RunCommandOnDut.call_count, 5)
- self.assertEqual(
- self.dw.RunCommandOnDut.call_args_list[2],
- mock.call('echo 7 | tee '
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq '
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq'))
- self.assertEqual(
- self.dw.RunCommandOnDut.call_args_list[4],
- mock.call('echo 14 | tee '
- '/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq '
- '/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq'))
+ def test_setup_cpu_freq_lowest(self):
+ online = [0]
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (
+ 0,
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n",
+ "",
+ ),
+ (0, "1 2 3 4 5 6 7 8 9 10", ""),
+ (0, "", ""),
+ ]
+ )
+ self.dw.dut_config["cpu_freq_pct"] = 0
+ self.dw.SetupCpuFreq(online)
+ self.assertGreaterEqual(self.dw.RunCommandOnDut.call_count, 2)
+ self.assertEqual(
+ self.dw.RunCommandOnDut.call_args,
+ mock.call(
+ "echo 1 | tee "
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq "
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq"
+ ),
+ )
- def test_setup_cpu_freq_no_scaling_available(self):
- online = [0, 1]
- self.dw.RunCommandOnDut = mock.Mock(
- return_value=(2, '', 'No such file or directory'))
- self.dw.dut_config['cpu_freq_pct'] = 50
- self.dw.SetupCpuFreq(online)
- self.dw.RunCommandOnDut.assert_called_once()
- self.assertNotRegex(self.dw.RunCommandOnDut.call_args_list[0][0][0],
- '^echo.*scaling_max_freq$')
+ def test_setup_cpu_freq_multiple_middle(self):
+ online = [0, 1]
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (
+ 0,
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n"
+ "/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_frequencies\n",
+ "",
+ ),
+ (0, "1 2 3 4 5 6 7 8 9 10", ""),
+ (0, "", ""),
+ (0, "1 4 6 8 10 12 14 16 18 20", ""),
+ (0, "", ""),
+ ]
+ )
+ self.dw.dut_config["cpu_freq_pct"] = 70
+ self.dw.SetupCpuFreq(online)
+ self.assertEqual(self.dw.RunCommandOnDut.call_count, 5)
+ self.assertEqual(
+ self.dw.RunCommandOnDut.call_args_list[2],
+ mock.call(
+ "echo 7 | tee "
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq "
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq"
+ ),
+ )
+ self.assertEqual(
+ self.dw.RunCommandOnDut.call_args_list[4],
+ mock.call(
+ "echo 14 | tee "
+ "/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq "
+ "/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq"
+ ),
+ )
- def test_setup_cpu_freq_multiple_no_access(self):
- online = [0, 1]
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0,
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n'
- '/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_frequencies\n',
- ''),
- (0, '1 4 6 8 10 12 14 16 18 20', ''),
- AssertionError(),
- ])
- self.dw.dut_config['cpu_freq_pct'] = 30
- # Error status causes log fatal.
- with self.assertRaises(AssertionError):
- self.dw.SetupCpuFreq(online)
+ def test_setup_cpu_freq_no_scaling_available(self):
+ online = [0, 1]
+ self.dw.RunCommandOnDut = mock.Mock(
+ return_value=(2, "", "No such file or directory")
+ )
+ self.dw.dut_config["cpu_freq_pct"] = 50
+ self.dw.SetupCpuFreq(online)
+ self.dw.RunCommandOnDut.assert_called_once()
+ self.assertNotRegex(
+ self.dw.RunCommandOnDut.call_args_list[0][0][0],
+ "^echo.*scaling_max_freq$",
+ )
- @mock.patch.object(time, 'sleep')
- def test_wait_cooldown_nowait(self, mock_sleep):
- mock_sleep.return_value = 0
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '39000', ''))
- self.dw.dut_config['cooldown_time'] = 10
- self.dw.dut_config['cooldown_temp'] = 40
- wait_time = self.dw.WaitCooldown()
- # Send command to DUT only once to check temperature
- # and make sure it does not exceed the threshold.
- self.dw.RunCommandOnDut.assert_called_once()
- mock_sleep.assert_not_called()
- self.assertEqual(wait_time, 0)
+ def test_setup_cpu_freq_multiple_no_access(self):
+ online = [0, 1]
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (
+ 0,
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n"
+ "/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_frequencies\n",
+ "",
+ ),
+ (0, "1 4 6 8 10 12 14 16 18 20", ""),
+ AssertionError(),
+ ]
+ )
+ self.dw.dut_config["cpu_freq_pct"] = 30
+ # Error status causes log fatal.
+ with self.assertRaises(AssertionError):
+ self.dw.SetupCpuFreq(online)
- @mock.patch.object(time, 'sleep')
- def test_wait_cooldown_needwait_once(self, mock_sleep):
- """Wait one iteration for cooldown.
+ @mock.patch.object(time, "sleep")
+ def test_wait_cooldown_nowait(self, mock_sleep):
+ mock_sleep.return_value = 0
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "39000", ""))
+ self.dw.dut_config["cooldown_time"] = 10
+ self.dw.dut_config["cooldown_temp"] = 40
+ wait_time = self.dw.WaitCooldown()
+ # Send command to DUT only once to check temperature
+ # and make sure it does not exceed the threshold.
+ self.dw.RunCommandOnDut.assert_called_once()
+ mock_sleep.assert_not_called()
+ self.assertEqual(wait_time, 0)
- Set large enough timeout and changing temperature
- output. Make sure it exits when expected value
- received.
- Expect that WaitCooldown check temp twice.
- """
- mock_sleep.return_value = 0
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[(0, '41000',
- ''), (0, '39999', '')])
- self.dw.dut_config['cooldown_time'] = 100
- self.dw.dut_config['cooldown_temp'] = 40
- wait_time = self.dw.WaitCooldown()
- self.dw.RunCommandOnDut.assert_called()
- self.assertEqual(self.dw.RunCommandOnDut.call_count, 2)
- mock_sleep.assert_called()
- self.assertGreater(wait_time, 0)
+ @mock.patch.object(time, "sleep")
+ def test_wait_cooldown_needwait_once(self, mock_sleep):
+ """Wait one iteration for cooldown.
- @mock.patch.object(time, 'sleep')
- def test_wait_cooldown_needwait(self, mock_sleep):
- """Test exit by timeout.
+ Set large enough timeout and changing temperature
+ output. Make sure it exits when expected value
+ received.
+ Expect that WaitCooldown check temp twice.
+ """
+ mock_sleep.return_value = 0
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[(0, "41000", ""), (0, "39999", "")]
+ )
+ self.dw.dut_config["cooldown_time"] = 100
+ self.dw.dut_config["cooldown_temp"] = 40
+ wait_time = self.dw.WaitCooldown()
+ self.dw.RunCommandOnDut.assert_called()
+ self.assertEqual(self.dw.RunCommandOnDut.call_count, 2)
+ mock_sleep.assert_called()
+ self.assertGreater(wait_time, 0)
- Send command to DUT checking the temperature and
- check repeatedly until timeout goes off.
- Output from temperature sensor never changes.
- """
- mock_sleep.return_value = 0
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '41000', ''))
- self.dw.dut_config['cooldown_time'] = 60
- self.dw.dut_config['cooldown_temp'] = 40
- wait_time = self.dw.WaitCooldown()
- self.dw.RunCommandOnDut.assert_called()
- self.assertGreater(self.dw.RunCommandOnDut.call_count, 2)
- mock_sleep.assert_called()
- self.assertGreater(wait_time, 0)
+ @mock.patch.object(time, "sleep")
+ def test_wait_cooldown_needwait(self, mock_sleep):
+ """Test exit by timeout.
- @mock.patch.object(time, 'sleep')
- def test_wait_cooldown_needwait_multtemp(self, mock_sleep):
- """Wait until all temps go down.
+ Send command to DUT checking the temperature and
+ check repeatedly until timeout goes off.
+ Output from temperature sensor never changes.
+ """
+ mock_sleep.return_value = 0
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "41000", ""))
+ self.dw.dut_config["cooldown_time"] = 60
+ self.dw.dut_config["cooldown_temp"] = 40
+ wait_time = self.dw.WaitCooldown()
+ self.dw.RunCommandOnDut.assert_called()
+ self.assertGreater(self.dw.RunCommandOnDut.call_count, 2)
+ mock_sleep.assert_called()
+ self.assertGreater(wait_time, 0)
- Set large enough timeout and changing temperature
- output. Make sure it exits when expected value
- for all temperatures received.
- Expect 3 checks.
- """
- mock_sleep.return_value = 0
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0, '41000\n20000\n30000\n45000', ''),
- (0, '39000\n20000\n30000\n41000', ''),
- (0, '39000\n20000\n30000\n31000', ''),
- ])
- self.dw.dut_config['cooldown_time'] = 100
- self.dw.dut_config['cooldown_temp'] = 40
- wait_time = self.dw.WaitCooldown()
- self.dw.RunCommandOnDut.assert_called()
- self.assertEqual(self.dw.RunCommandOnDut.call_count, 3)
- mock_sleep.assert_called()
- self.assertGreater(wait_time, 0)
+ @mock.patch.object(time, "sleep")
+ def test_wait_cooldown_needwait_multtemp(self, mock_sleep):
+ """Wait until all temps go down.
- @mock.patch.object(time, 'sleep')
- def test_wait_cooldown_thermal_error(self, mock_sleep):
- """Handle error status.
+ Set large enough timeout and changing temperature
+ output. Make sure it exits when expected value
+ for all temperatures received.
+ Expect 3 checks.
+ """
+ mock_sleep.return_value = 0
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (0, "41000\n20000\n30000\n45000", ""),
+ (0, "39000\n20000\n30000\n41000", ""),
+ (0, "39000\n20000\n30000\n31000", ""),
+ ]
+ )
+ self.dw.dut_config["cooldown_time"] = 100
+ self.dw.dut_config["cooldown_temp"] = 40
+ wait_time = self.dw.WaitCooldown()
+ self.dw.RunCommandOnDut.assert_called()
+ self.assertEqual(self.dw.RunCommandOnDut.call_count, 3)
+ mock_sleep.assert_called()
+ self.assertGreater(wait_time, 0)
- Any error should be considered non-fatal.
- """
- mock_sleep.return_value = 0
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (1, '39000\n20000\n30000\n41000', 'Thermal error'),
- (1, '39000\n20000\n30000\n31000', 'Thermal error'),
- ])
- self.dw.dut_config['cooldown_time'] = 10
- self.dw.dut_config['cooldown_temp'] = 40
- wait_time = self.dw.WaitCooldown()
- # Check that errors are ignored.
- self.dw.RunCommandOnDut.assert_called_with(
- 'cat /sys/class/thermal/thermal_zone*/temp', ignore_status=True)
- self.assertEqual(self.dw.RunCommandOnDut.call_count, 2)
- # Check that we are waiting even when an error is returned
- # as soon as data is coming.
- mock_sleep.assert_called()
- self.assertGreater(wait_time, 0)
+ @mock.patch.object(time, "sleep")
+ def test_wait_cooldown_thermal_error(self, mock_sleep):
+ """Handle error status.
- @mock.patch.object(time, 'sleep')
- def test_wait_cooldown_thermal_no_output(self, mock_sleep):
- """Handle no output.
+ Any error should be considered non-fatal.
+ """
+ mock_sleep.return_value = 0
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (1, "39000\n20000\n30000\n41000", "Thermal error"),
+ (1, "39000\n20000\n30000\n31000", "Thermal error"),
+ ]
+ )
+ self.dw.dut_config["cooldown_time"] = 10
+ self.dw.dut_config["cooldown_temp"] = 40
+ wait_time = self.dw.WaitCooldown()
+ # Check that errors are ignored.
+ self.dw.RunCommandOnDut.assert_called_with(
+ "cat /sys/class/thermal/thermal_zone*/temp", ignore_status=True
+ )
+ self.assertEqual(self.dw.RunCommandOnDut.call_count, 2)
+ # Check that we are waiting even when an error is returned
+ # as soon as data is coming.
+ mock_sleep.assert_called()
+ self.assertGreater(wait_time, 0)
- Check handling of empty stdout.
- """
- mock_sleep.return_value = 0
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[(1, '', 'Thermal error')])
- self.dw.dut_config['cooldown_time'] = 10
- self.dw.dut_config['cooldown_temp'] = 40
- wait_time = self.dw.WaitCooldown()
- # Check that errors are ignored.
- self.dw.RunCommandOnDut.assert_called_once_with(
- 'cat /sys/class/thermal/thermal_zone*/temp', ignore_status=True)
- # No wait.
- mock_sleep.assert_not_called()
- self.assertEqual(wait_time, 0)
+ @mock.patch.object(time, "sleep")
+ def test_wait_cooldown_thermal_no_output(self, mock_sleep):
+ """Handle no output.
- @mock.patch.object(time, 'sleep')
- def test_wait_cooldown_thermal_ws_output(self, mock_sleep):
- """Handle whitespace output.
+ Check handling of empty stdout.
+ """
+ mock_sleep.return_value = 0
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[(1, "", "Thermal error")]
+ )
+ self.dw.dut_config["cooldown_time"] = 10
+ self.dw.dut_config["cooldown_temp"] = 40
+ wait_time = self.dw.WaitCooldown()
+ # Check that errors are ignored.
+ self.dw.RunCommandOnDut.assert_called_once_with(
+ "cat /sys/class/thermal/thermal_zone*/temp", ignore_status=True
+ )
+ # No wait.
+ mock_sleep.assert_not_called()
+ self.assertEqual(wait_time, 0)
- Check handling of whitespace only.
- """
- mock_sleep.return_value = 0
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[(1, '\n',
- 'Thermal error')])
- self.dw.dut_config['cooldown_time'] = 10
- self.dw.dut_config['cooldown_temp'] = 40
- wait_time = self.dw.WaitCooldown()
- # Check that errors are ignored.
- self.dw.RunCommandOnDut.assert_called_once_with(
- 'cat /sys/class/thermal/thermal_zone*/temp', ignore_status=True)
- # No wait.
- mock_sleep.assert_not_called()
- self.assertEqual(wait_time, 0)
+ @mock.patch.object(time, "sleep")
+ def test_wait_cooldown_thermal_ws_output(self, mock_sleep):
+ """Handle whitespace output.
- def test_stop_ui(self):
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '', ''))
- self.dw.StopUI()
- self.dw.RunCommandOnDut.assert_called_once_with(
- 'stop ui', ignore_status=True)
+ Check handling of whitespace only.
+ """
+ mock_sleep.return_value = 0
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[(1, "\n", "Thermal error")]
+ )
+ self.dw.dut_config["cooldown_time"] = 10
+ self.dw.dut_config["cooldown_temp"] = 40
+ wait_time = self.dw.WaitCooldown()
+ # Check that errors are ignored.
+ self.dw.RunCommandOnDut.assert_called_once_with(
+ "cat /sys/class/thermal/thermal_zone*/temp", ignore_status=True
+ )
+ # No wait.
+ mock_sleep.assert_not_called()
+ self.assertEqual(wait_time, 0)
- def test_start_ui(self):
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '', ''))
- self.dw.StartUI()
- self.dw.RunCommandOnDut.assert_called_once_with(
- 'start ui', ignore_status=True)
+ def test_stop_ui(self):
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "", ""))
+ self.dw.StopUI()
+ self.dw.RunCommandOnDut.assert_called_once_with(
+ "stop ui", ignore_status=True
+ )
- def test_setup_device(self):
+ def test_start_ui(self):
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "", ""))
+ self.dw.StartUI()
+ self.dw.RunCommandOnDut.assert_called_once_with(
+ "start ui", ignore_status=True
+ )
- def FakeRunner(command, ignore_status=False):
- # pylint fix for unused variable.
- del command, ignore_status
- return 0, '', ''
+ def test_setup_device(self):
+ def FakeRunner(command, ignore_status=False):
+ # pylint fix for unused variable.
+ del command, ignore_status
+ return 0, "", ""
- def SetupMockFunctions():
- self.dw.RunCommandOnDut = mock.Mock(return_value=FakeRunner)
- self.dw.KerncmdUpdateNeeded = mock.Mock(return_value=True)
- self.dw.UpdateKerncmdIntelPstate = mock.Mock(return_value=0)
- self.dw.DisableASLR = mock.Mock(return_value=0)
- self.dw.SetupCpuUsage = mock.Mock(return_value=0)
- self.dw.SetupCpuFreq = mock.Mock(return_value=0)
- self.dw.GetCpuOnline = mock.Mock(return_value={0: 1, 1: 1, 2: 0})
- self.dw.SetCpuGovernor = mock.Mock(return_value=0)
- self.dw.DisableTurbo = mock.Mock(return_value=0)
- self.dw.StopUI = mock.Mock(return_value=0)
- self.dw.StartUI = mock.Mock(return_value=0)
- self.dw.WaitCooldown = mock.Mock(return_value=0)
- self.dw.DecreaseWaitTime = mock.Mock(return_value=0)
+ def SetupMockFunctions():
+ self.dw.RunCommandOnDut = mock.Mock(return_value=FakeRunner)
+ self.dw.KerncmdUpdateNeeded = mock.Mock(return_value=True)
+ self.dw.UpdateKerncmdIntelPstate = mock.Mock(return_value=0)
+ self.dw.DisableASLR = mock.Mock(return_value=0)
+ self.dw.SetupCpuUsage = mock.Mock(return_value=0)
+ self.dw.SetupCpuFreq = mock.Mock(return_value=0)
+ self.dw.GetCpuOnline = mock.Mock(return_value={0: 1, 1: 1, 2: 0})
+ self.dw.SetCpuGovernor = mock.Mock(return_value=0)
+ self.dw.DisableTurbo = mock.Mock(return_value=0)
+ self.dw.StopUI = mock.Mock(return_value=0)
+ self.dw.StartUI = mock.Mock(return_value=0)
+ self.dw.WaitCooldown = mock.Mock(return_value=0)
+ self.dw.DecreaseWaitTime = mock.Mock(return_value=0)
- self.dw.dut_config['enable_aslr'] = False
- self.dw.dut_config['cooldown_time'] = 0
- self.dw.dut_config['governor'] = 'fake_governor'
- self.dw.dut_config['cpu_freq_pct'] = 65
- self.dw.dut_config['intel_pstate'] = 'no_hwp'
+ self.dw.dut_config["enable_aslr"] = False
+ self.dw.dut_config["cooldown_time"] = 0
+ self.dw.dut_config["governor"] = "fake_governor"
+ self.dw.dut_config["cpu_freq_pct"] = 65
+ self.dw.dut_config["intel_pstate"] = "no_hwp"
- SetupMockFunctions()
- self.dw.SetupDevice()
+ SetupMockFunctions()
+ self.dw.SetupDevice()
- self.dw.KerncmdUpdateNeeded.assert_called_once()
- self.dw.UpdateKerncmdIntelPstate.assert_called_once()
- self.dw.DisableASLR.assert_called_once()
- self.dw.SetupCpuUsage.assert_called_once_with()
- self.dw.SetupCpuFreq.assert_called_once_with([0, 1])
- self.dw.GetCpuOnline.assert_called_once_with()
- self.dw.SetCpuGovernor.assert_called_once_with('fake_governor')
- self.dw.DisableTurbo.assert_called_once_with()
- self.dw.DecreaseWaitTime.assert_called_once_with()
- self.dw.StopUI.assert_called_once_with()
- self.dw.StartUI.assert_called_once_with()
- self.dw.WaitCooldown.assert_not_called()
+ self.dw.KerncmdUpdateNeeded.assert_called_once()
+ self.dw.UpdateKerncmdIntelPstate.assert_called_once()
+ self.dw.DisableASLR.assert_called_once()
+ self.dw.SetupCpuUsage.assert_called_once_with()
+ self.dw.SetupCpuFreq.assert_called_once_with([0, 1])
+ self.dw.GetCpuOnline.assert_called_once_with()
+ self.dw.SetCpuGovernor.assert_called_once_with("fake_governor")
+ self.dw.DisableTurbo.assert_called_once_with()
+ self.dw.DecreaseWaitTime.assert_called_once_with()
+ self.dw.StopUI.assert_called_once_with()
+ self.dw.StartUI.assert_called_once_with()
+ self.dw.WaitCooldown.assert_not_called()
- # Test SetupDevice with cooldown
- self.dw.dut_config['cooldown_time'] = 10
+ # Test SetupDevice with cooldown
+ self.dw.dut_config["cooldown_time"] = 10
- SetupMockFunctions()
- self.dw.GetCpuOnline = mock.Mock(return_value={0: 0, 1: 1})
+ SetupMockFunctions()
+ self.dw.GetCpuOnline = mock.Mock(return_value={0: 0, 1: 1})
- self.dw.SetupDevice()
+ self.dw.SetupDevice()
- self.dw.WaitCooldown.assert_called_once_with()
- self.dw.DisableASLR.assert_called_once()
- self.dw.DisableTurbo.assert_called_once_with()
- self.dw.SetupCpuUsage.assert_called_once_with()
- self.dw.SetupCpuFreq.assert_called_once_with([1])
- self.dw.SetCpuGovernor.assert_called()
- self.dw.GetCpuOnline.assert_called_once_with()
- self.dw.StopUI.assert_called_once_with()
- self.dw.StartUI.assert_called_once_with()
- self.assertGreater(self.dw.SetCpuGovernor.call_count, 1)
- self.assertEqual(self.dw.SetCpuGovernor.call_args,
- mock.call('fake_governor'))
+ self.dw.WaitCooldown.assert_called_once_with()
+ self.dw.DisableASLR.assert_called_once()
+ self.dw.DisableTurbo.assert_called_once_with()
+ self.dw.SetupCpuUsage.assert_called_once_with()
+ self.dw.SetupCpuFreq.assert_called_once_with([1])
+ self.dw.SetCpuGovernor.assert_called()
+ self.dw.GetCpuOnline.assert_called_once_with()
+ self.dw.StopUI.assert_called_once_with()
+ self.dw.StartUI.assert_called_once_with()
+ self.assertGreater(self.dw.SetCpuGovernor.call_count, 1)
+ self.assertEqual(
+ self.dw.SetCpuGovernor.call_args, mock.call("fake_governor")
+ )
- # Test SetupDevice with cooldown
- SetupMockFunctions()
- self.dw.SetupCpuUsage = mock.Mock(side_effect=RuntimeError())
+ # Test SetupDevice with cooldown
+ SetupMockFunctions()
+ self.dw.SetupCpuUsage = mock.Mock(side_effect=RuntimeError())
- with self.assertRaises(RuntimeError):
- self.dw.SetupDevice()
+ with self.assertRaises(RuntimeError):
+ self.dw.SetupDevice()
- # This call injected an exception.
- self.dw.SetupCpuUsage.assert_called_once_with()
- # Calls following the expeption are skipped.
- self.dw.WaitCooldown.assert_not_called()
- self.dw.DisableTurbo.assert_not_called()
- self.dw.SetupCpuFreq.assert_not_called()
- self.dw.SetCpuGovernor.assert_not_called()
- self.dw.GetCpuOnline.assert_not_called()
- # Check that Stop/Start UI are always called.
- self.dw.StopUI.assert_called_once_with()
- self.dw.StartUI.assert_called_once_with()
+ # This call injected an exception.
+ self.dw.SetupCpuUsage.assert_called_once_with()
+ # Calls following the expeption are skipped.
+ self.dw.WaitCooldown.assert_not_called()
+ self.dw.DisableTurbo.assert_not_called()
+ self.dw.SetupCpuFreq.assert_not_called()
+ self.dw.SetCpuGovernor.assert_not_called()
+ self.dw.GetCpuOnline.assert_not_called()
+ # Check that Stop/Start UI are always called.
+ self.dw.StopUI.assert_called_once_with()
+ self.dw.StartUI.assert_called_once_with()
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/cros_utils/email_sender.py b/cros_utils/email_sender.py
index df8afbc..ccf4c1b 100755
--- a/cros_utils/email_sender.py
+++ b/cros_utils/email_sender.py
@@ -1,259 +1,314 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities to send email either through SMTP or SendGMR."""
-from __future__ import print_function
import base64
import contextlib
import datetime
+from email import encoders as Encoders
+from email.mime.base import MIMEBase
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
import getpass
import json
import os
import smtplib
import tempfile
-from email import encoders as Encoders
-from email.mime.base import MIMEBase
-from email.mime.multipart import MIMEMultipart
-from email.mime.text import MIMEText
from cros_utils import command_executer
-X20_PATH = '/google/data/rw/teams/c-compiler-chrome/prod_emails'
+
+X20_PATH = "/google/data/rw/teams/c-compiler-chrome/prod_emails"
@contextlib.contextmanager
def AtomicallyWriteFile(file_path):
- temp_path = file_path + '.in_progress'
- try:
- with open(temp_path, 'w') as f:
- yield f
- os.rename(temp_path, file_path)
- except:
- os.remove(temp_path)
- raise
+ temp_path = file_path + ".in_progress"
+ try:
+ with open(temp_path, "w") as f:
+ yield f
+ os.rename(temp_path, file_path)
+ except:
+ os.remove(temp_path)
+ raise
class EmailSender(object):
- """Utility class to send email through SMTP or SendGMR."""
+ """Utility class to send email through SMTP or SendGMR."""
- class Attachment(object):
- """Small class to keep track of attachment info."""
+ class Attachment(object):
+ """Small class to keep track of attachment info."""
- def __init__(self, name, content):
- self.name = name
- self.content = content
+ def __init__(self, name, content):
+ self.name = name
+ self.content = content
- def SendX20Email(self,
- subject,
- identifier,
- well_known_recipients=(),
- direct_recipients=(),
- text_body=None,
- html_body=None):
- """Enqueues an email in our x20 outbox.
+ def SendX20Email(
+ self,
+ subject,
+ identifier,
+ well_known_recipients=(),
+ direct_recipients=(),
+ text_body=None,
+ html_body=None,
+ ):
+ """Enqueues an email in our x20 outbox.
- These emails ultimately get sent by the machinery in
- //depot/google3/googleclient/chrome/chromeos_toolchain/mailer/mail.go. This
- kind of sending is intended for accounts that don't have smtp or gmr access
- (e.g., role accounts), but can be used by anyone with x20 access.
+ These emails ultimately get sent by the machinery in
+ //depot/google3/googleclient/chrome/chromeos_toolchain/mailer/mail.go. This
+ kind of sending is intended for accounts that don't have smtp or gmr access
+ (e.g., role accounts), but can be used by anyone with x20 access.
- All emails are sent from `mdb.c-compiler-chrome+${identifier}@google.com`.
+ All emails are sent from `mdb.c-compiler-chrome+${identifier}@google.com`.
- Args:
- subject: email subject. Must be nonempty.
- identifier: email identifier, or the text that lands after the `+` in the
- "From" email address. Must be nonempty.
- well_known_recipients: a list of well-known recipients for the email.
- These are translated into addresses by our mailer.
- Current potential values for this are ('detective',
- 'cwp-team', 'cros-team', 'mage'). Either this or
- direct_recipients must be a nonempty list.
- direct_recipients: @google.com emails to send addresses to. Either this
- or well_known_recipients must be a nonempty list.
- text_body: a 'text/plain' email body to send. Either this or html_body
- must be a nonempty string. Both may be specified
- html_body: a 'text/html' email body to send. Either this or text_body
- must be a nonempty string. Both may be specified
- """
- # `str`s act a lot like tuples/lists. Ensure that we're not accidentally
- # iterating over one of those (or anything else that's sketchy, for that
- # matter).
- if not isinstance(well_known_recipients, (tuple, list)):
- raise ValueError('`well_known_recipients` is unexpectedly a %s' %
- type(well_known_recipients))
+ Args:
+ subject: email subject. Must be nonempty.
+ identifier: email identifier, or the text that lands after the `+` in the
+ "From" email address. Must be nonempty.
+ well_known_recipients: a list of well-known recipients for the email.
+ These are translated into addresses by our mailer.
+ Current potential values for this are ('detective',
+ 'cwp-team', 'cros-team', 'mage'). Either this or
+ direct_recipients must be a nonempty list.
+ direct_recipients: @google.com emails to send addresses to. Either this
+ or well_known_recipients must be a nonempty list.
+ text_body: a 'text/plain' email body to send. Either this or html_body
+ must be a nonempty string. Both may be specified
+ html_body: a 'text/html' email body to send. Either this or text_body
+ must be a nonempty string. Both may be specified
+ """
+ # `str`s act a lot like tuples/lists. Ensure that we're not accidentally
+ # iterating over one of those (or anything else that's sketchy, for that
+ # matter).
+ if not isinstance(well_known_recipients, (tuple, list)):
+ raise ValueError(
+ "`well_known_recipients` is unexpectedly a %s"
+ % type(well_known_recipients)
+ )
- if not isinstance(direct_recipients, (tuple, list)):
- raise ValueError('`direct_recipients` is unexpectedly a %s' %
- type(direct_recipients))
+ if not isinstance(direct_recipients, (tuple, list)):
+ raise ValueError(
+ "`direct_recipients` is unexpectedly a %s"
+ % type(direct_recipients)
+ )
- if not subject or not identifier:
- raise ValueError('both `subject` and `identifier` must be nonempty')
+ if not subject or not identifier:
+ raise ValueError("both `subject` and `identifier` must be nonempty")
- if not (well_known_recipients or direct_recipients):
- raise ValueError('either `well_known_recipients` or `direct_recipients` '
- 'must be specified')
+ if not (well_known_recipients or direct_recipients):
+ raise ValueError(
+ "either `well_known_recipients` or `direct_recipients` "
+ "must be specified"
+ )
- for recipient in direct_recipients:
- if not recipient.endswith('@google.com'):
- raise ValueError('All recipients must end with @google.com')
+ for recipient in direct_recipients:
+ if not recipient.endswith("@google.com"):
+ raise ValueError("All recipients must end with @google.com")
- if not (text_body or html_body):
- raise ValueError('either `text_body` or `html_body` must be specified')
+ if not (text_body or html_body):
+ raise ValueError(
+ "either `text_body` or `html_body` must be specified"
+ )
- email_json = {
- 'email_identifier': identifier,
- 'subject': subject,
- }
+ email_json = {
+ "email_identifier": identifier,
+ "subject": subject,
+ }
- if well_known_recipients:
- email_json['well_known_recipients'] = well_known_recipients
+ if well_known_recipients:
+ email_json["well_known_recipients"] = well_known_recipients
- if direct_recipients:
- email_json['direct_recipients'] = direct_recipients
+ if direct_recipients:
+ email_json["direct_recipients"] = direct_recipients
- if text_body:
- email_json['body'] = text_body
+ if text_body:
+ email_json["body"] = text_body
- if html_body:
- email_json['html_body'] = html_body
+ if html_body:
+ email_json["html_body"] = html_body
- # The name of this has two parts:
- # - An easily sortable time, to provide uniqueness and let our emailer
- # send things in the order they were put into the outbox.
- # - 64 bits of entropy, so two racing email sends don't clobber the same
- # file.
- now = datetime.datetime.utcnow().isoformat('T', 'seconds') + 'Z'
- entropy = base64.urlsafe_b64encode(os.getrandom(8))
- entropy_str = entropy.rstrip(b'=').decode('utf-8')
- result_path = os.path.join(X20_PATH, now + '_' + entropy_str + '.json')
+ # The name of this has two parts:
+ # - An easily sortable time, to provide uniqueness and let our emailer
+ # send things in the order they were put into the outbox.
+ # - 64 bits of entropy, so two racing email sends don't clobber the same
+ # file.
+ now = datetime.datetime.utcnow().isoformat("T", "seconds") + "Z"
+ entropy = base64.urlsafe_b64encode(os.getrandom(8))
+ entropy_str = entropy.rstrip(b"=").decode("utf-8")
+ result_path = os.path.join(X20_PATH, now + "_" + entropy_str + ".json")
- with AtomicallyWriteFile(result_path) as f:
- json.dump(email_json, f)
+ with AtomicallyWriteFile(result_path) as f:
+ json.dump(email_json, f)
- def SendEmail(self,
+ def SendEmail(
+ self,
+ email_to,
+ subject,
+ text_to_send,
+ email_cc=None,
+ email_bcc=None,
+ email_from=None,
+ msg_type="plain",
+ attachments=None,
+ ):
+ """Choose appropriate email method and call it."""
+ if os.path.exists("/usr/bin/sendgmr"):
+ self.SendGMREmail(
email_to,
subject,
text_to_send,
- email_cc=None,
- email_bcc=None,
- email_from=None,
- msg_type='plain',
- attachments=None):
- """Choose appropriate email method and call it."""
- if os.path.exists('/usr/bin/sendgmr'):
- self.SendGMREmail(email_to, subject, text_to_send, email_cc, email_bcc,
- email_from, msg_type, attachments)
- else:
- self.SendSMTPEmail(email_to, subject, text_to_send, email_cc, email_bcc,
- email_from, msg_type, attachments)
+ email_cc,
+ email_bcc,
+ email_from,
+ msg_type,
+ attachments,
+ )
+ else:
+ self.SendSMTPEmail(
+ email_to,
+ subject,
+ text_to_send,
+ email_cc,
+ email_bcc,
+ email_from,
+ msg_type,
+ attachments,
+ )
- def SendSMTPEmail(self, email_to, subject, text_to_send, email_cc, email_bcc,
- email_from, msg_type, attachments):
- """Send email via standard smtp mail."""
- # Email summary to the current user.
- msg = MIMEMultipart()
+ def SendSMTPEmail(
+ self,
+ email_to,
+ subject,
+ text_to_send,
+ email_cc,
+ email_bcc,
+ email_from,
+ msg_type,
+ attachments,
+ ):
+ """Send email via standard smtp mail."""
+ # Email summary to the current user.
+ msg = MIMEMultipart()
- if not email_from:
- email_from = os.path.basename(__file__)
+ if not email_from:
+ email_from = os.path.basename(__file__)
- msg['To'] = ','.join(email_to)
- msg['Subject'] = subject
+ msg["To"] = ",".join(email_to)
+ msg["Subject"] = subject
- if email_from:
- msg['From'] = email_from
- if email_cc:
- msg['CC'] = ','.join(email_cc)
- email_to += email_cc
- if email_bcc:
- msg['BCC'] = ','.join(email_bcc)
- email_to += email_bcc
+ if email_from:
+ msg["From"] = email_from
+ if email_cc:
+ msg["CC"] = ",".join(email_cc)
+ email_to += email_cc
+ if email_bcc:
+ msg["BCC"] = ",".join(email_bcc)
+ email_to += email_bcc
- msg.attach(MIMEText(text_to_send, msg_type))
- if attachments:
- for attachment in attachments:
- part = MIMEBase('application', 'octet-stream')
- part.set_payload(attachment.content)
- Encoders.encode_base64(part)
- part.add_header('Content-Disposition',
- 'attachment; filename="%s"' % attachment.name)
- msg.attach(part)
+ msg.attach(MIMEText(text_to_send, msg_type))
+ if attachments:
+ for attachment in attachments:
+ part = MIMEBase("application", "octet-stream")
+ part.set_payload(attachment.content)
+ Encoders.encode_base64(part)
+ part.add_header(
+ "Content-Disposition",
+ 'attachment; filename="%s"' % attachment.name,
+ )
+ msg.attach(part)
- # Send the message via our own SMTP server, but don't include the
- # envelope header.
- s = smtplib.SMTP('localhost')
- s.sendmail(email_from, email_to, msg.as_string())
- s.quit()
+ # Send the message via our own SMTP server, but don't include the
+ # envelope header.
+ s = smtplib.SMTP("localhost")
+ s.sendmail(email_from, email_to, msg.as_string())
+ s.quit()
- def SendGMREmail(self, email_to, subject, text_to_send, email_cc, email_bcc,
- email_from, msg_type, attachments):
- """Send email via sendgmr program."""
- ce = command_executer.GetCommandExecuter(log_level='none')
+ def SendGMREmail(
+ self,
+ email_to,
+ subject,
+ text_to_send,
+ email_cc,
+ email_bcc,
+ email_from,
+ msg_type,
+ attachments,
+ ):
+ """Send email via sendgmr program."""
+ ce = command_executer.GetCommandExecuter(log_level="none")
- if not email_from:
- email_from = getpass.getuser() + '@google.com'
+ if not email_from:
+ email_from = getpass.getuser() + "@google.com"
- to_list = ','.join(email_to)
+ to_list = ",".join(email_to)
- if not text_to_send:
- text_to_send = 'Empty message body.'
+ if not text_to_send:
+ text_to_send = "Empty message body."
- to_be_deleted = []
- try:
- with tempfile.NamedTemporaryFile('w', encoding='utf-8',
- delete=False) as f:
- f.write(text_to_send)
- f.flush()
- to_be_deleted.append(f.name)
+ to_be_deleted = []
+ try:
+ with tempfile.NamedTemporaryFile(
+ "w", encoding="utf-8", delete=False
+ ) as f:
+ f.write(text_to_send)
+ f.flush()
+ to_be_deleted.append(f.name)
- # Fix single-quotes inside the subject. In bash, to escape a single quote
- # (e.g 'don't') you need to replace it with '\'' (e.g. 'don'\''t'). To
- # make Python read the backslash as a backslash rather than an escape
- # character, you need to double it. So...
- subject = subject.replace("'", "'\\''")
+ # Fix single-quotes inside the subject. In bash, to escape a single quote
+ # (e.g 'don't') you need to replace it with '\'' (e.g. 'don'\''t'). To
+ # make Python read the backslash as a backslash rather than an escape
+ # character, you need to double it. So...
+ subject = subject.replace("'", "'\\''")
- if msg_type == 'html':
- command = ("sendgmr --to='%s' --from='%s' --subject='%s' "
- "--html_file='%s' --body_file=/dev/null" %
- (to_list, email_from, subject, f.name))
- else:
- command = ("sendgmr --to='%s' --from='%s' --subject='%s' "
- "--body_file='%s'" % (to_list, email_from, subject, f.name))
+ if msg_type == "html":
+ command = (
+ "sendgmr --to='%s' --from='%s' --subject='%s' "
+ "--html_file='%s' --body_file=/dev/null"
+ % (to_list, email_from, subject, f.name)
+ )
+ else:
+ command = (
+ "sendgmr --to='%s' --from='%s' --subject='%s' "
+ "--body_file='%s'" % (to_list, email_from, subject, f.name)
+ )
- if email_cc:
- cc_list = ','.join(email_cc)
- command += " --cc='%s'" % cc_list
- if email_bcc:
- bcc_list = ','.join(email_bcc)
- command += " --bcc='%s'" % bcc_list
+ if email_cc:
+ cc_list = ",".join(email_cc)
+ command += " --cc='%s'" % cc_list
+ if email_bcc:
+ bcc_list = ",".join(email_bcc)
+ command += " --bcc='%s'" % bcc_list
- if attachments:
- attachment_files = []
- for attachment in attachments:
- if '<html>' in attachment.content:
- report_suffix = '_report.html'
- else:
- report_suffix = '_report.txt'
- with tempfile.NamedTemporaryFile('w',
- encoding='utf-8',
- delete=False,
- suffix=report_suffix) as f:
- f.write(attachment.content)
- f.flush()
- attachment_files.append(f.name)
- files = ','.join(attachment_files)
- command += " --attachment_files='%s'" % files
- to_be_deleted += attachment_files
+ if attachments:
+ attachment_files = []
+ for attachment in attachments:
+ if "<html>" in attachment.content:
+ report_suffix = "_report.html"
+ else:
+ report_suffix = "_report.txt"
+ with tempfile.NamedTemporaryFile(
+ "w",
+ encoding="utf-8",
+ delete=False,
+ suffix=report_suffix,
+ ) as f:
+ f.write(attachment.content)
+ f.flush()
+ attachment_files.append(f.name)
+ files = ",".join(attachment_files)
+ command += " --attachment_files='%s'" % files
+ to_be_deleted += attachment_files
- # Send the message via our own GMR server.
- status = ce.RunCommand(command)
- return status
+ # Send the message via our own GMR server.
+ status = ce.RunCommand(command)
+ return status
- finally:
- for f in to_be_deleted:
- os.remove(f)
+ finally:
+ for f in to_be_deleted:
+ os.remove(f)
diff --git a/cros_utils/email_sender_unittest.py b/cros_utils/email_sender_unittest.py
index ae41f14..66ec6a2 100755
--- a/cros_utils/email_sender_unittest.py
+++ b/cros_utils/email_sender_unittest.py
@@ -1,13 +1,12 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for email_sender."""
-from __future__ import print_function
import contextlib
import io
@@ -19,102 +18,105 @@
class Test(unittest.TestCase):
- """Tests for email_sender."""
+ """Tests for email_sender."""
- @mock.patch('cros_utils.email_sender.AtomicallyWriteFile')
- def test_x20_email_sending_rejects_invalid_inputs(self, write_file):
- test_cases = [
- {
- # no subject
- 'subject': '',
- 'identifier': 'foo',
- 'direct_recipients': ['gbiv@google.com'],
- 'text_body': 'hi',
- },
- {
- 'subject': 'foo',
- # no identifier
- 'identifier': '',
- 'direct_recipients': ['gbiv@google.com'],
- 'text_body': 'hi',
- },
- {
- 'subject': 'foo',
- 'identifier': 'foo',
- # no recipients
- 'direct_recipients': [],
- 'text_body': 'hi',
- },
- {
- 'subject': 'foo',
- 'identifier': 'foo',
- 'direct_recipients': ['gbiv@google.com'],
- # no body
- },
- {
- 'subject': 'foo',
- 'identifier': 'foo',
- # direct recipients lack @google.
- 'direct_recipients': ['gbiv'],
- 'text_body': 'hi',
- },
- {
- 'subject': 'foo',
- 'identifier': 'foo',
- # non-list recipients
- 'direct_recipients': 'gbiv@google.com',
- 'text_body': 'hi',
- },
- {
- 'subject': 'foo',
- 'identifier': 'foo',
- # non-list recipients
- 'well_known_recipients': 'detective',
- 'text_body': 'hi',
- },
- ]
+ @mock.patch("cros_utils.email_sender.AtomicallyWriteFile")
+ def test_x20_email_sending_rejects_invalid_inputs(self, write_file):
+ test_cases = [
+ {
+ # no subject
+ "subject": "",
+ "identifier": "foo",
+ "direct_recipients": ["gbiv@google.com"],
+ "text_body": "hi",
+ },
+ {
+ "subject": "foo",
+ # no identifier
+ "identifier": "",
+ "direct_recipients": ["gbiv@google.com"],
+ "text_body": "hi",
+ },
+ {
+ "subject": "foo",
+ "identifier": "foo",
+ # no recipients
+ "direct_recipients": [],
+ "text_body": "hi",
+ },
+ {
+ "subject": "foo",
+ "identifier": "foo",
+ "direct_recipients": ["gbiv@google.com"],
+ # no body
+ },
+ {
+ "subject": "foo",
+ "identifier": "foo",
+ # direct recipients lack @google.
+ "direct_recipients": ["gbiv"],
+ "text_body": "hi",
+ },
+ {
+ "subject": "foo",
+ "identifier": "foo",
+ # non-list recipients
+ "direct_recipients": "gbiv@google.com",
+ "text_body": "hi",
+ },
+ {
+ "subject": "foo",
+ "identifier": "foo",
+ # non-list recipients
+ "well_known_recipients": "detective",
+ "text_body": "hi",
+ },
+ ]
- sender = email_sender.EmailSender()
- for case in test_cases:
- with self.assertRaises(ValueError):
- sender.SendX20Email(**case)
+ sender = email_sender.EmailSender()
+ for case in test_cases:
+ with self.assertRaises(ValueError):
+ sender.SendX20Email(**case)
- write_file.assert_not_called()
+ write_file.assert_not_called()
- @mock.patch('cros_utils.email_sender.AtomicallyWriteFile')
- def test_x20_email_sending_translates_to_reasonable_json(self, write_file):
- written_obj = None
+ @mock.patch("cros_utils.email_sender.AtomicallyWriteFile")
+ def test_x20_email_sending_translates_to_reasonable_json(self, write_file):
+ written_obj = None
- @contextlib.contextmanager
- def actual_write_file(file_path):
- nonlocal written_obj
+ @contextlib.contextmanager
+ def actual_write_file(file_path):
+ nonlocal written_obj
- self.assertTrue(file_path.startswith(email_sender.X20_PATH + '/'),
- file_path)
- f = io.StringIO()
- yield f
- written_obj = json.loads(f.getvalue())
+ self.assertTrue(
+ file_path.startswith(email_sender.X20_PATH + "/"), file_path
+ )
+ f = io.StringIO()
+ yield f
+ written_obj = json.loads(f.getvalue())
- write_file.side_effect = actual_write_file
- email_sender.EmailSender().SendX20Email(
- subject='hello',
- identifier='world',
- well_known_recipients=['detective'],
- direct_recipients=['gbiv@google.com'],
- text_body='text',
- html_body='html',
- )
+ write_file.side_effect = actual_write_file
+ email_sender.EmailSender().SendX20Email(
+ subject="hello",
+ identifier="world",
+ well_known_recipients=["detective"],
+ direct_recipients=["gbiv@google.com"],
+ text_body="text",
+ html_body="html",
+ )
- self.assertEqual(
- written_obj, {
- 'subject': 'hello',
- 'email_identifier': 'world',
- 'well_known_recipients': ['detective'],
- 'direct_recipients': ['gbiv@google.com'],
- 'body': 'text',
- 'html_body': 'html',
- })
+ self.assertEqual(
+ written_obj,
+ {
+ "subject": "hello",
+ "email_identifier": "world",
+ "well_known_recipients": ["detective"],
+ "direct_recipients": ["gbiv@google.com"],
+ "body": "text",
+ "html_body": "html",
+ },
+ )
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/cros_utils/file_utils.py b/cros_utils/file_utils.py
index f0e4064..743edef 100644
--- a/cros_utils/file_utils.py
+++ b/cros_utils/file_utils.py
@@ -1,11 +1,10 @@
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities for operations on files."""
-from __future__ import print_function
import errno
import os
@@ -15,78 +14,83 @@
class FileUtils(object):
- """Utilities for operations on files."""
- _instance = None
- DRY_RUN = False
+ """Utilities for operations on files."""
- @classmethod
- def Configure(cls, dry_run):
- cls.DRY_RUN = dry_run
+ _instance = None
+ DRY_RUN = False
- def __new__(cls, *args, **kwargs):
- if not cls._instance:
- if cls.DRY_RUN:
- cls._instance = super(FileUtils, cls).__new__(MockFileUtils, *args,
- **kwargs)
- else:
- cls._instance = super(FileUtils, cls).__new__(cls, *args, **kwargs)
- return cls._instance
+ @classmethod
+ def Configure(cls, dry_run):
+ cls.DRY_RUN = dry_run
- def Md5File(self, filename, log_level='verbose', _block_size=2**10):
- command = 'md5sum %s' % filename
- ce = command_executer.GetCommandExecuter(log_level=log_level)
- ret, out, _ = ce.RunCommandWOutput(command)
- if ret:
- raise RuntimeError('Could not run md5sum on: %s' % filename)
+ def __new__(cls, *args, **kwargs):
+ if not cls._instance:
+ if cls.DRY_RUN:
+ cls._instance = super(FileUtils, cls).__new__(
+ MockFileUtils, *args, **kwargs
+ )
+ else:
+ cls._instance = super(FileUtils, cls).__new__(
+ cls, *args, **kwargs
+ )
+ return cls._instance
- return out.strip().split()[0]
+ def Md5File(self, filename, log_level="verbose", _block_size=2 ** 10):
+ command = "md5sum %s" % filename
+ ce = command_executer.GetCommandExecuter(log_level=log_level)
+ ret, out, _ = ce.RunCommandWOutput(command)
+ if ret:
+ raise RuntimeError("Could not run md5sum on: %s" % filename)
- def CanonicalizeChromeOSRoot(self, chromeos_root):
- chromeos_root = os.path.expanduser(chromeos_root)
- if os.path.isdir(os.path.join(chromeos_root, 'chromite')):
- return chromeos_root
- else:
- return None
+ return out.strip().split()[0]
- def ChromeOSRootFromImage(self, chromeos_image):
- chromeos_root = os.path.join(
- os.path.dirname(chromeos_image), '../../../../..')
- return self.CanonicalizeChromeOSRoot(chromeos_root)
+ def CanonicalizeChromeOSRoot(self, chromeos_root):
+ chromeos_root = os.path.expanduser(chromeos_root)
+ if os.path.isdir(os.path.join(chromeos_root, "chromite")):
+ return chromeos_root
+ else:
+ return None
- def MkDirP(self, path):
- try:
- os.makedirs(path)
- except OSError as exc:
- if exc.errno == errno.EEXIST:
- pass
- else:
- raise
+ def ChromeOSRootFromImage(self, chromeos_image):
+ chromeos_root = os.path.join(
+ os.path.dirname(chromeos_image), "../../../../.."
+ )
+ return self.CanonicalizeChromeOSRoot(chromeos_root)
- def RmDir(self, path):
- shutil.rmtree(path, ignore_errors=True)
+ def MkDirP(self, path):
+ try:
+ os.makedirs(path)
+ except OSError as exc:
+ if exc.errno == errno.EEXIST:
+ pass
+ else:
+ raise
- def WriteFile(self, path, contents):
- with open(path, 'w', encoding='utf-8') as f:
- f.write(contents)
+ def RmDir(self, path):
+ shutil.rmtree(path, ignore_errors=True)
+
+ def WriteFile(self, path, contents):
+ with open(path, "w", encoding="utf-8") as f:
+ f.write(contents)
class MockFileUtils(FileUtils):
- """Mock class for file utilities."""
+ """Mock class for file utilities."""
- def Md5File(self, filename, log_level='verbose', _block_size=2**10):
- return 'd41d8cd98f00b204e9800998ecf8427e'
+ def Md5File(self, filename, log_level="verbose", _block_size=2 ** 10):
+ return "d41d8cd98f00b204e9800998ecf8427e"
- def CanonicalizeChromeOSRoot(self, chromeos_root):
- return '/tmp/chromeos_root'
+ def CanonicalizeChromeOSRoot(self, chromeos_root):
+ return "/tmp/chromeos_root"
- def ChromeOSRootFromImage(self, chromeos_image):
- return '/tmp/chromeos_root'
+ def ChromeOSRootFromImage(self, chromeos_image):
+ return "/tmp/chromeos_root"
- def RmDir(self, path):
- pass
+ def RmDir(self, path):
+ pass
- def MkDirP(self, path):
- pass
+ def MkDirP(self, path):
+ pass
- def WriteFile(self, path, contents):
- pass
+ def WriteFile(self, path, contents):
+ pass
diff --git a/cros_utils/html_tools.py b/cros_utils/html_tools.py
index 688955f..202bef0 100644
--- a/cros_utils/html_tools.py
+++ b/cros_utils/html_tools.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -7,7 +7,8 @@
def GetPageHeader(page_title):
- return """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ return (
+ """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
@@ -32,64 +33,68 @@
</head>
<body>
-""" % page_title
+"""
+ % page_title
+ )
def GetListHeader():
- return '<ul>'
+ return "<ul>"
def GetListItem(text):
- return '<li>%s</li>' % text
+ return "<li>%s</li>" % text
def GetListFooter():
- return '</ul>'
+ return "</ul>"
def GetList(items):
- return '<ul>%s</ul>' % ''.join(['<li>%s</li>' % item for item in items])
+ return "<ul>%s</ul>" % "".join(["<li>%s</li>" % item for item in items])
def GetParagraph(text):
- return '<p>%s</p>' % text
+ return "<p>%s</p>" % text
def GetFooter():
- return '</body>\n</html>'
+ return "</body>\n</html>"
def GetHeader(text, h=1):
- return '<h%s>%s</h%s>' % (h, text, h)
+ return "<h%s>%s</h%s>" % (h, text, h)
def GetTableHeader(headers):
- row = ''.join(['<th>%s</th>' % header for header in headers])
- return '<table><tr>%s</tr>' % row
+ row = "".join(["<th>%s</th>" % header for header in headers])
+ return "<table><tr>%s</tr>" % row
def GetTableFooter():
- return '</table>'
+ return "</table>"
def FormatLineBreaks(text):
- return text.replace('\n', '<br/>')
+ return text.replace("\n", "<br/>")
def GetTableCell(text):
- return '<td>%s</td>' % FormatLineBreaks(str(text))
+ return "<td>%s</td>" % FormatLineBreaks(str(text))
def GetTableRow(columns):
- return '<tr>%s</tr>' % '\n'.join([GetTableCell(column) for column in columns])
+ return "<tr>%s</tr>" % "\n".join(
+ [GetTableCell(column) for column in columns]
+ )
def GetTable(headers, rows):
- table = [GetTableHeader(headers)]
- table.extend([GetTableRow(row) for row in rows])
- table.append(GetTableFooter())
- return '\n'.join(table)
+ table = [GetTableHeader(headers)]
+ table.extend([GetTableRow(row) for row in rows])
+ table.append(GetTableFooter())
+ return "\n".join(table)
def GetLink(link, text):
- return "<a href='%s'>%s</a>" % (link, text)
+ return "<a href='%s'>%s</a>" % (link, text)
diff --git a/cros_utils/locks.py b/cros_utils/locks.py
index 848e23f..db6f434 100644
--- a/cros_utils/locks.py
+++ b/cros_utils/locks.py
@@ -1,49 +1,52 @@
# -*- coding: utf-8 -*-
#
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities for locking machines."""
-from __future__ import print_function
import time
-import lock_machine
-
from cros_utils import logger
+import lock_machine
def AcquireLock(machines, chromeos_root, timeout=1200):
- """Acquire lock for machine(s) with timeout."""
- start_time = time.time()
- locked = True
- sleep_time = min(10, timeout / 10.0)
- while True:
- try:
- lock_machine.LockManager(machines, False,
- chromeos_root).UpdateMachines(True)
- break
- except Exception as e:
- if time.time() - start_time > timeout:
- locked = False
- logger.GetLogger().LogWarning(
- 'Could not acquire lock on {0} within {1} seconds: {2}'.format(
- repr(machines), timeout, str(e)))
- break
- time.sleep(sleep_time)
- return locked
+ """Acquire lock for machine(s) with timeout."""
+ start_time = time.time()
+ locked = True
+ sleep_time = min(10, timeout / 10.0)
+ while True:
+ try:
+ lock_machine.LockManager(
+ machines, False, chromeos_root
+ ).UpdateMachines(True)
+ break
+ except Exception as e:
+ if time.time() - start_time > timeout:
+ locked = False
+ logger.GetLogger().LogWarning(
+ "Could not acquire lock on {0} within {1} seconds: {2}".format(
+ repr(machines), timeout, str(e)
+ )
+ )
+ break
+ time.sleep(sleep_time)
+ return locked
def ReleaseLock(machines, chromeos_root):
- """Release locked machine(s)."""
- unlocked = True
- try:
- lock_machine.LockManager(machines, False,
- chromeos_root).UpdateMachines(False)
- except Exception as e:
- unlocked = False
- logger.GetLogger().LogWarning(
- 'Could not unlock %s. %s' % (repr(machines), str(e)))
- return unlocked
+ """Release locked machine(s)."""
+ unlocked = True
+ try:
+ lock_machine.LockManager(machines, False, chromeos_root).UpdateMachines(
+ False
+ )
+ except Exception as e:
+ unlocked = False
+ logger.GetLogger().LogWarning(
+ "Could not unlock %s. %s" % (repr(machines), str(e))
+ )
+ return unlocked
diff --git a/cros_utils/logger.py b/cros_utils/logger.py
index e304fe1..e9b9d1b 100644
--- a/cros_utils/logger.py
+++ b/cros_utils/logger.py
@@ -1,11 +1,10 @@
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Logging helper module."""
-from __future__ import print_function
# System modules
import os.path
@@ -15,350 +14,380 @@
# TODO(yunlian@google.com): Use GetRoot from misc
def GetRoot(scr_name):
- """Break up pathname into (dir+name)."""
- abs_path = os.path.abspath(scr_name)
- return (os.path.dirname(abs_path), os.path.basename(abs_path))
+ """Break up pathname into (dir+name)."""
+ abs_path = os.path.abspath(scr_name)
+ return (os.path.dirname(abs_path), os.path.basename(abs_path))
class Logger(object):
- """Logging helper class."""
+ """Logging helper class."""
- MAX_LOG_FILES = 10
+ MAX_LOG_FILES = 10
- def __init__(self, rootdir, basefilename, print_console, subdir='logs'):
- logdir = os.path.join(rootdir, subdir)
- basename = os.path.join(logdir, basefilename)
+ def __init__(self, rootdir, basefilename, print_console, subdir="logs"):
+ logdir = os.path.join(rootdir, subdir)
+ basename = os.path.join(logdir, basefilename)
- try:
- os.makedirs(logdir)
- except OSError:
- pass
- # print("Warning: Logs directory '%s' already exists." % logdir)
+ try:
+ os.makedirs(logdir)
+ except OSError:
+ pass
+ # print("Warning: Logs directory '%s' already exists." % logdir)
- self.print_console = print_console
+ self.print_console = print_console
- self._CreateLogFileHandles(basename)
+ self._CreateLogFileHandles(basename)
- self._WriteTo(self.cmdfd, ' '.join(sys.argv), True)
+ self._WriteTo(self.cmdfd, " ".join(sys.argv), True)
- def _AddSuffix(self, basename, suffix):
- return '%s%s' % (basename, suffix)
+ def _AddSuffix(self, basename, suffix):
+ return "%s%s" % (basename, suffix)
- def _FindSuffix(self, basename):
- timestamps = []
- found_suffix = None
- for i in range(self.MAX_LOG_FILES):
- suffix = str(i)
- suffixed_basename = self._AddSuffix(basename, suffix)
- cmd_file = '%s.cmd' % suffixed_basename
- if not os.path.exists(cmd_file):
- found_suffix = suffix
- break
- timestamps.append(os.stat(cmd_file).st_mtime)
+ def _FindSuffix(self, basename):
+ timestamps = []
+ found_suffix = None
+ for i in range(self.MAX_LOG_FILES):
+ suffix = str(i)
+ suffixed_basename = self._AddSuffix(basename, suffix)
+ cmd_file = "%s.cmd" % suffixed_basename
+ if not os.path.exists(cmd_file):
+ found_suffix = suffix
+ break
+ timestamps.append(os.stat(cmd_file).st_mtime)
- if found_suffix:
- return found_suffix
+ if found_suffix:
+ return found_suffix
- # Try to pick the oldest file with the suffix and return that one.
- suffix = str(timestamps.index(min(timestamps)))
- # print ("Warning: Overwriting log file: %s" %
- # self._AddSuffix(basename, suffix))
- return suffix
+ # Try to pick the oldest file with the suffix and return that one.
+ suffix = str(timestamps.index(min(timestamps)))
+ # print ("Warning: Overwriting log file: %s" %
+ # self._AddSuffix(basename, suffix))
+ return suffix
- def _CreateLogFileHandle(self, name):
- fd = None
- try:
- fd = open(name, 'w')
- except IOError:
- print('Warning: could not open %s for writing.' % name)
- return fd
+ def _CreateLogFileHandle(self, name):
+ fd = None
+ try:
+ fd = open(name, "w")
+ except IOError:
+ print("Warning: could not open %s for writing." % name)
+ return fd
- def _CreateLogFileHandles(self, basename):
- suffix = self._FindSuffix(basename)
- suffixed_basename = self._AddSuffix(basename, suffix)
+ def _CreateLogFileHandles(self, basename):
+ suffix = self._FindSuffix(basename)
+ suffixed_basename = self._AddSuffix(basename, suffix)
- self.cmdfd = self._CreateLogFileHandle('%s.cmd' % suffixed_basename)
- self.stdout = self._CreateLogFileHandle('%s.out' % suffixed_basename)
- self.stderr = self._CreateLogFileHandle('%s.err' % suffixed_basename)
+ self.cmdfd = self._CreateLogFileHandle("%s.cmd" % suffixed_basename)
+ self.stdout = self._CreateLogFileHandle("%s.out" % suffixed_basename)
+ self.stderr = self._CreateLogFileHandle("%s.err" % suffixed_basename)
- self._CreateLogFileSymlinks(basename, suffixed_basename)
+ self._CreateLogFileSymlinks(basename, suffixed_basename)
- # Symlink unsuffixed basename to currently suffixed one.
- def _CreateLogFileSymlinks(self, basename, suffixed_basename):
- try:
- for extension in ['cmd', 'out', 'err']:
- src_file = '%s.%s' % (os.path.basename(suffixed_basename), extension)
- dest_file = '%s.%s' % (basename, extension)
- if os.path.exists(dest_file):
- os.remove(dest_file)
- os.symlink(src_file, dest_file)
- except Exception as ex:
- print('Exception while creating symlinks: %s' % str(ex))
+ # Symlink unsuffixed basename to currently suffixed one.
+ def _CreateLogFileSymlinks(self, basename, suffixed_basename):
+ try:
+ for extension in ["cmd", "out", "err"]:
+ src_file = "%s.%s" % (
+ os.path.basename(suffixed_basename),
+ extension,
+ )
+ dest_file = "%s.%s" % (basename, extension)
+ if os.path.exists(dest_file):
+ os.remove(dest_file)
+ os.symlink(src_file, dest_file)
+ except Exception as ex:
+ print("Exception while creating symlinks: %s" % str(ex))
- def _WriteTo(self, fd, msg, flush):
- if fd:
- fd.write(msg)
- if flush:
- fd.flush()
+ def _WriteTo(self, fd, msg, flush):
+ if fd:
+ fd.write(msg)
+ if flush:
+ fd.flush()
- def LogStartDots(self, print_to_console=True):
- term_fd = self._GetStdout(print_to_console)
- if term_fd:
- term_fd.flush()
- term_fd.write('. ')
- term_fd.flush()
+ def LogStartDots(self, print_to_console=True):
+ term_fd = self._GetStdout(print_to_console)
+ if term_fd:
+ term_fd.flush()
+ term_fd.write(". ")
+ term_fd.flush()
- def LogAppendDot(self, print_to_console=True):
- term_fd = self._GetStdout(print_to_console)
- if term_fd:
- term_fd.write('. ')
- term_fd.flush()
+ def LogAppendDot(self, print_to_console=True):
+ term_fd = self._GetStdout(print_to_console)
+ if term_fd:
+ term_fd.write(". ")
+ term_fd.flush()
- def LogEndDots(self, print_to_console=True):
- term_fd = self._GetStdout(print_to_console)
- if term_fd:
- term_fd.write('\n')
- term_fd.flush()
+ def LogEndDots(self, print_to_console=True):
+ term_fd = self._GetStdout(print_to_console)
+ if term_fd:
+ term_fd.write("\n")
+ term_fd.flush()
- def LogMsg(self, file_fd, term_fd, msg, flush=True):
- if file_fd:
- self._WriteTo(file_fd, msg, flush)
- if self.print_console:
- self._WriteTo(term_fd, msg, flush)
+ def LogMsg(self, file_fd, term_fd, msg, flush=True):
+ if file_fd:
+ self._WriteTo(file_fd, msg, flush)
+ if self.print_console:
+ self._WriteTo(term_fd, msg, flush)
- def _GetStdout(self, print_to_console):
- if print_to_console:
- return sys.stdout
- return None
+ def _GetStdout(self, print_to_console):
+ if print_to_console:
+ return sys.stdout
+ return None
- def _GetStderr(self, print_to_console):
- if print_to_console:
- return sys.stderr
- return None
+ def _GetStderr(self, print_to_console):
+ if print_to_console:
+ return sys.stderr
+ return None
- def LogCmdToFileOnly(self, cmd, machine='', user=None):
- if not self.cmdfd:
- return
+ def LogCmdToFileOnly(self, cmd, machine="", user=None):
+ if not self.cmdfd:
+ return
- host = ('%s@%s' % (user, machine)) if user else machine
- flush = True
- cmd_string = 'CMD (%s): %s\n' % (host, cmd)
- self._WriteTo(self.cmdfd, cmd_string, flush)
+ host = ("%s@%s" % (user, machine)) if user else machine
+ flush = True
+ cmd_string = "CMD (%s): %s\n" % (host, cmd)
+ self._WriteTo(self.cmdfd, cmd_string, flush)
- def LogCmd(self, cmd, machine='', user=None, print_to_console=True):
- if user:
- host = '%s@%s' % (user, machine)
- else:
- host = machine
+ def LogCmd(self, cmd, machine="", user=None, print_to_console=True):
+ if user:
+ host = "%s@%s" % (user, machine)
+ else:
+ host = machine
- self.LogMsg(self.cmdfd, self._GetStdout(print_to_console),
- 'CMD (%s): %s\n' % (host, cmd))
+ self.LogMsg(
+ self.cmdfd,
+ self._GetStdout(print_to_console),
+ "CMD (%s): %s\n" % (host, cmd),
+ )
- def LogFatal(self, msg, print_to_console=True):
- self.LogMsg(self.stderr, self._GetStderr(print_to_console),
- 'FATAL: %s\n' % msg)
- self.LogMsg(self.stderr, self._GetStderr(print_to_console),
- '\n'.join(traceback.format_stack()))
- sys.exit(1)
+ def LogFatal(self, msg, print_to_console=True):
+ self.LogMsg(
+ self.stderr, self._GetStderr(print_to_console), "FATAL: %s\n" % msg
+ )
+ self.LogMsg(
+ self.stderr,
+ self._GetStderr(print_to_console),
+ "\n".join(traceback.format_stack()),
+ )
+ sys.exit(1)
- def LogError(self, msg, print_to_console=True):
- self.LogMsg(self.stderr, self._GetStderr(print_to_console),
- 'ERROR: %s\n' % msg)
+ def LogError(self, msg, print_to_console=True):
+ self.LogMsg(
+ self.stderr, self._GetStderr(print_to_console), "ERROR: %s\n" % msg
+ )
- def LogWarning(self, msg, print_to_console=True):
- self.LogMsg(self.stderr, self._GetStderr(print_to_console),
- 'WARNING: %s\n' % msg)
+ def LogWarning(self, msg, print_to_console=True):
+ self.LogMsg(
+ self.stderr,
+ self._GetStderr(print_to_console),
+ "WARNING: %s\n" % msg,
+ )
- def LogOutput(self, msg, print_to_console=True):
- self.LogMsg(self.stdout, self._GetStdout(print_to_console),
- 'OUTPUT: %s\n' % msg)
+ def LogOutput(self, msg, print_to_console=True):
+ self.LogMsg(
+ self.stdout, self._GetStdout(print_to_console), "OUTPUT: %s\n" % msg
+ )
- def LogFatalIf(self, condition, msg):
- if condition:
- self.LogFatal(msg)
+ def LogFatalIf(self, condition, msg):
+ if condition:
+ self.LogFatal(msg)
- def LogErrorIf(self, condition, msg):
- if condition:
- self.LogError(msg)
+ def LogErrorIf(self, condition, msg):
+ if condition:
+ self.LogError(msg)
- def LogWarningIf(self, condition, msg):
- if condition:
- self.LogWarning(msg)
+ def LogWarningIf(self, condition, msg):
+ if condition:
+ self.LogWarning(msg)
- def LogCommandOutput(self, msg, print_to_console=True):
- self.LogMsg(
- self.stdout, self._GetStdout(print_to_console), msg, flush=False)
+ def LogCommandOutput(self, msg, print_to_console=True):
+ self.LogMsg(
+ self.stdout, self._GetStdout(print_to_console), msg, flush=False
+ )
- def LogCommandError(self, msg, print_to_console=True):
- self.LogMsg(
- self.stderr, self._GetStderr(print_to_console), msg, flush=False)
+ def LogCommandError(self, msg, print_to_console=True):
+ self.LogMsg(
+ self.stderr, self._GetStderr(print_to_console), msg, flush=False
+ )
- def Flush(self):
- self.cmdfd.flush()
- self.stdout.flush()
- self.stderr.flush()
+ def Flush(self):
+ self.cmdfd.flush()
+ self.stdout.flush()
+ self.stderr.flush()
class MockLogger(object):
- """Logging helper class."""
+ """Logging helper class."""
- MAX_LOG_FILES = 10
+ MAX_LOG_FILES = 10
- def __init__(self, *_args, **_kwargs):
- self.stdout = sys.stdout
- self.stderr = sys.stderr
+ def __init__(self, *_args, **_kwargs):
+ self.stdout = sys.stdout
+ self.stderr = sys.stderr
- def _AddSuffix(self, basename, suffix):
- return '%s%s' % (basename, suffix)
+ def _AddSuffix(self, basename, suffix):
+ return "%s%s" % (basename, suffix)
- def _FindSuffix(self, basename):
- timestamps = []
- found_suffix = None
- for i in range(self.MAX_LOG_FILES):
- suffix = str(i)
- suffixed_basename = self._AddSuffix(basename, suffix)
- cmd_file = '%s.cmd' % suffixed_basename
- if not os.path.exists(cmd_file):
- found_suffix = suffix
- break
- timestamps.append(os.stat(cmd_file).st_mtime)
+ def _FindSuffix(self, basename):
+ timestamps = []
+ found_suffix = None
+ for i in range(self.MAX_LOG_FILES):
+ suffix = str(i)
+ suffixed_basename = self._AddSuffix(basename, suffix)
+ cmd_file = "%s.cmd" % suffixed_basename
+ if not os.path.exists(cmd_file):
+ found_suffix = suffix
+ break
+ timestamps.append(os.stat(cmd_file).st_mtime)
- if found_suffix:
- return found_suffix
+ if found_suffix:
+ return found_suffix
- # Try to pick the oldest file with the suffix and return that one.
- suffix = str(timestamps.index(min(timestamps)))
- # print ("Warning: Overwriting log file: %s" %
- # self._AddSuffix(basename, suffix))
- return suffix
+ # Try to pick the oldest file with the suffix and return that one.
+ suffix = str(timestamps.index(min(timestamps)))
+ # print ("Warning: Overwriting log file: %s" %
+ # self._AddSuffix(basename, suffix))
+ return suffix
- def _CreateLogFileHandle(self, name):
- print('MockLogger: creating open file handle for %s (writing)' % name)
+ def _CreateLogFileHandle(self, name):
+ print("MockLogger: creating open file handle for %s (writing)" % name)
- def _CreateLogFileHandles(self, basename):
- suffix = self._FindSuffix(basename)
- suffixed_basename = self._AddSuffix(basename, suffix)
+ def _CreateLogFileHandles(self, basename):
+ suffix = self._FindSuffix(basename)
+ suffixed_basename = self._AddSuffix(basename, suffix)
- print('MockLogger: opening file %s.cmd' % suffixed_basename)
- print('MockLogger: opening file %s.out' % suffixed_basename)
- print('MockLogger: opening file %s.err' % suffixed_basename)
+ print("MockLogger: opening file %s.cmd" % suffixed_basename)
+ print("MockLogger: opening file %s.out" % suffixed_basename)
+ print("MockLogger: opening file %s.err" % suffixed_basename)
- self._CreateLogFileSymlinks(basename, suffixed_basename)
+ self._CreateLogFileSymlinks(basename, suffixed_basename)
- # Symlink unsuffixed basename to currently suffixed one.
- def _CreateLogFileSymlinks(self, basename, suffixed_basename):
- for extension in ['cmd', 'out', 'err']:
- src_file = '%s.%s' % (os.path.basename(suffixed_basename), extension)
- dest_file = '%s.%s' % (basename, extension)
- print('MockLogger: Calling os.symlink(%s, %s)' % (src_file, dest_file))
+ # Symlink unsuffixed basename to currently suffixed one.
+ def _CreateLogFileSymlinks(self, basename, suffixed_basename):
+ for extension in ["cmd", "out", "err"]:
+ src_file = "%s.%s" % (
+ os.path.basename(suffixed_basename),
+ extension,
+ )
+ dest_file = "%s.%s" % (basename, extension)
+ print(
+ "MockLogger: Calling os.symlink(%s, %s)" % (src_file, dest_file)
+ )
- def _WriteTo(self, _fd, msg, _flush):
- print('MockLogger: %s' % msg)
+ def _WriteTo(self, _fd, msg, _flush):
+ print("MockLogger: %s" % msg)
- def LogStartDots(self, _print_to_console=True):
- print('. ')
+ def LogStartDots(self, _print_to_console=True):
+ print(". ")
- def LogAppendDot(self, _print_to_console=True):
- print('. ')
+ def LogAppendDot(self, _print_to_console=True):
+ print(". ")
- def LogEndDots(self, _print_to_console=True):
- print('\n')
+ def LogEndDots(self, _print_to_console=True):
+ print("\n")
- def LogMsg(self, _file_fd, _term_fd, msg, **_kwargs):
- print('MockLogger: %s' % msg)
+ def LogMsg(self, _file_fd, _term_fd, msg, **_kwargs):
+ print("MockLogger: %s" % msg)
- def _GetStdout(self, _print_to_console):
- return None
+ def _GetStdout(self, _print_to_console):
+ return None
- def _GetStderr(self, _print_to_console):
- return None
+ def _GetStderr(self, _print_to_console):
+ return None
- def LogCmdToFileOnly(self, *_args, **_kwargs):
- return
+ def LogCmdToFileOnly(self, *_args, **_kwargs):
+ return
- # def LogCmdToFileOnly(self, cmd, machine='', user=None):
- # host = ('%s@%s' % (user, machine)) if user else machine
- # cmd_string = 'CMD (%s): %s\n' % (host, cmd)
- # print('MockLogger: Writing to file ONLY: %s' % cmd_string)
+ # def LogCmdToFileOnly(self, cmd, machine='', user=None):
+ # host = ('%s@%s' % (user, machine)) if user else machine
+ # cmd_string = 'CMD (%s): %s\n' % (host, cmd)
+ # print('MockLogger: Writing to file ONLY: %s' % cmd_string)
- def LogCmd(self, cmd, machine='', user=None, print_to_console=True):
- if user:
- host = '%s@%s' % (user, machine)
- else:
- host = machine
+ def LogCmd(self, cmd, machine="", user=None, print_to_console=True):
+ if user:
+ host = "%s@%s" % (user, machine)
+ else:
+ host = machine
- self.LogMsg(0, self._GetStdout(print_to_console),
- 'CMD (%s): %s\n' % (host, cmd))
+ self.LogMsg(
+ 0, self._GetStdout(print_to_console), "CMD (%s): %s\n" % (host, cmd)
+ )
- def LogFatal(self, msg, print_to_console=True):
- self.LogMsg(0, self._GetStderr(print_to_console), 'FATAL: %s\n' % msg)
- self.LogMsg(0, self._GetStderr(print_to_console),
- '\n'.join(traceback.format_stack()))
- print('MockLogger: Calling sysexit(1)')
+ def LogFatal(self, msg, print_to_console=True):
+ self.LogMsg(0, self._GetStderr(print_to_console), "FATAL: %s\n" % msg)
+ self.LogMsg(
+ 0,
+ self._GetStderr(print_to_console),
+ "\n".join(traceback.format_stack()),
+ )
+ print("MockLogger: Calling sysexit(1)")
- def LogError(self, msg, print_to_console=True):
- self.LogMsg(0, self._GetStderr(print_to_console), 'ERROR: %s\n' % msg)
+ def LogError(self, msg, print_to_console=True):
+ self.LogMsg(0, self._GetStderr(print_to_console), "ERROR: %s\n" % msg)
- def LogWarning(self, msg, print_to_console=True):
- self.LogMsg(0, self._GetStderr(print_to_console), 'WARNING: %s\n' % msg)
+ def LogWarning(self, msg, print_to_console=True):
+ self.LogMsg(0, self._GetStderr(print_to_console), "WARNING: %s\n" % msg)
- def LogOutput(self, msg, print_to_console=True):
- self.LogMsg(0, self._GetStdout(print_to_console), 'OUTPUT: %s\n' % msg)
+ def LogOutput(self, msg, print_to_console=True):
+ self.LogMsg(0, self._GetStdout(print_to_console), "OUTPUT: %s\n" % msg)
- def LogFatalIf(self, condition, msg):
- if condition:
- self.LogFatal(msg)
+ def LogFatalIf(self, condition, msg):
+ if condition:
+ self.LogFatal(msg)
- def LogErrorIf(self, condition, msg):
- if condition:
- self.LogError(msg)
+ def LogErrorIf(self, condition, msg):
+ if condition:
+ self.LogError(msg)
- def LogWarningIf(self, condition, msg):
- if condition:
- self.LogWarning(msg)
+ def LogWarningIf(self, condition, msg):
+ if condition:
+ self.LogWarning(msg)
- def LogCommandOutput(self, msg, print_to_console=True):
- self.LogMsg(
- self.stdout, self._GetStdout(print_to_console), msg, flush=False)
+ def LogCommandOutput(self, msg, print_to_console=True):
+ self.LogMsg(
+ self.stdout, self._GetStdout(print_to_console), msg, flush=False
+ )
- def LogCommandError(self, msg, print_to_console=True):
- self.LogMsg(
- self.stderr, self._GetStderr(print_to_console), msg, flush=False)
+ def LogCommandError(self, msg, print_to_console=True):
+ self.LogMsg(
+ self.stderr, self._GetStderr(print_to_console), msg, flush=False
+ )
- def Flush(self):
- print('MockLogger: Flushing cmdfd, stdout, stderr')
+ def Flush(self):
+ print("MockLogger: Flushing cmdfd, stdout, stderr")
main_logger = None
def InitLogger(script_name, log_dir, print_console=True, mock=False):
- """Initialize a global logger. To be called only once."""
- # pylint: disable=global-statement
- global main_logger
- assert not main_logger, 'The logger has already been initialized'
- rootdir, basefilename = GetRoot(script_name)
- if not log_dir:
- log_dir = rootdir
- if not mock:
- main_logger = Logger(log_dir, basefilename, print_console)
- else:
- main_logger = MockLogger(log_dir, basefilename, print_console)
+ """Initialize a global logger. To be called only once."""
+ # pylint: disable=global-statement
+ global main_logger
+ assert not main_logger, "The logger has already been initialized"
+ rootdir, basefilename = GetRoot(script_name)
+ if not log_dir:
+ log_dir = rootdir
+ if not mock:
+ main_logger = Logger(log_dir, basefilename, print_console)
+ else:
+ main_logger = MockLogger(log_dir, basefilename, print_console)
-def GetLogger(log_dir='', mock=False):
- if not main_logger:
- InitLogger(sys.argv[0], log_dir, mock=mock)
- return main_logger
+def GetLogger(log_dir="", mock=False):
+ if not main_logger:
+ InitLogger(sys.argv[0], log_dir, mock=mock)
+ return main_logger
def HandleUncaughtExceptions(fun):
- """Catches all exceptions that would go outside decorated fun scope."""
+ """Catches all exceptions that would go outside decorated fun scope."""
- def _Interceptor(*args, **kwargs):
- try:
- return fun(*args, **kwargs)
- except Exception:
- GetLogger().LogFatal('Uncaught exception:\n%s' % traceback.format_exc())
+ def _Interceptor(*args, **kwargs):
+ try:
+ return fun(*args, **kwargs)
+ except Exception:
+ GetLogger().LogFatal(
+ "Uncaught exception:\n%s" % traceback.format_exc()
+ )
- return _Interceptor
+ return _Interceptor
diff --git a/cros_utils/machines.py b/cros_utils/machines.py
index 89b51b0..a538573 100644
--- a/cros_utils/machines.py
+++ b/cros_utils/machines.py
@@ -1,27 +1,26 @@
# -*- coding: utf-8 -*-
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities relating to machine-specific functions."""
-from __future__ import print_function
from cros_utils import command_executer
-def MachineIsPingable(machine, logging_level='average'):
- """Checks to see if a machine is responding to 'ping'.
+def MachineIsPingable(machine, logging_level="average"):
+ """Checks to see if a machine is responding to 'ping'.
- Args:
- machine: String containing the name or ip address of the machine to check.
- logging_level: The logging level with which to initialize the
- command_executer (from command_executor.LOG_LEVEL enum list).
+ Args:
+ machine: String containing the name or ip address of the machine to check.
+ logging_level: The logging level with which to initialize the
+ command_executer (from command_executor.LOG_LEVEL enum list).
- Returns:
- Boolean indicating whether machine is responding to ping or not.
- """
- ce = command_executer.GetCommandExecuter(log_level=logging_level)
- cmd = 'ping -c 1 -w 3 %s' % machine
- status = ce.RunCommand(cmd)
- return status == 0
+ Returns:
+ Boolean indicating whether machine is responding to ping or not.
+ """
+ ce = command_executer.GetCommandExecuter(log_level=logging_level)
+ cmd = "ping -c 1 -w 3 %s" % machine
+ status = ce.RunCommand(cmd)
+ return status == 0
diff --git a/cros_utils/misc.py b/cros_utils/misc.py
index a0d0de7..aabb5ad 100644
--- a/cros_utils/misc.py
+++ b/cros_utils/misc.py
@@ -1,14 +1,12 @@
# -*- coding: utf-8 -*-
-# Copyright 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities for toolchain build."""
-from __future__ import division
-from __future__ import print_function
-__author__ = 'asharif@google.com (Ahmad Sharif)'
+__author__ = "asharif@google.com (Ahmad Sharif)"
from contextlib import contextmanager
import os
@@ -19,495 +17,545 @@
from cros_utils import command_executer
from cros_utils import logger
-CHROMEOS_SCRIPTS_DIR = '/mnt/host/source/src/scripts'
-TOOLCHAIN_UTILS_PATH = ('/mnt/host/source/src/third_party/toolchain-utils/'
- 'cros_utils/toolchain_utils.sh')
+
+CHROMEOS_SCRIPTS_DIR = "/mnt/host/source/src/scripts"
+TOOLCHAIN_UTILS_PATH = (
+ "/mnt/host/source/src/third_party/toolchain-utils/"
+ "cros_utils/toolchain_utils.sh"
+)
def GetChromeOSVersionFromLSBVersion(lsb_version):
- """Get Chromeos version from Lsb version."""
- ce = command_executer.GetCommandExecuter()
- command = ('git ls-remote '
- 'https://chromium.googlesource.com/chromiumos/manifest.git '
- 'refs/heads/release-R*')
- ret, out, _ = ce.RunCommandWOutput(command, print_to_console=False)
- assert ret == 0, 'Command %s failed' % command
- lower = []
- for line in out.splitlines():
- mo = re.search(r'refs/heads/release-R(\d+)-(\d+)\.B', line)
- if mo:
- revision = int(mo.group(1))
- build = int(mo.group(2))
- lsb_build = int(lsb_version.split('.')[0])
- if lsb_build > build:
- lower.append(revision)
- lower = sorted(lower)
- if lower:
- return 'R%d-%s' % (lower[-1] + 1, lsb_version)
- else:
- return 'Unknown'
+    """Get ChromeOS version from LSB version."""
+ ce = command_executer.GetCommandExecuter()
+ command = (
+ "git ls-remote "
+ "https://chromium.googlesource.com/chromiumos/manifest.git "
+ "refs/heads/release-R*"
+ )
+ ret, out, _ = ce.RunCommandWOutput(command, print_to_console=False)
+ assert ret == 0, "Command %s failed" % command
+ lower = []
+ for line in out.splitlines():
+ mo = re.search(r"refs/heads/release-R(\d+)-(\d+)\.B", line)
+ if mo:
+ revision = int(mo.group(1))
+ build = int(mo.group(2))
+ lsb_build = int(lsb_version.split(".")[0])
+ if lsb_build > build:
+ lower.append(revision)
+ lower = sorted(lower)
+ if lower:
+ return "R%d-%s" % (lower[-1] + 1, lsb_version)
+ else:
+ return "Unknown"
def ApplySubs(string, *substitutions):
- for pattern, replacement in substitutions:
- string = re.sub(pattern, replacement, string)
- return string
+ for pattern, replacement in substitutions:
+ string = re.sub(pattern, replacement, string)
+ return string
def UnitToNumber(unit_num, base=1000):
- """Convert a number with unit to float."""
- unit_dict = {'kilo': base, 'mega': base**2, 'giga': base**3}
- unit_num = unit_num.lower()
- mo = re.search(r'(\d*)(.+)?', unit_num)
- number = mo.group(1)
- unit = mo.group(2)
- if not unit:
- return float(number)
- for k, v in unit_dict.items():
- if k.startswith(unit):
- return float(number) * v
- raise RuntimeError('Unit: %s not found in byte: %s!' % (unit, unit_num))
+ """Convert a number with unit to float."""
+ unit_dict = {"kilo": base, "mega": base ** 2, "giga": base ** 3}
+ unit_num = unit_num.lower()
+ mo = re.search(r"(\d*)(.+)?", unit_num)
+ number = mo.group(1)
+ unit = mo.group(2)
+ if not unit:
+ return float(number)
+ for k, v in unit_dict.items():
+ if k.startswith(unit):
+ return float(number) * v
+ raise RuntimeError("Unit: %s not found in byte: %s!" % (unit, unit_num))
def GetFilenameFromString(string):
- return ApplySubs(
- string,
- (r'/', '__'),
- (r'\s', '_'),
- (r'[\\$="?^]', ''),
- )
+ return ApplySubs(
+ string,
+ (r"/", "__"),
+ (r"\s", "_"),
+ (r'[\\$="?^]', ""),
+ )
def GetRoot(scr_name):
- """Break up pathname into (dir+name)."""
- abs_path = os.path.abspath(scr_name)
- return (os.path.dirname(abs_path), os.path.basename(abs_path))
+ """Break up pathname into (dir+name)."""
+ abs_path = os.path.abspath(scr_name)
+ return (os.path.dirname(abs_path), os.path.basename(abs_path))
def GetChromeOSKeyFile(chromeos_root):
- return os.path.join(chromeos_root, 'src', 'scripts', 'mod_for_test_scripts',
- 'ssh_keys', 'testing_rsa')
+ return os.path.join(
+ chromeos_root,
+ "src",
+ "scripts",
+ "mod_for_test_scripts",
+ "ssh_keys",
+ "testing_rsa",
+ )
def GetChrootPath(chromeos_root):
- return os.path.join(chromeos_root, 'chroot')
+ return os.path.join(chromeos_root, "chroot")
def GetInsideChrootPath(chromeos_root, file_path):
- if not file_path.startswith(GetChrootPath(chromeos_root)):
- raise RuntimeError("File: %s doesn't seem to be in the chroot: %s" %
- (file_path, chromeos_root))
- return file_path[len(GetChrootPath(chromeos_root)):]
+ if not file_path.startswith(GetChrootPath(chromeos_root)):
+ raise RuntimeError(
+ "File: %s doesn't seem to be in the chroot: %s"
+ % (file_path, chromeos_root)
+ )
+ return file_path[len(GetChrootPath(chromeos_root)) :]
def GetOutsideChrootPath(chromeos_root, file_path):
- return os.path.join(GetChrootPath(chromeos_root), file_path.lstrip('/'))
+ return os.path.join(GetChrootPath(chromeos_root), file_path.lstrip("/"))
def FormatQuotedCommand(command):
- return ApplySubs(command, ('"', r'\"'))
+ return ApplySubs(command, ('"', r"\""))
def FormatCommands(commands):
- return ApplySubs(str(commands), ('&&', '&&\n'), (';', ';\n'),
- (r'\n+\s*', '\n'))
+ return ApplySubs(
+ str(commands), ("&&", "&&\n"), (";", ";\n"), (r"\n+\s*", "\n")
+ )
def GetImageDir(chromeos_root, board):
- return os.path.join(chromeos_root, 'src', 'build', 'images', board)
+ return os.path.join(chromeos_root, "src", "build", "images", board)
def LabelLatestImage(chromeos_root, board, label, vanilla_path=None):
- image_dir = GetImageDir(chromeos_root, board)
- latest_image_dir = os.path.join(image_dir, 'latest')
- latest_image_dir = os.path.realpath(latest_image_dir)
- latest_image_dir = os.path.basename(latest_image_dir)
- retval = 0
- with WorkingDirectory(image_dir):
- command = 'ln -sf -T %s %s' % (latest_image_dir, label)
- ce = command_executer.GetCommandExecuter()
- retval = ce.RunCommand(command)
- if retval:
- return retval
- if vanilla_path:
- command = 'ln -sf -T %s %s' % (vanilla_path, 'vanilla')
- retval2 = ce.RunCommand(command)
- return retval2
- return retval
+ image_dir = GetImageDir(chromeos_root, board)
+ latest_image_dir = os.path.join(image_dir, "latest")
+ latest_image_dir = os.path.realpath(latest_image_dir)
+ latest_image_dir = os.path.basename(latest_image_dir)
+ retval = 0
+ with WorkingDirectory(image_dir):
+ command = "ln -sf -T %s %s" % (latest_image_dir, label)
+ ce = command_executer.GetCommandExecuter()
+ retval = ce.RunCommand(command)
+ if retval:
+ return retval
+ if vanilla_path:
+ command = "ln -sf -T %s %s" % (vanilla_path, "vanilla")
+ retval2 = ce.RunCommand(command)
+ return retval2
+ return retval
def DoesLabelExist(chromeos_root, board, label):
- image_label = os.path.join(GetImageDir(chromeos_root, board), label)
- return os.path.exists(image_label)
+ image_label = os.path.join(GetImageDir(chromeos_root, board), label)
+ return os.path.exists(image_label)
def GetBuildPackagesCommand(board, usepkg=False, debug=False):
- if usepkg:
- usepkg_flag = '--usepkg'
- else:
- usepkg_flag = '--nousepkg'
- if debug:
- withdebug_flag = '--withdebug'
- else:
- withdebug_flag = '--nowithdebug'
- return ('%s/build_packages %s --withdev --withtest --withautotest '
- '--skip_toolchain_update %s --board=%s '
- '--accept_licenses=@CHROMEOS' %
- (CHROMEOS_SCRIPTS_DIR, usepkg_flag, withdebug_flag, board))
+ if usepkg:
+ usepkg_flag = "--usepkg"
+ else:
+ usepkg_flag = "--nousepkg"
+ if debug:
+ withdebug_flag = "--withdebug"
+ else:
+ withdebug_flag = "--nowithdebug"
+ return (
+ "%s/build_packages %s --withdev --withtest --withautotest "
+ "--skip_toolchain_update %s --board=%s "
+ "--accept_licenses=@CHROMEOS"
+ % (CHROMEOS_SCRIPTS_DIR, usepkg_flag, withdebug_flag, board)
+ )
def GetBuildImageCommand(board, dev=False):
- dev_args = ''
- if dev:
- dev_args = '--noenable_rootfs_verification --disk_layout=2gb-rootfs'
- return ('%s/build_image --board=%s %s test' %
- (CHROMEOS_SCRIPTS_DIR, board, dev_args))
+ dev_args = ""
+ if dev:
+ dev_args = "--noenable_rootfs_verification --disk_layout=2gb-rootfs"
+ return "%s/build_image --board=%s %s test" % (
+ CHROMEOS_SCRIPTS_DIR,
+ board,
+ dev_args,
+ )
def GetSetupBoardCommand(board, usepkg=None, force=None):
- """Get setup_board command."""
- options = []
+ """Get setup_board command."""
+ options = []
- if usepkg:
- options.append('--usepkg')
- else:
- options.append('--nousepkg')
+ if usepkg:
+ options.append("--usepkg")
+ else:
+ options.append("--nousepkg")
- if force:
- options.append('--force')
+ if force:
+ options.append("--force")
- options.append('--accept-licenses=@CHROMEOS')
+ options.append("--accept-licenses=@CHROMEOS")
- return 'setup_board --board=%s %s' % (board, ' '.join(options))
+ return "setup_board --board=%s %s" % (board, " ".join(options))
def CanonicalizePath(path):
- path = os.path.expanduser(path)
- path = os.path.realpath(path)
- return path
+ path = os.path.expanduser(path)
+ path = os.path.realpath(path)
+ return path
def GetCtargetFromBoard(board, chromeos_root):
- """Get Ctarget from board."""
- base_board = board.split('_')[0]
- command = ('source %s; get_ctarget_from_board %s' %
- (TOOLCHAIN_UTILS_PATH, base_board))
- ce = command_executer.GetCommandExecuter()
- ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root, command)
- if ret != 0:
- raise ValueError('Board %s is invalid!' % board)
- # Remove ANSI escape sequences.
- out = StripANSIEscapeSequences(out)
- return out.strip()
+ """Get Ctarget from board."""
+ base_board = board.split("_")[0]
+ command = "source %s; get_ctarget_from_board %s" % (
+ TOOLCHAIN_UTILS_PATH,
+ base_board,
+ )
+ ce = command_executer.GetCommandExecuter()
+ ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root, command)
+ if ret != 0:
+ raise ValueError("Board %s is invalid!" % board)
+ # Remove ANSI escape sequences.
+ out = StripANSIEscapeSequences(out)
+ return out.strip()
def GetArchFromBoard(board, chromeos_root):
- """Get Arch from board."""
- base_board = board.split('_')[0]
- command = ('source %s; get_board_arch %s' %
- (TOOLCHAIN_UTILS_PATH, base_board))
- ce = command_executer.GetCommandExecuter()
- ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root, command)
- if ret != 0:
- raise ValueError('Board %s is invalid!' % board)
- # Remove ANSI escape sequences.
- out = StripANSIEscapeSequences(out)
- return out.strip()
+ """Get Arch from board."""
+ base_board = board.split("_")[0]
+ command = "source %s; get_board_arch %s" % (
+ TOOLCHAIN_UTILS_PATH,
+ base_board,
+ )
+ ce = command_executer.GetCommandExecuter()
+ ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root, command)
+ if ret != 0:
+ raise ValueError("Board %s is invalid!" % board)
+ # Remove ANSI escape sequences.
+ out = StripANSIEscapeSequences(out)
+ return out.strip()
def GetGccLibsDestForBoard(board, chromeos_root):
- """Get gcc libs destination from board."""
- arch = GetArchFromBoard(board, chromeos_root)
- if arch == 'x86':
- return '/build/%s/usr/lib/gcc/' % board
- if arch == 'amd64':
- return '/build/%s/usr/lib64/gcc/' % board
- if arch == 'arm':
- return '/build/%s/usr/lib/gcc/' % board
- if arch == 'arm64':
- return '/build/%s/usr/lib/gcc/' % board
- raise ValueError('Arch %s is invalid!' % arch)
+ """Get gcc libs destination from board."""
+ arch = GetArchFromBoard(board, chromeos_root)
+ if arch == "x86":
+ return "/build/%s/usr/lib/gcc/" % board
+ if arch == "amd64":
+ return "/build/%s/usr/lib64/gcc/" % board
+ if arch == "arm":
+ return "/build/%s/usr/lib/gcc/" % board
+ if arch == "arm64":
+ return "/build/%s/usr/lib/gcc/" % board
+ raise ValueError("Arch %s is invalid!" % arch)
def StripANSIEscapeSequences(string):
- string = re.sub(r'\x1b\[[0-9]*[a-zA-Z]', '', string)
- return string
+ string = re.sub(r"\x1b\[[0-9]*[a-zA-Z]", "", string)
+ return string
def GetChromeSrcDir():
- return 'var/cache/distfiles/target/chrome-src/src'
+ return "var/cache/distfiles/target/chrome-src/src"
def GetEnvStringFromDict(env_dict):
- return ' '.join(['%s="%s"' % var for var in env_dict.items()])
+ return " ".join(['%s="%s"' % var for var in env_dict.items()])
def MergeEnvStringWithDict(env_string, env_dict, prepend=True):
- """Merge env string with dict."""
- if not env_string.strip():
- return GetEnvStringFromDict(env_dict)
- override_env_list = []
- ce = command_executer.GetCommandExecuter()
- for k, v in env_dict.items():
- v = v.strip('"\'')
- if prepend:
- new_env = '%s="%s $%s"' % (k, v, k)
- else:
- new_env = '%s="$%s %s"' % (k, k, v)
- command = '; '.join([env_string, new_env, 'echo $%s' % k])
- ret, out, _ = ce.RunCommandWOutput(command)
- override_env_list.append('%s=%r' % (k, out.strip()))
- ret = env_string + ' ' + ' '.join(override_env_list)
- return ret.strip()
+ """Merge env string with dict."""
+ if not env_string.strip():
+ return GetEnvStringFromDict(env_dict)
+ override_env_list = []
+ ce = command_executer.GetCommandExecuter()
+ for k, v in env_dict.items():
+ v = v.strip("\"'")
+ if prepend:
+ new_env = '%s="%s $%s"' % (k, v, k)
+ else:
+ new_env = '%s="$%s %s"' % (k, k, v)
+ command = "; ".join([env_string, new_env, "echo $%s" % k])
+ ret, out, _ = ce.RunCommandWOutput(command)
+ override_env_list.append("%s=%r" % (k, out.strip()))
+ ret = env_string + " " + " ".join(override_env_list)
+ return ret.strip()
def GetAllImages(chromeos_root, board):
- ce = command_executer.GetCommandExecuter()
- command = ('find %s/src/build/images/%s -name chromiumos_test_image.bin' %
- (chromeos_root, board))
- ret, out, _ = ce.RunCommandWOutput(command)
- assert ret == 0, 'Could not run command: %s' % command
- return out.splitlines()
+ ce = command_executer.GetCommandExecuter()
+ command = "find %s/src/build/images/%s -name chromiumos_test_image.bin" % (
+ chromeos_root,
+ board,
+ )
+ ret, out, _ = ce.RunCommandWOutput(command)
+ assert ret == 0, "Could not run command: %s" % command
+ return out.splitlines()
def IsFloat(text):
- if text is None:
- return False
- try:
- float(text)
- return True
- except ValueError:
- return False
+ if text is None:
+ return False
+ try:
+ float(text)
+ return True
+ except ValueError:
+ return False
def RemoveChromeBrowserObjectFiles(chromeos_root, board):
- """Remove any object files from all the posible locations."""
- out_dir = os.path.join(
- GetChrootPath(chromeos_root),
- 'var/cache/chromeos-chrome/chrome-src/src/out_%s' % board)
- if os.path.exists(out_dir):
- shutil.rmtree(out_dir)
- logger.GetLogger().LogCmd('rm -rf %s' % out_dir)
- out_dir = os.path.join(
- GetChrootPath(chromeos_root),
- 'var/cache/chromeos-chrome/chrome-src-internal/src/out_%s' % board)
- if os.path.exists(out_dir):
- shutil.rmtree(out_dir)
- logger.GetLogger().LogCmd('rm -rf %s' % out_dir)
+    """Remove any object files from all the possible locations."""
+ out_dir = os.path.join(
+ GetChrootPath(chromeos_root),
+ "var/cache/chromeos-chrome/chrome-src/src/out_%s" % board,
+ )
+ if os.path.exists(out_dir):
+ shutil.rmtree(out_dir)
+ logger.GetLogger().LogCmd("rm -rf %s" % out_dir)
+ out_dir = os.path.join(
+ GetChrootPath(chromeos_root),
+ "var/cache/chromeos-chrome/chrome-src-internal/src/out_%s" % board,
+ )
+ if os.path.exists(out_dir):
+ shutil.rmtree(out_dir)
+ logger.GetLogger().LogCmd("rm -rf %s" % out_dir)
@contextmanager
def WorkingDirectory(new_dir):
- """Get the working directory."""
- old_dir = os.getcwd()
- if old_dir != new_dir:
- msg = 'cd %s' % new_dir
- logger.GetLogger().LogCmd(msg)
- os.chdir(new_dir)
- yield new_dir
- if old_dir != new_dir:
- msg = 'cd %s' % old_dir
- logger.GetLogger().LogCmd(msg)
- os.chdir(old_dir)
+ """Get the working directory."""
+ old_dir = os.getcwd()
+ if old_dir != new_dir:
+ msg = "cd %s" % new_dir
+ logger.GetLogger().LogCmd(msg)
+ os.chdir(new_dir)
+ yield new_dir
+ if old_dir != new_dir:
+ msg = "cd %s" % old_dir
+ logger.GetLogger().LogCmd(msg)
+ os.chdir(old_dir)
def HasGitStagedChanges(git_dir):
- """Return True if git repository has staged changes."""
- command = f'cd {git_dir} && git diff --quiet --cached --exit-code HEAD'
- return command_executer.GetCommandExecuter().RunCommand(
- command, print_to_console=False)
+ """Return True if git repository has staged changes."""
+ command = f"cd {git_dir} && git diff --quiet --cached --exit-code HEAD"
+ return command_executer.GetCommandExecuter().RunCommand(
+ command, print_to_console=False
+ )
def HasGitUnstagedChanges(git_dir):
- """Return True if git repository has un-staged changes."""
- command = f'cd {git_dir} && git diff --quiet --exit-code HEAD'
- return command_executer.GetCommandExecuter().RunCommand(
- command, print_to_console=False)
+ """Return True if git repository has un-staged changes."""
+ command = f"cd {git_dir} && git diff --quiet --exit-code HEAD"
+ return command_executer.GetCommandExecuter().RunCommand(
+ command, print_to_console=False
+ )
def HasGitUntrackedChanges(git_dir):
- """Return True if git repository has un-tracked changes."""
- command = (f'cd {git_dir} && test -z '
- '$(git ls-files --exclude-standard --others)')
- return command_executer.GetCommandExecuter().RunCommand(
- command, print_to_console=False)
+ """Return True if git repository has un-tracked changes."""
+ command = (
+ f"cd {git_dir} && test -z "
+ "$(git ls-files --exclude-standard --others)"
+ )
+ return command_executer.GetCommandExecuter().RunCommand(
+ command, print_to_console=False
+ )
def GitGetCommitHash(git_dir, commit_symbolic_name):
- """Return githash for the symbolic git commit.
+ """Return githash for the symbolic git commit.
- For example, commit_symbolic_name could be
- "cros/gcc.gnu.org/branches/gcc/gcc-4_8-mobile, this function returns the git
- hash for this symbolic name.
+ For example, commit_symbolic_name could be
+    "cros/gcc.gnu.org/branches/gcc/gcc-4_8-mobile", this function returns the git
+ hash for this symbolic name.
- Args:
- git_dir: a git working tree.
- commit_symbolic_name: a symbolic name for a particular git commit.
+ Args:
+ git_dir: a git working tree.
+ commit_symbolic_name: a symbolic name for a particular git commit.
- Returns:
- The git hash for the symbolic name or None if fails.
- """
+ Returns:
+ The git hash for the symbolic name or None if fails.
+ """
- command = (f'cd {git_dir} && git log -n 1'
- f' --pretty="format:%H" {commit_symbolic_name}')
- rv, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
- command, print_to_console=False)
- if rv == 0:
- return out.strip()
- return None
+ command = (
+ f"cd {git_dir} && git log -n 1"
+ f' --pretty="format:%H" {commit_symbolic_name}'
+ )
+ rv, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
+ command, print_to_console=False
+ )
+ if rv == 0:
+ return out.strip()
+ return None
def IsGitTreeClean(git_dir):
- """Test if git tree has no local changes.
+ """Test if git tree has no local changes.
- Args:
- git_dir: git tree directory.
+ Args:
+ git_dir: git tree directory.
- Returns:
- True if git dir is clean.
- """
- if HasGitStagedChanges(git_dir):
- logger.GetLogger().LogWarning('Git tree has staged changes.')
- return False
- if HasGitUnstagedChanges(git_dir):
- logger.GetLogger().LogWarning('Git tree has unstaged changes.')
- return False
- if HasGitUntrackedChanges(git_dir):
- logger.GetLogger().LogWarning('Git tree has un-tracked changes.')
- return False
- return True
+ Returns:
+ True if git dir is clean.
+ """
+ if HasGitStagedChanges(git_dir):
+ logger.GetLogger().LogWarning("Git tree has staged changes.")
+ return False
+ if HasGitUnstagedChanges(git_dir):
+ logger.GetLogger().LogWarning("Git tree has unstaged changes.")
+ return False
+ if HasGitUntrackedChanges(git_dir):
+ logger.GetLogger().LogWarning("Git tree has un-tracked changes.")
+ return False
+ return True
def GetGitChangesAsList(git_dir, path=None, staged=False):
- """Get changed files as a list.
+ """Get changed files as a list.
- Args:
- git_dir: git tree directory.
- path: a relative path that is part of the tree directory, could be null.
- staged: whether to include staged files as well.
+ Args:
+ git_dir: git tree directory.
+ path: a relative path that is part of the tree directory, could be null.
+ staged: whether to include staged files as well.
- Returns:
- A list containing all the changed files.
- """
- command = f'cd {git_dir} && git diff --name-only'
- if staged:
- command += ' --cached'
- if path:
- command += ' -- ' + path
- _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
- command, print_to_console=False)
- rv = []
- for line in out.splitlines():
- rv.append(line)
- return rv
+ Returns:
+ A list containing all the changed files.
+ """
+ command = f"cd {git_dir} && git diff --name-only"
+ if staged:
+ command += " --cached"
+ if path:
+ command += " -- " + path
+ _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
+ command, print_to_console=False
+ )
+ rv = []
+ for line in out.splitlines():
+ rv.append(line)
+ return rv
def IsChromeOsTree(chromeos_root):
- return (os.path.isdir(
- os.path.join(chromeos_root, 'src/third_party/chromiumos-overlay'))
- and os.path.isdir(os.path.join(chromeos_root, 'manifest')))
+ return os.path.isdir(
+ os.path.join(chromeos_root, "src/third_party/chromiumos-overlay")
+ ) and os.path.isdir(os.path.join(chromeos_root, "manifest"))
def DeleteChromeOsTree(chromeos_root, dry_run=False):
- """Delete a ChromeOs tree *safely*.
+ """Delete a ChromeOs tree *safely*.
- Args:
- chromeos_root: dir of the tree, could be a relative one (but be careful)
- dry_run: only prints out the command if True
+ Args:
+ chromeos_root: dir of the tree, could be a relative one (but be careful)
+ dry_run: only prints out the command if True
- Returns:
- True if everything is ok.
- """
- if not IsChromeOsTree(chromeos_root):
- logger.GetLogger().LogWarning(f'"{chromeos_root}" does not seem to be a'
- ' valid chromeos tree, do nothing.')
- return False
- cmd0 = f'cd {chromeos_root} && cros_sdk --delete'
- if dry_run:
- print(cmd0)
- else:
- if command_executer.GetCommandExecuter().RunCommand(
- cmd0, print_to_console=True) != 0:
- return False
+ Returns:
+ True if everything is ok.
+ """
+ if not IsChromeOsTree(chromeos_root):
+ logger.GetLogger().LogWarning(
+ f'"{chromeos_root}" does not seem to be a'
+ " valid chromeos tree, do nothing."
+ )
+ return False
+ cmd0 = f"cd {chromeos_root} && cros_sdk --delete"
+ if dry_run:
+ print(cmd0)
+ else:
+ if (
+ command_executer.GetCommandExecuter().RunCommand(
+ cmd0, print_to_console=True
+ )
+ != 0
+ ):
+ return False
- cmd1 = (
- f'export CHROMEOSDIRNAME="$(dirname $(cd {chromeos_root} && pwd))" && '
- f'export CHROMEOSBASENAME="$(basename $(cd {chromeos_root} && pwd))" && '
- 'cd $CHROMEOSDIRNAME && sudo rm -fr $CHROMEOSBASENAME')
- if dry_run:
- print(cmd1)
- return True
-
- return command_executer.GetCommandExecuter().RunCommand(
- cmd1, print_to_console=True) == 0
-
-
-def BooleanPrompt(prompt='Do you want to continue?',
- default=True,
- true_value='yes',
- false_value='no',
- prolog=None):
- """Helper function for processing boolean choice prompts.
-
- Args:
- prompt: The question to present to the user.
- default: Boolean to return if the user just presses enter.
- true_value: The text to display that represents a True returned.
- false_value: The text to display that represents a False returned.
- prolog: The text to display before prompt.
-
- Returns:
- True or False.
- """
- true_value, false_value = true_value.lower(), false_value.lower()
- true_text, false_text = true_value, false_value
- if true_value == false_value:
- raise ValueError('true_value and false_value must differ: got %r' %
- true_value)
-
- if default:
- true_text = true_text[0].upper() + true_text[1:]
- else:
- false_text = false_text[0].upper() + false_text[1:]
-
- prompt = ('\n%s (%s/%s)? ' % (prompt, true_text, false_text))
-
- if prolog:
- prompt = ('\n%s\n%s' % (prolog, prompt))
-
- while True:
- try:
- # pylint: disable=input-builtin, bad-builtin
- response = input(prompt).lower()
- except EOFError:
- # If the user hits CTRL+D, or stdin is disabled, use the default.
- print()
- response = None
- except KeyboardInterrupt:
- # If the user hits CTRL+C, just exit the process.
- print()
- print('CTRL+C detected; exiting')
- sys.exit()
-
- if not response:
- return default
- if true_value.startswith(response):
- if not false_value.startswith(response):
+ cmd1 = (
+ f'export CHROMEOSDIRNAME="$(dirname $(cd {chromeos_root} && pwd))" && '
+ f'export CHROMEOSBASENAME="$(basename $(cd {chromeos_root} && pwd))" && '
+ "cd $CHROMEOSDIRNAME && sudo rm -fr $CHROMEOSBASENAME"
+ )
+ if dry_run:
+ print(cmd1)
return True
- # common prefix between the two...
- elif false_value.startswith(response):
- return False
+
+ return (
+ command_executer.GetCommandExecuter().RunCommand(
+ cmd1, print_to_console=True
+ )
+ == 0
+ )
+
+
+def BooleanPrompt(
+ prompt="Do you want to continue?",
+ default=True,
+ true_value="yes",
+ false_value="no",
+ prolog=None,
+):
+ """Helper function for processing boolean choice prompts.
+
+ Args:
+ prompt: The question to present to the user.
+ default: Boolean to return if the user just presses enter.
+ true_value: The text to display that represents a True returned.
+ false_value: The text to display that represents a False returned.
+ prolog: The text to display before prompt.
+
+ Returns:
+ True or False.
+ """
+ true_value, false_value = true_value.lower(), false_value.lower()
+ true_text, false_text = true_value, false_value
+ if true_value == false_value:
+ raise ValueError(
+ "true_value and false_value must differ: got %r" % true_value
+ )
+
+ if default:
+ true_text = true_text[0].upper() + true_text[1:]
+ else:
+ false_text = false_text[0].upper() + false_text[1:]
+
+ prompt = "\n%s (%s/%s)? " % (prompt, true_text, false_text)
+
+ if prolog:
+ prompt = "\n%s\n%s" % (prolog, prompt)
+
+ while True:
+ try:
+ # pylint: disable=input-builtin, bad-builtin
+ response = input(prompt).lower()
+ except EOFError:
+ # If the user hits CTRL+D, or stdin is disabled, use the default.
+ print()
+ response = None
+ except KeyboardInterrupt:
+ # If the user hits CTRL+C, just exit the process.
+ print()
+ print("CTRL+C detected; exiting")
+ sys.exit()
+
+ if not response:
+ return default
+ if true_value.startswith(response):
+ if not false_value.startswith(response):
+ return True
+ # common prefix between the two...
+ elif false_value.startswith(response):
+ return False
# pylint: disable=unused-argument
def rgb2short(r, g, b):
- """Converts RGB values to xterm-256 color."""
+ """Converts RGB values to xterm-256 color."""
- redcolor = [255, 124, 160, 196, 9]
- greencolor = [255, 118, 82, 46, 10]
+ redcolor = [255, 124, 160, 196, 9]
+ greencolor = [255, 118, 82, 46, 10]
- if g == 0:
- return redcolor[r // 52]
- if r == 0:
- return greencolor[g // 52]
- return 4
+ if g == 0:
+ return redcolor[r // 52]
+ if r == 0:
+ return greencolor[g // 52]
+ return 4
diff --git a/cros_utils/misc_test.py b/cros_utils/misc_test.py
index 21a545e..9e2d110 100755
--- a/cros_utils/misc_test.py
+++ b/cros_utils/misc_test.py
@@ -1,14 +1,13 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for misc."""
-from __future__ import print_function
-__author__ = 'asharif@google.com (Ahmad Sharif)'
+__author__ = "asharif@google.com (Ahmad Sharif)"
# System modules
import unittest
@@ -18,39 +17,51 @@
class UtilsTest(unittest.TestCase):
- """Tests for misc."""
+ """Tests for misc."""
- def testGetFilenameFromString(self):
- string = 'a /b=c"d^$?\\'
- filename = misc.GetFilenameFromString(string)
- self.assertEqual(filename, 'a___bcd')
+ def testGetFilenameFromString(self):
+ string = 'a /b=c"d^$?\\'
+ filename = misc.GetFilenameFromString(string)
+ self.assertEqual(filename, "a___bcd")
- def testPrependMergeEnv(self):
- var = 'USE'
- use_flags = 'hello 123'
- added_use_flags = 'bla bla'
- env_string = '%s=%r' % (var, use_flags)
- new_env_string = misc.MergeEnvStringWithDict(env_string,
- {var: added_use_flags})
- expected_new_env = '%s=%r' % (var, ' '.join([added_use_flags, use_flags]))
- self.assertEqual(new_env_string, ' '.join([env_string, expected_new_env]))
+ def testPrependMergeEnv(self):
+ var = "USE"
+ use_flags = "hello 123"
+ added_use_flags = "bla bla"
+ env_string = "%s=%r" % (var, use_flags)
+ new_env_string = misc.MergeEnvStringWithDict(
+ env_string, {var: added_use_flags}
+ )
+ expected_new_env = "%s=%r" % (
+ var,
+ " ".join([added_use_flags, use_flags]),
+ )
+ self.assertEqual(
+ new_env_string, " ".join([env_string, expected_new_env])
+ )
- def testGetChromeOSVersionFromLSBVersion(self):
- versions_dict = {'2630.0.0': '22', '2030.0.0': '19'}
- f = misc.GetChromeOSVersionFromLSBVersion
- for k, v in versions_dict.items():
- self.assertEqual(f(k), 'R%s-%s' % (v, k))
+ def testGetChromeOSVersionFromLSBVersion(self):
+ versions_dict = {"2630.0.0": "22", "2030.0.0": "19"}
+ f = misc.GetChromeOSVersionFromLSBVersion
+ for k, v in versions_dict.items():
+ self.assertEqual(f(k), "R%s-%s" % (v, k))
- def testPostpendMergeEnv(self):
- var = 'USE'
- use_flags = 'hello 123'
- added_use_flags = 'bla bla'
- env_string = '%s=%r' % (var, use_flags)
- new_env_string = misc.MergeEnvStringWithDict(env_string,
- {var: added_use_flags}, False)
- expected_new_env = '%s=%r' % (var, ' '.join([use_flags, added_use_flags]))
- self.assertEqual(new_env_string, ' '.join([env_string, expected_new_env]))
+ def testPostpendMergeEnv(self):
+ var = "USE"
+ use_flags = "hello 123"
+ added_use_flags = "bla bla"
+ env_string = "%s=%r" % (var, use_flags)
+ new_env_string = misc.MergeEnvStringWithDict(
+ env_string, {var: added_use_flags}, False
+ )
+ expected_new_env = "%s=%r" % (
+ var,
+ " ".join([use_flags, added_use_flags]),
+ )
+ self.assertEqual(
+ new_env_string, " ".join([env_string, expected_new_env])
+ )
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/cros_utils/no_pseudo_terminal_test.py b/cros_utils/no_pseudo_terminal_test.py
index 10fd960..acc90af 100755
--- a/cros_utils/no_pseudo_terminal_test.py
+++ b/cros_utils/no_pseudo_terminal_test.py
@@ -1,64 +1,64 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test to ensure we're not touching /dev/ptmx when running commands."""
-from __future__ import print_function
import os
import subprocess
import tempfile
import time
import unittest
+
from cros_utils import command_executer
class NoPsuedoTerminalTest(unittest.TestCase):
- """Test to ensure we're not touching /dev/ptmx when running commands."""
+ """Test to ensure we're not touching /dev/ptmx when running commands."""
- _strace_process = None
- STRACE_TIMEOUT = 10
+ _strace_process = None
+ STRACE_TIMEOUT = 10
- def _AttachStraceToSelf(self, output_file):
- """Attaches strace to the current process."""
- args = ['sudo', 'strace', '-o', output_file, '-p', str(os.getpid())]
- print(args)
- # pylint: disable=bad-option-value, subprocess-popen-preexec-fn
- self._strace_process = subprocess.Popen(args, preexec_fn=os.setpgrp)
- # Wait until we see some activity.
- start_time = time.time()
- while time.time() - start_time < self.STRACE_TIMEOUT:
- if os.path.isfile(output_file) and open(output_file).read(1):
- return True
- time.sleep(1)
- return False
+ def _AttachStraceToSelf(self, output_file):
+ """Attaches strace to the current process."""
+ args = ["sudo", "strace", "-o", output_file, "-p", str(os.getpid())]
+ print(args)
+ # pylint: disable=bad-option-value, subprocess-popen-preexec-fn
+ self._strace_process = subprocess.Popen(args, preexec_fn=os.setpgrp)
+ # Wait until we see some activity.
+ start_time = time.time()
+ while time.time() - start_time < self.STRACE_TIMEOUT:
+ if os.path.isfile(output_file) and open(output_file).read(1):
+ return True
+ time.sleep(1)
+ return False
- def _KillStraceProcess(self):
- """Kills strace that was started by _AttachStraceToSelf()."""
- pgid = os.getpgid(self._strace_process.pid)
- args = ['sudo', 'kill', str(pgid)]
- if subprocess.call(args) == 0:
- os.waitpid(pgid, 0)
- return True
- return False
+ def _KillStraceProcess(self):
+ """Kills strace that was started by _AttachStraceToSelf()."""
+ pgid = os.getpgid(self._strace_process.pid)
+ args = ["sudo", "kill", str(pgid)]
+ if subprocess.call(args) == 0:
+ os.waitpid(pgid, 0)
+ return True
+ return False
- def testNoPseudoTerminalWhenRunningCommand(self):
- """Test to make sure we're not touching /dev/ptmx when running commands."""
- temp_file = tempfile.mktemp()
- self.assertTrue(self._AttachStraceToSelf(temp_file))
+ def testNoPseudoTerminalWhenRunningCommand(self):
+ """Test to make sure we're not touching /dev/ptmx when running commands."""
+ temp_file = tempfile.mktemp()
+ self.assertTrue(self._AttachStraceToSelf(temp_file))
- ce = command_executer.GetCommandExecuter()
- ce.RunCommand('echo')
+ ce = command_executer.GetCommandExecuter()
+ ce.RunCommand("echo")
- self.assertTrue(self._KillStraceProcess())
+ self.assertTrue(self._KillStraceProcess())
- strace_contents = open(temp_file).read()
- self.assertFalse('/dev/ptmx' in strace_contents)
+ strace_contents = open(temp_file).read()
+ self.assertFalse("/dev/ptmx" in strace_contents)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/cros_utils/perf_diff.py b/cros_utils/perf_diff.py
index b8ddb0c..6647b76 100755
--- a/cros_utils/perf_diff.py
+++ b/cros_utils/perf_diff.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -9,9 +9,8 @@
A detailed description of perf_diff.
"""
-from __future__ import print_function
-__author__ = 'asharif@google.com (Ahmad Sharif)'
+__author__ = "asharif@google.com (Ahmad Sharif)"
import argparse
import functools
@@ -21,319 +20,338 @@
from cros_utils import misc
from cros_utils import tabulator
-ROWS_TO_SHOW = 'Rows_to_show_in_the_perf_table'
-TOTAL_EVENTS = 'Total_events_of_this_profile'
+
+ROWS_TO_SHOW = "Rows_to_show_in_the_perf_table"
+TOTAL_EVENTS = "Total_events_of_this_profile"
def GetPerfDictFromReport(report_file):
- output = {}
- perf_report = PerfReport(report_file)
- for k, v in perf_report.sections.items():
- if k not in output:
- output[k] = {}
- output[k][ROWS_TO_SHOW] = 0
- output[k][TOTAL_EVENTS] = 0
- for function in v.functions:
- out_key = '%s' % (function.name)
- output[k][out_key] = function.count
- output[k][TOTAL_EVENTS] += function.count
- if function.percent > 1:
- output[k][ROWS_TO_SHOW] += 1
- return output
+ output = {}
+ perf_report = PerfReport(report_file)
+ for k, v in perf_report.sections.items():
+ if k not in output:
+ output[k] = {}
+ output[k][ROWS_TO_SHOW] = 0
+ output[k][TOTAL_EVENTS] = 0
+ for function in v.functions:
+ out_key = "%s" % (function.name)
+ output[k][out_key] = function.count
+ output[k][TOTAL_EVENTS] += function.count
+ if function.percent > 1:
+ output[k][ROWS_TO_SHOW] += 1
+ return output
def _SortDictionaryByValue(d):
- l = d.items()
+ l = d.items()
- def GetFloat(x):
- if misc.IsFloat(x):
- return float(x)
- else:
- return x
+ def GetFloat(x):
+ if misc.IsFloat(x):
+ return float(x)
+ else:
+ return x
- sorted_l = sorted(l, key=lambda x: GetFloat(x[1]))
- sorted_l.reverse()
- return [f[0] for f in sorted_l]
+ sorted_l = sorted(l, key=lambda x: GetFloat(x[1]))
+ sorted_l.reverse()
+ return [f[0] for f in sorted_l]
class Tabulator(object):
- """Make tables."""
+ """Make tables."""
- def __init__(self, all_dicts):
- self._all_dicts = all_dicts
+ def __init__(self, all_dicts):
+ self._all_dicts = all_dicts
- def PrintTable(self):
- for dicts in self._all_dicts:
- self.PrintTableHelper(dicts)
+ def PrintTable(self):
+ for dicts in self._all_dicts:
+ self.PrintTableHelper(dicts)
- def PrintTableHelper(self, dicts):
- """Transfrom dicts to tables."""
- fields = {}
- for d in dicts:
- for f in d.keys():
- if f not in fields:
- fields[f] = d[f]
- else:
- fields[f] = max(fields[f], d[f])
- table = []
- header = ['name']
- for i in range(len(dicts)):
- header.append(i)
+ def PrintTableHelper(self, dicts):
+ """Transfrom dicts to tables."""
+ fields = {}
+ for d in dicts:
+ for f in d.keys():
+ if f not in fields:
+ fields[f] = d[f]
+ else:
+ fields[f] = max(fields[f], d[f])
+ table = []
+ header = ["name"]
+ for i in range(len(dicts)):
+ header.append(i)
- table.append(header)
+ table.append(header)
- sorted_fields = _SortDictionaryByValue(fields)
+ sorted_fields = _SortDictionaryByValue(fields)
- for f in sorted_fields:
- row = [f]
- for d in dicts:
- if f in d:
- row.append(d[f])
- else:
- row.append('0')
- table.append(row)
+ for f in sorted_fields:
+ row = [f]
+ for d in dicts:
+ if f in d:
+ row.append(d[f])
+ else:
+ row.append("0")
+ table.append(row)
- print(tabulator.GetSimpleTable(table))
+ print(tabulator.GetSimpleTable(table))
class Function(object):
- """Function for formatting."""
+ """Function for formatting."""
- def __init__(self):
- self.count = 0
- self.name = ''
- self.percent = 0
+ def __init__(self):
+ self.count = 0
+ self.name = ""
+ self.percent = 0
class Section(object):
- """Section formatting."""
+ """Section formatting."""
- def __init__(self, contents):
- self.name = ''
- self.raw_contents = contents
- self._ParseSection()
+ def __init__(self, contents):
+ self.name = ""
+ self.raw_contents = contents
+ self._ParseSection()
- def _ParseSection(self):
- matches = re.findall(r'Events: (\w+)\s+(.*)', self.raw_contents)
- assert len(matches) <= 1, 'More than one event found in 1 section'
- if not matches:
- return
- match = matches[0]
- self.name = match[1]
- self.count = misc.UnitToNumber(match[0])
+ def _ParseSection(self):
+ matches = re.findall(r"Events: (\w+)\s+(.*)", self.raw_contents)
+ assert len(matches) <= 1, "More than one event found in 1 section"
+ if not matches:
+ return
+ match = matches[0]
+ self.name = match[1]
+ self.count = misc.UnitToNumber(match[0])
- self.functions = []
- for line in self.raw_contents.splitlines():
- if not line.strip():
- continue
- if '%' not in line:
- continue
- if not line.startswith('#'):
- fields = [f for f in line.split(' ') if f]
- function = Function()
- function.percent = float(fields[0].strip('%'))
- function.count = int(fields[1])
- function.name = ' '.join(fields[2:])
- self.functions.append(function)
+ self.functions = []
+ for line in self.raw_contents.splitlines():
+ if not line.strip():
+ continue
+ if "%" not in line:
+ continue
+ if not line.startswith("#"):
+ fields = [f for f in line.split(" ") if f]
+ function = Function()
+ function.percent = float(fields[0].strip("%"))
+ function.count = int(fields[1])
+ function.name = " ".join(fields[2:])
+ self.functions.append(function)
class PerfReport(object):
- """Get report from raw report."""
+ """Get report from raw report."""
- def __init__(self, perf_file):
- self.perf_file = perf_file
- self._ReadFile()
- self.sections = {}
- self.metadata = {}
- self._section_contents = []
- self._section_header = ''
- self._SplitSections()
- self._ParseSections()
- self._ParseSectionHeader()
+ def __init__(self, perf_file):
+ self.perf_file = perf_file
+ self._ReadFile()
+ self.sections = {}
+ self.metadata = {}
+ self._section_contents = []
+ self._section_header = ""
+ self._SplitSections()
+ self._ParseSections()
+ self._ParseSectionHeader()
- def _ParseSectionHeader(self):
- """Parse a header of a perf report file."""
- # The "captured on" field is inaccurate - this actually refers to when the
- # report was generated, not when the data was captured.
- for line in self._section_header.splitlines():
- line = line[2:]
- if ':' in line:
- key, val = line.strip().split(':', 1)
- key = key.strip()
- val = val.strip()
- self.metadata[key] = val
+ def _ParseSectionHeader(self):
+ """Parse a header of a perf report file."""
+ # The "captured on" field is inaccurate - this actually refers to when the
+ # report was generated, not when the data was captured.
+ for line in self._section_header.splitlines():
+ line = line[2:]
+ if ":" in line:
+ key, val = line.strip().split(":", 1)
+ key = key.strip()
+ val = val.strip()
+ self.metadata[key] = val
- def _ReadFile(self):
- self._perf_contents = open(self.perf_file).read()
+ def _ReadFile(self):
+ self._perf_contents = open(self.perf_file).read()
- def _ParseSections(self):
- self.event_counts = {}
- self.sections = {}
- for section_content in self._section_contents:
- section = Section(section_content)
- section.name = self._GetHumanReadableName(section.name)
- self.sections[section.name] = section
+ def _ParseSections(self):
+ self.event_counts = {}
+ self.sections = {}
+ for section_content in self._section_contents:
+ section = Section(section_content)
+ section.name = self._GetHumanReadableName(section.name)
+ self.sections[section.name] = section
- # TODO(asharif): Do this better.
- def _GetHumanReadableName(self, section_name):
- if not 'raw' in section_name:
- return section_name
- raw_number = section_name.strip().split(' ')[-1]
- for line in self._section_header.splitlines():
- if raw_number in line:
- name = line.strip().split(' ')[5]
- return name
+ # TODO(asharif): Do this better.
+ def _GetHumanReadableName(self, section_name):
+ if not "raw" in section_name:
+ return section_name
+ raw_number = section_name.strip().split(" ")[-1]
+ for line in self._section_header.splitlines():
+ if raw_number in line:
+ name = line.strip().split(" ")[5]
+ return name
- def _SplitSections(self):
- self._section_contents = []
- indices = [m.start() for m in re.finditer('# Events:', self._perf_contents)]
- indices.append(len(self._perf_contents))
- for i in range(len(indices) - 1):
- section_content = self._perf_contents[indices[i]:indices[i + 1]]
- self._section_contents.append(section_content)
- self._section_header = ''
- if indices:
- self._section_header = self._perf_contents[0:indices[0]]
+ def _SplitSections(self):
+ self._section_contents = []
+ indices = [
+ m.start() for m in re.finditer("# Events:", self._perf_contents)
+ ]
+ indices.append(len(self._perf_contents))
+ for i in range(len(indices) - 1):
+ section_content = self._perf_contents[indices[i] : indices[i + 1]]
+ self._section_contents.append(section_content)
+ self._section_header = ""
+ if indices:
+ self._section_header = self._perf_contents[0 : indices[0]]
class PerfDiffer(object):
- """Perf differ class."""
+ """Perf differ class."""
- def __init__(self, reports, num_symbols, common_only):
- self._reports = reports
- self._num_symbols = num_symbols
- self._common_only = common_only
- self._common_function_names = {}
+ def __init__(self, reports, num_symbols, common_only):
+ self._reports = reports
+ self._num_symbols = num_symbols
+ self._common_only = common_only
+ self._common_function_names = {}
- def DoDiff(self):
- """The function that does the diff."""
- section_names = self._FindAllSections()
+ def DoDiff(self):
+ """The function that does the diff."""
+ section_names = self._FindAllSections()
- filename_dicts = []
- summary_dicts = []
- for report in self._reports:
- d = {}
- filename_dicts.append({'file': report.perf_file})
- for section_name in section_names:
- if section_name in report.sections:
- d[section_name] = report.sections[section_name].count
- summary_dicts.append(d)
+ filename_dicts = []
+ summary_dicts = []
+ for report in self._reports:
+ d = {}
+ filename_dicts.append({"file": report.perf_file})
+ for section_name in section_names:
+ if section_name in report.sections:
+ d[section_name] = report.sections[section_name].count
+ summary_dicts.append(d)
- all_dicts = [filename_dicts, summary_dicts]
+ all_dicts = [filename_dicts, summary_dicts]
- for section_name in section_names:
- function_names = self._GetTopFunctions(section_name, self._num_symbols)
- self._FindCommonFunctions(section_name)
- dicts = []
- for report in self._reports:
+ for section_name in section_names:
+ function_names = self._GetTopFunctions(
+ section_name, self._num_symbols
+ )
+ self._FindCommonFunctions(section_name)
+ dicts = []
+ for report in self._reports:
+ d = {}
+ if section_name in report.sections:
+ section = report.sections[section_name]
+
+ # Get a common scaling factor for this report.
+ common_scaling_factor = self._GetCommonScalingFactor(
+ section
+ )
+
+ for function in section.functions:
+ if function.name in function_names:
+ key = "%s %s" % (section.name, function.name)
+ d[key] = function.count
+ # Compute a factor to scale the function count by in common_only
+ # mode.
+ if self._common_only and (
+ function.name
+ in self._common_function_names[section.name]
+ ):
+ d[key + " scaled"] = (
+ common_scaling_factor * function.count
+ )
+ dicts.append(d)
+
+ all_dicts.append(dicts)
+
+ mytabulator = Tabulator(all_dicts)
+ mytabulator.PrintTable()
+
+ def _FindAllSections(self):
+ sections = {}
+ for report in self._reports:
+ for section in report.sections.values():
+ if section.name not in sections:
+ sections[section.name] = section.count
+ else:
+ sections[section.name] = max(
+ sections[section.name], section.count
+ )
+ return _SortDictionaryByValue(sections)
+
+ def _GetCommonScalingFactor(self, section):
+ unique_count = self._GetCount(
+ section, lambda x: x in self._common_function_names[section.name]
+ )
+ return 100.0 / unique_count
+
+ def _GetCount(self, section, filter_fun=None):
+ total_count = 0
+ for function in section.functions:
+ if not filter_fun or filter_fun(function.name):
+ total_count += int(function.count)
+ return total_count
+
+ def _FindCommonFunctions(self, section_name):
+ function_names_list = []
+ for report in self._reports:
+ if section_name in report.sections:
+ section = report.sections[section_name]
+ function_names = {f.name for f in section.functions}
+ function_names_list.append(function_names)
+
+ self._common_function_names[section_name] = functools.reduce(
+ set.intersection, function_names_list
+ )
+
+ def _GetTopFunctions(self, section_name, num_functions):
+ all_functions = {}
+ for report in self._reports:
+ if section_name in report.sections:
+ section = report.sections[section_name]
+ for f in section.functions[:num_functions]:
+ if f.name in all_functions:
+ all_functions[f.name] = max(
+ all_functions[f.name], f.count
+ )
+ else:
+ all_functions[f.name] = f.count
+ # FIXME(asharif): Don't really need to sort these...
+ return _SortDictionaryByValue(all_functions)
+
+ def _GetFunctionsDict(self, section, function_names):
d = {}
- if section_name in report.sections:
- section = report.sections[section_name]
-
- # Get a common scaling factor for this report.
- common_scaling_factor = self._GetCommonScalingFactor(section)
-
- for function in section.functions:
+ for function in section.functions:
if function.name in function_names:
- key = '%s %s' % (section.name, function.name)
- d[key] = function.count
- # Compute a factor to scale the function count by in common_only
- # mode.
- if self._common_only and (
- function.name in self._common_function_names[section.name]):
- d[key + ' scaled'] = common_scaling_factor * function.count
- dicts.append(d)
-
- all_dicts.append(dicts)
-
- mytabulator = Tabulator(all_dicts)
- mytabulator.PrintTable()
-
- def _FindAllSections(self):
- sections = {}
- for report in self._reports:
- for section in report.sections.values():
- if section.name not in sections:
- sections[section.name] = section.count
- else:
- sections[section.name] = max(sections[section.name], section.count)
- return _SortDictionaryByValue(sections)
-
- def _GetCommonScalingFactor(self, section):
- unique_count = self._GetCount(
- section, lambda x: x in self._common_function_names[section.name])
- return 100.0 / unique_count
-
- def _GetCount(self, section, filter_fun=None):
- total_count = 0
- for function in section.functions:
- if not filter_fun or filter_fun(function.name):
- total_count += int(function.count)
- return total_count
-
- def _FindCommonFunctions(self, section_name):
- function_names_list = []
- for report in self._reports:
- if section_name in report.sections:
- section = report.sections[section_name]
- function_names = {f.name for f in section.functions}
- function_names_list.append(function_names)
-
- self._common_function_names[section_name] = (
- functools.reduce(set.intersection, function_names_list))
-
- def _GetTopFunctions(self, section_name, num_functions):
- all_functions = {}
- for report in self._reports:
- if section_name in report.sections:
- section = report.sections[section_name]
- for f in section.functions[:num_functions]:
- if f.name in all_functions:
- all_functions[f.name] = max(all_functions[f.name], f.count)
- else:
- all_functions[f.name] = f.count
- # FIXME(asharif): Don't really need to sort these...
- return _SortDictionaryByValue(all_functions)
-
- def _GetFunctionsDict(self, section, function_names):
- d = {}
- for function in section.functions:
- if function.name in function_names:
- d[function.name] = function.count
- return d
+ d[function.name] = function.count
+ return d
def Main(argv):
- """The entry of the main."""
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '-n',
- '--num_symbols',
- dest='num_symbols',
- default='5',
- help='The number of symbols to show.')
- parser.add_argument(
- '-c',
- '--common_only',
- dest='common_only',
- action='store_true',
- default=False,
- help='Diff common symbols only.')
+ """The entry of the main."""
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "-n",
+ "--num_symbols",
+ dest="num_symbols",
+ default="5",
+ help="The number of symbols to show.",
+ )
+ parser.add_argument(
+ "-c",
+ "--common_only",
+ dest="common_only",
+ action="store_true",
+ default=False,
+ help="Diff common symbols only.",
+ )
- options, args = parser.parse_known_args(argv)
+ options, args = parser.parse_known_args(argv)
- try:
- reports = []
- for report in args[1:]:
- report = PerfReport(report)
- reports.append(report)
- pd = PerfDiffer(reports, int(options.num_symbols), options.common_only)
- pd.DoDiff()
- finally:
- pass
+ try:
+ reports = []
+ for report in args[1:]:
+ report = PerfReport(report)
+ reports.append(report)
+ pd = PerfDiffer(reports, int(options.num_symbols), options.common_only)
+ pd.DoDiff()
+ finally:
+ pass
- return 0
+ return 0
-if __name__ == '__main__':
- sys.exit(Main(sys.argv))
+if __name__ == "__main__":
+ sys.exit(Main(sys.argv))
diff --git a/cros_utils/tabulator.py b/cros_utils/tabulator.py
index 1a3fd4a..d079ea2 100644
--- a/cros_utils/tabulator.py
+++ b/cros_utils/tabulator.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -61,966 +61,1024 @@
print tp.Print()
"""
-from __future__ import division
-from __future__ import print_function
import collections
import getpass
import math
import statistics
import sys
+
+from cros_utils import misc
+from cros_utils.email_sender import EmailSender
+
# TODO(crbug.com/980719): Drop scipy in the future.
# pylint: disable=import-error
import scipy
-from cros_utils.email_sender import EmailSender
-from cros_utils import misc
-
def _AllFloat(values):
- return all([misc.IsFloat(v) for v in values])
+ return all([misc.IsFloat(v) for v in values])
def _GetFloats(values):
- return [float(v) for v in values]
+ return [float(v) for v in values]
def _StripNone(results):
- res = []
- for result in results:
- if result is not None:
- res.append(result)
- return res
+ res = []
+ for result in results:
+ if result is not None:
+ res.append(result)
+ return res
def _RemoveMinMax(cell, values):
- if len(values) < 3:
- print('WARNING: Values count is less than 3, not ignoring min/max values')
- print('WARNING: Cell name:', cell.name, 'Values:', values)
- return values
+ if len(values) < 3:
+ print(
+ "WARNING: Values count is less than 3, not ignoring min/max values"
+ )
+ print("WARNING: Cell name:", cell.name, "Values:", values)
+ return values
- values.remove(min(values))
- values.remove(max(values))
- return values
+ values.remove(min(values))
+ values.remove(max(values))
+ return values
class TableGenerator(object):
- """Creates a table from a list of list of dicts.
+ """Creates a table from a list of list of dicts.
- The main public function is called GetTable().
- """
- SORT_BY_KEYS = 0
- SORT_BY_KEYS_DESC = 1
- SORT_BY_VALUES = 2
- SORT_BY_VALUES_DESC = 3
- NO_SORT = 4
-
- MISSING_VALUE = 'x'
-
- def __init__(self, d, l, sort=NO_SORT, key_name='keys'):
- self._runs = d
- self._labels = l
- self._sort = sort
- self._key_name = key_name
-
- def _AggregateKeys(self):
- keys = collections.OrderedDict()
- for run_list in self._runs:
- for run in run_list:
- keys.update(dict.fromkeys(run.keys()))
- return list(keys.keys())
-
- def _GetHighestValue(self, key):
- values = []
- for run_list in self._runs:
- for run in run_list:
- if key in run:
- values.append(run[key])
- values = _StripNone(values)
- if _AllFloat(values):
- values = _GetFloats(values)
- return max(values)
-
- def _GetLowestValue(self, key):
- values = []
- for run_list in self._runs:
- for run in run_list:
- if key in run:
- values.append(run[key])
- values = _StripNone(values)
- if _AllFloat(values):
- values = _GetFloats(values)
- return min(values)
-
- def _SortKeys(self, keys):
- if self._sort == self.SORT_BY_KEYS:
- return sorted(keys)
- elif self._sort == self.SORT_BY_VALUES:
- # pylint: disable=unnecessary-lambda
- return sorted(keys, key=lambda x: self._GetLowestValue(x))
- elif self._sort == self.SORT_BY_VALUES_DESC:
- # pylint: disable=unnecessary-lambda
- return sorted(keys, key=lambda x: self._GetHighestValue(x), reverse=True)
- elif self._sort == self.NO_SORT:
- return keys
- else:
- assert 0, 'Unimplemented sort %s' % self._sort
-
- def _GetKeys(self):
- keys = self._AggregateKeys()
- return self._SortKeys(keys)
-
- def GetTable(self, number_of_rows=sys.maxsize):
- """Returns a table from a list of list of dicts.
-
- Examples:
- We have the following runs:
- [[{"k1": "v1", "k2": "v2"}, {"k1": "v3"}],
- [{"k1": "v4", "k4": "v5"}]]
- and the following labels:
- ["vanilla", "modified"]
- it will return:
- [["Key", "vanilla", "modified"]
- ["k1", ["v1", "v3"], ["v4"]]
- ["k2", ["v2"], []]
- ["k4", [], ["v5"]]]
- The returned table can then be processed further by other classes in this
- module.
-
- The list of list of dicts is passed into the constructor of TableGenerator.
- This method converts that into a canonical list of lists which represents a
- table of values.
-
- Args:
- number_of_rows: Maximum number of rows to return from the table.
-
- Returns:
- A list of lists which is the table.
+ The main public function is called GetTable().
"""
- keys = self._GetKeys()
- header = [self._key_name] + self._labels
- table = [header]
- rows = 0
- for k in keys:
- row = [k]
- unit = None
- for run_list in self._runs:
- v = []
- for run in run_list:
- if k in run:
- if isinstance(run[k], list):
- val = run[k][0]
- unit = run[k][1]
- else:
- val = run[k]
- v.append(val)
- else:
- v.append(None)
- row.append(v)
- # If we got a 'unit' value, append the units name to the key name.
- if unit:
- keyname = row[0] + ' (%s) ' % unit
- row[0] = keyname
- table.append(row)
- rows += 1
- if rows == number_of_rows:
- break
- return table
+
+ SORT_BY_KEYS = 0
+ SORT_BY_KEYS_DESC = 1
+ SORT_BY_VALUES = 2
+ SORT_BY_VALUES_DESC = 3
+ NO_SORT = 4
+
+ MISSING_VALUE = "x"
+
+ def __init__(self, d, l, sort=NO_SORT, key_name="keys"):
+ self._runs = d
+ self._labels = l
+ self._sort = sort
+ self._key_name = key_name
+
+ def _AggregateKeys(self):
+ keys = collections.OrderedDict()
+ for run_list in self._runs:
+ for run in run_list:
+ keys.update(dict.fromkeys(run.keys()))
+ return list(keys.keys())
+
+ def _GetHighestValue(self, key):
+ values = []
+ for run_list in self._runs:
+ for run in run_list:
+ if key in run:
+ values.append(run[key])
+ values = _StripNone(values)
+ if _AllFloat(values):
+ values = _GetFloats(values)
+ return max(values)
+
+ def _GetLowestValue(self, key):
+ values = []
+ for run_list in self._runs:
+ for run in run_list:
+ if key in run:
+ values.append(run[key])
+ values = _StripNone(values)
+ if _AllFloat(values):
+ values = _GetFloats(values)
+ return min(values)
+
+ def _SortKeys(self, keys):
+ if self._sort == self.SORT_BY_KEYS:
+ return sorted(keys)
+ elif self._sort == self.SORT_BY_VALUES:
+ # pylint: disable=unnecessary-lambda
+ return sorted(keys, key=lambda x: self._GetLowestValue(x))
+ elif self._sort == self.SORT_BY_VALUES_DESC:
+ # pylint: disable=unnecessary-lambda
+ return sorted(
+ keys, key=lambda x: self._GetHighestValue(x), reverse=True
+ )
+ elif self._sort == self.NO_SORT:
+ return keys
+ else:
+ assert 0, "Unimplemented sort %s" % self._sort
+
+ def _GetKeys(self):
+ keys = self._AggregateKeys()
+ return self._SortKeys(keys)
+
+ def GetTable(self, number_of_rows=sys.maxsize):
+ """Returns a table from a list of list of dicts.
+
+ Examples:
+ We have the following runs:
+ [[{"k1": "v1", "k2": "v2"}, {"k1": "v3"}],
+ [{"k1": "v4", "k4": "v5"}]]
+ and the following labels:
+ ["vanilla", "modified"]
+ it will return:
+ [["Key", "vanilla", "modified"]
+ ["k1", ["v1", "v3"], ["v4"]]
+ ["k2", ["v2"], []]
+ ["k4", [], ["v5"]]]
+ The returned table can then be processed further by other classes in this
+ module.
+
+ The list of list of dicts is passed into the constructor of TableGenerator.
+ This method converts that into a canonical list of lists which represents a
+ table of values.
+
+ Args:
+ number_of_rows: Maximum number of rows to return from the table.
+
+ Returns:
+ A list of lists which is the table.
+ """
+ keys = self._GetKeys()
+ header = [self._key_name] + self._labels
+ table = [header]
+ rows = 0
+ for k in keys:
+ row = [k]
+ unit = None
+ for run_list in self._runs:
+ v = []
+ for run in run_list:
+ if k in run:
+ if isinstance(run[k], list):
+ val = run[k][0]
+ unit = run[k][1]
+ else:
+ val = run[k]
+ v.append(val)
+ else:
+ v.append(None)
+ row.append(v)
+ # If we got a 'unit' value, append the units name to the key name.
+ if unit:
+ keyname = row[0] + " (%s) " % unit
+ row[0] = keyname
+ table.append(row)
+ rows += 1
+ if rows == number_of_rows:
+ break
+ return table
class SamplesTableGenerator(TableGenerator):
- """Creates a table with only samples from the results
+ """Creates a table with only samples from the results
- The main public function is called GetTable().
+ The main public function is called GetTable().
- Different than TableGenerator, self._runs is now a dict of {benchmark: runs}
- We are expecting there is 'samples' in `runs`.
- """
-
- def __init__(self, run_keyvals, label_list, iter_counts, weights):
- TableGenerator.__init__(
- self, run_keyvals, label_list, key_name='Benchmarks')
- self._iter_counts = iter_counts
- self._weights = weights
-
- def _GetKeys(self):
- keys = self._runs.keys()
- return self._SortKeys(keys)
-
- def GetTable(self, number_of_rows=sys.maxsize):
- """Returns a tuple, which contains three args:
-
- 1) a table from a list of list of dicts.
- 2) updated benchmark_results run_keyvals with composite benchmark
- 3) updated benchmark_results iter_count with composite benchmark
-
- The dict of list of list of dicts is passed into the constructor of
- SamplesTableGenerator.
- This method converts that into a canonical list of lists which
- represents a table of values.
-
- Examples:
- We have the following runs:
- {bench1: [[{"samples": "v1"}, {"samples": "v2"}],
- [{"samples": "v3"}, {"samples": "v4"}]]
- bench2: [[{"samples": "v21"}, None],
- [{"samples": "v22"}, {"samples": "v23"}]]}
- and weights of benchmarks:
- {bench1: w1, bench2: w2}
- and the following labels:
- ["vanilla", "modified"]
- it will return:
- [["Benchmark", "Weights", "vanilla", "modified"]
- ["bench1", w1,
- ((2, 0), ["v1*w1", "v2*w1"]), ((2, 0), ["v3*w1", "v4*w1"])]
- ["bench2", w2,
- ((1, 1), ["v21*w2", None]), ((2, 0), ["v22*w2", "v23*w2"])]
- ["Composite Benchmark", N/A,
- ((1, 1), ["v1*w1+v21*w2", None]),
- ((2, 0), ["v3*w1+v22*w2", "v4*w1+ v23*w2"])]]
- The returned table can then be processed further by other classes in this
- module.
-
- Args:
- number_of_rows: Maximum number of rows to return from the table.
-
- Returns:
- A list of lists which is the table.
+ Different than TableGenerator, self._runs is now a dict of {benchmark: runs}
+ We are expecting there is 'samples' in `runs`.
"""
- keys = self._GetKeys()
- header = [self._key_name, 'Weights'] + self._labels
- table = [header]
- rows = 0
- iterations = 0
- for k in keys:
- bench_runs = self._runs[k]
- unit = None
- all_runs_empty = all(not dict for label in bench_runs for dict in label)
- if all_runs_empty:
- cell = Cell()
- cell.string_value = ('Benchmark %s contains no result.'
- ' Is the benchmark name valid?' % k)
- table.append([cell])
- else:
- row = [k]
- row.append(self._weights[k])
- for run_list in bench_runs:
- run_pass = 0
- run_fail = 0
- v = []
- for run in run_list:
- if 'samples' in run:
- if isinstance(run['samples'], list):
- val = run['samples'][0] * self._weights[k]
- unit = run['samples'][1]
- else:
- val = run['samples'] * self._weights[k]
- v.append(val)
- run_pass += 1
+ def __init__(self, run_keyvals, label_list, iter_counts, weights):
+ TableGenerator.__init__(
+ self, run_keyvals, label_list, key_name="Benchmarks"
+ )
+ self._iter_counts = iter_counts
+ self._weights = weights
+
+ def _GetKeys(self):
+ keys = self._runs.keys()
+ return self._SortKeys(keys)
+
+ def GetTable(self, number_of_rows=sys.maxsize):
+ """Returns a tuple, which contains three args:
+
+ 1) a table from a list of list of dicts.
+ 2) updated benchmark_results run_keyvals with composite benchmark
+ 3) updated benchmark_results iter_count with composite benchmark
+
+ The dict of list of list of dicts is passed into the constructor of
+ SamplesTableGenerator.
+ This method converts that into a canonical list of lists which
+ represents a table of values.
+
+ Examples:
+ We have the following runs:
+ {bench1: [[{"samples": "v1"}, {"samples": "v2"}],
+ [{"samples": "v3"}, {"samples": "v4"}]]
+ bench2: [[{"samples": "v21"}, None],
+ [{"samples": "v22"}, {"samples": "v23"}]]}
+ and weights of benchmarks:
+ {bench1: w1, bench2: w2}
+ and the following labels:
+ ["vanilla", "modified"]
+ it will return:
+ [["Benchmark", "Weights", "vanilla", "modified"]
+ ["bench1", w1,
+ ((2, 0), ["v1*w1", "v2*w1"]), ((2, 0), ["v3*w1", "v4*w1"])]
+ ["bench2", w2,
+ ((1, 1), ["v21*w2", None]), ((2, 0), ["v22*w2", "v23*w2"])]
+ ["Composite Benchmark", N/A,
+ ((1, 1), ["v1*w1+v21*w2", None]),
+ ((2, 0), ["v3*w1+v22*w2", "v4*w1+ v23*w2"])]]
+ The returned table can then be processed further by other classes in this
+ module.
+
+ Args:
+ number_of_rows: Maximum number of rows to return from the table.
+
+ Returns:
+ A list of lists which is the table.
+ """
+ keys = self._GetKeys()
+ header = [self._key_name, "Weights"] + self._labels
+ table = [header]
+ rows = 0
+ iterations = 0
+
+ for k in keys:
+ bench_runs = self._runs[k]
+ unit = None
+ all_runs_empty = all(
+ not dict for label in bench_runs for dict in label
+ )
+ if all_runs_empty:
+ cell = Cell()
+ cell.string_value = (
+ "Benchmark %s contains no result."
+ " Is the benchmark name valid?" % k
+ )
+ table.append([cell])
else:
- v.append(None)
- run_fail += 1
- one_tuple = ((run_pass, run_fail), v)
- if iterations not in (0, run_pass + run_fail):
- raise ValueError('Iterations of each benchmark run ' \
- 'are not the same')
- iterations = run_pass + run_fail
- row.append(one_tuple)
- if unit:
- keyname = row[0] + ' (%s) ' % unit
- row[0] = keyname
- table.append(row)
- rows += 1
- if rows == number_of_rows:
- break
+ row = [k]
+ row.append(self._weights[k])
+ for run_list in bench_runs:
+ run_pass = 0
+ run_fail = 0
+ v = []
+ for run in run_list:
+ if "samples" in run:
+ if isinstance(run["samples"], list):
+ val = run["samples"][0] * self._weights[k]
+ unit = run["samples"][1]
+ else:
+ val = run["samples"] * self._weights[k]
+ v.append(val)
+ run_pass += 1
+ else:
+ v.append(None)
+ run_fail += 1
+ one_tuple = ((run_pass, run_fail), v)
+ if iterations not in (0, run_pass + run_fail):
+ raise ValueError(
+ "Iterations of each benchmark run "
+ "are not the same"
+ )
+ iterations = run_pass + run_fail
+ row.append(one_tuple)
+ if unit:
+ keyname = row[0] + " (%s) " % unit
+ row[0] = keyname
+ table.append(row)
+ rows += 1
+ if rows == number_of_rows:
+ break
- k = 'Composite Benchmark'
- if k in keys:
- raise RuntimeError('Composite benchmark already exists in results')
+ k = "Composite Benchmark"
+ if k in keys:
+ raise RuntimeError("Composite benchmark already exists in results")
- # Create a new composite benchmark row at the bottom of the summary table
- # The new row will be like the format in example:
- # ["Composite Benchmark", N/A,
- # ((1, 1), ["v1*w1+v21*w2", None]),
- # ((2, 0), ["v3*w1+v22*w2", "v4*w1+ v23*w2"])]]
- # First we will create a row of [key, weight, [[0] * iterations] * labels]
- row = [None] * len(header)
- row[0] = '%s (samples)' % k
- row[1] = 'N/A'
- for label_index in range(2, len(row)):
- row[label_index] = [0] * iterations
-
- for cur_row in table[1:]:
- # Iterate through each benchmark
- if len(cur_row) > 1:
- for label_index in range(2, len(cur_row)):
- # Iterate through each run in a single benchmark
- # each result should look like ((pass, fail), [values_list])
- bench_runs = cur_row[label_index][1]
- for index in range(iterations):
- # Accumulate each run result to composite benchmark run
- # If any run fails, then we set this run for composite benchmark
- # to None so that we know it fails.
- if bench_runs[index] and row[label_index][index] is not None:
- row[label_index][index] += bench_runs[index]
- else:
- row[label_index][index] = None
- else:
- # One benchmark totally fails, no valid data will be in final result
+ # Create a new composite benchmark row at the bottom of the summary table
+ # The new row will be like the format in example:
+ # ["Composite Benchmark", N/A,
+ # ((1, 1), ["v1*w1+v21*w2", None]),
+ # ((2, 0), ["v3*w1+v22*w2", "v4*w1+ v23*w2"])]]
+ # First we will create a row of [key, weight, [[0] * iterations] * labels]
+ row = [None] * len(header)
+ row[0] = "%s (samples)" % k
+ row[1] = "N/A"
for label_index in range(2, len(row)):
- row[label_index] = [None] * iterations
- break
- # Calculate pass and fail count for composite benchmark
- for label_index in range(2, len(row)):
- run_pass = 0
- run_fail = 0
- for run in row[label_index]:
- if run:
- run_pass += 1
- else:
- run_fail += 1
- row[label_index] = ((run_pass, run_fail), row[label_index])
- table.append(row)
+ row[label_index] = [0] * iterations
- # Now that we have the table genearted, we want to store this new composite
- # benchmark into the benchmark_result in ResultReport object.
- # This will be used to generate a full table which contains our composite
- # benchmark.
- # We need to create composite benchmark result and add it to keyvals in
- # benchmark_results.
- v = []
- for label in row[2:]:
- # each label's result looks like ((pass, fail), [values])
- benchmark_runs = label[1]
- # List of values of each label
- single_run_list = []
- for run in benchmark_runs:
- # Result of each run under the same label is a dict of keys.
- # Here the only key we will add for composite benchmark is the
- # weighted_samples we added up.
- one_dict = {}
- if run:
- one_dict[u'weighted_samples'] = [run, u'samples']
- one_dict['retval'] = 0
- else:
- one_dict['retval'] = 1
- single_run_list.append(one_dict)
- v.append(single_run_list)
+ for cur_row in table[1:]:
+ # Iterate through each benchmark
+ if len(cur_row) > 1:
+ for label_index in range(2, len(cur_row)):
+ # Iterate through each run in a single benchmark
+ # each result should look like ((pass, fail), [values_list])
+ bench_runs = cur_row[label_index][1]
+ for index in range(iterations):
+ # Accumulate each run result to composite benchmark run
+ # If any run fails, then we set this run for composite benchmark
+ # to None so that we know it fails.
+ if (
+ bench_runs[index]
+ and row[label_index][index] is not None
+ ):
+ row[label_index][index] += bench_runs[index]
+ else:
+ row[label_index][index] = None
+ else:
+ # One benchmark totally fails, no valid data will be in final result
+ for label_index in range(2, len(row)):
+ row[label_index] = [None] * iterations
+ break
+ # Calculate pass and fail count for composite benchmark
+ for label_index in range(2, len(row)):
+ run_pass = 0
+ run_fail = 0
+ for run in row[label_index]:
+ if run:
+ run_pass += 1
+ else:
+ run_fail += 1
+ row[label_index] = ((run_pass, run_fail), row[label_index])
+ table.append(row)
- self._runs[k] = v
- self._iter_counts[k] = iterations
+        # Now that we have the table generated, we want to store this new composite
+ # benchmark into the benchmark_result in ResultReport object.
+ # This will be used to generate a full table which contains our composite
+ # benchmark.
+ # We need to create composite benchmark result and add it to keyvals in
+ # benchmark_results.
+ v = []
+ for label in row[2:]:
+ # each label's result looks like ((pass, fail), [values])
+ benchmark_runs = label[1]
+ # List of values of each label
+ single_run_list = []
+ for run in benchmark_runs:
+ # Result of each run under the same label is a dict of keys.
+ # Here the only key we will add for composite benchmark is the
+ # weighted_samples we added up.
+ one_dict = {}
+ if run:
+ one_dict[u"weighted_samples"] = [run, u"samples"]
+ one_dict["retval"] = 0
+ else:
+ one_dict["retval"] = 1
+ single_run_list.append(one_dict)
+ v.append(single_run_list)
- return (table, self._runs, self._iter_counts)
+ self._runs[k] = v
+ self._iter_counts[k] = iterations
+
+ return (table, self._runs, self._iter_counts)
class Result(object):
- """A class that respresents a single result.
+    """A class that represents a single result.
- This single result is obtained by condensing the information from a list of
- runs and a list of baseline runs.
- """
-
- def __init__(self):
- pass
-
- def _AllStringsSame(self, values):
- values_set = set(values)
- return len(values_set) == 1
-
- def NeedsBaseline(self):
- return False
-
- # pylint: disable=unused-argument
- def _Literal(self, cell, values, baseline_values):
- cell.value = ' '.join([str(v) for v in values])
-
- def _ComputeFloat(self, cell, values, baseline_values):
- self._Literal(cell, values, baseline_values)
-
- def _ComputeString(self, cell, values, baseline_values):
- self._Literal(cell, values, baseline_values)
-
- def _InvertIfLowerIsBetter(self, cell):
- pass
-
- def _GetGmean(self, values):
- if not values:
- return float('nan')
- if any([v < 0 for v in values]):
- return float('nan')
- if any([v == 0 for v in values]):
- return 0.0
- log_list = [math.log(v) for v in values]
- gmean_log = sum(log_list) / len(log_list)
- return math.exp(gmean_log)
-
- def Compute(self, cell, values, baseline_values):
- """Compute the result given a list of values and baseline values.
-
- Args:
- cell: A cell data structure to populate.
- values: List of values.
- baseline_values: List of baseline values. Can be none if this is the
- baseline itself.
+ This single result is obtained by condensing the information from a list of
+ runs and a list of baseline runs.
"""
- all_floats = True
- values = _StripNone(values)
- if not values:
- cell.value = ''
- return
- if _AllFloat(values):
- float_values = _GetFloats(values)
- else:
- all_floats = False
- if baseline_values:
- baseline_values = _StripNone(baseline_values)
- if baseline_values:
- if _AllFloat(baseline_values):
- float_baseline_values = _GetFloats(baseline_values)
- else:
- all_floats = False
- else:
- if self.NeedsBaseline():
- cell.value = ''
- return
- float_baseline_values = None
- if all_floats:
- self._ComputeFloat(cell, float_values, float_baseline_values)
- self._InvertIfLowerIsBetter(cell)
- else:
- self._ComputeString(cell, values, baseline_values)
+
+ def __init__(self):
+ pass
+
+ def _AllStringsSame(self, values):
+ values_set = set(values)
+ return len(values_set) == 1
+
+ def NeedsBaseline(self):
+ return False
+
+ # pylint: disable=unused-argument
+ def _Literal(self, cell, values, baseline_values):
+ cell.value = " ".join([str(v) for v in values])
+
+ def _ComputeFloat(self, cell, values, baseline_values):
+ self._Literal(cell, values, baseline_values)
+
+ def _ComputeString(self, cell, values, baseline_values):
+ self._Literal(cell, values, baseline_values)
+
+ def _InvertIfLowerIsBetter(self, cell):
+ pass
+
+ def _GetGmean(self, values):
+ if not values:
+ return float("nan")
+ if any([v < 0 for v in values]):
+ return float("nan")
+ if any([v == 0 for v in values]):
+ return 0.0
+ log_list = [math.log(v) for v in values]
+ gmean_log = sum(log_list) / len(log_list)
+ return math.exp(gmean_log)
+
+ def Compute(self, cell, values, baseline_values):
+ """Compute the result given a list of values and baseline values.
+
+ Args:
+ cell: A cell data structure to populate.
+ values: List of values.
+ baseline_values: List of baseline values. Can be none if this is the
+ baseline itself.
+ """
+ all_floats = True
+ values = _StripNone(values)
+ if not values:
+ cell.value = ""
+ return
+ if _AllFloat(values):
+ float_values = _GetFloats(values)
+ else:
+ all_floats = False
+ if baseline_values:
+ baseline_values = _StripNone(baseline_values)
+ if baseline_values:
+ if _AllFloat(baseline_values):
+ float_baseline_values = _GetFloats(baseline_values)
+ else:
+ all_floats = False
+ else:
+ if self.NeedsBaseline():
+ cell.value = ""
+ return
+ float_baseline_values = None
+ if all_floats:
+ self._ComputeFloat(cell, float_values, float_baseline_values)
+ self._InvertIfLowerIsBetter(cell)
+ else:
+ self._ComputeString(cell, values, baseline_values)
class LiteralResult(Result):
- """A literal result."""
+ """A literal result."""
- def __init__(self, iteration=0):
- super(LiteralResult, self).__init__()
- self.iteration = iteration
+ def __init__(self, iteration=0):
+ super(LiteralResult, self).__init__()
+ self.iteration = iteration
- def Compute(self, cell, values, baseline_values):
- try:
- cell.value = values[self.iteration]
- except IndexError:
- cell.value = '-'
+ def Compute(self, cell, values, baseline_values):
+ try:
+ cell.value = values[self.iteration]
+ except IndexError:
+ cell.value = "-"
class NonEmptyCountResult(Result):
- """A class that counts the number of non-empty results.
+ """A class that counts the number of non-empty results.
- The number of non-empty values will be stored in the cell.
- """
-
- def Compute(self, cell, values, baseline_values):
- """Put the number of non-empty values in the cell result.
-
- Args:
- cell: Put the result in cell.value.
- values: A list of values for the row.
- baseline_values: A list of baseline values for the row.
+ The number of non-empty values will be stored in the cell.
"""
- cell.value = len(_StripNone(values))
- if not baseline_values:
- return
- base_value = len(_StripNone(baseline_values))
- if cell.value == base_value:
- return
- f = ColorBoxFormat()
- len_values = len(values)
- len_baseline_values = len(baseline_values)
- tmp_cell = Cell()
- tmp_cell.value = 1.0 + (
- float(cell.value - base_value) / (max(len_values, len_baseline_values)))
- f.Compute(tmp_cell)
- cell.bgcolor = tmp_cell.bgcolor
+
+ def Compute(self, cell, values, baseline_values):
+ """Put the number of non-empty values in the cell result.
+
+ Args:
+ cell: Put the result in cell.value.
+ values: A list of values for the row.
+ baseline_values: A list of baseline values for the row.
+ """
+ cell.value = len(_StripNone(values))
+ if not baseline_values:
+ return
+ base_value = len(_StripNone(baseline_values))
+ if cell.value == base_value:
+ return
+ f = ColorBoxFormat()
+ len_values = len(values)
+ len_baseline_values = len(baseline_values)
+ tmp_cell = Cell()
+ tmp_cell.value = 1.0 + (
+ float(cell.value - base_value)
+ / (max(len_values, len_baseline_values))
+ )
+ f.Compute(tmp_cell)
+ cell.bgcolor = tmp_cell.bgcolor
class StringMeanResult(Result):
- """Mean of string values."""
+ """Mean of string values."""
- def _ComputeString(self, cell, values, baseline_values):
- if self._AllStringsSame(values):
- cell.value = str(values[0])
- else:
- cell.value = '?'
+ def _ComputeString(self, cell, values, baseline_values):
+ if self._AllStringsSame(values):
+ cell.value = str(values[0])
+ else:
+ cell.value = "?"
class AmeanResult(StringMeanResult):
- """Arithmetic mean."""
+ """Arithmetic mean."""
- def __init__(self, ignore_min_max=False):
- super(AmeanResult, self).__init__()
- self.ignore_min_max = ignore_min_max
+ def __init__(self, ignore_min_max=False):
+ super(AmeanResult, self).__init__()
+ self.ignore_min_max = ignore_min_max
- def _ComputeFloat(self, cell, values, baseline_values):
- if self.ignore_min_max:
- values = _RemoveMinMax(cell, values)
- cell.value = statistics.mean(values)
+ def _ComputeFloat(self, cell, values, baseline_values):
+ if self.ignore_min_max:
+ values = _RemoveMinMax(cell, values)
+ cell.value = statistics.mean(values)
class RawResult(Result):
- """Raw result."""
+ """Raw result."""
class IterationResult(Result):
- """Iteration result."""
+ """Iteration result."""
class MinResult(Result):
- """Minimum."""
+ """Minimum."""
- def _ComputeFloat(self, cell, values, baseline_values):
- cell.value = min(values)
+ def _ComputeFloat(self, cell, values, baseline_values):
+ cell.value = min(values)
- def _ComputeString(self, cell, values, baseline_values):
- if values:
- cell.value = min(values)
- else:
- cell.value = ''
+ def _ComputeString(self, cell, values, baseline_values):
+ if values:
+ cell.value = min(values)
+ else:
+ cell.value = ""
class MaxResult(Result):
- """Maximum."""
+ """Maximum."""
- def _ComputeFloat(self, cell, values, baseline_values):
- cell.value = max(values)
+ def _ComputeFloat(self, cell, values, baseline_values):
+ cell.value = max(values)
- def _ComputeString(self, cell, values, baseline_values):
- if values:
- cell.value = max(values)
- else:
- cell.value = ''
+ def _ComputeString(self, cell, values, baseline_values):
+ if values:
+ cell.value = max(values)
+ else:
+ cell.value = ""
class NumericalResult(Result):
- """Numerical result."""
+ """Numerical result."""
- def _ComputeString(self, cell, values, baseline_values):
- cell.value = '?'
+ def _ComputeString(self, cell, values, baseline_values):
+ cell.value = "?"
class StdResult(NumericalResult):
- """Standard deviation."""
+ """Standard deviation."""
- def __init__(self, ignore_min_max=False):
- super(StdResult, self).__init__()
- self.ignore_min_max = ignore_min_max
+ def __init__(self, ignore_min_max=False):
+ super(StdResult, self).__init__()
+ self.ignore_min_max = ignore_min_max
- def _ComputeFloat(self, cell, values, baseline_values):
- if self.ignore_min_max:
- values = _RemoveMinMax(cell, values)
- cell.value = statistics.pstdev(values)
+ def _ComputeFloat(self, cell, values, baseline_values):
+ if self.ignore_min_max:
+ values = _RemoveMinMax(cell, values)
+ cell.value = statistics.pstdev(values)
class CoeffVarResult(NumericalResult):
- """Standard deviation / Mean"""
+ """Standard deviation / Mean"""
- def __init__(self, ignore_min_max=False):
- super(CoeffVarResult, self).__init__()
- self.ignore_min_max = ignore_min_max
+ def __init__(self, ignore_min_max=False):
+ super(CoeffVarResult, self).__init__()
+ self.ignore_min_max = ignore_min_max
- def _ComputeFloat(self, cell, values, baseline_values):
- if self.ignore_min_max:
- values = _RemoveMinMax(cell, values)
- if statistics.mean(values) != 0.0:
- noise = abs(statistics.pstdev(values) / statistics.mean(values))
- else:
- noise = 0.0
- cell.value = noise
+ def _ComputeFloat(self, cell, values, baseline_values):
+ if self.ignore_min_max:
+ values = _RemoveMinMax(cell, values)
+ if statistics.mean(values) != 0.0:
+ noise = abs(statistics.pstdev(values) / statistics.mean(values))
+ else:
+ noise = 0.0
+ cell.value = noise
class ComparisonResult(Result):
- """Same or Different."""
+ """Same or Different."""
- def NeedsBaseline(self):
- return True
+ def NeedsBaseline(self):
+ return True
- def _ComputeString(self, cell, values, baseline_values):
- value = None
- baseline_value = None
- if self._AllStringsSame(values):
- value = values[0]
- if self._AllStringsSame(baseline_values):
- baseline_value = baseline_values[0]
- if value is not None and baseline_value is not None:
- if value == baseline_value:
- cell.value = 'SAME'
- else:
- cell.value = 'DIFFERENT'
- else:
- cell.value = '?'
+ def _ComputeString(self, cell, values, baseline_values):
+ value = None
+ baseline_value = None
+ if self._AllStringsSame(values):
+ value = values[0]
+ if self._AllStringsSame(baseline_values):
+ baseline_value = baseline_values[0]
+ if value is not None and baseline_value is not None:
+ if value == baseline_value:
+ cell.value = "SAME"
+ else:
+ cell.value = "DIFFERENT"
+ else:
+ cell.value = "?"
class PValueResult(ComparisonResult):
- """P-value."""
+ """P-value."""
- def __init__(self, ignore_min_max=False):
- super(PValueResult, self).__init__()
- self.ignore_min_max = ignore_min_max
+ def __init__(self, ignore_min_max=False):
+ super(PValueResult, self).__init__()
+ self.ignore_min_max = ignore_min_max
- def _ComputeFloat(self, cell, values, baseline_values):
- if self.ignore_min_max:
- values = _RemoveMinMax(cell, values)
- baseline_values = _RemoveMinMax(cell, baseline_values)
- if len(values) < 2 or len(baseline_values) < 2:
- cell.value = float('nan')
- return
- _, cell.value = scipy.stats.ttest_ind(values, baseline_values)
+ def _ComputeFloat(self, cell, values, baseline_values):
+ if self.ignore_min_max:
+ values = _RemoveMinMax(cell, values)
+ baseline_values = _RemoveMinMax(cell, baseline_values)
+ if len(values) < 2 or len(baseline_values) < 2:
+ cell.value = float("nan")
+ return
+ _, cell.value = scipy.stats.ttest_ind(values, baseline_values)
- def _ComputeString(self, cell, values, baseline_values):
- return float('nan')
+ def _ComputeString(self, cell, values, baseline_values):
+ return float("nan")
class KeyAwareComparisonResult(ComparisonResult):
- """Automatic key aware comparison."""
+ """Automatic key aware comparison."""
- def _IsLowerBetter(self, key):
- # Units in histograms should include directions
- if 'smallerIsBetter' in key:
- return True
- if 'biggerIsBetter' in key:
- return False
+ def _IsLowerBetter(self, key):
+ # Units in histograms should include directions
+ if "smallerIsBetter" in key:
+ return True
+ if "biggerIsBetter" in key:
+ return False
- # For units in chartjson:
- # TODO(llozano): Trying to guess direction by looking at the name of the
- # test does not seem like a good idea. Test frameworks should provide this
- # info explicitly. I believe Telemetry has this info. Need to find it out.
- #
- # Below are some test names for which we are not sure what the
- # direction is.
- #
- # For these we dont know what the direction is. But, since we dont
- # specify anything, crosperf will assume higher is better:
- # --percent_impl_scrolled--percent_impl_scrolled--percent
- # --solid_color_tiles_analyzed--solid_color_tiles_analyzed--count
- # --total_image_cache_hit_count--total_image_cache_hit_count--count
- # --total_texture_upload_time_by_url
- #
- # About these we are doubtful but we made a guess:
- # --average_num_missing_tiles_by_url--*--units (low is good)
- # --experimental_mean_frame_time_by_url--*--units (low is good)
- # --experimental_median_frame_time_by_url--*--units (low is good)
- # --texture_upload_count--texture_upload_count--count (high is good)
- # --total_deferred_image_decode_count--count (low is good)
- # --total_tiles_analyzed--total_tiles_analyzed--count (high is good)
- lower_is_better_keys = [
- 'milliseconds', 'ms_', 'seconds_', 'KB', 'rdbytes', 'wrbytes',
- 'dropped_percent', '(ms)', '(seconds)', '--ms',
- '--average_num_missing_tiles', '--experimental_jank',
- '--experimental_mean_frame', '--experimental_median_frame_time',
- '--total_deferred_image_decode_count', '--seconds', 'samples', 'bytes'
- ]
+ # For units in chartjson:
+ # TODO(llozano): Trying to guess direction by looking at the name of the
+ # test does not seem like a good idea. Test frameworks should provide this
+ # info explicitly. I believe Telemetry has this info. Need to find it out.
+ #
+ # Below are some test names for which we are not sure what the
+ # direction is.
+ #
+        # For these we don't know what the direction is. But, since we don't
+ # specify anything, crosperf will assume higher is better:
+ # --percent_impl_scrolled--percent_impl_scrolled--percent
+ # --solid_color_tiles_analyzed--solid_color_tiles_analyzed--count
+ # --total_image_cache_hit_count--total_image_cache_hit_count--count
+ # --total_texture_upload_time_by_url
+ #
+ # About these we are doubtful but we made a guess:
+ # --average_num_missing_tiles_by_url--*--units (low is good)
+ # --experimental_mean_frame_time_by_url--*--units (low is good)
+ # --experimental_median_frame_time_by_url--*--units (low is good)
+ # --texture_upload_count--texture_upload_count--count (high is good)
+ # --total_deferred_image_decode_count--count (low is good)
+ # --total_tiles_analyzed--total_tiles_analyzed--count (high is good)
+ lower_is_better_keys = [
+ "milliseconds",
+ "ms_",
+ "seconds_",
+ "KB",
+ "rdbytes",
+ "wrbytes",
+ "dropped_percent",
+ "(ms)",
+ "(seconds)",
+ "--ms",
+ "--average_num_missing_tiles",
+ "--experimental_jank",
+ "--experimental_mean_frame",
+ "--experimental_median_frame_time",
+ "--total_deferred_image_decode_count",
+ "--seconds",
+ "samples",
+ "bytes",
+ ]
- return any([l in key for l in lower_is_better_keys])
+ return any([l in key for l in lower_is_better_keys])
- def _InvertIfLowerIsBetter(self, cell):
- if self._IsLowerBetter(cell.name):
- if cell.value:
- cell.value = 1.0 / cell.value
+ def _InvertIfLowerIsBetter(self, cell):
+ if self._IsLowerBetter(cell.name):
+ if cell.value:
+ cell.value = 1.0 / cell.value
class AmeanRatioResult(KeyAwareComparisonResult):
- """Ratio of arithmetic means of values vs. baseline values."""
+ """Ratio of arithmetic means of values vs. baseline values."""
- def __init__(self, ignore_min_max=False):
- super(AmeanRatioResult, self).__init__()
- self.ignore_min_max = ignore_min_max
+ def __init__(self, ignore_min_max=False):
+ super(AmeanRatioResult, self).__init__()
+ self.ignore_min_max = ignore_min_max
- def _ComputeFloat(self, cell, values, baseline_values):
- if self.ignore_min_max:
- values = _RemoveMinMax(cell, values)
- baseline_values = _RemoveMinMax(cell, baseline_values)
+ def _ComputeFloat(self, cell, values, baseline_values):
+ if self.ignore_min_max:
+ values = _RemoveMinMax(cell, values)
+ baseline_values = _RemoveMinMax(cell, baseline_values)
- baseline_mean = statistics.mean(baseline_values)
- values_mean = statistics.mean(values)
- if baseline_mean != 0:
- cell.value = values_mean / baseline_mean
- elif values_mean != 0:
- cell.value = 0.00
- # cell.value = 0 means the values and baseline_values have big difference
- else:
- cell.value = 1.00
- # no difference if both values and baseline_values are 0
+ baseline_mean = statistics.mean(baseline_values)
+ values_mean = statistics.mean(values)
+ if baseline_mean != 0:
+ cell.value = values_mean / baseline_mean
+ elif values_mean != 0:
+ cell.value = 0.00
+ # cell.value = 0 means the values and baseline_values have big difference
+ else:
+ cell.value = 1.00
+ # no difference if both values and baseline_values are 0
class GmeanRatioResult(KeyAwareComparisonResult):
- """Ratio of geometric means of values vs. baseline values."""
+ """Ratio of geometric means of values vs. baseline values."""
- def __init__(self, ignore_min_max=False):
- super(GmeanRatioResult, self).__init__()
- self.ignore_min_max = ignore_min_max
+ def __init__(self, ignore_min_max=False):
+ super(GmeanRatioResult, self).__init__()
+ self.ignore_min_max = ignore_min_max
- def _ComputeFloat(self, cell, values, baseline_values):
- if self.ignore_min_max:
- values = _RemoveMinMax(cell, values)
- baseline_values = _RemoveMinMax(cell, baseline_values)
- if self._GetGmean(baseline_values) != 0:
- cell.value = self._GetGmean(values) / self._GetGmean(baseline_values)
- elif self._GetGmean(values) != 0:
- cell.value = 0.00
- else:
- cell.value = 1.00
+ def _ComputeFloat(self, cell, values, baseline_values):
+ if self.ignore_min_max:
+ values = _RemoveMinMax(cell, values)
+ baseline_values = _RemoveMinMax(cell, baseline_values)
+ if self._GetGmean(baseline_values) != 0:
+ cell.value = self._GetGmean(values) / self._GetGmean(
+ baseline_values
+ )
+ elif self._GetGmean(values) != 0:
+ cell.value = 0.00
+ else:
+ cell.value = 1.00
class Color(object):
- """Class that represents color in RGBA format."""
+ """Class that represents color in RGBA format."""
- def __init__(self, r=0, g=0, b=0, a=0):
- self.r = r
- self.g = g
- self.b = b
- self.a = a
+ def __init__(self, r=0, g=0, b=0, a=0):
+ self.r = r
+ self.g = g
+ self.b = b
+ self.a = a
- def __str__(self):
- return 'r: %s g: %s: b: %s: a: %s' % (self.r, self.g, self.b, self.a)
+ def __str__(self):
+ return "r: %s g: %s: b: %s: a: %s" % (self.r, self.g, self.b, self.a)
- def Round(self):
- """Round RGBA values to the nearest integer."""
- self.r = int(self.r)
- self.g = int(self.g)
- self.b = int(self.b)
- self.a = int(self.a)
+ def Round(self):
+ """Round RGBA values to the nearest integer."""
+ self.r = int(self.r)
+ self.g = int(self.g)
+ self.b = int(self.b)
+ self.a = int(self.a)
- def GetRGB(self):
- """Get a hex representation of the color."""
- return '%02x%02x%02x' % (self.r, self.g, self.b)
+ def GetRGB(self):
+ """Get a hex representation of the color."""
+ return "%02x%02x%02x" % (self.r, self.g, self.b)
- @classmethod
- def Lerp(cls, ratio, a, b):
- """Perform linear interpolation between two colors.
+ @classmethod
+ def Lerp(cls, ratio, a, b):
+ """Perform linear interpolation between two colors.
- Args:
- ratio: The ratio to use for linear polation.
- a: The first color object (used when ratio is 0).
- b: The second color object (used when ratio is 1).
+ Args:
+            ratio: The ratio to use for linear interpolation.
+ a: The first color object (used when ratio is 0).
+ b: The second color object (used when ratio is 1).
- Returns:
- Linearly interpolated color.
- """
- ret = cls()
- ret.r = (b.r - a.r) * ratio + a.r
- ret.g = (b.g - a.g) * ratio + a.g
- ret.b = (b.b - a.b) * ratio + a.b
- ret.a = (b.a - a.a) * ratio + a.a
- return ret
+ Returns:
+ Linearly interpolated color.
+ """
+ ret = cls()
+ ret.r = (b.r - a.r) * ratio + a.r
+ ret.g = (b.g - a.g) * ratio + a.g
+ ret.b = (b.b - a.b) * ratio + a.b
+ ret.a = (b.a - a.a) * ratio + a.a
+ return ret
class Format(object):
- """A class that represents the format of a column."""
+ """A class that represents the format of a column."""
- def __init__(self):
- pass
+ def __init__(self):
+ pass
- def Compute(self, cell):
- """Computes the attributes of a cell based on its value.
+ def Compute(self, cell):
+ """Computes the attributes of a cell based on its value.
- Attributes typically are color, width, etc.
+ Attributes typically are color, width, etc.
- Args:
- cell: The cell whose attributes are to be populated.
- """
- if cell.value is None:
- cell.string_value = ''
- if isinstance(cell.value, float):
- self._ComputeFloat(cell)
- else:
- self._ComputeString(cell)
+ Args:
+ cell: The cell whose attributes are to be populated.
+ """
+ if cell.value is None:
+ cell.string_value = ""
+ if isinstance(cell.value, float):
+ self._ComputeFloat(cell)
+ else:
+ self._ComputeString(cell)
- def _ComputeFloat(self, cell):
- cell.string_value = '{0:.2f}'.format(cell.value)
+ def _ComputeFloat(self, cell):
+ cell.string_value = "{0:.2f}".format(cell.value)
- def _ComputeString(self, cell):
- cell.string_value = str(cell.value)
+ def _ComputeString(self, cell):
+ cell.string_value = str(cell.value)
- def _GetColor(self, value, low, mid, high, power=6, mid_value=1.0):
- min_value = 0.0
- max_value = 2.0
- if math.isnan(value):
- return mid
- if value > mid_value:
- value = max_value - mid_value / value
+ def _GetColor(self, value, low, mid, high, power=6, mid_value=1.0):
+ min_value = 0.0
+ max_value = 2.0
+ if math.isnan(value):
+ return mid
+ if value > mid_value:
+ value = max_value - mid_value / value
- return self._GetColorBetweenRange(value, min_value, mid_value, max_value,
- low, mid, high, power)
+ return self._GetColorBetweenRange(
+ value, min_value, mid_value, max_value, low, mid, high, power
+ )
- def _GetColorBetweenRange(self, value, min_value, mid_value, max_value,
- low_color, mid_color, high_color, power):
- assert value <= max_value
- assert value >= min_value
- if value > mid_value:
- value = (max_value - value) / (max_value - mid_value)
- value **= power
- ret = Color.Lerp(value, high_color, mid_color)
- else:
- value = (value - min_value) / (mid_value - min_value)
- value **= power
- ret = Color.Lerp(value, low_color, mid_color)
- ret.Round()
- return ret
+ def _GetColorBetweenRange(
+ self,
+ value,
+ min_value,
+ mid_value,
+ max_value,
+ low_color,
+ mid_color,
+ high_color,
+ power,
+ ):
+ assert value <= max_value
+ assert value >= min_value
+ if value > mid_value:
+ value = (max_value - value) / (max_value - mid_value)
+ value **= power
+ ret = Color.Lerp(value, high_color, mid_color)
+ else:
+ value = (value - min_value) / (mid_value - min_value)
+ value **= power
+ ret = Color.Lerp(value, low_color, mid_color)
+ ret.Round()
+ return ret
class PValueFormat(Format):
- """Formatting for p-value."""
+ """Formatting for p-value."""
- def _ComputeFloat(self, cell):
- cell.string_value = '%0.2f' % float(cell.value)
- if float(cell.value) < 0.05:
- cell.bgcolor = self._GetColor(
- cell.value,
- Color(255, 255, 0, 0),
- Color(255, 255, 255, 0),
- Color(255, 255, 255, 0),
- mid_value=0.05,
- power=1)
+ def _ComputeFloat(self, cell):
+ cell.string_value = "%0.2f" % float(cell.value)
+ if float(cell.value) < 0.05:
+ cell.bgcolor = self._GetColor(
+ cell.value,
+ Color(255, 255, 0, 0),
+ Color(255, 255, 255, 0),
+ Color(255, 255, 255, 0),
+ mid_value=0.05,
+ power=1,
+ )
class WeightFormat(Format):
- """Formatting for weight in cwp mode."""
+ """Formatting for weight in cwp mode."""
- def _ComputeFloat(self, cell):
- cell.string_value = '%0.4f' % float(cell.value)
+ def _ComputeFloat(self, cell):
+ cell.string_value = "%0.4f" % float(cell.value)
class StorageFormat(Format):
- """Format the cell as a storage number.
+ """Format the cell as a storage number.
- Examples:
- If the cell contains a value of 1024, the string_value will be 1.0K.
- """
+ Examples:
+ If the cell contains a value of 1024, the string_value will be 1.0K.
+ """
- def _ComputeFloat(self, cell):
- base = 1024
- suffices = ['K', 'M', 'G']
- v = float(cell.value)
- current = 0
- while v >= base**(current + 1) and current < len(suffices):
- current += 1
+ def _ComputeFloat(self, cell):
+ base = 1024
+ suffices = ["K", "M", "G"]
+ v = float(cell.value)
+ current = 0
+ while v >= base ** (current + 1) and current < len(suffices):
+ current += 1
- if current:
- divisor = base**current
- cell.string_value = '%1.1f%s' % ((v / divisor), suffices[current - 1])
- else:
- cell.string_value = str(cell.value)
+ if current:
+ divisor = base ** current
+ cell.string_value = "%1.1f%s" % (
+ (v / divisor),
+ suffices[current - 1],
+ )
+ else:
+ cell.string_value = str(cell.value)
class CoeffVarFormat(Format):
- """Format the cell as a percent.
+ """Format the cell as a percent.
- Examples:
- If the cell contains a value of 1.5, the string_value will be +150%.
- """
+ Examples:
+ If the cell contains a value of 1.5, the string_value will be +150%.
+ """
- def _ComputeFloat(self, cell):
- cell.string_value = '%1.1f%%' % (float(cell.value) * 100)
- cell.color = self._GetColor(
- cell.value,
- Color(0, 255, 0, 0),
- Color(0, 0, 0, 0),
- Color(255, 0, 0, 0),
- mid_value=0.02,
- power=1)
+ def _ComputeFloat(self, cell):
+ cell.string_value = "%1.1f%%" % (float(cell.value) * 100)
+ cell.color = self._GetColor(
+ cell.value,
+ Color(0, 255, 0, 0),
+ Color(0, 0, 0, 0),
+ Color(255, 0, 0, 0),
+ mid_value=0.02,
+ power=1,
+ )
class PercentFormat(Format):
- """Format the cell as a percent.
+ """Format the cell as a percent.
- Examples:
- If the cell contains a value of 1.5, the string_value will be +50%.
- """
+ Examples:
+ If the cell contains a value of 1.5, the string_value will be +50%.
+ """
- def _ComputeFloat(self, cell):
- cell.string_value = '%+1.1f%%' % ((float(cell.value) - 1) * 100)
- cell.color = self._GetColor(cell.value, Color(255, 0, 0, 0),
- Color(0, 0, 0, 0), Color(0, 255, 0, 0))
+ def _ComputeFloat(self, cell):
+ cell.string_value = "%+1.1f%%" % ((float(cell.value) - 1) * 100)
+ cell.color = self._GetColor(
+ cell.value,
+ Color(255, 0, 0, 0),
+ Color(0, 0, 0, 0),
+ Color(0, 255, 0, 0),
+ )
class RatioFormat(Format):
- """Format the cell as a ratio.
+ """Format the cell as a ratio.
- Examples:
- If the cell contains a value of 1.5642, the string_value will be 1.56.
- """
+ Examples:
+ If the cell contains a value of 1.5642, the string_value will be 1.56.
+ """
- def _ComputeFloat(self, cell):
- cell.string_value = '%+1.1f%%' % ((cell.value - 1) * 100)
- cell.color = self._GetColor(cell.value, Color(255, 0, 0, 0),
- Color(0, 0, 0, 0), Color(0, 255, 0, 0))
+ def _ComputeFloat(self, cell):
+ cell.string_value = "%+1.1f%%" % ((cell.value - 1) * 100)
+ cell.color = self._GetColor(
+ cell.value,
+ Color(255, 0, 0, 0),
+ Color(0, 0, 0, 0),
+ Color(0, 255, 0, 0),
+ )
class ColorBoxFormat(Format):
- """Format the cell as a color box.
+ """Format the cell as a color box.
- Examples:
- If the cell contains a value of 1.5, it will get a green color.
- If the cell contains a value of 0.5, it will get a red color.
- The intensity of the green/red will be determined by how much above or below
- 1.0 the value is.
- """
+ Examples:
+ If the cell contains a value of 1.5, it will get a green color.
+ If the cell contains a value of 0.5, it will get a red color.
+ The intensity of the green/red will be determined by how much above or below
+ 1.0 the value is.
+ """
- def _ComputeFloat(self, cell):
- cell.string_value = '--'
- bgcolor = self._GetColor(cell.value, Color(255, 0, 0, 0),
- Color(255, 255, 255, 0), Color(0, 255, 0, 0))
- cell.bgcolor = bgcolor
- cell.color = bgcolor
+ def _ComputeFloat(self, cell):
+ cell.string_value = "--"
+ bgcolor = self._GetColor(
+ cell.value,
+ Color(255, 0, 0, 0),
+ Color(255, 255, 255, 0),
+ Color(0, 255, 0, 0),
+ )
+ cell.bgcolor = bgcolor
+ cell.color = bgcolor
class Cell(object):
- """A class to represent a cell in a table.
+ """A class to represent a cell in a table.
- Attributes:
- value: The raw value of the cell.
- color: The color of the cell.
- bgcolor: The background color of the cell.
- string_value: The string value of the cell.
- suffix: A string suffix to be attached to the value when displaying.
- prefix: A string prefix to be attached to the value when displaying.
- color_row: Indicates whether the whole row is to inherit this cell's color.
- bgcolor_row: Indicates whether the whole row is to inherit this cell's
- bgcolor.
- width: Optional specifier to make a column narrower than the usual width.
- The usual width of a column is the max of all its cells widths.
- colspan: Set the colspan of the cell in the HTML table, this is used for
- table headers. Default value is 1.
- name: the test name of the cell.
- header: Whether this is a header in html.
- """
+ Attributes:
+ value: The raw value of the cell.
+ color: The color of the cell.
+ bgcolor: The background color of the cell.
+ string_value: The string value of the cell.
+ suffix: A string suffix to be attached to the value when displaying.
+ prefix: A string prefix to be attached to the value when displaying.
+ color_row: Indicates whether the whole row is to inherit this cell's color.
+ bgcolor_row: Indicates whether the whole row is to inherit this cell's
+ bgcolor.
+ width: Optional specifier to make a column narrower than the usual width.
+ The usual width of a column is the max of all its cells widths.
+ colspan: Set the colspan of the cell in the HTML table, this is used for
+ table headers. Default value is 1.
+ name: the test name of the cell.
+ header: Whether this is a header in html.
+ """
- def __init__(self):
- self.value = None
- self.color = None
- self.bgcolor = None
- self.string_value = None
- self.suffix = None
- self.prefix = None
- # Entire row inherits this color.
- self.color_row = False
- self.bgcolor_row = False
- self.width = 0
- self.colspan = 1
- self.name = None
- self.header = False
+ def __init__(self):
+ self.value = None
+ self.color = None
+ self.bgcolor = None
+ self.string_value = None
+ self.suffix = None
+ self.prefix = None
+ # Entire row inherits this color.
+ self.color_row = False
+ self.bgcolor_row = False
+ self.width = 0
+ self.colspan = 1
+ self.name = None
+ self.header = False
- def __str__(self):
- l = []
- l.append('value: %s' % self.value)
- l.append('string_value: %s' % self.string_value)
- return ' '.join(l)
+ def __str__(self):
+ l = []
+ l.append("value: %s" % self.value)
+ l.append("string_value: %s" % self.string_value)
+ return " ".join(l)
class Column(object):
- """Class representing a column in a table.
+ """Class representing a column in a table.
- Attributes:
- result: an object of the Result class.
- fmt: an object of the Format class.
- """
+ Attributes:
+ result: an object of the Result class.
+ fmt: an object of the Format class.
+ """
- def __init__(self, result, fmt, name=''):
- self.result = result
- self.fmt = fmt
- self.name = name
+ def __init__(self, result, fmt, name=""):
+ self.result = result
+ self.fmt = fmt
+ self.name = name
# Takes in:
@@ -1033,536 +1091,561 @@
# ["k", avg("v", "v2"), stddev("v", "v2"), etc.]]
# according to format string
class TableFormatter(object):
- """Class to convert a plain table into a cell-table.
+ """Class to convert a plain table into a cell-table.
- This class takes in a table generated by TableGenerator and a list of column
- formats to apply to the table and returns a table of cells.
- """
-
- def __init__(self, table, columns, samples_table=False):
- """The constructor takes in a table and a list of columns.
-
- Args:
- table: A list of lists of values.
- columns: A list of column containing what to produce and how to format
- it.
- samples_table: A flag to check whether we are generating a table of
- samples in CWP apporximation mode.
+ This class takes in a table generated by TableGenerator and a list of column
+ formats to apply to the table and returns a table of cells.
"""
- self._table = table
- self._columns = columns
- self._samples_table = samples_table
- self._table_columns = []
- self._out_table = []
- def GenerateCellTable(self, table_type):
- row_index = 0
- all_failed = False
+ def __init__(self, table, columns, samples_table=False):
+ """The constructor takes in a table and a list of columns.
- for row in self._table[1:]:
- # If we are generating samples_table, the second value will be weight
- # rather than values.
- start_col = 2 if self._samples_table else 1
- # It does not make sense to put retval in the summary table.
- if str(row[0]) == 'retval' and table_type == 'summary':
- # Check to see if any runs passed, and update all_failed.
- all_failed = True
- for values in row[start_col:]:
- if 0 in values:
- all_failed = False
- continue
- key = Cell()
- key.string_value = str(row[0])
- out_row = [key]
- if self._samples_table:
- # Add one column for weight if in samples_table mode
- weight = Cell()
- weight.value = row[1]
- f = WeightFormat()
- f.Compute(weight)
- out_row.append(weight)
- baseline = None
- for results in row[start_col:]:
- column_start = 0
- values = None
- # If generating sample table, we will split a tuple of iterations info
- # from the results
- if isinstance(results, tuple):
- it, values = results
- column_start = 1
- cell = Cell()
- cell.string_value = '[%d: %d]' % (it[0], it[1])
- out_row.append(cell)
- if not row_index:
- self._table_columns.append(self._columns[0])
- else:
- values = results
- # Parse each column
- for column in self._columns[column_start:]:
- cell = Cell()
- cell.name = key.string_value
- if not column.result.NeedsBaseline() or baseline is not None:
- column.result.Compute(cell, values, baseline)
- column.fmt.Compute(cell)
- out_row.append(cell)
- if not row_index:
- self._table_columns.append(column)
+ Args:
+ table: A list of lists of values.
+ columns: A list of column containing what to produce and how to format
+ it.
+ samples_table: A flag to check whether we are generating a table of
+                samples in CWP approximation mode.
+ """
+ self._table = table
+ self._columns = columns
+ self._samples_table = samples_table
+ self._table_columns = []
+ self._out_table = []
- if baseline is None:
- baseline = values
- self._out_table.append(out_row)
- row_index += 1
+ def GenerateCellTable(self, table_type):
+ row_index = 0
+ all_failed = False
- # If this is a summary table, and the only row in it is 'retval', and
- # all the test runs failed, we need to a 'Results' row to the output
- # table.
- if table_type == 'summary' and all_failed and len(self._table) == 2:
- labels_row = self._table[0]
- key = Cell()
- key.string_value = 'Results'
- out_row = [key]
- baseline = None
- for _ in labels_row[1:]:
- for column in self._columns:
- cell = Cell()
- cell.name = key.string_value
- column.result.Compute(cell, ['Fail'], baseline)
- column.fmt.Compute(cell)
- out_row.append(cell)
- if not row_index:
- self._table_columns.append(column)
- self._out_table.append(out_row)
+ for row in self._table[1:]:
+ # If we are generating samples_table, the second value will be weight
+ # rather than values.
+ start_col = 2 if self._samples_table else 1
+ # It does not make sense to put retval in the summary table.
+ if str(row[0]) == "retval" and table_type == "summary":
+ # Check to see if any runs passed, and update all_failed.
+ all_failed = True
+ for values in row[start_col:]:
+ if 0 in values:
+ all_failed = False
+ continue
+ key = Cell()
+ key.string_value = str(row[0])
+ out_row = [key]
+ if self._samples_table:
+ # Add one column for weight if in samples_table mode
+ weight = Cell()
+ weight.value = row[1]
+ f = WeightFormat()
+ f.Compute(weight)
+ out_row.append(weight)
+ baseline = None
+ for results in row[start_col:]:
+ column_start = 0
+ values = None
+ # If generating sample table, we will split a tuple of iterations info
+ # from the results
+ if isinstance(results, tuple):
+ it, values = results
+ column_start = 1
+ cell = Cell()
+ cell.string_value = "[%d: %d]" % (it[0], it[1])
+ out_row.append(cell)
+ if not row_index:
+ self._table_columns.append(self._columns[0])
+ else:
+ values = results
+ # Parse each column
+ for column in self._columns[column_start:]:
+ cell = Cell()
+ cell.name = key.string_value
+ if (
+ not column.result.NeedsBaseline()
+ or baseline is not None
+ ):
+ column.result.Compute(cell, values, baseline)
+ column.fmt.Compute(cell)
+ out_row.append(cell)
+ if not row_index:
+ self._table_columns.append(column)
- def AddColumnName(self):
- """Generate Column name at the top of table."""
- key = Cell()
- key.header = True
- key.string_value = 'Keys' if not self._samples_table else 'Benchmarks'
- header = [key]
- if self._samples_table:
- weight = Cell()
- weight.header = True
- weight.string_value = 'Weights'
- header.append(weight)
- for column in self._table_columns:
- cell = Cell()
- cell.header = True
- if column.name:
- cell.string_value = column.name
- else:
- result_name = column.result.__class__.__name__
- format_name = column.fmt.__class__.__name__
+ if baseline is None:
+ baseline = values
+ self._out_table.append(out_row)
+ row_index += 1
- cell.string_value = '%s %s' % (
- result_name.replace('Result', ''),
- format_name.replace('Format', ''),
+ # If this is a summary table, and the only row in it is 'retval', and
+        # all the test runs failed, we need to add a 'Results' row to the output
+ # table.
+ if table_type == "summary" and all_failed and len(self._table) == 2:
+ labels_row = self._table[0]
+ key = Cell()
+ key.string_value = "Results"
+ out_row = [key]
+ baseline = None
+ for _ in labels_row[1:]:
+ for column in self._columns:
+ cell = Cell()
+ cell.name = key.string_value
+ column.result.Compute(cell, ["Fail"], baseline)
+ column.fmt.Compute(cell)
+ out_row.append(cell)
+ if not row_index:
+ self._table_columns.append(column)
+ self._out_table.append(out_row)
+
+ def AddColumnName(self):
+ """Generate Column name at the top of table."""
+ key = Cell()
+ key.header = True
+ key.string_value = "Keys" if not self._samples_table else "Benchmarks"
+ header = [key]
+ if self._samples_table:
+ weight = Cell()
+ weight.header = True
+ weight.string_value = "Weights"
+ header.append(weight)
+ for column in self._table_columns:
+ cell = Cell()
+ cell.header = True
+ if column.name:
+ cell.string_value = column.name
+ else:
+ result_name = column.result.__class__.__name__
+ format_name = column.fmt.__class__.__name__
+
+ cell.string_value = "%s %s" % (
+ result_name.replace("Result", ""),
+ format_name.replace("Format", ""),
+ )
+
+ header.append(cell)
+
+ self._out_table = [header] + self._out_table
+
+ def AddHeader(self, s):
+ """Put additional string on the top of the table."""
+ cell = Cell()
+ cell.header = True
+ cell.string_value = str(s)
+ header = [cell]
+ colspan = max(1, max(len(row) for row in self._table))
+ cell.colspan = colspan
+ self._out_table = [header] + self._out_table
+
+ def GetPassesAndFails(self, values):
+ passes = 0
+ fails = 0
+ for val in values:
+ if val == 0:
+ passes = passes + 1
+ else:
+ fails = fails + 1
+ return passes, fails
+
+ def AddLabelName(self):
+ """Put label on the top of the table."""
+ top_header = []
+ base_colspan = len(
+ [c for c in self._columns if not c.result.NeedsBaseline()]
)
+ compare_colspan = len(self._columns)
+ # Find the row with the key 'retval', if it exists. This
+ # will be used to calculate the number of iterations that passed and
+ # failed for each image label.
+ retval_row = None
+ for row in self._table:
+ if row[0] == "retval":
+ retval_row = row
+ # The label is organized as follows
+ # "keys" label_base, label_comparison1, label_comparison2
+ # The first cell has colspan 1, the second is base_colspan
+ # The others are compare_colspan
+ column_position = 0
+ for label in self._table[0]:
+ cell = Cell()
+ cell.header = True
+ # Put the number of pass/fail iterations in the image label header.
+ if column_position > 0 and retval_row:
+ retval_values = retval_row[column_position]
+ if isinstance(retval_values, list):
+ passes, fails = self.GetPassesAndFails(retval_values)
+ cell.string_value = str(label) + " (pass:%d fail:%d)" % (
+ passes,
+ fails,
+ )
+ else:
+ cell.string_value = str(label)
+ else:
+ cell.string_value = str(label)
+ if top_header:
+ if not self._samples_table or (
+ self._samples_table and len(top_header) == 2
+ ):
+ cell.colspan = base_colspan
+ if len(top_header) > 1:
+ if not self._samples_table or (
+ self._samples_table and len(top_header) > 2
+ ):
+ cell.colspan = compare_colspan
+ top_header.append(cell)
+ column_position = column_position + 1
+ self._out_table = [top_header] + self._out_table
- header.append(cell)
+ def _PrintOutTable(self):
+ o = ""
+ for row in self._out_table:
+ for cell in row:
+ o += str(cell) + " "
+ o += "\n"
+ print(o)
- self._out_table = [header] + self._out_table
+ def GetCellTable(self, table_type="full", headers=True):
+ """Function to return a table of cells.
- def AddHeader(self, s):
- """Put additional string on the top of the table."""
- cell = Cell()
- cell.header = True
- cell.string_value = str(s)
- header = [cell]
- colspan = max(1, max(len(row) for row in self._table))
- cell.colspan = colspan
- self._out_table = [header] + self._out_table
+ The table (list of lists) is converted into a table of cells by this
+ function.
- def GetPassesAndFails(self, values):
- passes = 0
- fails = 0
- for val in values:
- if val == 0:
- passes = passes + 1
- else:
- fails = fails + 1
- return passes, fails
+ Args:
+ table_type: Can be 'full' or 'summary'
+ headers: A boolean saying whether we want default headers
- def AddLabelName(self):
- """Put label on the top of the table."""
- top_header = []
- base_colspan = len(
- [c for c in self._columns if not c.result.NeedsBaseline()])
- compare_colspan = len(self._columns)
- # Find the row with the key 'retval', if it exists. This
- # will be used to calculate the number of iterations that passed and
- # failed for each image label.
- retval_row = None
- for row in self._table:
- if row[0] == 'retval':
- retval_row = row
- # The label is organized as follows
- # "keys" label_base, label_comparison1, label_comparison2
- # The first cell has colspan 1, the second is base_colspan
- # The others are compare_colspan
- column_position = 0
- for label in self._table[0]:
- cell = Cell()
- cell.header = True
- # Put the number of pass/fail iterations in the image label header.
- if column_position > 0 and retval_row:
- retval_values = retval_row[column_position]
- if isinstance(retval_values, list):
- passes, fails = self.GetPassesAndFails(retval_values)
- cell.string_value = str(label) + ' (pass:%d fail:%d)' % (passes,
- fails)
- else:
- cell.string_value = str(label)
- else:
- cell.string_value = str(label)
- if top_header:
- if not self._samples_table or (self._samples_table and
- len(top_header) == 2):
- cell.colspan = base_colspan
- if len(top_header) > 1:
- if not self._samples_table or (self._samples_table and
- len(top_header) > 2):
- cell.colspan = compare_colspan
- top_header.append(cell)
- column_position = column_position + 1
- self._out_table = [top_header] + self._out_table
-
- def _PrintOutTable(self):
- o = ''
- for row in self._out_table:
- for cell in row:
- o += str(cell) + ' '
- o += '\n'
- print(o)
-
- def GetCellTable(self, table_type='full', headers=True):
- """Function to return a table of cells.
-
- The table (list of lists) is converted into a table of cells by this
- function.
-
- Args:
- table_type: Can be 'full' or 'summary'
- headers: A boolean saying whether we want default headers
-
- Returns:
- A table of cells with each cell having the properties and string values as
- requiested by the columns passed in the constructor.
- """
- # Generate the cell table, creating a list of dynamic columns on the fly.
- if not self._out_table:
- self.GenerateCellTable(table_type)
- if headers:
- self.AddColumnName()
- self.AddLabelName()
- return self._out_table
+ Returns:
+ A table of cells with each cell having the properties and string values as
+            requested by the columns passed in the constructor.
+ """
+ # Generate the cell table, creating a list of dynamic columns on the fly.
+ if not self._out_table:
+ self.GenerateCellTable(table_type)
+ if headers:
+ self.AddColumnName()
+ self.AddLabelName()
+ return self._out_table
class TablePrinter(object):
- """Class to print a cell table to the console, file or html."""
- PLAIN = 0
- CONSOLE = 1
- HTML = 2
- TSV = 3
- EMAIL = 4
+ """Class to print a cell table to the console, file or html."""
- def __init__(self, table, output_type):
- """Constructor that stores the cell table and output type."""
- self._table = table
- self._output_type = output_type
- self._row_styles = []
- self._column_styles = []
+ PLAIN = 0
+ CONSOLE = 1
+ HTML = 2
+ TSV = 3
+ EMAIL = 4
- # Compute whole-table properties like max-size, etc.
- def _ComputeStyle(self):
- self._row_styles = []
- for row in self._table:
- row_style = Cell()
- for cell in row:
- if cell.color_row:
- assert cell.color, 'Cell color not set but color_row set!'
- assert not row_style.color, 'Multiple row_style.colors found!'
- row_style.color = cell.color
- if cell.bgcolor_row:
- assert cell.bgcolor, 'Cell bgcolor not set but bgcolor_row set!'
- assert not row_style.bgcolor, 'Multiple row_style.bgcolors found!'
- row_style.bgcolor = cell.bgcolor
- self._row_styles.append(row_style)
+ def __init__(self, table, output_type):
+ """Constructor that stores the cell table and output type."""
+ self._table = table
+ self._output_type = output_type
+ self._row_styles = []
+ self._column_styles = []
- self._column_styles = []
- if len(self._table) < 2:
- return
+ # Compute whole-table properties like max-size, etc.
+ def _ComputeStyle(self):
+ self._row_styles = []
+ for row in self._table:
+ row_style = Cell()
+ for cell in row:
+ if cell.color_row:
+ assert cell.color, "Cell color not set but color_row set!"
+ assert (
+ not row_style.color
+ ), "Multiple row_style.colors found!"
+ row_style.color = cell.color
+ if cell.bgcolor_row:
+ assert (
+ cell.bgcolor
+ ), "Cell bgcolor not set but bgcolor_row set!"
+ assert (
+ not row_style.bgcolor
+ ), "Multiple row_style.bgcolors found!"
+ row_style.bgcolor = cell.bgcolor
+ self._row_styles.append(row_style)
- for i in range(max(len(row) for row in self._table)):
- column_style = Cell()
- for row in self._table:
- if not any([cell.colspan != 1 for cell in row]):
- column_style.width = max(column_style.width, len(row[i].string_value))
- self._column_styles.append(column_style)
+ self._column_styles = []
+ if len(self._table) < 2:
+ return
- def _GetBGColorFix(self, color):
- if self._output_type == self.CONSOLE:
- prefix = misc.rgb2short(color.r, color.g, color.b)
- # pylint: disable=anomalous-backslash-in-string
- prefix = '\033[48;5;%sm' % prefix
- suffix = '\033[0m'
- elif self._output_type in [self.EMAIL, self.HTML]:
- rgb = color.GetRGB()
- prefix = ('<FONT style="BACKGROUND-COLOR:#{0}">'.format(rgb))
- suffix = '</FONT>'
- elif self._output_type in [self.PLAIN, self.TSV]:
- prefix = ''
- suffix = ''
- return prefix, suffix
+ for i in range(max(len(row) for row in self._table)):
+ column_style = Cell()
+ for row in self._table:
+ if not any([cell.colspan != 1 for cell in row]):
+ column_style.width = max(
+ column_style.width, len(row[i].string_value)
+ )
+ self._column_styles.append(column_style)
- def _GetColorFix(self, color):
- if self._output_type == self.CONSOLE:
- prefix = misc.rgb2short(color.r, color.g, color.b)
- # pylint: disable=anomalous-backslash-in-string
- prefix = '\033[38;5;%sm' % prefix
- suffix = '\033[0m'
- elif self._output_type in [self.EMAIL, self.HTML]:
- rgb = color.GetRGB()
- prefix = '<FONT COLOR=#{0}>'.format(rgb)
- suffix = '</FONT>'
- elif self._output_type in [self.PLAIN, self.TSV]:
- prefix = ''
- suffix = ''
- return prefix, suffix
+ def _GetBGColorFix(self, color):
+ if self._output_type == self.CONSOLE:
+ prefix = misc.rgb2short(color.r, color.g, color.b)
+ # pylint: disable=anomalous-backslash-in-string
+ prefix = "\033[48;5;%sm" % prefix
+ suffix = "\033[0m"
+ elif self._output_type in [self.EMAIL, self.HTML]:
+ rgb = color.GetRGB()
+ prefix = '<FONT style="BACKGROUND-COLOR:#{0}">'.format(rgb)
+ suffix = "</FONT>"
+ elif self._output_type in [self.PLAIN, self.TSV]:
+ prefix = ""
+ suffix = ""
+ return prefix, suffix
- def Print(self):
- """Print the table to a console, html, etc.
+ def _GetColorFix(self, color):
+ if self._output_type == self.CONSOLE:
+ prefix = misc.rgb2short(color.r, color.g, color.b)
+ # pylint: disable=anomalous-backslash-in-string
+ prefix = "\033[38;5;%sm" % prefix
+ suffix = "\033[0m"
+ elif self._output_type in [self.EMAIL, self.HTML]:
+ rgb = color.GetRGB()
+ prefix = "<FONT COLOR=#{0}>".format(rgb)
+ suffix = "</FONT>"
+ elif self._output_type in [self.PLAIN, self.TSV]:
+ prefix = ""
+ suffix = ""
+ return prefix, suffix
- Returns:
- A string that contains the desired representation of the table.
- """
- self._ComputeStyle()
- return self._GetStringValue()
+ def Print(self):
+ """Print the table to a console, html, etc.
- def _GetCellValue(self, i, j):
- cell = self._table[i][j]
- out = cell.string_value
- raw_width = len(out)
+ Returns:
+ A string that contains the desired representation of the table.
+ """
+ self._ComputeStyle()
+ return self._GetStringValue()
- if cell.color:
- p, s = self._GetColorFix(cell.color)
- out = '%s%s%s' % (p, out, s)
+ def _GetCellValue(self, i, j):
+ cell = self._table[i][j]
+ out = cell.string_value
+ raw_width = len(out)
- if cell.bgcolor:
- p, s = self._GetBGColorFix(cell.bgcolor)
- out = '%s%s%s' % (p, out, s)
+ if cell.color:
+ p, s = self._GetColorFix(cell.color)
+ out = "%s%s%s" % (p, out, s)
- if self._output_type in [self.PLAIN, self.CONSOLE, self.EMAIL]:
- if cell.width:
- width = cell.width
- else:
- if self._column_styles:
- width = self._column_styles[j].width
- else:
- width = len(cell.string_value)
- if cell.colspan > 1:
- width = 0
- start = 0
- for k in range(j):
- start += self._table[i][k].colspan
- for k in range(cell.colspan):
- width += self._column_styles[start + k].width
- if width > raw_width:
- padding = ('%' + str(width - raw_width) + 's') % ''
- out = padding + out
+ if cell.bgcolor:
+ p, s = self._GetBGColorFix(cell.bgcolor)
+ out = "%s%s%s" % (p, out, s)
- if self._output_type == self.HTML:
- if cell.header:
- tag = 'th'
- else:
- tag = 'td'
- out = '<{0} colspan = "{2}"> {1} </{0}>'.format(tag, out, cell.colspan)
+ if self._output_type in [self.PLAIN, self.CONSOLE, self.EMAIL]:
+ if cell.width:
+ width = cell.width
+ else:
+ if self._column_styles:
+ width = self._column_styles[j].width
+ else:
+ width = len(cell.string_value)
+ if cell.colspan > 1:
+ width = 0
+ start = 0
+ for k in range(j):
+ start += self._table[i][k].colspan
+ for k in range(cell.colspan):
+ width += self._column_styles[start + k].width
+ if width > raw_width:
+ padding = ("%" + str(width - raw_width) + "s") % ""
+ out = padding + out
- return out
+ if self._output_type == self.HTML:
+ if cell.header:
+ tag = "th"
+ else:
+ tag = "td"
+ out = '<{0} colspan = "{2}"> {1} </{0}>'.format(
+ tag, out, cell.colspan
+ )
- def _GetHorizontalSeparator(self):
- if self._output_type in [self.CONSOLE, self.PLAIN, self.EMAIL]:
- return ' '
- if self._output_type == self.HTML:
- return ''
- if self._output_type == self.TSV:
- return '\t'
+ return out
- def _GetVerticalSeparator(self):
- if self._output_type in [self.PLAIN, self.CONSOLE, self.TSV, self.EMAIL]:
- return '\n'
- if self._output_type == self.HTML:
- return '</tr>\n<tr>'
+ def _GetHorizontalSeparator(self):
+ if self._output_type in [self.CONSOLE, self.PLAIN, self.EMAIL]:
+ return " "
+ if self._output_type == self.HTML:
+ return ""
+ if self._output_type == self.TSV:
+ return "\t"
- def _GetPrefix(self):
- if self._output_type in [self.PLAIN, self.CONSOLE, self.TSV, self.EMAIL]:
- return ''
- if self._output_type == self.HTML:
- return '<p></p><table id="box-table-a">\n<tr>'
+ def _GetVerticalSeparator(self):
+ if self._output_type in [
+ self.PLAIN,
+ self.CONSOLE,
+ self.TSV,
+ self.EMAIL,
+ ]:
+ return "\n"
+ if self._output_type == self.HTML:
+ return "</tr>\n<tr>"
- def _GetSuffix(self):
- if self._output_type in [self.PLAIN, self.CONSOLE, self.TSV, self.EMAIL]:
- return ''
- if self._output_type == self.HTML:
- return '</tr>\n</table>'
+ def _GetPrefix(self):
+ if self._output_type in [
+ self.PLAIN,
+ self.CONSOLE,
+ self.TSV,
+ self.EMAIL,
+ ]:
+ return ""
+ if self._output_type == self.HTML:
+ return '<p></p><table id="box-table-a">\n<tr>'
- def _GetStringValue(self):
- o = ''
- o += self._GetPrefix()
- for i in range(len(self._table)):
- row = self._table[i]
- # Apply row color and bgcolor.
- p = s = bgp = bgs = ''
- if self._row_styles[i].bgcolor:
- bgp, bgs = self._GetBGColorFix(self._row_styles[i].bgcolor)
- if self._row_styles[i].color:
- p, s = self._GetColorFix(self._row_styles[i].color)
- o += p + bgp
- for j in range(len(row)):
- out = self._GetCellValue(i, j)
- o += out + self._GetHorizontalSeparator()
- o += s + bgs
- o += self._GetVerticalSeparator()
- o += self._GetSuffix()
- return o
+ def _GetSuffix(self):
+ if self._output_type in [
+ self.PLAIN,
+ self.CONSOLE,
+ self.TSV,
+ self.EMAIL,
+ ]:
+ return ""
+ if self._output_type == self.HTML:
+ return "</tr>\n</table>"
+
+ def _GetStringValue(self):
+ o = ""
+ o += self._GetPrefix()
+ for i in range(len(self._table)):
+ row = self._table[i]
+ # Apply row color and bgcolor.
+ p = s = bgp = bgs = ""
+ if self._row_styles[i].bgcolor:
+ bgp, bgs = self._GetBGColorFix(self._row_styles[i].bgcolor)
+ if self._row_styles[i].color:
+ p, s = self._GetColorFix(self._row_styles[i].color)
+ o += p + bgp
+ for j in range(len(row)):
+ out = self._GetCellValue(i, j)
+ o += out + self._GetHorizontalSeparator()
+ o += s + bgs
+ o += self._GetVerticalSeparator()
+ o += self._GetSuffix()
+ return o
# Some common drivers
def GetSimpleTable(table, out_to=TablePrinter.CONSOLE):
- """Prints a simple table.
+ """Prints a simple table.
- This is used by code that has a very simple list-of-lists and wants to
- produce a table with ameans, a percentage ratio of ameans and a colorbox.
+ This is used by code that has a very simple list-of-lists and wants to
+ produce a table with ameans, a percentage ratio of ameans and a colorbox.
- Examples:
- GetSimpleConsoleTable([["binary", "b1", "b2"],["size", "300", "400"]])
- will produce a colored table that can be printed to the console.
+ Examples:
+ GetSimpleConsoleTable([["binary", "b1", "b2"],["size", "300", "400"]])
+ will produce a colored table that can be printed to the console.
- Args:
- table: a list of lists.
- out_to: specify the fomat of output. Currently it supports HTML and CONSOLE.
+ Args:
+ table: a list of lists.
+ out_to: specify the fomat of output. Currently it supports HTML and CONSOLE.
- Returns:
- A string version of the table that can be printed to the console.
- """
- columns = [
- Column(AmeanResult(), Format()),
- Column(AmeanRatioResult(), PercentFormat()),
- Column(AmeanRatioResult(), ColorBoxFormat()),
- ]
- our_table = [table[0]]
- for row in table[1:]:
- our_row = [row[0]]
- for v in row[1:]:
- our_row.append([v])
- our_table.append(our_row)
+ Returns:
+ A string version of the table that can be printed to the console.
+ """
+ columns = [
+ Column(AmeanResult(), Format()),
+ Column(AmeanRatioResult(), PercentFormat()),
+ Column(AmeanRatioResult(), ColorBoxFormat()),
+ ]
+ our_table = [table[0]]
+ for row in table[1:]:
+ our_row = [row[0]]
+ for v in row[1:]:
+ our_row.append([v])
+ our_table.append(our_row)
- tf = TableFormatter(our_table, columns)
- cell_table = tf.GetCellTable()
- tp = TablePrinter(cell_table, out_to)
- return tp.Print()
+ tf = TableFormatter(our_table, columns)
+ cell_table = tf.GetCellTable()
+ tp = TablePrinter(cell_table, out_to)
+ return tp.Print()
# pylint: disable=redefined-outer-name
def GetComplexTable(runs, labels, out_to=TablePrinter.CONSOLE):
- """Prints a complex table.
+ """Prints a complex table.
- This can be used to generate a table with arithmetic mean, standard deviation,
- coefficient of variation, p-values, etc.
+ This can be used to generate a table with arithmetic mean, standard deviation,
+ coefficient of variation, p-values, etc.
- Args:
- runs: A list of lists with data to tabulate.
- labels: A list of labels that correspond to the runs.
- out_to: specifies the format of the table (example CONSOLE or HTML).
+ Args:
+ runs: A list of lists with data to tabulate.
+ labels: A list of labels that correspond to the runs.
+ out_to: specifies the format of the table (example CONSOLE or HTML).
- Returns:
- A string table that can be printed to the console or put in an HTML file.
- """
- tg = TableGenerator(runs, labels, TableGenerator.SORT_BY_VALUES_DESC)
- table = tg.GetTable()
- columns = [
- Column(LiteralResult(), Format(), 'Literal'),
- Column(AmeanResult(), Format()),
- Column(StdResult(), Format()),
- Column(CoeffVarResult(), CoeffVarFormat()),
- Column(NonEmptyCountResult(), Format()),
- Column(AmeanRatioResult(), PercentFormat()),
- Column(AmeanRatioResult(), RatioFormat()),
- Column(GmeanRatioResult(), RatioFormat()),
- Column(PValueResult(), PValueFormat())
- ]
- tf = TableFormatter(table, columns)
- cell_table = tf.GetCellTable()
- tp = TablePrinter(cell_table, out_to)
- return tp.Print()
+ Returns:
+ A string table that can be printed to the console or put in an HTML file.
+ """
+ tg = TableGenerator(runs, labels, TableGenerator.SORT_BY_VALUES_DESC)
+ table = tg.GetTable()
+ columns = [
+ Column(LiteralResult(), Format(), "Literal"),
+ Column(AmeanResult(), Format()),
+ Column(StdResult(), Format()),
+ Column(CoeffVarResult(), CoeffVarFormat()),
+ Column(NonEmptyCountResult(), Format()),
+ Column(AmeanRatioResult(), PercentFormat()),
+ Column(AmeanRatioResult(), RatioFormat()),
+ Column(GmeanRatioResult(), RatioFormat()),
+ Column(PValueResult(), PValueFormat()),
+ ]
+ tf = TableFormatter(table, columns)
+ cell_table = tf.GetCellTable()
+ tp = TablePrinter(cell_table, out_to)
+ return tp.Print()
-if __name__ == '__main__':
- # Run a few small tests here.
- run1 = {
- 'k1': '10',
- 'k2': '12',
- 'k5': '40',
- 'k6': '40',
- 'ms_1': '20',
- 'k7': 'FAIL',
- 'k8': 'PASS',
- 'k9': 'PASS',
- 'k10': '0'
- }
- run2 = {
- 'k1': '13',
- 'k2': '14',
- 'k3': '15',
- 'ms_1': '10',
- 'k8': 'PASS',
- 'k9': 'FAIL',
- 'k10': '0'
- }
- run3 = {
- 'k1': '50',
- 'k2': '51',
- 'k3': '52',
- 'k4': '53',
- 'k5': '35',
- 'k6': '45',
- 'ms_1': '200',
- 'ms_2': '20',
- 'k7': 'FAIL',
- 'k8': 'PASS',
- 'k9': 'PASS'
- }
- runs = [[run1, run2], [run3]]
- labels = ['vanilla', 'modified']
- t = GetComplexTable(runs, labels, TablePrinter.CONSOLE)
- print(t)
- email = GetComplexTable(runs, labels, TablePrinter.EMAIL)
+if __name__ == "__main__":
+ # Run a few small tests here.
+ run1 = {
+ "k1": "10",
+ "k2": "12",
+ "k5": "40",
+ "k6": "40",
+ "ms_1": "20",
+ "k7": "FAIL",
+ "k8": "PASS",
+ "k9": "PASS",
+ "k10": "0",
+ }
+ run2 = {
+ "k1": "13",
+ "k2": "14",
+ "k3": "15",
+ "ms_1": "10",
+ "k8": "PASS",
+ "k9": "FAIL",
+ "k10": "0",
+ }
+ run3 = {
+ "k1": "50",
+ "k2": "51",
+ "k3": "52",
+ "k4": "53",
+ "k5": "35",
+ "k6": "45",
+ "ms_1": "200",
+ "ms_2": "20",
+ "k7": "FAIL",
+ "k8": "PASS",
+ "k9": "PASS",
+ }
+ runs = [[run1, run2], [run3]]
+ labels = ["vanilla", "modified"]
+ t = GetComplexTable(runs, labels, TablePrinter.CONSOLE)
+ print(t)
+ email = GetComplexTable(runs, labels, TablePrinter.EMAIL)
- runs = [[{
- 'k1': '1'
- }, {
- 'k1': '1.1'
- }, {
- 'k1': '1.2'
- }], [{
- 'k1': '5'
- }, {
- 'k1': '5.1'
- }, {
- 'k1': '5.2'
- }]]
- t = GetComplexTable(runs, labels, TablePrinter.CONSOLE)
- print(t)
+ runs = [
+ [{"k1": "1"}, {"k1": "1.1"}, {"k1": "1.2"}],
+ [{"k1": "5"}, {"k1": "5.1"}, {"k1": "5.2"}],
+ ]
+ t = GetComplexTable(runs, labels, TablePrinter.CONSOLE)
+ print(t)
- simple_table = [
- ['binary', 'b1', 'b2', 'b3'],
- ['size', 100, 105, 108],
- ['rodata', 100, 80, 70],
- ['data', 100, 100, 100],
- ['debug', 100, 140, 60],
- ]
- t = GetSimpleTable(simple_table)
- print(t)
- email += GetSimpleTable(simple_table, TablePrinter.HTML)
- email_to = [getpass.getuser()]
- email = "<pre style='font-size: 13px'>%s</pre>" % email
- EmailSender().SendEmail(email_to, 'SimpleTableTest', email, msg_type='html')
+ simple_table = [
+ ["binary", "b1", "b2", "b3"],
+ ["size", 100, 105, 108],
+ ["rodata", 100, 80, 70],
+ ["data", 100, 100, 100],
+ ["debug", 100, 140, 60],
+ ]
+ t = GetSimpleTable(simple_table)
+ print(t)
+ email += GetSimpleTable(simple_table, TablePrinter.HTML)
+ email_to = [getpass.getuser()]
+ email = "<pre style='font-size: 13px'>%s</pre>" % email
+ EmailSender().SendEmail(email_to, "SimpleTableTest", email, msg_type="html")
diff --git a/cros_utils/tabulator_test.py b/cros_utils/tabulator_test.py
index 9dd4828..91ce8fd 100755
--- a/cros_utils/tabulator_test.py
+++ b/cros_utils/tabulator_test.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for the tabulator module."""
-from __future__ import print_function
__author__ = 'asharif@google.com (Ahmad Sharif)'
diff --git a/cros_utils/timeline.py b/cros_utils/timeline.py
index cce0b05..f18a39b 100644
--- a/cros_utils/timeline.py
+++ b/cros_utils/timeline.py
@@ -1,55 +1,55 @@
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tools for recording and reporting timeline of benchmark_run."""
-from __future__ import print_function
-__author__ = 'yunlian@google.com (Yunlian Jiang)'
+__author__ = "yunlian@google.com (Yunlian Jiang)"
import time
class Event(object):
- """One event on the timeline."""
+ """One event on the timeline."""
- def __init__(self, name='', cur_time=0):
- self.name = name
- self.timestamp = cur_time
+ def __init__(self, name="", cur_time=0):
+ self.name = name
+ self.timestamp = cur_time
class Timeline(object):
- """Use a dict to store the timeline."""
+ """Use a dict to store the timeline."""
- def __init__(self):
- self.events = []
+ def __init__(self):
+ self.events = []
- def Record(self, event):
- for e in self.events:
- assert e.name != event, (
- 'The event {0} is already recorded.'.format(event))
- cur_event = Event(name=event, cur_time=time.time())
- self.events.append(cur_event)
+ def Record(self, event):
+ for e in self.events:
+ assert e.name != event, "The event {0} is already recorded.".format(
+ event
+ )
+ cur_event = Event(name=event, cur_time=time.time())
+ self.events.append(cur_event)
- def GetEvents(self):
- return ([e.name for e in self.events])
+ def GetEvents(self):
+ return [e.name for e in self.events]
- def GetEventDict(self):
- tl = {}
- for e in self.events:
- tl[e.name] = e.timestamp
- return tl
+ def GetEventDict(self):
+ tl = {}
+ for e in self.events:
+ tl[e.name] = e.timestamp
+ return tl
- def GetEventTime(self, event):
- for e in self.events:
- if e.name == event:
- return e.timestamp
- raise IndexError('The event {0} is not recorded'.format(event))
+ def GetEventTime(self, event):
+ for e in self.events:
+ if e.name == event:
+ return e.timestamp
+ raise IndexError("The event {0} is not recorded".format(event))
- def GetLastEventTime(self):
- return self.events[-1].timestamp
+ def GetLastEventTime(self):
+ return self.events[-1].timestamp
- def GetLastEvent(self):
- return self.events[-1].name
+ def GetLastEvent(self):
+ return self.events[-1].name
diff --git a/cros_utils/timeline_test.py b/cros_utils/timeline_test.py
index 8a10e54..aceab2d 100755
--- a/cros_utils/timeline_test.py
+++ b/cros_utils/timeline_test.py
@@ -1,14 +1,13 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for time_line.py."""
-from __future__ import print_function
-__author__ = 'yunlian@google.com (Yunlian Jiang)'
+__author__ = "yunlian@google.com (Yunlian Jiang)"
import time
import unittest
@@ -17,46 +16,46 @@
class TimeLineTest(unittest.TestCase):
- """Tests for the Timeline class."""
+ """Tests for the Timeline class."""
- def testRecord(self):
- tl = timeline.Timeline()
- tl.Record('A')
- t = time.time()
- t1 = tl.events[0].timestamp
- self.assertEqual(int(t1 - t), 0)
- self.assertRaises(AssertionError, tl.Record, 'A')
+ def testRecord(self):
+ tl = timeline.Timeline()
+ tl.Record("A")
+ t = time.time()
+ t1 = tl.events[0].timestamp
+ self.assertEqual(int(t1 - t), 0)
+ self.assertRaises(AssertionError, tl.Record, "A")
- def testGetEvents(self):
- tl = timeline.Timeline()
- tl.Record('A')
- e = tl.GetEvents()
- self.assertEqual(e, ['A'])
- tl.Record('B')
- e = tl.GetEvents()
- self.assertEqual(e, ['A', 'B'])
+ def testGetEvents(self):
+ tl = timeline.Timeline()
+ tl.Record("A")
+ e = tl.GetEvents()
+ self.assertEqual(e, ["A"])
+ tl.Record("B")
+ e = tl.GetEvents()
+ self.assertEqual(e, ["A", "B"])
- def testGetEventTime(self):
- tl = timeline.Timeline()
- tl.Record('A')
- t = time.time()
- t1 = tl.GetEventTime('A')
- self.assertEqual(int(t1 - t), 0)
- self.assertRaises(IndexError, tl.GetEventTime, 'B')
+ def testGetEventTime(self):
+ tl = timeline.Timeline()
+ tl.Record("A")
+ t = time.time()
+ t1 = tl.GetEventTime("A")
+ self.assertEqual(int(t1 - t), 0)
+ self.assertRaises(IndexError, tl.GetEventTime, "B")
- def testGetLastEventTime(self):
- tl = timeline.Timeline()
- self.assertRaises(IndexError, tl.GetLastEventTime)
- tl.Record('A')
- t = time.time()
- t1 = tl.GetLastEventTime()
- self.assertEqual(int(t1 - t), 0)
- time.sleep(2)
- tl.Record('B')
- t = time.time()
- t1 = tl.GetLastEventTime()
- self.assertEqual(int(t1 - t), 0)
+ def testGetLastEventTime(self):
+ tl = timeline.Timeline()
+ self.assertRaises(IndexError, tl.GetLastEventTime)
+ tl.Record("A")
+ t = time.time()
+ t1 = tl.GetLastEventTime()
+ self.assertEqual(int(t1 - t), 0)
+ time.sleep(2)
+ tl.Record("B")
+ t = time.time()
+ t1 = tl.GetLastEventTime()
+ self.assertEqual(int(t1 - t), 0)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/cros_utils/tiny_render.py b/cros_utils/tiny_render.py
index 629e771..6168a24 100644
--- a/cros_utils/tiny_render.py
+++ b/cros_utils/tiny_render.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -51,18 +51,18 @@
The rendering functions should never mutate your input.
"""
-from __future__ import print_function
import collections
import html
import typing as t
-Bold = collections.namedtuple('Bold', ['inner'])
-LineBreak = collections.namedtuple('LineBreak', [])
-Link = collections.namedtuple('Link', ['href', 'inner'])
-UnorderedList = collections.namedtuple('UnorderedList', ['items'])
+
+Bold = collections.namedtuple("Bold", ["inner"])
+LineBreak = collections.namedtuple("LineBreak", [])
+Link = collections.namedtuple("Link", ["href", "inner"])
+UnorderedList = collections.namedtuple("UnorderedList", ["items"])
# Outputs different data depending on whether we're emitting text or HTML.
-Switch = collections.namedtuple('Switch', ['text', 'html'])
+Switch = collections.namedtuple("Switch", ["text", "html"])
line_break = LineBreak()
@@ -85,97 +85,98 @@
Piece = t.Any # pylint: disable=invalid-name
-def _render_text_pieces(piece: Piece, indent_level: int,
- into: t.List[str]) -> None:
- """Helper for |render_text_pieces|. Accumulates strs into |into|."""
- if isinstance(piece, LineBreak):
- into.append('\n' + indent_level * ' ')
- return
+def _render_text_pieces(
+ piece: Piece, indent_level: int, into: t.List[str]
+) -> None:
+ """Helper for |render_text_pieces|. Accumulates strs into |into|."""
+ if isinstance(piece, LineBreak):
+ into.append("\n" + indent_level * " ")
+ return
- if isinstance(piece, str):
- into.append(piece)
- return
+ if isinstance(piece, str):
+ into.append(piece)
+ return
- if isinstance(piece, Bold):
- into.append('**')
- _render_text_pieces(piece.inner, indent_level, into)
- into.append('**')
- return
+ if isinstance(piece, Bold):
+ into.append("**")
+ _render_text_pieces(piece.inner, indent_level, into)
+ into.append("**")
+ return
- if isinstance(piece, Link):
- # Don't even try; it's ugly more often than not.
- _render_text_pieces(piece.inner, indent_level, into)
- return
+ if isinstance(piece, Link):
+ # Don't even try; it's ugly more often than not.
+ _render_text_pieces(piece.inner, indent_level, into)
+ return
- if isinstance(piece, UnorderedList):
- for p in piece.items:
- _render_text_pieces([line_break, '- ', p], indent_level + 2, into)
- return
+ if isinstance(piece, UnorderedList):
+ for p in piece.items:
+ _render_text_pieces([line_break, "- ", p], indent_level + 2, into)
+ return
- if isinstance(piece, Switch):
- _render_text_pieces(piece.text, indent_level, into)
- return
+ if isinstance(piece, Switch):
+ _render_text_pieces(piece.text, indent_level, into)
+ return
- if isinstance(piece, (list, tuple)):
- for p in piece:
- _render_text_pieces(p, indent_level, into)
- return
+ if isinstance(piece, (list, tuple)):
+ for p in piece:
+ _render_text_pieces(p, indent_level, into)
+ return
- raise ValueError('Unknown piece type: %s' % type(piece))
+ raise ValueError("Unknown piece type: %s" % type(piece))
def render_text_pieces(piece: Piece) -> str:
- """Renders the given Pieces into text."""
- into = []
- _render_text_pieces(piece, 0, into)
- return ''.join(into)
+ """Renders the given Pieces into text."""
+ into = []
+ _render_text_pieces(piece, 0, into)
+ return "".join(into)
def _render_html_pieces(piece: Piece, into: t.List[str]) -> None:
- """Helper for |render_html_pieces|. Accumulates strs into |into|."""
- if piece is line_break:
- into.append('<br />\n')
- return
+ """Helper for |render_html_pieces|. Accumulates strs into |into|."""
+ if piece is line_break:
+ into.append("<br />\n")
+ return
- if isinstance(piece, str):
- into.append(html.escape(piece))
- return
+ if isinstance(piece, str):
+ into.append(html.escape(piece))
+ return
- if isinstance(piece, Bold):
- into.append('<b>')
- _render_html_pieces(piece.inner, into)
- into.append('</b>')
- return
+ if isinstance(piece, Bold):
+ into.append("<b>")
+ _render_html_pieces(piece.inner, into)
+ into.append("</b>")
+ return
- if isinstance(piece, Link):
- into.append('<a href="' + piece.href + '">')
- _render_html_pieces(piece.inner, into)
- into.append('</a>')
- return
+ if isinstance(piece, Link):
+ into.append('<a href="' + piece.href + '">')
+ _render_html_pieces(piece.inner, into)
+ into.append("</a>")
+ return
- if isinstance(piece, UnorderedList):
- into.append('<ul>\n')
- for p in piece.items:
- into.append('<li>')
- _render_html_pieces(p, into)
- into.append('</li>\n')
- into.append('</ul>\n')
- return
+ if isinstance(piece, UnorderedList):
+ into.append("<ul>\n")
+ for p in piece.items:
+ into.append("<li>")
+ _render_html_pieces(p, into)
+ into.append("</li>\n")
+ into.append("</ul>\n")
+ return
- if isinstance(piece, Switch):
- _render_html_pieces(piece.html, into)
- return
+ if isinstance(piece, Switch):
+ _render_html_pieces(piece.html, into)
+ return
- if isinstance(piece, (list, tuple)):
- for p in piece:
- _render_html_pieces(p, into)
- return
+ if isinstance(piece, (list, tuple)):
+ for p in piece:
+ _render_html_pieces(p, into)
+ return
- raise ValueError('Unknown piece type: %s' % type(piece))
+ raise ValueError("Unknown piece type: %s" % type(piece))
def render_html_pieces(piece: Piece) -> str:
- """Renders the given Pieces into HTML."""
- into = []
- _render_html_pieces(piece, into)
- return ''.join(into)
+ """Renders the given Pieces into HTML."""
+ into = []
+ _render_html_pieces(piece, into)
+ return "".join(into)
diff --git a/cros_utils/tiny_render_test.py b/cros_utils/tiny_render_test.py
index 114a179..9c4d750 100755
--- a/cros_utils/tiny_render_test.py
+++ b/cros_utils/tiny_render_test.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for tiny_render."""
-from __future__ import print_function
import unittest
@@ -18,160 +17,184 @@
# shipped alongside the plain-text, the hope is that people won't have to
# subject themselves to reading the HTML often. :)
class Test(unittest.TestCase):
- """Tests for tiny_render."""
+ """Tests for tiny_render."""
- def test_bold(self):
- pieces = [
- tiny_render.Bold('hello'),
- ', ',
- tiny_render.Bold(['world', '!']),
- ]
+ def test_bold(self):
+ pieces = [
+ tiny_render.Bold("hello"),
+ ", ",
+ tiny_render.Bold(["world", "!"]),
+ ]
- self.assertEqual(
- tiny_render.render_text_pieces(pieces),
- '**hello**, **world!**',
- )
+ self.assertEqual(
+ tiny_render.render_text_pieces(pieces),
+ "**hello**, **world!**",
+ )
- self.assertEqual(
- tiny_render.render_html_pieces(pieces),
- '<b>hello</b>, <b>world!</b>',
- )
+ self.assertEqual(
+ tiny_render.render_html_pieces(pieces),
+ "<b>hello</b>, <b>world!</b>",
+ )
- def test_line_break(self):
- pieces = [
- 'hello',
- tiny_render.line_break,
- ['world', '!'],
- ]
+ def test_line_break(self):
+ pieces = [
+ "hello",
+ tiny_render.line_break,
+ ["world", "!"],
+ ]
- self.assertEqual(
- tiny_render.render_text_pieces(pieces),
- 'hello\nworld!',
- )
+ self.assertEqual(
+ tiny_render.render_text_pieces(pieces),
+ "hello\nworld!",
+ )
- self.assertEqual(
- tiny_render.render_html_pieces(pieces),
- 'hello<br />\nworld!',
- )
+ self.assertEqual(
+ tiny_render.render_html_pieces(pieces),
+ "hello<br />\nworld!",
+ )
- def test_linkification(self):
- pieces = [
- 'hello ',
- tiny_render.Link(href='https://google.com', inner='world!'),
- ]
+ def test_linkification(self):
+ pieces = [
+ "hello ",
+ tiny_render.Link(href="https://google.com", inner="world!"),
+ ]
- self.assertEqual(
- tiny_render.render_text_pieces(pieces),
- 'hello world!',
- )
+ self.assertEqual(
+ tiny_render.render_text_pieces(pieces),
+ "hello world!",
+ )
- self.assertEqual(
- tiny_render.render_html_pieces(pieces),
- 'hello <a href="https://google.com">world!</a>',
- )
+ self.assertEqual(
+ tiny_render.render_html_pieces(pieces),
+ 'hello <a href="https://google.com">world!</a>',
+ )
- def test_unordered_list(self):
- pieces = [
- 'hello:',
- tiny_render.UnorderedList([
- 'world',
- 'w o r l d',
- ]),
- ]
+ def test_unordered_list(self):
+ pieces = [
+ "hello:",
+ tiny_render.UnorderedList(
+ [
+ "world",
+ "w o r l d",
+ ]
+ ),
+ ]
- self.assertEqual(
- tiny_render.render_text_pieces(pieces),
- '\n'.join((
- 'hello:',
- ' - world',
- ' - w o r l d',
- )),
- )
+ self.assertEqual(
+ tiny_render.render_text_pieces(pieces),
+ "\n".join(
+ (
+ "hello:",
+ " - world",
+ " - w o r l d",
+ )
+ ),
+ )
- self.assertEqual(
- tiny_render.render_html_pieces(pieces),
- '\n'.join((
- 'hello:<ul>',
- '<li>world</li>',
- '<li>w o r l d</li>',
- '</ul>',
- '',
- )),
- )
+ self.assertEqual(
+ tiny_render.render_html_pieces(pieces),
+ "\n".join(
+ (
+ "hello:<ul>",
+ "<li>world</li>",
+ "<li>w o r l d</li>",
+ "</ul>",
+ "",
+ )
+ ),
+ )
- def test_nested_unordered_list(self):
- pieces = [
- 'hello:',
- tiny_render.UnorderedList([
- 'world',
- ['and more:', tiny_render.UnorderedList(['w o r l d'])],
- 'world2',
- ])
- ]
+ def test_nested_unordered_list(self):
+ pieces = [
+ "hello:",
+ tiny_render.UnorderedList(
+ [
+ "world",
+ ["and more:", tiny_render.UnorderedList(["w o r l d"])],
+ "world2",
+ ]
+ ),
+ ]
- self.assertEqual(
- tiny_render.render_text_pieces(pieces),
- '\n'.join((
- 'hello:',
- ' - world',
- ' - and more:',
- ' - w o r l d',
- ' - world2',
- )),
- )
+ self.assertEqual(
+ tiny_render.render_text_pieces(pieces),
+ "\n".join(
+ (
+ "hello:",
+ " - world",
+ " - and more:",
+ " - w o r l d",
+ " - world2",
+ )
+ ),
+ )
- self.assertEqual(
- tiny_render.render_html_pieces(pieces),
- '\n'.join((
- 'hello:<ul>',
- '<li>world</li>',
- '<li>and more:<ul>',
- '<li>w o r l d</li>',
- '</ul>',
- '</li>',
- '<li>world2</li>',
- '</ul>',
- '',
- )),
- )
+ self.assertEqual(
+ tiny_render.render_html_pieces(pieces),
+ "\n".join(
+ (
+ "hello:<ul>",
+ "<li>world</li>",
+ "<li>and more:<ul>",
+ "<li>w o r l d</li>",
+ "</ul>",
+ "</li>",
+ "<li>world2</li>",
+ "</ul>",
+ "",
+ )
+ ),
+ )
- def test_switch(self):
- pieces = ['hello ', tiny_render.Switch(text='text', html='html')]
- self.assertEqual(tiny_render.render_text_pieces(pieces), 'hello text')
- self.assertEqual(tiny_render.render_html_pieces(pieces), 'hello html')
+ def test_switch(self):
+ pieces = ["hello ", tiny_render.Switch(text="text", html="html")]
+ self.assertEqual(tiny_render.render_text_pieces(pieces), "hello text")
+ self.assertEqual(tiny_render.render_html_pieces(pieces), "hello html")
- def test_golden(self):
- pieces = [
- 'hello',
- tiny_render.UnorderedList([
- tiny_render.Switch(text='text', html=tiny_render.Bold('html')),
- 'the',
- tiny_render.Bold('sun'),
- ]),
- tiny_render.line_break,
- ['is', ' out!'],
- ]
+ def test_golden(self):
+ pieces = [
+ "hello",
+ tiny_render.UnorderedList(
+ [
+ tiny_render.Switch(
+ text="text", html=tiny_render.Bold("html")
+ ),
+ "the",
+ tiny_render.Bold("sun"),
+ ]
+ ),
+ tiny_render.line_break,
+ ["is", " out!"],
+ ]
- self.assertEqual(
- tiny_render.render_text_pieces(pieces), '\n'.join((
- 'hello',
- ' - text',
- ' - the',
- ' - **sun**',
- 'is out!',
- )))
+ self.assertEqual(
+ tiny_render.render_text_pieces(pieces),
+ "\n".join(
+ (
+ "hello",
+ " - text",
+ " - the",
+ " - **sun**",
+ "is out!",
+ )
+ ),
+ )
- self.assertEqual(
- tiny_render.render_html_pieces(pieces), '\n'.join((
- 'hello<ul>',
- '<li><b>html</b></li>',
- '<li>the</li>',
- '<li><b>sun</b></li>',
- '</ul>',
- '<br />',
- 'is out!',
- )))
+ self.assertEqual(
+ tiny_render.render_html_pieces(pieces),
+ "\n".join(
+ (
+ "hello<ul>",
+ "<li><b>html</b></li>",
+ "<li>the</li>",
+ "<li><b>sun</b></li>",
+ "</ul>",
+ "<br />",
+ "is out!",
+ )
+ ),
+ )
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/cros_utils/toolchain_utils.sh b/cros_utils/toolchain_utils.sh
index 5e9a2a3..b5403bb 100644
--- a/cros_utils/toolchain_utils.sh
+++ b/cros_utils/toolchain_utils.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
index 0413b59..f9de0cf 100644
--- a/crosperf/benchmark.py
+++ b/crosperf/benchmark.py
@@ -1,87 +1,91 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Define a type that wraps a Benchmark instance."""
-from __future__ import division
-from __future__ import print_function
import math
+
# FIXME(denik): Fix the import in chroot.
# pylint: disable=import-error
from scipy import stats
+
# See crbug.com/673558 for how these are estimated.
_estimated_stddev = {
- 'octane': 0.015,
- 'kraken': 0.019,
- 'speedometer': 0.007,
- 'speedometer2': 0.006,
- 'dromaeo.domcoreattr': 0.023,
- 'dromaeo.domcoremodify': 0.011,
- 'graphics_WebGLAquarium': 0.008,
- 'page_cycler_v2.typical_25': 0.021,
- 'loading.desktop': 0.021, # Copied from page_cycler initially
+ "octane": 0.015,
+ "kraken": 0.019,
+ "speedometer": 0.007,
+ "speedometer2": 0.006,
+ "dromaeo.domcoreattr": 0.023,
+ "dromaeo.domcoremodify": 0.011,
+ "graphics_WebGLAquarium": 0.008,
+ "page_cycler_v2.typical_25": 0.021,
+ "loading.desktop": 0.021, # Copied from page_cycler initially
}
# Get #samples needed to guarantee a given confidence interval, assuming the
# samples follow normal distribution.
def _samples(b):
- # TODO: Make this an option
- # CI = (0.9, 0.02), i.e., 90% chance that |sample mean - true mean| < 2%.
- p = 0.9
- e = 0.02
- if b not in _estimated_stddev:
- return 1
- d = _estimated_stddev[b]
- # Get at least 2 samples so as to calculate standard deviation, which is
- # needed in T-test for p-value.
- n = int(math.ceil((stats.norm.isf((1 - p) / 2) * d / e)**2))
- return n if n > 1 else 2
+ # TODO: Make this an option
+ # CI = (0.9, 0.02), i.e., 90% chance that |sample mean - true mean| < 2%.
+ p = 0.9
+ e = 0.02
+ if b not in _estimated_stddev:
+ return 1
+ d = _estimated_stddev[b]
+ # Get at least 2 samples so as to calculate standard deviation, which is
+ # needed in T-test for p-value.
+ n = int(math.ceil((stats.norm.isf((1 - p) / 2) * d / e) ** 2))
+ return n if n > 1 else 2
class Benchmark(object):
- """Class representing a benchmark to be run.
+ """Class representing a benchmark to be run.
- Contains details of the benchmark suite, arguments to pass to the suite,
- iterations to run the benchmark suite and so on. Note that the benchmark name
- can be different to the test suite name. For example, you may want to have
- two different benchmarks which run the same test_name with different
- arguments.
- """
+ Contains details of the benchmark suite, arguments to pass to the suite,
+ iterations to run the benchmark suite and so on. Note that the benchmark name
+ can be different to the test suite name. For example, you may want to have
+ two different benchmarks which run the same test_name with different
+ arguments.
+ """
- def __init__(self,
- name,
- test_name,
- test_args,
- iterations,
- rm_chroot_tmp,
- perf_args,
- suite='',
- show_all_results=False,
- retries=0,
- run_local=False,
- cwp_dso='',
- weight=0):
- self.name = name
- # For telemetry, this is the benchmark name.
- self.test_name = test_name
- # For telemetry, this is the data.
- self.test_args = test_args
- self.iterations = iterations if iterations > 0 else _samples(name)
- self.perf_args = perf_args
- self.rm_chroot_tmp = rm_chroot_tmp
- self.iteration_adjusted = False
- self.suite = suite
- self.show_all_results = show_all_results
- self.retries = retries
- if self.suite == 'telemetry':
- self.show_all_results = True
- if run_local and self.suite != 'telemetry_Crosperf':
- raise RuntimeError('run_local is only supported by telemetry_Crosperf.')
- self.run_local = run_local
- self.cwp_dso = cwp_dso
- self.weight = weight
+ def __init__(
+ self,
+ name,
+ test_name,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite="",
+ show_all_results=False,
+ retries=0,
+ run_local=False,
+ cwp_dso="",
+ weight=0,
+ ):
+ self.name = name
+ # For telemetry, this is the benchmark name.
+ self.test_name = test_name
+ # For telemetry, this is the data.
+ self.test_args = test_args
+ self.iterations = iterations if iterations > 0 else _samples(name)
+ self.perf_args = perf_args
+ self.rm_chroot_tmp = rm_chroot_tmp
+ self.iteration_adjusted = False
+ self.suite = suite
+ self.show_all_results = show_all_results
+ self.retries = retries
+ if self.suite == "telemetry":
+ self.show_all_results = True
+ if run_local and self.suite != "telemetry_Crosperf":
+ raise RuntimeError(
+ "run_local is only supported by telemetry_Crosperf."
+ )
+ self.run_local = run_local
+ self.cwp_dso = cwp_dso
+ self.weight = weight
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index b5912c1..84797d1 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -1,10 +1,9 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module of benchmark runs."""
-from __future__ import print_function
import datetime
import threading
@@ -13,254 +12,339 @@
from cros_utils import command_executer
from cros_utils import timeline
-
-from suite_runner import SuiteRunner
from results_cache import MockResult
from results_cache import MockResultsCache
from results_cache import Result
from results_cache import ResultsCache
+from suite_runner import SuiteRunner
-STATUS_FAILED = 'FAILED'
-STATUS_SUCCEEDED = 'SUCCEEDED'
-STATUS_IMAGING = 'IMAGING'
-STATUS_RUNNING = 'RUNNING'
-STATUS_WAITING = 'WAITING'
-STATUS_PENDING = 'PENDING'
+
+STATUS_FAILED = "FAILED"
+STATUS_SUCCEEDED = "SUCCEEDED"
+STATUS_IMAGING = "IMAGING"
+STATUS_RUNNING = "RUNNING"
+STATUS_WAITING = "WAITING"
+STATUS_PENDING = "PENDING"
class BenchmarkRun(threading.Thread):
- """The benchmarkrun class."""
+ """The benchmarkrun class."""
- def __init__(self, name, benchmark, label, iteration, cache_conditions,
- machine_manager, logger_to_use, log_level, share_cache,
- dut_config):
- threading.Thread.__init__(self)
- self.name = name
- self._logger = logger_to_use
- self.log_level = log_level
- self.benchmark = benchmark
- self.iteration = iteration
- self.label = label
- self.result = None
- self.terminated = False
- self.retval = None
- self.run_completed = False
- self.machine_manager = machine_manager
- self.suite_runner = SuiteRunner(dut_config, self._logger, self.log_level)
- self.machine = None
- self.cache_conditions = cache_conditions
- self.runs_complete = 0
- self.cache_hit = False
- self.failure_reason = ''
- self.test_args = benchmark.test_args
- self.cache = None
- self.profiler_args = self.GetExtraAutotestArgs()
- self._ce = command_executer.GetCommandExecuter(
- self._logger, log_level=self.log_level)
- self.timeline = timeline.Timeline()
- self.timeline.Record(STATUS_PENDING)
- self.share_cache = share_cache
- self.cache_has_been_read = False
+ def __init__(
+ self,
+ name,
+ benchmark,
+ label,
+ iteration,
+ cache_conditions,
+ machine_manager,
+ logger_to_use,
+ log_level,
+ share_cache,
+ dut_config,
+ ):
+ threading.Thread.__init__(self)
+ self.name = name
+ self._logger = logger_to_use
+ self.log_level = log_level
+ self.benchmark = benchmark
+ self.iteration = iteration
+ self.label = label
+ self.result = None
+ self.terminated = False
+ self.retval = None
+ self.run_completed = False
+ self.machine_manager = machine_manager
+ self.suite_runner = SuiteRunner(
+ dut_config, self._logger, self.log_level
+ )
+ self.machine = None
+ self.cache_conditions = cache_conditions
+ self.runs_complete = 0
+ self.cache_hit = False
+ self.failure_reason = ""
+ self.test_args = benchmark.test_args
+ self.cache = None
+ self.profiler_args = self.GetExtraAutotestArgs()
+ self._ce = command_executer.GetCommandExecuter(
+ self._logger, log_level=self.log_level
+ )
+ self.timeline = timeline.Timeline()
+ self.timeline.Record(STATUS_PENDING)
+ self.share_cache = share_cache
+ self.cache_has_been_read = False
- # This is used by schedv2.
- self.owner_thread = None
+ # This is used by schedv2.
+ self.owner_thread = None
- def ReadCache(self):
- # Just use the first machine for running the cached version,
- # without locking it.
- self.cache = ResultsCache()
- self.cache.Init(self.label.chromeos_image, self.label.chromeos_root,
- self.benchmark.test_name, self.iteration, self.test_args,
- self.profiler_args, self.machine_manager, self.machine,
- self.label.board, self.cache_conditions, self._logger,
- self.log_level, self.label, self.share_cache,
- self.benchmark.suite, self.benchmark.show_all_results,
- self.benchmark.run_local, self.benchmark.cwp_dso)
+ def ReadCache(self):
+ # Just use the first machine for running the cached version,
+ # without locking it.
+ self.cache = ResultsCache()
+ self.cache.Init(
+ self.label.chromeos_image,
+ self.label.chromeos_root,
+ self.benchmark.test_name,
+ self.iteration,
+ self.test_args,
+ self.profiler_args,
+ self.machine_manager,
+ self.machine,
+ self.label.board,
+ self.cache_conditions,
+ self._logger,
+ self.log_level,
+ self.label,
+ self.share_cache,
+ self.benchmark.suite,
+ self.benchmark.show_all_results,
+ self.benchmark.run_local,
+ self.benchmark.cwp_dso,
+ )
- self.result = self.cache.ReadResult()
- self.cache_hit = (self.result is not None)
- self.cache_has_been_read = True
+ self.result = self.cache.ReadResult()
+ self.cache_hit = self.result is not None
+ self.cache_has_been_read = True
- def run(self):
- try:
- if not self.cache_has_been_read:
- self.ReadCache()
+ def run(self):
+ try:
+ if not self.cache_has_been_read:
+ self.ReadCache()
- if self.result:
- self._logger.LogOutput('%s: Cache hit.' % self.name)
- self._logger.LogOutput(self.result.out, print_to_console=False)
- self._logger.LogError(self.result.err, print_to_console=False)
+ if self.result:
+ self._logger.LogOutput("%s: Cache hit." % self.name)
+ self._logger.LogOutput(self.result.out, print_to_console=False)
+ self._logger.LogError(self.result.err, print_to_console=False)
- elif self.label.cache_only:
- self._logger.LogOutput('%s: No cache hit.' % self.name)
- output = '%s: No Cache hit.' % self.name
- retval = 1
- err = 'No cache hit.'
- self.result = Result.CreateFromRun(
- self._logger, self.log_level, self.label, self.machine, output, err,
- retval, self.benchmark.test_name, self.benchmark.suite,
- self.benchmark.cwp_dso)
+ elif self.label.cache_only:
+ self._logger.LogOutput("%s: No cache hit." % self.name)
+ output = "%s: No Cache hit." % self.name
+ retval = 1
+ err = "No cache hit."
+ self.result = Result.CreateFromRun(
+ self._logger,
+ self.log_level,
+ self.label,
+ self.machine,
+ output,
+ err,
+ retval,
+ self.benchmark.test_name,
+ self.benchmark.suite,
+ self.benchmark.cwp_dso,
+ )
- else:
- self._logger.LogOutput('%s: No cache hit.' % self.name)
- self.timeline.Record(STATUS_WAITING)
- # Try to acquire a machine now.
- self.machine = self.AcquireMachine()
- self.cache.machine = self.machine
- self.result = self.RunTest(self.machine)
+ else:
+ self._logger.LogOutput("%s: No cache hit." % self.name)
+ self.timeline.Record(STATUS_WAITING)
+ # Try to acquire a machine now.
+ self.machine = self.AcquireMachine()
+ self.cache.machine = self.machine
+ self.result = self.RunTest(self.machine)
- self.cache.remote = self.machine.name
- self.label.chrome_version = self.machine_manager.GetChromeVersion(
- self.machine)
- self.cache.StoreResult(self.result)
+ self.cache.remote = self.machine.name
+ self.label.chrome_version = (
+ self.machine_manager.GetChromeVersion(self.machine)
+ )
+ self.cache.StoreResult(self.result)
- if not self.label.chrome_version:
- if self.machine:
- self.label.chrome_version = self.machine_manager.GetChromeVersion(
- self.machine)
- elif self.result.chrome_version:
- self.label.chrome_version = self.result.chrome_version
+ if not self.label.chrome_version:
+ if self.machine:
+ self.label.chrome_version = (
+ self.machine_manager.GetChromeVersion(self.machine)
+ )
+ elif self.result.chrome_version:
+ self.label.chrome_version = self.result.chrome_version
- if self.terminated:
- return
+ if self.terminated:
+ return
- if not self.result.retval:
- self.timeline.Record(STATUS_SUCCEEDED)
- else:
+ if not self.result.retval:
+ self.timeline.Record(STATUS_SUCCEEDED)
+ else:
+ if self.timeline.GetLastEvent() != STATUS_FAILED:
+ self.failure_reason = (
+ "Return value of test suite was non-zero."
+ )
+ self.timeline.Record(STATUS_FAILED)
+
+ except Exception as e:
+ self._logger.LogError(
+ "Benchmark run: '%s' failed: %s" % (self.name, e)
+ )
+ traceback.print_exc()
+ if self.timeline.GetLastEvent() != STATUS_FAILED:
+ self.timeline.Record(STATUS_FAILED)
+ self.failure_reason = str(e)
+ finally:
+ if self.owner_thread is not None:
+ # In schedv2 mode, we do not lock machine locally. So noop here.
+ pass
+ elif self.machine:
+ if not self.machine.IsReachable():
+ self._logger.LogOutput(
+ "Machine %s is not reachable, removing it."
+ % self.machine.name
+ )
+ self.machine_manager.RemoveMachine(self.machine.name)
+ self._logger.LogOutput(
+ "Releasing machine: %s" % self.machine.name
+ )
+ self.machine_manager.ReleaseMachine(self.machine)
+ self._logger.LogOutput(
+ "Released machine: %s" % self.machine.name
+ )
+
+ def Terminate(self):
+ self.terminated = True
+ self.suite_runner.Terminate()
if self.timeline.GetLastEvent() != STATUS_FAILED:
- self.failure_reason = 'Return value of test suite was non-zero.'
- self.timeline.Record(STATUS_FAILED)
+ self.timeline.Record(STATUS_FAILED)
+ self.failure_reason = "Thread terminated."
- except Exception as e:
- self._logger.LogError("Benchmark run: '%s' failed: %s" % (self.name, e))
- traceback.print_exc()
- if self.timeline.GetLastEvent() != STATUS_FAILED:
- self.timeline.Record(STATUS_FAILED)
- self.failure_reason = str(e)
- finally:
- if self.owner_thread is not None:
- # In schedv2 mode, we do not lock machine locally. So noop here.
- pass
- elif self.machine:
- if not self.machine.IsReachable():
- self._logger.LogOutput(
- 'Machine %s is not reachable, removing it.' % self.machine.name)
- self.machine_manager.RemoveMachine(self.machine.name)
- self._logger.LogOutput('Releasing machine: %s' % self.machine.name)
- self.machine_manager.ReleaseMachine(self.machine)
- self._logger.LogOutput('Released machine: %s' % self.machine.name)
+ def AcquireMachine(self):
+ if self.owner_thread is not None:
+ # No need to lock machine locally, DutWorker, which is a thread, is
+ # responsible for running br.
+ return self.owner_thread.dut()
+ while True:
+ machine = None
+ if self.terminated:
+ raise RuntimeError(
+ "Thread terminated while trying to acquire machine."
+ )
- def Terminate(self):
- self.terminated = True
- self.suite_runner.Terminate()
- if self.timeline.GetLastEvent() != STATUS_FAILED:
- self.timeline.Record(STATUS_FAILED)
- self.failure_reason = 'Thread terminated.'
+ machine = self.machine_manager.AcquireMachine(self.label)
- def AcquireMachine(self):
- if self.owner_thread is not None:
- # No need to lock machine locally, DutWorker, which is a thread, is
- # responsible for running br.
- return self.owner_thread.dut()
- while True:
- machine = None
- if self.terminated:
- raise RuntimeError('Thread terminated while trying to acquire machine.')
+ if machine:
+ self._logger.LogOutput(
+ "%s: Machine %s acquired at %s"
+ % (self.name, machine.name, datetime.datetime.now())
+ )
+ break
+ time.sleep(10)
+ return machine
- machine = self.machine_manager.AcquireMachine(self.label)
+ def GetExtraAutotestArgs(self):
+ if (
+ self.benchmark.perf_args
+ and self.benchmark.suite != "telemetry_Crosperf"
+ ):
+ self._logger.LogError(
+ "Non-telemetry benchmark does not support profiler."
+ )
+ self.benchmark.perf_args = ""
- if machine:
- self._logger.LogOutput(
- '%s: Machine %s acquired at %s' % (self.name, machine.name,
- datetime.datetime.now()))
- break
- time.sleep(10)
- return machine
+ if self.benchmark.perf_args:
+ perf_args_list = self.benchmark.perf_args.split(" ")
+ perf_args_list = [perf_args_list[0]] + ["-a"] + perf_args_list[1:]
+ perf_args = " ".join(perf_args_list)
+ if not perf_args_list[0] in ["record", "stat"]:
+ raise SyntaxError(
+ "perf_args must start with either record or stat"
+ )
+ extra_test_args = [
+ "--profiler=custom_perf",
+ ("--profiler_args='perf_options=\"%s\"'" % perf_args),
+ ]
+ return " ".join(extra_test_args)
+ else:
+ return ""
- def GetExtraAutotestArgs(self):
- if (self.benchmark.perf_args and
- self.benchmark.suite != 'telemetry_Crosperf'):
- self._logger.LogError(
- 'Non-telemetry benchmark does not support profiler.')
- self.benchmark.perf_args = ''
+ def RunTest(self, machine):
+ self.timeline.Record(STATUS_IMAGING)
+ if self.owner_thread is not None:
+ # In schedv2 mode, do not even call ImageMachine. Machine image is
+ # guarenteed.
+ pass
+ else:
+ self.machine_manager.ImageMachine(machine, self.label)
+ self.timeline.Record(STATUS_RUNNING)
+ retval, out, err = self.suite_runner.Run(
+ machine,
+ self.label,
+ self.benchmark,
+ self.test_args,
+ self.profiler_args,
+ )
+ self.run_completed = True
+ return Result.CreateFromRun(
+ self._logger,
+ self.log_level,
+ self.label,
+ self.machine,
+ out,
+ err,
+ retval,
+ self.benchmark.test_name,
+ self.benchmark.suite,
+ self.benchmark.cwp_dso,
+ )
- if self.benchmark.perf_args:
- perf_args_list = self.benchmark.perf_args.split(' ')
- perf_args_list = [perf_args_list[0]] + ['-a'] + perf_args_list[1:]
- perf_args = ' '.join(perf_args_list)
- if not perf_args_list[0] in ['record', 'stat']:
- raise SyntaxError('perf_args must start with either record or stat')
- extra_test_args = [
- '--profiler=custom_perf',
- ('--profiler_args=\'perf_options="%s"\'' % perf_args)
- ]
- return ' '.join(extra_test_args)
- else:
- return ''
+ def SetCacheConditions(self, cache_conditions):
+ self.cache_conditions = cache_conditions
- def RunTest(self, machine):
- self.timeline.Record(STATUS_IMAGING)
- if self.owner_thread is not None:
- # In schedv2 mode, do not even call ImageMachine. Machine image is
- # guarenteed.
- pass
- else:
- self.machine_manager.ImageMachine(machine, self.label)
- self.timeline.Record(STATUS_RUNNING)
- retval, out, err = self.suite_runner.Run(
- machine, self.label, self.benchmark, self.test_args, self.profiler_args)
- self.run_completed = True
- return Result.CreateFromRun(self._logger, self.log_level, self.label,
- self.machine, out, err, retval,
- self.benchmark.test_name, self.benchmark.suite,
- self.benchmark.cwp_dso)
+ def logger(self):
+ """Return the logger, only used by unittest.
- def SetCacheConditions(self, cache_conditions):
- self.cache_conditions = cache_conditions
+ Returns:
+ self._logger
+ """
- def logger(self):
- """Return the logger, only used by unittest.
+ return self._logger
- Returns:
- self._logger
- """
+ def __str__(self):
+ """For better debugging."""
- return self._logger
-
- def __str__(self):
- """For better debugging."""
-
- return 'BenchmarkRun[name="{}"]'.format(self.name)
+ return 'BenchmarkRun[name="{}"]'.format(self.name)
class MockBenchmarkRun(BenchmarkRun):
- """Inherited from BenchmarkRun."""
+ """Inherited from BenchmarkRun."""
- def ReadCache(self):
- # Just use the first machine for running the cached version,
- # without locking it.
- self.cache = MockResultsCache()
- self.cache.Init(self.label.chromeos_image, self.label.chromeos_root,
- self.benchmark.test_name, self.iteration, self.test_args,
- self.profiler_args, self.machine_manager, self.machine,
- self.label.board, self.cache_conditions, self._logger,
- self.log_level, self.label, self.share_cache,
- self.benchmark.suite, self.benchmark.show_all_results,
- self.benchmark.run_local, self.benchmark.cwp_dso)
+ def ReadCache(self):
+ # Just use the first machine for running the cached version,
+ # without locking it.
+ self.cache = MockResultsCache()
+ self.cache.Init(
+ self.label.chromeos_image,
+ self.label.chromeos_root,
+ self.benchmark.test_name,
+ self.iteration,
+ self.test_args,
+ self.profiler_args,
+ self.machine_manager,
+ self.machine,
+ self.label.board,
+ self.cache_conditions,
+ self._logger,
+ self.log_level,
+ self.label,
+ self.share_cache,
+ self.benchmark.suite,
+ self.benchmark.show_all_results,
+ self.benchmark.run_local,
+ self.benchmark.cwp_dso,
+ )
- self.result = self.cache.ReadResult()
- self.cache_hit = (self.result is not None)
+ self.result = self.cache.ReadResult()
+ self.cache_hit = self.result is not None
- def RunTest(self, machine):
- """Remove Result.CreateFromRun for testing."""
- self.timeline.Record(STATUS_IMAGING)
- self.machine_manager.ImageMachine(machine, self.label)
- self.timeline.Record(STATUS_RUNNING)
- [retval, out, err] = self.suite_runner.Run(
- machine, self.label, self.benchmark, self.test_args, self.profiler_args)
- self.run_completed = True
- rr = MockResult('logger', self.label, self.log_level, machine)
- rr.out = out
- rr.err = err
- rr.retval = retval
- return rr
+ def RunTest(self, machine):
+ """Remove Result.CreateFromRun for testing."""
+ self.timeline.Record(STATUS_IMAGING)
+ self.machine_manager.ImageMachine(machine, self.label)
+ self.timeline.Record(STATUS_RUNNING)
+ [retval, out, err] = self.suite_runner.Run(
+ machine,
+ self.label,
+ self.benchmark,
+ self.test_args,
+ self.profiler_args,
+ )
+ self.run_completed = True
+ rr = MockResult("logger", self.label, self.log_level, machine)
+ rr.out = out
+ rr.err = err
+ rr.retval = retval
+ return rr
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index 9d815b8..0013e19 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -1,442 +1,545 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Testing of benchmark_run."""
-from __future__ import print_function
import inspect
import unittest
import unittest.mock as mock
-import benchmark_run
-
-from cros_utils import logger
-from suite_runner import MockSuiteRunner
-from suite_runner import SuiteRunner
-from label import MockLabel
from benchmark import Benchmark
-from machine_manager import MockMachineManager
+import benchmark_run
+from cros_utils import logger
+from label import MockLabel
from machine_manager import MachineManager
from machine_manager import MockCrosMachine
-from results_cache import MockResultsCache
+from machine_manager import MockMachineManager
from results_cache import CacheConditions
+from results_cache import MockResultsCache
from results_cache import Result
from results_cache import ResultsCache
+from suite_runner import MockSuiteRunner
+from suite_runner import SuiteRunner
class BenchmarkRunTest(unittest.TestCase):
- """Unit tests for the BenchmarkRun class and all of its methods."""
+ """Unit tests for the BenchmarkRun class and all of its methods."""
- def setUp(self):
- self.status = []
- self.called_ReadCache = None
- self.log_error = []
- self.log_output = []
- self.err_msg = None
- self.test_benchmark = Benchmark(
- 'page_cycler.netsim.top_10', # name
- 'page_cycler.netsim.top_10', # test_name
- '', # test_args
- 1, # iterations
- False, # rm_chroot_tmp
- '', # perf_args
- suite='telemetry_Crosperf') # suite
+ def setUp(self):
+ self.status = []
+ self.called_ReadCache = None
+ self.log_error = []
+ self.log_output = []
+ self.err_msg = None
+ self.test_benchmark = Benchmark(
+ "page_cycler.netsim.top_10", # name
+ "page_cycler.netsim.top_10", # test_name
+ "", # test_args
+ 1, # iterations
+ False, # rm_chroot_tmp
+ "", # perf_args
+ suite="telemetry_Crosperf",
+ ) # suite
- self.test_label = MockLabel(
- 'test1',
- 'build',
- 'image1',
- 'autotest_dir',
- 'debug_dir',
- '/tmp/test_benchmark_run',
- 'x86-alex',
- 'chromeos2-row1-rack4-host9.cros',
- image_args='',
- cache_dir='',
- cache_only=False,
- log_level='average',
- compiler='gcc',
- crosfleet=False)
+ self.test_label = MockLabel(
+ "test1",
+ "build",
+ "image1",
+ "autotest_dir",
+ "debug_dir",
+ "/tmp/test_benchmark_run",
+ "x86-alex",
+ "chromeos2-row1-rack4-host9.cros",
+ image_args="",
+ cache_dir="",
+ cache_only=False,
+ log_level="average",
+ compiler="gcc",
+ crosfleet=False,
+ )
- self.test_cache_conditions = [
- CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
- ]
+ self.test_cache_conditions = [
+ CacheConditions.CACHE_FILE_EXISTS,
+ CacheConditions.CHECKSUMS_MATCH,
+ ]
- self.mock_logger = logger.GetLogger(log_dir='', mock=True)
+ self.mock_logger = logger.GetLogger(log_dir="", mock=True)
- self.mock_machine_manager = mock.Mock(spec=MachineManager)
+ self.mock_machine_manager = mock.Mock(spec=MachineManager)
- def testDryRun(self):
- my_label = MockLabel(
- 'test1',
- 'build',
- 'image1',
- 'autotest_dir',
- 'debug_dir',
- '/tmp/test_benchmark_run',
- 'x86-alex',
- 'chromeos2-row1-rack4-host9.cros',
- image_args='',
- cache_dir='',
- cache_only=False,
- log_level='average',
- compiler='gcc',
- crosfleet=False)
+ def testDryRun(self):
+ my_label = MockLabel(
+ "test1",
+ "build",
+ "image1",
+ "autotest_dir",
+ "debug_dir",
+ "/tmp/test_benchmark_run",
+ "x86-alex",
+ "chromeos2-row1-rack4-host9.cros",
+ image_args="",
+ cache_dir="",
+ cache_only=False,
+ log_level="average",
+ compiler="gcc",
+ crosfleet=False,
+ )
- logging_level = 'average'
- m = MockMachineManager('/tmp/chromeos_root', 0, logging_level, '')
- m.AddMachine('chromeos2-row1-rack4-host9.cros')
- bench = Benchmark(
- 'page_cycler.netsim.top_10', # name
- 'page_cycler.netsim.top_10', # test_name
- '', # test_args
- 1, # iterations
- False, # rm_chroot_tmp
- '', # perf_args
- suite='telemetry_Crosperf') # suite
- dut_conf = {
- 'cooldown_time': 5,
- 'cooldown_temp': 45,
- 'governor': 'powersave',
- 'cpu_usage': 'big_only',
- 'cpu_freq_pct': 80,
- }
- b = benchmark_run.MockBenchmarkRun('test run', bench, my_label, 1, [], m,
- logger.GetLogger(), logging_level, '',
- dut_conf)
- b.cache = MockResultsCache()
- b.suite_runner = MockSuiteRunner()
- b.start()
+ logging_level = "average"
+ m = MockMachineManager("/tmp/chromeos_root", 0, logging_level, "")
+ m.AddMachine("chromeos2-row1-rack4-host9.cros")
+ bench = Benchmark(
+ "page_cycler.netsim.top_10", # name
+ "page_cycler.netsim.top_10", # test_name
+ "", # test_args
+ 1, # iterations
+ False, # rm_chroot_tmp
+ "", # perf_args
+ suite="telemetry_Crosperf",
+ ) # suite
+ dut_conf = {
+ "cooldown_time": 5,
+ "cooldown_temp": 45,
+ "governor": "powersave",
+ "cpu_usage": "big_only",
+ "cpu_freq_pct": 80,
+ }
+ b = benchmark_run.MockBenchmarkRun(
+ "test run",
+ bench,
+ my_label,
+ 1,
+ [],
+ m,
+ logger.GetLogger(),
+ logging_level,
+ "",
+ dut_conf,
+ )
+ b.cache = MockResultsCache()
+ b.suite_runner = MockSuiteRunner()
+ b.start()
- # Make sure the arguments to BenchmarkRun.__init__ have not changed
- # since the last time this test was updated:
- args_list = [
- 'self', 'name', 'benchmark', 'label', 'iteration', 'cache_conditions',
- 'machine_manager', 'logger_to_use', 'log_level', 'share_cache',
- 'dut_config'
- ]
- arg_spec = inspect.getfullargspec(benchmark_run.BenchmarkRun.__init__)
- self.assertEqual(len(arg_spec.args), len(args_list))
- self.assertEqual(arg_spec.args, args_list)
+ # Make sure the arguments to BenchmarkRun.__init__ have not changed
+ # since the last time this test was updated:
+ args_list = [
+ "self",
+ "name",
+ "benchmark",
+ "label",
+ "iteration",
+ "cache_conditions",
+ "machine_manager",
+ "logger_to_use",
+ "log_level",
+ "share_cache",
+ "dut_config",
+ ]
+ arg_spec = inspect.getfullargspec(benchmark_run.BenchmarkRun.__init__)
+ self.assertEqual(len(arg_spec.args), len(args_list))
+ self.assertEqual(arg_spec.args, args_list)
- def test_init(self):
- # Nothing really worth testing here; just field assignments.
- pass
+ def test_init(self):
+ # Nothing really worth testing here; just field assignments.
+ pass
- def test_read_cache(self):
- # Nothing really worth testing here, either.
- pass
+ def test_read_cache(self):
+ # Nothing really worth testing here, either.
+ pass
- def test_run(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ def test_run(self):
+ br = benchmark_run.BenchmarkRun(
+ "test_run",
+ self.test_benchmark,
+ self.test_label,
+ 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager,
+ self.mock_logger,
+ "average",
+ "",
+ {},
+ )
- def MockLogOutput(msg, print_to_console=False):
- """Helper function for test_run."""
- del print_to_console
- self.log_output.append(msg)
+ def MockLogOutput(msg, print_to_console=False):
+ """Helper function for test_run."""
+ del print_to_console
+ self.log_output.append(msg)
- def MockLogError(msg, print_to_console=False):
- """Helper function for test_run."""
- del print_to_console
- self.log_error.append(msg)
+ def MockLogError(msg, print_to_console=False):
+ """Helper function for test_run."""
+ del print_to_console
+ self.log_error.append(msg)
- def MockRecordStatus(msg):
- """Helper function for test_run."""
- self.status.append(msg)
+ def MockRecordStatus(msg):
+ """Helper function for test_run."""
+ self.status.append(msg)
- def FakeReadCache():
- """Helper function for test_run."""
- br.cache = mock.Mock(spec=ResultsCache)
- self.called_ReadCache = True
- return 0
+ def FakeReadCache():
+ """Helper function for test_run."""
+ br.cache = mock.Mock(spec=ResultsCache)
+ self.called_ReadCache = True
+ return 0
- def FakeReadCacheSucceed():
- """Helper function for test_run."""
- br.cache = mock.Mock(spec=ResultsCache)
- br.result = mock.Mock(spec=Result)
- br.result.out = 'result.out stuff'
- br.result.err = 'result.err stuff'
- br.result.retval = 0
- self.called_ReadCache = True
- return 0
+ def FakeReadCacheSucceed():
+ """Helper function for test_run."""
+ br.cache = mock.Mock(spec=ResultsCache)
+ br.result = mock.Mock(spec=Result)
+ br.result.out = "result.out stuff"
+ br.result.err = "result.err stuff"
+ br.result.retval = 0
+ self.called_ReadCache = True
+ return 0
- def FakeReadCacheException():
- """Helper function for test_run."""
- raise RuntimeError('This is an exception test; it is supposed to happen')
+ def FakeReadCacheException():
+ """Helper function for test_run."""
+ raise RuntimeError(
+ "This is an exception test; it is supposed to happen"
+ )
- def FakeAcquireMachine():
- """Helper function for test_run."""
- mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros',
- 'chromeos', 'average')
- return mock_machine
+ def FakeAcquireMachine():
+ """Helper function for test_run."""
+ mock_machine = MockCrosMachine(
+ "chromeos1-row3-rack5-host7.cros", "chromeos", "average"
+ )
+ return mock_machine
- def FakeRunTest(_machine):
- """Helper function for test_run."""
- mock_result = mock.Mock(spec=Result)
- mock_result.retval = 0
- return mock_result
+ def FakeRunTest(_machine):
+ """Helper function for test_run."""
+ mock_result = mock.Mock(spec=Result)
+ mock_result.retval = 0
+ return mock_result
- def FakeRunTestFail(_machine):
- """Helper function for test_run."""
- mock_result = mock.Mock(spec=Result)
- mock_result.retval = 1
- return mock_result
+ def FakeRunTestFail(_machine):
+ """Helper function for test_run."""
+ mock_result = mock.Mock(spec=Result)
+ mock_result.retval = 1
+ return mock_result
- def ResetTestValues():
- """Helper function for test_run."""
- self.log_output = []
- self.log_error = []
- self.status = []
- br.result = None
- self.called_ReadCache = False
+ def ResetTestValues():
+ """Helper function for test_run."""
+ self.log_output = []
+ self.log_error = []
+ self.status = []
+ br.result = None
+ self.called_ReadCache = False
- # Assign all the fake functions to the appropriate objects.
- br.logger().LogOutput = MockLogOutput
- br.logger().LogError = MockLogError
- br.timeline.Record = MockRecordStatus
- br.ReadCache = FakeReadCache
- br.RunTest = FakeRunTest
- br.AcquireMachine = FakeAcquireMachine
+ # Assign all the fake functions to the appropriate objects.
+ br.logger().LogOutput = MockLogOutput
+ br.logger().LogError = MockLogError
+ br.timeline.Record = MockRecordStatus
+ br.ReadCache = FakeReadCache
+ br.RunTest = FakeRunTest
+ br.AcquireMachine = FakeAcquireMachine
- # First test: No cache hit, all goes well.
- ResetTestValues()
- br.run()
- self.assertTrue(self.called_ReadCache)
- self.assertEqual(self.log_output, [
- 'test_run: No cache hit.',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'
- ])
- self.assertEqual(len(self.log_error), 0)
- self.assertEqual(self.status, ['WAITING', 'SUCCEEDED'])
+ # First test: No cache hit, all goes well.
+ ResetTestValues()
+ br.run()
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(
+ self.log_output,
+ [
+ "test_run: No cache hit.",
+ "Releasing machine: chromeos1-row3-rack5-host7.cros",
+ "Released machine: chromeos1-row3-rack5-host7.cros",
+ ],
+ )
+ self.assertEqual(len(self.log_error), 0)
+ self.assertEqual(self.status, ["WAITING", "SUCCEEDED"])
- # Second test: No cached result found; test run was "terminated" for some
- # reason.
- ResetTestValues()
- br.terminated = True
- br.run()
- self.assertTrue(self.called_ReadCache)
- self.assertEqual(self.log_output, [
- 'test_run: No cache hit.',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'
- ])
- self.assertEqual(len(self.log_error), 0)
- self.assertEqual(self.status, ['WAITING'])
+ # Second test: No cached result found; test run was "terminated" for some
+ # reason.
+ ResetTestValues()
+ br.terminated = True
+ br.run()
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(
+ self.log_output,
+ [
+ "test_run: No cache hit.",
+ "Releasing machine: chromeos1-row3-rack5-host7.cros",
+ "Released machine: chromeos1-row3-rack5-host7.cros",
+ ],
+ )
+ self.assertEqual(len(self.log_error), 0)
+ self.assertEqual(self.status, ["WAITING"])
- # Third test. No cached result found; RunTest failed for some reason.
- ResetTestValues()
- br.terminated = False
- br.RunTest = FakeRunTestFail
- br.run()
- self.assertTrue(self.called_ReadCache)
- self.assertEqual(self.log_output, [
- 'test_run: No cache hit.',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'
- ])
- self.assertEqual(len(self.log_error), 0)
- self.assertEqual(self.status, ['WAITING', 'FAILED'])
+ # Third test. No cached result found; RunTest failed for some reason.
+ ResetTestValues()
+ br.terminated = False
+ br.RunTest = FakeRunTestFail
+ br.run()
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(
+ self.log_output,
+ [
+ "test_run: No cache hit.",
+ "Releasing machine: chromeos1-row3-rack5-host7.cros",
+ "Released machine: chromeos1-row3-rack5-host7.cros",
+ ],
+ )
+ self.assertEqual(len(self.log_error), 0)
+ self.assertEqual(self.status, ["WAITING", "FAILED"])
- # Fourth test: ReadCache found a cached result.
- ResetTestValues()
- br.RunTest = FakeRunTest
- br.ReadCache = FakeReadCacheSucceed
- br.run()
- self.assertTrue(self.called_ReadCache)
- self.assertEqual(self.log_output, [
- 'test_run: Cache hit.', 'result.out stuff',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'
- ])
- self.assertEqual(self.log_error, ['result.err stuff'])
- self.assertEqual(self.status, ['SUCCEEDED'])
+ # Fourth test: ReadCache found a cached result.
+ ResetTestValues()
+ br.RunTest = FakeRunTest
+ br.ReadCache = FakeReadCacheSucceed
+ br.run()
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(
+ self.log_output,
+ [
+ "test_run: Cache hit.",
+ "result.out stuff",
+ "Releasing machine: chromeos1-row3-rack5-host7.cros",
+ "Released machine: chromeos1-row3-rack5-host7.cros",
+ ],
+ )
+ self.assertEqual(self.log_error, ["result.err stuff"])
+ self.assertEqual(self.status, ["SUCCEEDED"])
- # Fifth test: ReadCache generates an exception; does the try/finally block
- # work?
- ResetTestValues()
- br.ReadCache = FakeReadCacheException
- br.machine = FakeAcquireMachine()
- br.run()
- self.assertEqual(self.log_error, [
- "Benchmark run: 'test_run' failed: This is an exception test; it is "
- 'supposed to happen'
- ])
- self.assertEqual(self.status, ['FAILED'])
+ # Fifth test: ReadCache generates an exception; does the try/finally block
+ # work?
+ ResetTestValues()
+ br.ReadCache = FakeReadCacheException
+ br.machine = FakeAcquireMachine()
+ br.run()
+ self.assertEqual(
+ self.log_error,
+ [
+ "Benchmark run: 'test_run' failed: This is an exception test; it is "
+ "supposed to happen"
+ ],
+ )
+ self.assertEqual(self.status, ["FAILED"])
- def test_terminate_pass(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ def test_terminate_pass(self):
+ br = benchmark_run.BenchmarkRun(
+ "test_run",
+ self.test_benchmark,
+ self.test_label,
+ 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager,
+ self.mock_logger,
+ "average",
+ "",
+ {},
+ )
- def GetLastEventPassed():
- """Helper function for test_terminate_pass"""
- return benchmark_run.STATUS_SUCCEEDED
+ def GetLastEventPassed():
+ """Helper function for test_terminate_pass"""
+ return benchmark_run.STATUS_SUCCEEDED
- def RecordStub(status):
- """Helper function for test_terminate_pass"""
- self.status = status
+ def RecordStub(status):
+ """Helper function for test_terminate_pass"""
+ self.status = status
- self.status = benchmark_run.STATUS_SUCCEEDED
- self.assertFalse(br.terminated)
- self.assertFalse(br.suite_runner.CommandTerminator().IsTerminated())
+ self.status = benchmark_run.STATUS_SUCCEEDED
+ self.assertFalse(br.terminated)
+ self.assertFalse(br.suite_runner.CommandTerminator().IsTerminated())
- br.timeline.GetLastEvent = GetLastEventPassed
- br.timeline.Record = RecordStub
+ br.timeline.GetLastEvent = GetLastEventPassed
+ br.timeline.Record = RecordStub
- br.Terminate()
+ br.Terminate()
- self.assertTrue(br.terminated)
- self.assertTrue(br.suite_runner.CommandTerminator().IsTerminated())
- self.assertEqual(self.status, benchmark_run.STATUS_FAILED)
+ self.assertTrue(br.terminated)
+ self.assertTrue(br.suite_runner.CommandTerminator().IsTerminated())
+ self.assertEqual(self.status, benchmark_run.STATUS_FAILED)
- def test_terminate_fail(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ def test_terminate_fail(self):
+ br = benchmark_run.BenchmarkRun(
+ "test_run",
+ self.test_benchmark,
+ self.test_label,
+ 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager,
+ self.mock_logger,
+ "average",
+ "",
+ {},
+ )
- def GetLastEventFailed():
- """Helper function for test_terminate_fail"""
- return benchmark_run.STATUS_FAILED
+ def GetLastEventFailed():
+ """Helper function for test_terminate_fail"""
+ return benchmark_run.STATUS_FAILED
- def RecordStub(status):
- """Helper function for test_terminate_fail"""
- self.status = status
+ def RecordStub(status):
+ """Helper function for test_terminate_fail"""
+ self.status = status
- self.status = benchmark_run.STATUS_SUCCEEDED
- self.assertFalse(br.terminated)
- self.assertFalse(br.suite_runner.CommandTerminator().IsTerminated())
+ self.status = benchmark_run.STATUS_SUCCEEDED
+ self.assertFalse(br.terminated)
+ self.assertFalse(br.suite_runner.CommandTerminator().IsTerminated())
- br.timeline.GetLastEvent = GetLastEventFailed
- br.timeline.Record = RecordStub
+ br.timeline.GetLastEvent = GetLastEventFailed
+ br.timeline.Record = RecordStub
- br.Terminate()
+ br.Terminate()
- self.assertTrue(br.terminated)
- self.assertTrue(br.suite_runner.CommandTerminator().IsTerminated())
- self.assertEqual(self.status, benchmark_run.STATUS_SUCCEEDED)
+ self.assertTrue(br.terminated)
+ self.assertTrue(br.suite_runner.CommandTerminator().IsTerminated())
+ self.assertEqual(self.status, benchmark_run.STATUS_SUCCEEDED)
- def test_acquire_machine(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ def test_acquire_machine(self):
+ br = benchmark_run.BenchmarkRun(
+ "test_run",
+ self.test_benchmark,
+ self.test_label,
+ 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager,
+ self.mock_logger,
+ "average",
+ "",
+ {},
+ )
- br.terminated = True
- self.assertRaises(Exception, br.AcquireMachine)
+ br.terminated = True
+ self.assertRaises(Exception, br.AcquireMachine)
- br.terminated = False
- mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros',
- 'chromeos', 'average')
- self.mock_machine_manager.AcquireMachine.return_value = mock_machine
+ br.terminated = False
+ mock_machine = MockCrosMachine(
+ "chromeos1-row3-rack5-host7.cros", "chromeos", "average"
+ )
+ self.mock_machine_manager.AcquireMachine.return_value = mock_machine
- machine = br.AcquireMachine()
- self.assertEqual(machine.name, 'chromeos1-row3-rack5-host7.cros')
+ machine = br.AcquireMachine()
+ self.assertEqual(machine.name, "chromeos1-row3-rack5-host7.cros")
- def test_get_extra_autotest_args(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ def test_get_extra_autotest_args(self):
+ br = benchmark_run.BenchmarkRun(
+ "test_run",
+ self.test_benchmark,
+ self.test_label,
+ 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager,
+ self.mock_logger,
+ "average",
+ "",
+ {},
+ )
- def MockLogError(err_msg):
- """Helper function for test_get_extra_autotest_args"""
- self.err_msg = err_msg
+ def MockLogError(err_msg):
+ """Helper function for test_get_extra_autotest_args"""
+ self.err_msg = err_msg
- self.mock_logger.LogError = MockLogError
+ self.mock_logger.LogError = MockLogError
- result = br.GetExtraAutotestArgs()
- self.assertEqual(result, '')
+ result = br.GetExtraAutotestArgs()
+ self.assertEqual(result, "")
- self.test_benchmark.perf_args = 'record -e cycles'
- result = br.GetExtraAutotestArgs()
- self.assertEqual(
- result,
- '--profiler=custom_perf --profiler_args=\'perf_options="record -a -e '
- 'cycles"\'')
+ self.test_benchmark.perf_args = "record -e cycles"
+ result = br.GetExtraAutotestArgs()
+ self.assertEqual(
+ result,
+ "--profiler=custom_perf --profiler_args='perf_options=\"record -a -e "
+ "cycles\"'",
+ )
- self.test_benchmark.perf_args = 'record -e cycles'
- self.test_benchmark.suite = 'test_that'
- result = br.GetExtraAutotestArgs()
- self.assertEqual(result, '')
- self.assertEqual(self.err_msg,
- 'Non-telemetry benchmark does not support profiler.')
+ self.test_benchmark.perf_args = "record -e cycles"
+ self.test_benchmark.suite = "test_that"
+ result = br.GetExtraAutotestArgs()
+ self.assertEqual(result, "")
+ self.assertEqual(
+ self.err_msg, "Non-telemetry benchmark does not support profiler."
+ )
- self.test_benchmark.perf_args = 'junk args'
- self.test_benchmark.suite = 'telemetry_Crosperf'
- self.assertRaises(Exception, br.GetExtraAutotestArgs)
+ self.test_benchmark.perf_args = "junk args"
+ self.test_benchmark.suite = "telemetry_Crosperf"
+ self.assertRaises(Exception, br.GetExtraAutotestArgs)
- @mock.patch.object(SuiteRunner, 'Run')
- @mock.patch.object(Result, 'CreateFromRun')
- def test_run_test(self, mock_result, mock_runner):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ @mock.patch.object(SuiteRunner, "Run")
+ @mock.patch.object(Result, "CreateFromRun")
+ def test_run_test(self, mock_result, mock_runner):
+ br = benchmark_run.BenchmarkRun(
+ "test_run",
+ self.test_benchmark,
+ self.test_label,
+ 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager,
+ self.mock_logger,
+ "average",
+ "",
+ {},
+ )
- self.status = []
+ self.status = []
- def MockRecord(status):
- self.status.append(status)
+ def MockRecord(status):
+ self.status.append(status)
- br.timeline.Record = MockRecord
- mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros',
- 'chromeos', 'average')
- mock_runner.return_value = [0, "{'Score':100}", '']
+ br.timeline.Record = MockRecord
+ mock_machine = MockCrosMachine(
+ "chromeos1-row3-rack5-host7.cros", "chromeos", "average"
+ )
+ mock_runner.return_value = [0, "{'Score':100}", ""]
- br.RunTest(mock_machine)
+ br.RunTest(mock_machine)
- self.assertTrue(br.run_completed)
- self.assertEqual(
- self.status,
- [benchmark_run.STATUS_IMAGING, benchmark_run.STATUS_RUNNING])
+ self.assertTrue(br.run_completed)
+ self.assertEqual(
+ self.status,
+ [benchmark_run.STATUS_IMAGING, benchmark_run.STATUS_RUNNING],
+ )
- self.assertEqual(br.machine_manager.ImageMachine.call_count, 1)
- br.machine_manager.ImageMachine.assert_called_with(mock_machine,
- self.test_label)
- self.assertEqual(mock_runner.call_count, 1)
- mock_runner.assert_called_with(mock_machine, br.label, br.benchmark, '',
- br.profiler_args)
+ self.assertEqual(br.machine_manager.ImageMachine.call_count, 1)
+ br.machine_manager.ImageMachine.assert_called_with(
+ mock_machine, self.test_label
+ )
+ self.assertEqual(mock_runner.call_count, 1)
+ mock_runner.assert_called_with(
+ mock_machine, br.label, br.benchmark, "", br.profiler_args
+ )
- self.assertEqual(mock_result.call_count, 1)
- mock_result.assert_called_with(self.mock_logger, 'average', self.test_label,
- None, "{'Score':100}", '', 0,
- 'page_cycler.netsim.top_10',
- 'telemetry_Crosperf', '')
+ self.assertEqual(mock_result.call_count, 1)
+ mock_result.assert_called_with(
+ self.mock_logger,
+ "average",
+ self.test_label,
+ None,
+ "{'Score':100}",
+ "",
+ 0,
+ "page_cycler.netsim.top_10",
+ "telemetry_Crosperf",
+ "",
+ )
- def test_set_cache_conditions(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ def test_set_cache_conditions(self):
+ br = benchmark_run.BenchmarkRun(
+ "test_run",
+ self.test_benchmark,
+ self.test_label,
+ 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager,
+ self.mock_logger,
+ "average",
+ "",
+ {},
+ )
- phony_cache_conditions = [123, 456, True, False]
+ phony_cache_conditions = [123, 456, True, False]
- self.assertEqual(br.cache_conditions, self.test_cache_conditions)
+ self.assertEqual(br.cache_conditions, self.test_cache_conditions)
- br.SetCacheConditions(phony_cache_conditions)
- self.assertEqual(br.cache_conditions, phony_cache_conditions)
+ br.SetCacheConditions(phony_cache_conditions)
+ self.assertEqual(br.cache_conditions, phony_cache_conditions)
- br.SetCacheConditions(self.test_cache_conditions)
- self.assertEqual(br.cache_conditions, self.test_cache_conditions)
+ br.SetCacheConditions(self.test_cache_conditions)
+ self.assertEqual(br.cache_conditions, self.test_cache_conditions)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/crosperf/benchmark_unittest.py b/crosperf/benchmark_unittest.py
index 70508b1..bb23bdb 100755
--- a/crosperf/benchmark_unittest.py
+++ b/crosperf/benchmark_unittest.py
@@ -1,13 +1,12 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the Crosperf Benchmark class."""
-from __future__ import print_function
import inspect
import unittest
@@ -16,57 +15,70 @@
class BenchmarkTestCase(unittest.TestCase):
- """Individual tests for the Benchmark class."""
+ """Individual tests for the Benchmark class."""
- def test_benchmark(self):
- # Test creating a benchmark with all the fields filled out.
- b1 = Benchmark(
- 'b1_test', # name
- 'octane', # test_name
- '', # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- 'record -e cycles', # perf_args
- 'telemetry_Crosperf', # suite
- True) # show_all_results
- self.assertTrue(b1.suite, 'telemetry_Crosperf')
+ def test_benchmark(self):
+ # Test creating a benchmark with all the fields filled out.
+ b1 = Benchmark(
+ "b1_test", # name
+ "octane", # test_name
+ "", # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ "record -e cycles", # perf_args
+ "telemetry_Crosperf", # suite
+ True,
+ ) # show_all_results
+ self.assertTrue(b1.suite, "telemetry_Crosperf")
- # Test creating a benchmark field with default fields left out.
- b2 = Benchmark(
- 'b2_test', # name
- 'octane', # test_name
- '', # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- 'record -e cycles') # perf_args
- self.assertEqual(b2.suite, '')
- self.assertFalse(b2.show_all_results)
+ # Test creating a benchmark field with default fields left out.
+ b2 = Benchmark(
+ "b2_test", # name
+ "octane", # test_name
+ "", # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ "record -e cycles",
+ ) # perf_args
+ self.assertEqual(b2.suite, "")
+ self.assertFalse(b2.show_all_results)
- # Test explicitly creating 'suite=Telemetry' and 'show_all_results=False"
- # and see what happens.
- b3 = Benchmark(
- 'b3_test', # name
- 'octane', # test_name
- '', # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- 'record -e cycles', # perf_args
- 'telemetry', # suite
- False) # show_all_results
- self.assertTrue(b3.show_all_results)
+        # Test explicitly creating 'suite=Telemetry' and 'show_all_results=False'
+ # and see what happens.
+ b3 = Benchmark(
+ "b3_test", # name
+ "octane", # test_name
+ "", # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ "record -e cycles", # perf_args
+ "telemetry", # suite
+ False,
+ ) # show_all_results
+ self.assertTrue(b3.show_all_results)
- # Check to see if the args to Benchmark have changed since the last time
- # this test was updated.
- args_list = [
- 'self', 'name', 'test_name', 'test_args', 'iterations', 'rm_chroot_tmp',
- 'perf_args', 'suite', 'show_all_results', 'retries', 'run_local',
- 'cwp_dso', 'weight'
- ]
- arg_spec = inspect.getfullargspec(Benchmark.__init__)
- self.assertEqual(len(arg_spec.args), len(args_list))
- for arg in args_list:
- self.assertIn(arg, arg_spec.args)
+ # Check to see if the args to Benchmark have changed since the last time
+ # this test was updated.
+ args_list = [
+ "self",
+ "name",
+ "test_name",
+ "test_args",
+ "iterations",
+ "rm_chroot_tmp",
+ "perf_args",
+ "suite",
+ "show_all_results",
+ "retries",
+ "run_local",
+ "cwp_dso",
+ "weight",
+ ]
+ arg_spec = inspect.getfullargspec(Benchmark.__init__)
+ self.assertEqual(len(arg_spec.args), len(args_list))
+ for arg in args_list:
+ self.assertIn(arg, arg_spec.args)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/crosperf/column_chart.py b/crosperf/column_chart.py
index 400979e..6ed99bf 100644
--- a/crosperf/column_chart.py
+++ b/crosperf/column_chart.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -7,46 +7,46 @@
class ColumnChart(object):
- """class to draw column chart."""
+ """class to draw column chart."""
- def __init__(self, title, width, height):
- self.title = title
- self.chart_div = ''.join(t for t in title if t.isalnum())
- self.width = width
- self.height = height
- self.columns = []
- self.rows = []
- self.series = []
+ def __init__(self, title, width, height):
+ self.title = title
+ self.chart_div = "".join(t for t in title if t.isalnum())
+ self.width = width
+ self.height = height
+ self.columns = []
+ self.rows = []
+ self.series = []
- def AddSeries(self, column_name, series_type, color):
- for i in range(len(self.columns)):
- if column_name == self.columns[i][1]:
- self.series.append((i - 1, series_type, color))
- break
+ def AddSeries(self, column_name, series_type, color):
+ for i in range(len(self.columns)):
+ if column_name == self.columns[i][1]:
+ self.series.append((i - 1, series_type, color))
+ break
- def AddColumn(self, name, column_type):
- self.columns.append((column_type, name))
+ def AddColumn(self, name, column_type):
+ self.columns.append((column_type, name))
- def AddRow(self, row):
- self.rows.append(row)
+ def AddRow(self, row):
+ self.rows.append(row)
- def GetJavascript(self):
- res = 'var data = new google.visualization.DataTable();\n'
- for column in self.columns:
- res += "data.addColumn('%s', '%s');\n" % column
- res += 'data.addRows(%s);\n' % len(self.rows)
- for row in range(len(self.rows)):
- for column in range(len(self.columns)):
- val = self.rows[row][column]
- if isinstance(val, str):
- val = "'%s'" % val
- res += 'data.setValue(%s, %s, %s);\n' % (row, column, val)
+ def GetJavascript(self):
+ res = "var data = new google.visualization.DataTable();\n"
+ for column in self.columns:
+ res += "data.addColumn('%s', '%s');\n" % column
+ res += "data.addRows(%s);\n" % len(self.rows)
+ for row in range(len(self.rows)):
+ for column in range(len(self.columns)):
+ val = self.rows[row][column]
+ if isinstance(val, str):
+ val = "'%s'" % val
+ res += "data.setValue(%s, %s, %s);\n" % (row, column, val)
- series_javascript = ''
- for series in self.series:
- series_javascript += "%s: {type: '%s', color: '%s'}, " % series
+ series_javascript = ""
+ for series in self.series:
+ series_javascript += "%s: {type: '%s', color: '%s'}, " % series
- chart_add_javascript = """
+ chart_add_javascript = """
var chart_%s = new google.visualization.ComboChart(
document.getElementById('%s'));
chart_%s.draw(data, {width: %s, height: %s, title: '%s', legend: 'none',
@@ -54,10 +54,16 @@
vAxis: {minValue: 0}})
"""
- res += chart_add_javascript % (self.chart_div, self.chart_div,
- self.chart_div, self.width, self.height,
- self.title, series_javascript)
- return res
+ res += chart_add_javascript % (
+ self.chart_div,
+ self.chart_div,
+ self.chart_div,
+ self.width,
+ self.height,
+ self.title,
+ series_javascript,
+ )
+ return res
- def GetDiv(self):
- return "<div id='%s' class='chart'></div>" % self.chart_div
+ def GetDiv(self):
+ return "<div id='%s' class='chart'></div>" % self.chart_div
diff --git a/crosperf/compare_machines.py b/crosperf/compare_machines.py
index c73f875..756753a 100644
--- a/crosperf/compare_machines.py
+++ b/crosperf/compare_machines.py
@@ -1,67 +1,71 @@
# -*- coding: utf-8 -*-
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to compare two machines."""
-from __future__ import print_function
+import argparse
import os.path
import sys
-import argparse
from machine_manager import CrosMachine
def PrintUsage(msg):
- print(msg)
- print('Usage: ')
- print('\n compare_machines.py --chromeos_root=/path/to/chroot/ '
- 'machine1 machine2 ...')
+ print(msg)
+ print("Usage: ")
+ print(
+ "\n compare_machines.py --chromeos_root=/path/to/chroot/ "
+ "machine1 machine2 ..."
+ )
def Main(argv):
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--chromeos_root',
- default='/path/to/chromeos',
- dest='chromeos_root',
- help='ChromeOS root checkout directory')
- parser.add_argument('remotes', nargs=argparse.REMAINDER)
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--chromeos_root",
+ default="/path/to/chromeos",
+ dest="chromeos_root",
+ help="ChromeOS root checkout directory",
+ )
+ parser.add_argument("remotes", nargs=argparse.REMAINDER)
- options = parser.parse_args(argv)
+ options = parser.parse_args(argv)
- machine_list = options.remotes
- if len(machine_list) < 2:
- PrintUsage('ERROR: Must specify at least two machines.')
- return 1
- elif not os.path.exists(options.chromeos_root):
- PrintUsage('Error: chromeos_root does not exist %s' % options.chromeos_root)
- return 1
+ machine_list = options.remotes
+ if len(machine_list) < 2:
+ PrintUsage("ERROR: Must specify at least two machines.")
+ return 1
+ elif not os.path.exists(options.chromeos_root):
+ PrintUsage(
+ "Error: chromeos_root does not exist %s" % options.chromeos_root
+ )
+ return 1
- chroot = options.chromeos_root
- cros_machines = []
- test_machine_checksum = None
- for m in machine_list:
- cm = CrosMachine(m, chroot, 'average')
- cros_machines = cros_machines + [cm]
- test_machine_checksum = cm.machine_checksum
+ chroot = options.chromeos_root
+ cros_machines = []
+ test_machine_checksum = None
+ for m in machine_list:
+ cm = CrosMachine(m, chroot, "average")
+ cros_machines = cros_machines + [cm]
+ test_machine_checksum = cm.machine_checksum
- ret = 0
- for cm in cros_machines:
- print('checksum for %s : %s' % (cm.name, cm.machine_checksum))
- if cm.machine_checksum != test_machine_checksum:
- ret = 1
- print('Machine checksums do not all match')
+ ret = 0
+ for cm in cros_machines:
+ print("checksum for %s : %s" % (cm.name, cm.machine_checksum))
+ if cm.machine_checksum != test_machine_checksum:
+ ret = 1
+ print("Machine checksums do not all match")
- if ret == 0:
- print('Machines all match.')
+ if ret == 0:
+ print("Machines all match.")
- return ret
+ return ret
-if __name__ == '__main__':
- retval = Main(sys.argv[1:])
- sys.exit(retval)
+if __name__ == "__main__":
+ retval = Main(sys.argv[1:])
+ sys.exit(retval)
diff --git a/crosperf/config.py b/crosperf/config.py
index 61ad9c1..c2a7fe5 100644
--- a/crosperf/config.py
+++ b/crosperf/config.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -8,8 +8,8 @@
def GetConfig(key):
- return config.get(key)
+ return config.get(key)
def AddConfig(key, value):
- config[key] = value
+ config[key] = value
diff --git a/crosperf/config_unittest.py b/crosperf/config_unittest.py
index 208f44d..fdff7ea 100755
--- a/crosperf/config_unittest.py
+++ b/crosperf/config_unittest.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for config.py"""
-from __future__ import print_function
import unittest
@@ -14,40 +13,40 @@
class ConfigTestCase(unittest.TestCase):
- """Class for the config unit tests."""
+ """Class for the config unit tests."""
- def test_config(self):
- # Verify that config exists, that it's a dictionary, and that it's
- # empty.
- self.assertTrue(isinstance(config.config, dict))
- self.assertEqual(len(config.config), 0)
+ def test_config(self):
+ # Verify that config exists, that it's a dictionary, and that it's
+ # empty.
+ self.assertTrue(isinstance(config.config, dict))
+ self.assertEqual(len(config.config), 0)
- # Verify that attempting to get a non-existant key out of the
- # dictionary returns None.
- self.assertIsNone(config.GetConfig('rabbit'))
- self.assertIsNone(config.GetConfig('key1'))
+        # Verify that attempting to get a non-existent key out of the
+ # dictionary returns None.
+ self.assertIsNone(config.GetConfig("rabbit"))
+ self.assertIsNone(config.GetConfig("key1"))
- config.AddConfig('key1', 16)
- config.AddConfig('key2', 32)
- config.AddConfig('key3', 'third value')
+ config.AddConfig("key1", 16)
+ config.AddConfig("key2", 32)
+ config.AddConfig("key3", "third value")
- # Verify that after 3 calls to AddConfig we have 3 values in the
- # dictionary.
- self.assertEqual(len(config.config), 3)
+ # Verify that after 3 calls to AddConfig we have 3 values in the
+ # dictionary.
+ self.assertEqual(len(config.config), 3)
- # Verify that GetConfig works and gets the expected values.
- self.assertIs(config.GetConfig('key2'), 32)
- self.assertIs(config.GetConfig('key3'), 'third value')
- self.assertIs(config.GetConfig('key1'), 16)
+ # Verify that GetConfig works and gets the expected values.
+ self.assertIs(config.GetConfig("key2"), 32)
+ self.assertIs(config.GetConfig("key3"), "third value")
+ self.assertIs(config.GetConfig("key1"), 16)
- # Re-set config.
- config.config.clear()
+ # Re-set config.
+ config.config.clear()
- # Verify that config exists, that it's a dictionary, and that it's
- # empty.
- self.assertTrue(isinstance(config.config, dict))
- self.assertEqual(len(config.config), 0)
+ # Verify that config exists, that it's a dictionary, and that it's
+ # empty.
+ self.assertTrue(isinstance(config.config, dict))
+ self.assertEqual(len(config.config), 0)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/crosperf/crosperf b/crosperf/crosperf
index c98f2dd..9a7bde0 100755
--- a/crosperf/crosperf
+++ b/crosperf/crosperf
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/crosperf/crosperf.py b/crosperf/crosperf.py
index f195b13..aace2c8 100755
--- a/crosperf/crosperf.py
+++ b/crosperf/crosperf.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The driver script for running performance benchmarks on ChromeOS."""
-from __future__ import print_function
import argparse
import atexit
@@ -14,139 +13,145 @@
import signal
import sys
-from experiment_runner import ExperimentRunner
-from experiment_runner import MockExperimentRunner
-from experiment_factory import ExperimentFactory
-from experiment_file import ExperimentFile
-from settings_factory import GlobalSettings
-
# This import causes pylint to warn about "No name 'logger' in module
# 'cros_utils'". I do not understand why. The import works fine in python.
# pylint: disable=no-name-in-module
from cros_utils import logger
-
+from experiment_factory import ExperimentFactory
+from experiment_file import ExperimentFile
+from experiment_runner import ExperimentRunner
+from experiment_runner import MockExperimentRunner
+from settings_factory import GlobalSettings
import test_flag
+
HAS_FAILURE = 1
ALL_FAILED = 2
def SetupParserOptions(parser):
- """Add all options to the parser."""
- parser.add_argument(
- '--dry_run',
- dest='dry_run',
- help=('Parse the experiment file and '
- 'show what will be done'),
- action='store_true',
- default=False)
- # Allow each of the global fields to be overridden by passing in
- # options. Add each global field as an option.
- option_settings = GlobalSettings('')
- for field_name in option_settings.fields:
- field = option_settings.fields[field_name]
+ """Add all options to the parser."""
parser.add_argument(
- '--%s' % field.name,
- dest=field.name,
- help=field.description,
- action='store')
+ "--dry_run",
+ dest="dry_run",
+ help=("Parse the experiment file and " "show what will be done"),
+ action="store_true",
+ default=False,
+ )
+ # Allow each of the global fields to be overridden by passing in
+ # options. Add each global field as an option.
+ option_settings = GlobalSettings("")
+ for field_name in option_settings.fields:
+ field = option_settings.fields[field_name]
+ parser.add_argument(
+ "--%s" % field.name,
+ dest=field.name,
+ help=field.description,
+ action="store",
+ )
def ConvertOptionsToSettings(options):
- """Convert options passed in into global settings."""
- option_settings = GlobalSettings('option_settings')
- for option_name in options.__dict__:
- if (options.__dict__[option_name] is not None and
- option_name in option_settings.fields):
- option_settings.SetField(option_name, options.__dict__[option_name])
- return option_settings
+ """Convert options passed in into global settings."""
+ option_settings = GlobalSettings("option_settings")
+ for option_name in options.__dict__:
+ if (
+ options.__dict__[option_name] is not None
+ and option_name in option_settings.fields
+ ):
+ option_settings.SetField(option_name, options.__dict__[option_name])
+ return option_settings
def Cleanup(experiment):
- """Handler function which is registered to the atexit handler."""
- experiment.Cleanup()
+ """Handler function which is registered to the atexit handler."""
+ experiment.Cleanup()
def CallExitHandler(signum, _):
- """Signal handler that transforms a signal into a call to exit.
+ """Signal handler that transforms a signal into a call to exit.
- This is useful because functionality registered by "atexit" will
- be called. It also means you can "catch" the signal by catching
- the SystemExit exception.
- """
- sys.exit(128 + signum)
+ This is useful because functionality registered by "atexit" will
+ be called. It also means you can "catch" the signal by catching
+ the SystemExit exception.
+ """
+ sys.exit(128 + signum)
def RunCrosperf(argv):
- parser = argparse.ArgumentParser()
+ parser = argparse.ArgumentParser()
- parser.add_argument(
- '--noschedv2',
- dest='noschedv2',
- default=False,
- action='store_true',
- help=('Do not use new scheduler. '
- 'Use original scheduler instead.'))
- parser.add_argument(
- '-l',
- '--log_dir',
- dest='log_dir',
- default='',
- help='The log_dir, default is under <crosperf_logs>/logs')
+ parser.add_argument(
+ "--noschedv2",
+ dest="noschedv2",
+ default=False,
+ action="store_true",
+ help=("Do not use new scheduler. " "Use original scheduler instead."),
+ )
+ parser.add_argument(
+ "-l",
+ "--log_dir",
+ dest="log_dir",
+ default="",
+ help="The log_dir, default is under <crosperf_logs>/logs",
+ )
- SetupParserOptions(parser)
- options, args = parser.parse_known_args(argv)
+ SetupParserOptions(parser)
+ options, args = parser.parse_known_args(argv)
- # Convert the relevant options that are passed in into a settings
- # object which will override settings in the experiment file.
- option_settings = ConvertOptionsToSettings(options)
- log_dir = os.path.abspath(os.path.expanduser(options.log_dir))
- logger.GetLogger(log_dir)
+ # Convert the relevant options that are passed in into a settings
+ # object which will override settings in the experiment file.
+ option_settings = ConvertOptionsToSettings(options)
+ log_dir = os.path.abspath(os.path.expanduser(options.log_dir))
+ logger.GetLogger(log_dir)
- if len(args) == 2:
- experiment_filename = args[1]
- else:
- parser.error('Invalid number arguments.')
+ if len(args) == 2:
+ experiment_filename = args[1]
+ else:
+ parser.error("Invalid number arguments.")
- working_directory = os.getcwd()
- if options.dry_run:
- test_flag.SetTestMode(True)
+ working_directory = os.getcwd()
+ if options.dry_run:
+ test_flag.SetTestMode(True)
- experiment_file = ExperimentFile(
- open(experiment_filename, encoding='utf-8'), option_settings)
- if not experiment_file.GetGlobalSettings().GetField('name'):
- experiment_name = os.path.basename(experiment_filename)
- experiment_file.GetGlobalSettings().SetField('name', experiment_name)
- experiment = ExperimentFactory().GetExperiment(experiment_file,
- working_directory, log_dir)
+ experiment_file = ExperimentFile(
+ open(experiment_filename, encoding="utf-8"), option_settings
+ )
+ if not experiment_file.GetGlobalSettings().GetField("name"):
+ experiment_name = os.path.basename(experiment_filename)
+ experiment_file.GetGlobalSettings().SetField("name", experiment_name)
+ experiment = ExperimentFactory().GetExperiment(
+ experiment_file, working_directory, log_dir
+ )
- json_report = experiment_file.GetGlobalSettings().GetField('json_report')
+ json_report = experiment_file.GetGlobalSettings().GetField("json_report")
- signal.signal(signal.SIGTERM, CallExitHandler)
- atexit.register(Cleanup, experiment)
+ signal.signal(signal.SIGTERM, CallExitHandler)
+ atexit.register(Cleanup, experiment)
- if options.dry_run:
- runner = MockExperimentRunner(experiment, json_report)
- else:
- runner = ExperimentRunner(
- experiment, json_report, using_schedv2=(not options.noschedv2))
+ if options.dry_run:
+ runner = MockExperimentRunner(experiment, json_report)
+ else:
+ runner = ExperimentRunner(
+ experiment, json_report, using_schedv2=(not options.noschedv2)
+ )
- ret = runner.Run()
- if ret == HAS_FAILURE:
- raise RuntimeError('One or more benchmarks failed.')
- if ret == ALL_FAILED:
- raise RuntimeError('All benchmarks failed to run.')
+ ret = runner.Run()
+ if ret == HAS_FAILURE:
+ raise RuntimeError("One or more benchmarks failed.")
+ if ret == ALL_FAILED:
+ raise RuntimeError("All benchmarks failed to run.")
def Main(argv):
- try:
- RunCrosperf(argv)
- except Exception:
- # Flush buffers before exiting to avoid out of order printing
- sys.stdout.flush()
- # Raise exception prints out traceback
- raise
+ try:
+ RunCrosperf(argv)
+ except Exception:
+ # Flush buffers before exiting to avoid out of order printing
+ sys.stdout.flush()
+        # Re-raising the exception prints out the traceback
+ raise
-if __name__ == '__main__':
- Main(sys.argv)
+if __name__ == "__main__":
+ Main(sys.argv)
diff --git a/crosperf/crosperf_autolock.py b/crosperf/crosperf_autolock.py
index b593fa9..011f01e 100755
--- a/crosperf/crosperf_autolock.py
+++ b/crosperf/crosperf_autolock.py
@@ -1,19 +1,20 @@
#!/usr/bin/env python3
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper script to automatically lock devices for crosperf."""
-import os
-import sys
import argparse
-import subprocess
import contextlib
-import json
-from typing import Optional, Any
import dataclasses
+import json
+import os
+import subprocess
+import sys
+from typing import Any, Dict, List, Optional, Tuple
+
# Have to do sys.path hackery because crosperf relies on PYTHONPATH
# modifications.
@@ -21,261 +22,292 @@
sys.path.append(PARENT_DIR)
-def main(sys_args: list[str]) -> Optional[str]:
- """Run crosperf_autolock. Returns error msg or None"""
- args, leftover_args = parse_args(sys_args)
- fleet_params = [
- CrosfleetParams(board=args.board,
- pool=args.pool,
- lease_time=args.lease_time)
- for _ in range(args.num_leases)
- ]
- if not fleet_params:
- return ('No board names identified. If you want to use'
- ' a known host, just use crosperf directly.')
- try:
- _run_crosperf(fleet_params, args.dut_lock_timeout, leftover_args)
- except BoardLockError as e:
- _eprint('ERROR:', e)
- _eprint('May need to login to crosfleet? Run "crosfleet login"')
- _eprint('The leases may also be successful later on. '
- 'Check with "crosfleet dut leases"')
- return 'crosperf_autolock failed'
- except BoardReleaseError as e:
- _eprint('ERROR:', e)
- _eprint('May need to re-run "crosfleet dut abandon"')
- return 'crosperf_autolock failed'
- return None
+def main(sys_args: List[str]) -> Optional[str]:
+ """Run crosperf_autolock. Returns error msg or None"""
+ args, leftover_args = parse_args(sys_args)
+ fleet_params = [
+ CrosfleetParams(
+ board=args.board, pool=args.pool, lease_time=args.lease_time
+ )
+ for _ in range(args.num_leases)
+ ]
+ if not fleet_params:
+ return (
+ "No board names identified. If you want to use"
+ " a known host, just use crosperf directly."
+ )
+ try:
+ _run_crosperf(fleet_params, args.dut_lock_timeout, leftover_args)
+ except BoardLockError as e:
+ _eprint("ERROR:", e)
+ _eprint('May need to login to crosfleet? Run "crosfleet login"')
+ _eprint(
+ "The leases may also be successful later on. "
+ 'Check with "crosfleet dut leases"'
+ )
+ return "crosperf_autolock failed"
+ except BoardReleaseError as e:
+ _eprint("ERROR:", e)
+ _eprint('May need to re-run "crosfleet dut abandon"')
+ return "crosperf_autolock failed"
+ return None
-def parse_args(args: list[str]) -> tuple[Any, list]:
- """Parse the CLI arguments."""
- parser = argparse.ArgumentParser(
- 'crosperf_autolock',
- description='Wrapper around crosperf'
- ' to autolock DUTs from crosfleet.',
- formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- parser.add_argument('--board',
- type=str,
- help='Space or comma separated list of boards to lock',
- required=True,
- default=argparse.SUPPRESS)
- parser.add_argument('--num-leases',
- type=int,
- help='Number of boards to lock.',
- metavar='NUM',
- default=1)
- parser.add_argument('--pool',
- type=str,
- help='Pool to pull from.',
- default='DUT_POOL_QUOTA')
- parser.add_argument('--dut-lock-timeout',
- type=float,
- metavar='SEC',
- help='Number of seconds we want to try to lease a board'
- ' from crosfleet. This option does NOT change the'
- ' lease length.',
- default=600)
- parser.add_argument('--lease-time',
- type=int,
- metavar='MIN',
- help='Number of minutes to lock the board. Max is 1440.',
- default=1440)
- parser.epilog = (
- 'For more detailed flags, you have to read the args taken by the'
- ' crosperf executable. Args are passed transparently to crosperf.')
- return parser.parse_known_args(args)
+def parse_args(args: List[str]) -> Tuple[Any, List]:
+ """Parse the CLI arguments."""
+ parser = argparse.ArgumentParser(
+ "crosperf_autolock",
+ description="Wrapper around crosperf"
+ " to autolock DUTs from crosfleet.",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ )
+ parser.add_argument(
+ "--board",
+ type=str,
+ help="Space or comma separated list of boards to lock",
+ required=True,
+ default=argparse.SUPPRESS,
+ )
+ parser.add_argument(
+ "--num-leases",
+ type=int,
+ help="Number of boards to lock.",
+ metavar="NUM",
+ default=1,
+ )
+ parser.add_argument(
+ "--pool", type=str, help="Pool to pull from.", default="DUT_POOL_QUOTA"
+ )
+ parser.add_argument(
+ "--dut-lock-timeout",
+ type=float,
+ metavar="SEC",
+ help="Number of seconds we want to try to lease a board"
+ " from crosfleet. This option does NOT change the"
+ " lease length.",
+ default=600,
+ )
+ parser.add_argument(
+ "--lease-time",
+ type=int,
+ metavar="MIN",
+ help="Number of minutes to lock the board. Max is 1440.",
+ default=1440,
+ )
+ parser.epilog = (
+ "For more detailed flags, you have to read the args taken by the"
+ " crosperf executable. Args are passed transparently to crosperf."
+ )
+ return parser.parse_known_args(args)
class BoardLockError(Exception):
- """Error to indicate failure to lock a board."""
+ """Error to indicate failure to lock a board."""
- def __init__(self, msg: str):
- self.msg = 'BoardLockError: ' + msg
- super().__init__(self.msg)
+ def __init__(self, msg: str):
+ self.msg = "BoardLockError: " + msg
+ super().__init__(self.msg)
class BoardReleaseError(Exception):
- """Error to indicate failure to release a board."""
+ """Error to indicate failure to release a board."""
- def __init__(self, msg: str):
- self.msg = 'BoardReleaseError: ' + msg
- super().__init__(self.msg)
+ def __init__(self, msg: str):
+ self.msg = "BoardReleaseError: " + msg
+ super().__init__(self.msg)
@dataclasses.dataclass(frozen=True)
class CrosfleetParams:
- """Dataclass to hold all crosfleet parameterizations."""
- board: str
- pool: str
- lease_time: int
+ """Dataclass to hold all crosfleet parameterizations."""
+
+ board: str
+ pool: str
+ lease_time: int
def _eprint(*msg, **kwargs):
- print(*msg, file=sys.stderr, **kwargs)
+ print(*msg, file=sys.stderr, **kwargs)
-def _run_crosperf(crosfleet_params: list[CrosfleetParams], lock_timeout: float,
- leftover_args: list[str]):
- """Autolock devices and run crosperf with leftover arguments.
+def _run_crosperf(
+ crosfleet_params: List[CrosfleetParams],
+ lock_timeout: float,
+ leftover_args: List[str],
+):
+ """Autolock devices and run crosperf with leftover arguments.
- Raises:
- BoardLockError: When board was unable to be locked.
- BoardReleaseError: When board was unable to be released.
- """
- if not crosfleet_params:
- raise ValueError('No crosfleet params given; cannot call crosfleet.')
+ Raises:
+ BoardLockError: When board was unable to be locked.
+ BoardReleaseError: When board was unable to be released.
+ """
+ if not crosfleet_params:
+ raise ValueError("No crosfleet params given; cannot call crosfleet.")
- # We'll assume all the boards are the same type, which seems to be the case
- # in experiments that actually get used.
- passed_board_arg = crosfleet_params[0].board
- with contextlib.ExitStack() as stack:
- dut_hostnames = []
- for param in crosfleet_params:
- print(
- f'Sent lock request for {param.board} for {param.lease_time} minutes'
- '\nIf this fails, you may need to run "crosfleet dut abandon <...>"')
- # May raise BoardLockError, abandoning previous DUTs.
- dut_hostname = stack.enter_context(
- crosfleet_machine_ctx(
- param.board,
- param.lease_time,
- lock_timeout,
- {'label-pool': param.pool},
- ))
- if dut_hostname:
- print(f'Locked {param.board} machine: {dut_hostname}')
- dut_hostnames.append(dut_hostname)
+ # We'll assume all the boards are the same type, which seems to be the case
+ # in experiments that actually get used.
+ passed_board_arg = crosfleet_params[0].board
+ with contextlib.ExitStack() as stack:
+ dut_hostnames = []
+ for param in crosfleet_params:
+ print(
+ f"Sent lock request for {param.board} for {param.lease_time} minutes"
+ '\nIf this fails, you may need to run "crosfleet dut abandon <...>"'
+ )
+ # May raise BoardLockError, abandoning previous DUTs.
+ dut_hostname = stack.enter_context(
+ crosfleet_machine_ctx(
+ param.board,
+ param.lease_time,
+ lock_timeout,
+ {"label-pool": param.pool},
+ )
+ )
+ if dut_hostname:
+ print(f"Locked {param.board} machine: {dut_hostname}")
+ dut_hostnames.append(dut_hostname)
- # We import crosperf late, because this import is extremely slow.
- # We don't want the user to wait several seconds just to get
- # help info.
- import crosperf
- for dut_hostname in dut_hostnames:
- crosperf.Main([
- sys.argv[0],
- '--no_lock',
- 'True',
- '--remote',
- dut_hostname,
- '--board',
- passed_board_arg,
- ] + leftover_args)
+ # We import crosperf late, because this import is extremely slow.
+ # We don't want the user to wait several seconds just to get
+ # help info.
+ import crosperf
+
+ for dut_hostname in dut_hostnames:
+ crosperf.Main(
+ [
+ sys.argv[0],
+ "--no_lock",
+ "True",
+ "--remote",
+ dut_hostname,
+ "--board",
+ passed_board_arg,
+ ]
+ + leftover_args
+ )
@contextlib.contextmanager
-def crosfleet_machine_ctx(board: str,
- lease_minutes: int,
- lock_timeout: float,
- dims: dict[str, Any],
- abandon_timeout: float = 120.0) -> Any:
- """Acquire dut from crosfleet, and release once it leaves the context.
+def crosfleet_machine_ctx(
+ board: str,
+ lease_minutes: int,
+ lock_timeout: float,
+ dims: Dict[str, Any],
+ abandon_timeout: float = 120.0,
+) -> Any:
+ """Acquire dut from crosfleet, and release once it leaves the context.
- Args:
- board: Board type to lease.
- lease_minutes: Length of lease, in minutes.
- lock_timeout: How long to wait for a lock until quitting.
- dims: Dictionary of dimension arguments to pass to crosfleet's '-dims'
- abandon_timeout (optional): How long to wait for releasing until quitting.
+ Args:
+ board: Board type to lease.
+ lease_minutes: Length of lease, in minutes.
+ lock_timeout: How long to wait for a lock until quitting.
+ dims: Dictionary of dimension arguments to pass to crosfleet's '-dims'
+ abandon_timeout: How long to wait for releasing until quitting.
- Yields:
- A string representing the crosfleet DUT hostname.
+ Yields:
+ A string representing the crosfleet DUT hostname.
- Raises:
- BoardLockError: When board was unable to be locked.
- BoardReleaseError: When board was unable to be released.
- """
- # This lock may raise an exception, but if it does, we can't release
- # the DUT anyways as we won't have the dut_hostname.
- dut_hostname = crosfleet_autolock(board, lease_minutes, dims, lock_timeout)
- try:
- yield dut_hostname
- finally:
- if dut_hostname:
- crosfleet_release(dut_hostname, abandon_timeout)
+ Raises:
+ BoardLockError: When board was unable to be locked.
+ BoardReleaseError: When board was unable to be released.
+ """
+ # This lock may raise an exception, but if it does, we can't release
+ # the DUT anyways as we won't have the dut_hostname.
+ dut_hostname = crosfleet_autolock(board, lease_minutes, dims, lock_timeout)
+ try:
+ yield dut_hostname
+ finally:
+ if dut_hostname:
+ crosfleet_release(dut_hostname, abandon_timeout)
-def crosfleet_autolock(board: str, lease_minutes: int, dims: dict[str, Any],
- timeout_sec: float) -> str:
- """Lock a device using crosfleet, paramaterized by the board type.
+def crosfleet_autolock(
+ board: str, lease_minutes: int, dims: Dict[str, Any], timeout_sec: float
+) -> str:
+ """Lock a device using crosfleet, parameterized by the board type.
- Args:
- board: Board of the DUT we want to lock.
- lease_minutes: Number of minutes we're trying to lease the DUT for.
- dims: Dictionary of dimension arguments to pass to crosfleet's '-dims'
- timeout_sec: Number of seconds to try to lease the DUT. Default 120s.
+ Args:
+ board: Board of the DUT we want to lock.
+ lease_minutes: Number of minutes we're trying to lease the DUT for.
+ dims: Dictionary of dimension arguments to pass to crosfleet's '-dims'
+ timeout_sec: Number of seconds to try to lease the DUT. Default 120s.
- Returns:
- The hostname of the board, or empty string if it couldn't be parsed.
+ Returns:
+ The hostname of the board, or empty string if it couldn't be parsed.
- Raises:
- BoardLockError: When board was unable to be locked.
- """
- crosfleet_cmd_args = [
- 'crosfleet',
- 'dut',
- 'lease',
- '-json',
- '-reason="crosperf autolock"',
- f'-board={board}',
- f'-minutes={lease_minutes}',
- ]
- if dims:
- dims_arg = ','.join('{}={}'.format(k, v) for k, v in dims.items())
- crosfleet_cmd_args.extend(['-dims', f'{dims_arg}'])
+ Raises:
+ BoardLockError: When board was unable to be locked.
+ """
+ crosfleet_cmd_args = [
+ "crosfleet",
+ "dut",
+ "lease",
+ "-json",
+ '-reason="crosperf autolock"',
+ f"-board={board}",
+ f"-minutes={lease_minutes}",
+ ]
+ if dims:
+ dims_arg = ",".join(f"{k}={v}" for k, v in dims.items())
+ crosfleet_cmd_args.extend(["-dims", f"{dims_arg}"])
- try:
- output = subprocess.check_output(crosfleet_cmd_args,
- timeout=timeout_sec,
- encoding='utf-8')
- except subprocess.CalledProcessError as e:
- raise BoardLockError(
- f'crosfleet dut lease failed with exit code: {e.returncode}')
- except subprocess.TimeoutExpired as e:
- raise BoardLockError(f'crosfleet dut lease timed out after {timeout_sec}s;'
- ' please abandon the dut manually.')
+ try:
+ output = subprocess.check_output(
+ crosfleet_cmd_args, timeout=timeout_sec, encoding="utf-8"
+ )
+ except subprocess.CalledProcessError as e:
+ raise BoardLockError(
+ f"crosfleet dut lease failed with exit code: {e.returncode}"
+ )
+ except subprocess.TimeoutExpired as e:
+ raise BoardLockError(
+ f"crosfleet dut lease timed out after {timeout_sec}s;"
+ " please abandon the dut manually."
+ )
- try:
- json_obj = json.loads(output)
- dut_hostname = json_obj['DUT']['Hostname']
- if not isinstance(dut_hostname, str):
- raise TypeError('dut_hostname was not a string')
- except (json.JSONDecodeError, IndexError, KeyError, TypeError) as e:
- raise BoardLockError(
- f'crosfleet dut lease output was parsed incorrectly: {e!r};'
- f' observed output was {output}')
- return _maybe_append_suffix(dut_hostname)
+ try:
+ json_obj = json.loads(output)
+ dut_hostname = json_obj["DUT"]["Hostname"]
+ if not isinstance(dut_hostname, str):
+ raise TypeError("dut_hostname was not a string")
+ except (json.JSONDecodeError, IndexError, KeyError, TypeError) as e:
+ raise BoardLockError(
+ f"crosfleet dut lease output was parsed incorrectly: {e!r};"
+ f" observed output was {output}"
+ )
+ return _maybe_append_suffix(dut_hostname)
def crosfleet_release(dut_hostname: str, timeout_sec: float = 120.0):
- """Release a crosfleet device.
+ """Release a crosfleet device.
- Consider using the context managed crosfleet_machine_context
+ Consider using the context managed crosfleet_machine_ctx
- Args:
- dut_hostname: Name of the device we want to release.
- timeout_sec: Number of seconds to try to release the DUT. Default is 120s.
+ Args:
+ dut_hostname: Name of the device we want to release.
+ timeout_sec: Number of seconds to try to release the DUT. Default is 120s.
- Raises:
- BoardReleaseError: Potentially failed to abandon the lease.
- """
- crosfleet_cmd_args = [
- 'crosfleet',
- 'dut',
- 'abandon',
- dut_hostname,
- ]
- exit_code = subprocess.call(crosfleet_cmd_args, timeout=timeout_sec)
- if exit_code != 0:
- raise BoardReleaseError(
- f'"crosfleet dut abandon" had exit code {exit_code}')
+ Raises:
+ BoardReleaseError: Potentially failed to abandon the lease.
+ """
+ crosfleet_cmd_args = [
+ "crosfleet",
+ "dut",
+ "abandon",
+ dut_hostname,
+ ]
+ exit_code = subprocess.call(crosfleet_cmd_args, timeout=timeout_sec)
+ if exit_code != 0:
+ raise BoardReleaseError(
+ f'"crosfleet dut abandon" had exit code {exit_code}'
+ )
def _maybe_append_suffix(hostname: str) -> str:
- if hostname.endswith('.cros') or '.cros.' in hostname:
- return hostname
- return hostname + '.cros'
+ if hostname.endswith(".cros") or ".cros." in hostname:
+ return hostname
+ return hostname + ".cros"
-if __name__ == '__main__':
- sys.exit(main(sys.argv[1:]))
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:]))
diff --git a/crosperf/crosperf_unittest.py b/crosperf/crosperf_unittest.py
index 774159f..7b52f2e 100755
--- a/crosperf/crosperf_unittest.py
+++ b/crosperf/crosperf_unittest.py
@@ -1,14 +1,12 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for crosperf."""
-from __future__ import division
-from __future__ import print_function
import argparse
import io
@@ -17,8 +15,9 @@
import unittest.mock as mock
import crosperf
-import settings_factory
import experiment_file
+import settings_factory
+
EXPERIMENT_FILE_1 = """
board: x86-alex
@@ -41,50 +40,51 @@
class CrosperfTest(unittest.TestCase):
- """Crosperf test class."""
+ """Crosperf test class."""
- def setUp(self):
- input_file = io.StringIO(EXPERIMENT_FILE_1)
- self.exp_file = experiment_file.ExperimentFile(input_file)
+ def setUp(self):
+ input_file = io.StringIO(EXPERIMENT_FILE_1)
+ self.exp_file = experiment_file.ExperimentFile(input_file)
- def testDryRun(self):
- with tempfile.NamedTemporaryFile('w', encoding='utf-8') as f:
- f.write(EXPERIMENT_FILE_1)
- f.flush()
- crosperf.Main(['', f.name, '--dry_run'])
+ def testDryRun(self):
+ with tempfile.NamedTemporaryFile("w", encoding="utf-8") as f:
+ f.write(EXPERIMENT_FILE_1)
+ f.flush()
+ crosperf.Main(["", f.name, "--dry_run"])
- def testConvertOptionsToSettings(self):
- parser = argparse.ArgumentParser()
- parser.add_argument('-l',
- '--log_dir',
- dest='log_dir',
- default='',
- help='The log_dir, default is under '
- '<crosperf_logs>/logs')
- crosperf.SetupParserOptions(parser)
- argv = ['crosperf/crosperf.py', 'temp.exp', '--rerun=True']
- options, _ = parser.parse_known_args(argv)
- settings = crosperf.ConvertOptionsToSettings(options)
- self.assertIsNotNone(settings)
- self.assertIsInstance(settings, settings_factory.GlobalSettings)
- self.assertEqual(len(settings.fields), 40)
- self.assertTrue(settings.GetField('rerun'))
- argv = ['crosperf/crosperf.py', 'temp.exp']
- options, _ = parser.parse_known_args(argv)
- settings = crosperf.ConvertOptionsToSettings(options)
- self.assertFalse(settings.GetField('rerun'))
+ def testConvertOptionsToSettings(self):
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "-l",
+ "--log_dir",
+ dest="log_dir",
+ default="",
+ help="The log_dir, default is under " "<crosperf_logs>/logs",
+ )
+ crosperf.SetupParserOptions(parser)
+ argv = ["crosperf/crosperf.py", "temp.exp", "--rerun=True"]
+ options, _ = parser.parse_known_args(argv)
+ settings = crosperf.ConvertOptionsToSettings(options)
+ self.assertIsNotNone(settings)
+ self.assertIsInstance(settings, settings_factory.GlobalSettings)
+ self.assertEqual(len(settings.fields), 40)
+ self.assertTrue(settings.GetField("rerun"))
+ argv = ["crosperf/crosperf.py", "temp.exp"]
+ options, _ = parser.parse_known_args(argv)
+ settings = crosperf.ConvertOptionsToSettings(options)
+ self.assertFalse(settings.GetField("rerun"))
- def testExceptionPrintTraceback(self):
- """Test the main function can print traceback in exception."""
+ def testExceptionPrintTraceback(self):
+ """Test the main function can print traceback in exception."""
- def mock_RunCrosperf(*_args, **_kwargs):
- return 10 / 0
+ def mock_RunCrosperf(*_args, **_kwargs):
+ return 10 / 0
- with mock.patch('crosperf.RunCrosperf', new=mock_RunCrosperf):
- with self.assertRaises(ZeroDivisionError) as context:
- crosperf.Main([])
- self.assertEqual('division by zero', str(context.exception))
+ with mock.patch("crosperf.RunCrosperf", new=mock_RunCrosperf):
+ with self.assertRaises(ZeroDivisionError) as context:
+ crosperf.Main([])
+ self.assertEqual("division by zero", str(context.exception))
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/crosperf/default_remotes b/crosperf/default_remotes
index faecb83..714385e 100644
--- a/crosperf/default_remotes
+++ b/crosperf/default_remotes
@@ -1,8 +1,6 @@
-bob : chromeos6-row4-rack13-host6.cros
-chell : chromeos2-row1-rack10-host2.cros chromeos2-row1-rack10-host4.cros
-coral : chromeos6-row5-rack6-host1.cros chromeos6-row5-rack6-host3.cros chromeos6-row5-rack6-host5.cros
-elm : chromeos6-row14-rack15-host21.cros
-kefka : chromeos6-row6-rack22-host2.cros chromeos6-row6-rack22-host3.cros chromeos6-row11-rack22-host7.cros
-nautilus : chromeos6-row5-rack10-host1.cros chromeos6-row5-rack10-host3.cros
-snappy : chromeos6-row3-rack20-host1.cros chromeos6-row3-rack20-host3.cros
-veyron_tiger : chromeos6-row3-rack7-host1.cros
+bob : chromeos8-row12-rack16-host2
+chell : chromeos2-row1-rack10-host2 chromeos2-row1-rack10-host4
+coral : chromeos6-row5-rack6-host1 chromeos6-row5-rack6-host3 chromeos6-row5-rack6-host5
+elm : chromeos6-row14-rack15-host21
+nautilus : chromeos6-row5-rack10-host1 chromeos6-row5-rack10-host3
+snappy : chromeos8-row12-rack17-host1 chromeos8-row12-rack17-host2
diff --git a/crosperf/download_images.py b/crosperf/download_images.py
index 8e1bad1..9a46280 100644
--- a/crosperf/download_images.py
+++ b/crosperf/download_images.py
@@ -1,327 +1,399 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2014-2015 The Chromium OS Authors. All rights reserved.
+# Copyright 2014-2015 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Download images from Cloud Storage."""
-from __future__ import print_function
import ast
import os
+from cros_utils import command_executer
import test_flag
-from cros_utils import command_executer
-GS_UTIL = 'src/chromium/depot_tools/gsutil.py'
+GS_UTIL = "src/chromium/depot_tools/gsutil.py"
class MissingImage(Exception):
- """Raised when the requested image does not exist in gs://"""
+ """Raised when the requested image does not exist in gs://"""
class MissingFile(Exception):
- """Raised when the requested file does not exist in gs://"""
+ """Raised when the requested file does not exist in gs://"""
class RunCommandExceptionHandler(object):
- """Handle Exceptions from calls to RunCommand"""
+ """Handle Exceptions from calls to RunCommand"""
- def __init__(self, logger_to_use, log_level, cmd_exec, command):
- self.logger = logger_to_use
- self.log_level = log_level
- self.ce = cmd_exec
- self.cleanup_command = command
+ def __init__(self, logger_to_use, log_level, cmd_exec, command):
+ self.logger = logger_to_use
+ self.log_level = log_level
+ self.ce = cmd_exec
+ self.cleanup_command = command
- def HandleException(self, _, e):
- # Exception handler, Run specified command
- if self.log_level != 'verbose' and self.cleanup_command is not None:
- self.logger.LogOutput('CMD: %s' % self.cleanup_command)
- if self.cleanup_command is not None:
- _ = self.ce.RunCommand(self.cleanup_command)
- # Raise exception again
- raise e
+ def HandleException(self, _, e):
+ # Exception handler, Run specified command
+ if self.log_level != "verbose" and self.cleanup_command is not None:
+ self.logger.LogOutput("CMD: %s" % self.cleanup_command)
+ if self.cleanup_command is not None:
+ _ = self.ce.RunCommand(self.cleanup_command)
+ # Raise exception again
+ raise e
class ImageDownloader(object):
- """Download images from Cloud Storage."""
+ """Download images from Cloud Storage."""
- def __init__(self, logger_to_use=None, log_level='verbose', cmd_exec=None):
- self._logger = logger_to_use
- self.log_level = log_level
- self._ce = cmd_exec or command_executer.GetCommandExecuter(
- self._logger, log_level=self.log_level)
+ def __init__(self, logger_to_use=None, log_level="verbose", cmd_exec=None):
+ self._logger = logger_to_use
+ self.log_level = log_level
+ self._ce = cmd_exec or command_executer.GetCommandExecuter(
+ self._logger, log_level=self.log_level
+ )
- def GetBuildID(self, chromeos_root, xbuddy_label):
- # Get the translation of the xbuddy_label into the real Google Storage
- # image name.
- command = ('cd /mnt/host/source/src/third_party/toolchain-utils/crosperf; '
- "./translate_xbuddy.py '%s'" % xbuddy_label)
- _, build_id_tuple_str, _ = self._ce.ChrootRunCommandWOutput(
- chromeos_root, command)
- if not build_id_tuple_str:
- raise MissingImage("Unable to find image for '%s'" % xbuddy_label)
+ def GetBuildID(self, chromeos_root, xbuddy_label):
+ # Get the translation of the xbuddy_label into the real Google Storage
+ # image name.
+ command = (
+ "cd /mnt/host/source/src/third_party/toolchain-utils/crosperf; "
+ "./translate_xbuddy.py '%s'" % xbuddy_label
+ )
+ _, build_id_tuple_str, _ = self._ce.ChrootRunCommandWOutput(
+ chromeos_root, command
+ )
+ if not build_id_tuple_str:
+ raise MissingImage("Unable to find image for '%s'" % xbuddy_label)
- build_id_tuple = ast.literal_eval(build_id_tuple_str)
- build_id = build_id_tuple[0]
+ build_id_tuple = ast.literal_eval(build_id_tuple_str)
+ build_id = build_id_tuple[0]
- return build_id
+ return build_id
- def DownloadImage(self, chromeos_root, build_id, image_name):
- if self.log_level == 'average':
- self._logger.LogOutput('Preparing to download %s image to local '
- 'directory.' % build_id)
+ def DownloadImage(self, chromeos_root, build_id, image_name):
+ if self.log_level == "average":
+ self._logger.LogOutput(
+ "Preparing to download %s image to local "
+ "directory." % build_id
+ )
- # Make sure the directory for downloading the image exists.
- download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
- image_path = os.path.join(download_path, 'chromiumos_test_image.bin')
- if not os.path.exists(download_path):
- os.makedirs(download_path)
+ # Make sure the directory for downloading the image exists.
+ download_path = os.path.join(chromeos_root, "chroot/tmp", build_id)
+ image_path = os.path.join(download_path, "chromiumos_test_image.bin")
+ if not os.path.exists(download_path):
+ os.makedirs(download_path)
- # Check to see if the image has already been downloaded. If not,
- # download the image.
- if not os.path.exists(image_path):
- gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
- command = '%s cp %s %s' % (gsutil_cmd, image_name, download_path)
+ # Check to see if the image has already been downloaded. If not,
+ # download the image.
+ if not os.path.exists(image_path):
+ gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
+ command = "%s cp %s %s" % (gsutil_cmd, image_name, download_path)
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % command)
- status = self._ce.RunCommand(command)
- downloaded_image_name = os.path.join(download_path,
- 'chromiumos_test_image.tar.xz')
- if status != 0 or not os.path.exists(downloaded_image_name):
- raise MissingImage('Cannot download image: %s.' % downloaded_image_name)
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % command)
+ status = self._ce.RunCommand(command)
+ downloaded_image_name = os.path.join(
+ download_path, "chromiumos_test_image.tar.xz"
+ )
+ if status != 0 or not os.path.exists(downloaded_image_name):
+ raise MissingImage(
+ "Cannot download image: %s." % downloaded_image_name
+ )
- return image_path
+ return image_path
- def UncompressImage(self, chromeos_root, build_id):
- # Check to see if the file has already been uncompresssed, etc.
- if os.path.exists(
- os.path.join(chromeos_root, 'chroot/tmp', build_id,
- 'chromiumos_test_image.bin')):
- return
+ def UncompressImage(self, chromeos_root, build_id):
+ # Check to see if the file has already been uncompressed, etc.
+ if os.path.exists(
+ os.path.join(
+ chromeos_root,
+ "chroot/tmp",
+ build_id,
+ "chromiumos_test_image.bin",
+ )
+ ):
+ return
- # Uncompress and untar the downloaded image.
- download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
- command = ('cd %s ; tar -Jxf chromiumos_test_image.tar.xz ' % download_path)
- # Cleanup command for exception handler
- clean_cmd = ('cd %s ; rm -f chromiumos_test_image.bin ' % download_path)
- exception_handler = RunCommandExceptionHandler(self._logger, self.log_level,
- self._ce, clean_cmd)
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % command)
- print('(Uncompressing and un-tarring may take a couple of minutes...'
- 'please be patient.)')
- retval = self._ce.RunCommand(
- command, except_handler=exception_handler.HandleException)
- if retval != 0:
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % clean_cmd)
- print('(Removing file chromiumos_test_image.bin.)')
- # Remove partially uncompressed file
- _ = self._ce.RunCommand(clean_cmd)
- # Raise exception for failure to uncompress
- raise MissingImage('Cannot uncompress image: %s.' % build_id)
+ # Uncompress and untar the downloaded image.
+ download_path = os.path.join(chromeos_root, "chroot/tmp", build_id)
+ command = (
+ "cd %s ; tar -Jxf chromiumos_test_image.tar.xz " % download_path
+ )
+ # Cleanup command for exception handler
+ clean_cmd = "cd %s ; rm -f chromiumos_test_image.bin " % download_path
+ exception_handler = RunCommandExceptionHandler(
+ self._logger, self.log_level, self._ce, clean_cmd
+ )
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % command)
+ print(
+ "(Uncompressing and un-tarring may take a couple of minutes..."
+ "please be patient.)"
+ )
+ retval = self._ce.RunCommand(
+ command, except_handler=exception_handler.HandleException
+ )
+ if retval != 0:
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % clean_cmd)
+ print("(Removing file chromiumos_test_image.bin.)")
+ # Remove partially uncompressed file
+ _ = self._ce.RunCommand(clean_cmd)
+ # Raise exception for failure to uncompress
+ raise MissingImage("Cannot uncompress image: %s." % build_id)
- # Remove compressed image
- command = ('cd %s ; rm -f chromiumos_test_image.tar.xz; ' % download_path)
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % command)
- print('(Removing file chromiumos_test_image.tar.xz.)')
- # try removing file, its ok to have an error, print if encountered
- retval = self._ce.RunCommand(command)
- if retval != 0:
- print('(Warning: Could not remove file chromiumos_test_image.tar.xz .)')
+ # Remove compressed image
+ command = "cd %s ; rm -f chromiumos_test_image.tar.xz; " % download_path
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % command)
+ print("(Removing file chromiumos_test_image.tar.xz.)")
+ # try removing file, it's OK to have an error, print if encountered
+ retval = self._ce.RunCommand(command)
+ if retval != 0:
+ print(
+ "(Warning: Could not remove file chromiumos_test_image.tar.xz .)"
+ )
- def DownloadSingleFile(self, chromeos_root, build_id, package_file_name):
- # Verify if package files exist
- status = 0
- gs_package_name = ('gs://chromeos-image-archive/%s/%s' %
- (build_id, package_file_name))
- gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
- if not test_flag.GetTestMode():
- cmd = '%s ls %s' % (gsutil_cmd, gs_package_name)
- status = self._ce.RunCommand(cmd)
- if status != 0:
- raise MissingFile('Cannot find package file: %s.' % package_file_name)
+ def DownloadSingleFile(self, chromeos_root, build_id, package_file_name):
+ # Verify if package files exist
+ status = 0
+ gs_package_name = "gs://chromeos-image-archive/%s/%s" % (
+ build_id,
+ package_file_name,
+ )
+ gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
+ if not test_flag.GetTestMode():
+ cmd = "%s ls %s" % (gsutil_cmd, gs_package_name)
+ status = self._ce.RunCommand(cmd)
+ if status != 0:
+ raise MissingFile(
+ "Cannot find package file: %s." % package_file_name
+ )
- if self.log_level == 'average':
- self._logger.LogOutput('Preparing to download %s package to local '
- 'directory.' % package_file_name)
+ if self.log_level == "average":
+ self._logger.LogOutput(
+ "Preparing to download %s package to local "
+ "directory." % package_file_name
+ )
- # Make sure the directory for downloading the package exists.
- download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
- package_path = os.path.join(download_path, package_file_name)
- if not os.path.exists(download_path):
- os.makedirs(download_path)
+ # Make sure the directory for downloading the package exists.
+ download_path = os.path.join(chromeos_root, "chroot/tmp", build_id)
+ package_path = os.path.join(download_path, package_file_name)
+ if not os.path.exists(download_path):
+ os.makedirs(download_path)
- # Check to see if the package file has already been downloaded. If not,
- # download it.
- if not os.path.exists(package_path):
- command = '%s cp %s %s' % (gsutil_cmd, gs_package_name, download_path)
+ # Check to see if the package file has already been downloaded. If not,
+ # download it.
+ if not os.path.exists(package_path):
+ command = "%s cp %s %s" % (
+ gsutil_cmd,
+ gs_package_name,
+ download_path,
+ )
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % command)
- status = self._ce.RunCommand(command)
- if status != 0 or not os.path.exists(package_path):
- raise MissingFile('Cannot download package: %s .' % package_path)
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % command)
+ status = self._ce.RunCommand(command)
+ if status != 0 or not os.path.exists(package_path):
+ raise MissingFile(
+ "Cannot download package: %s ." % package_path
+ )
- def UncompressSingleFile(self, chromeos_root, build_id, package_file_name,
- uncompress_cmd):
- # Uncompress file
- download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
- command = ('cd %s ; %s %s' %
- (download_path, uncompress_cmd, package_file_name))
+ def UncompressSingleFile(
+ self, chromeos_root, build_id, package_file_name, uncompress_cmd
+ ):
+ # Uncompress file
+ download_path = os.path.join(chromeos_root, "chroot/tmp", build_id)
+ command = "cd %s ; %s %s" % (
+ download_path,
+ uncompress_cmd,
+ package_file_name,
+ )
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % command)
- print('(Uncompressing file %s .)' % package_file_name)
- retval = self._ce.RunCommand(command)
- if retval != 0:
- raise MissingFile('Cannot uncompress file: %s.' % package_file_name)
- # Remove uncompressed downloaded file
- command = ('cd %s ; rm -f %s' % (download_path, package_file_name))
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % command)
- print('(Removing processed file %s .)' % package_file_name)
- # try removing file, its ok to have an error, print if encountered
- retval = self._ce.RunCommand(command)
- if retval != 0:
- print('(Warning: Could not remove file %s .)' % package_file_name)
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % command)
+ print("(Uncompressing file %s .)" % package_file_name)
+ retval = self._ce.RunCommand(command)
+ if retval != 0:
+ raise MissingFile("Cannot uncompress file: %s." % package_file_name)
+ # Remove uncompressed downloaded file
+ command = "cd %s ; rm -f %s" % (download_path, package_file_name)
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % command)
+ print("(Removing processed file %s .)" % package_file_name)
+        # try removing file, it's OK to have an error; print if encountered
+ retval = self._ce.RunCommand(command)
+ if retval != 0:
+ print("(Warning: Could not remove file %s .)" % package_file_name)
- def VerifyFileExists(self, chromeos_root, build_id, package_file):
- # Quickly verify if the files are there
- status = 0
- gs_package_name = ('gs://chromeos-image-archive/%s/%s' %
- (build_id, package_file))
- gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
- if not test_flag.GetTestMode():
- cmd = '%s ls %s' % (gsutil_cmd, gs_package_name)
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % cmd)
- status = self._ce.RunCommand(cmd)
- if status != 0:
- print('(Warning: Could not find file %s )' % gs_package_name)
- return 1
- # Package exists on server
- return 0
+ def VerifyFileExists(self, chromeos_root, build_id, package_file):
+ # Quickly verify if the files are there
+ status = 0
+ gs_package_name = "gs://chromeos-image-archive/%s/%s" % (
+ build_id,
+ package_file,
+ )
+ gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
+ if not test_flag.GetTestMode():
+ cmd = "%s ls %s" % (gsutil_cmd, gs_package_name)
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % cmd)
+ status = self._ce.RunCommand(cmd)
+ if status != 0:
+ print("(Warning: Could not find file %s )" % gs_package_name)
+ return 1
+ # Package exists on server
+ return 0
- def DownloadAutotestFiles(self, chromeos_root, build_id):
- # Download autest package files (3 files)
- autotest_packages_name = ('autotest_packages.tar')
- autotest_server_package_name = ('autotest_server_package.tar.bz2')
- autotest_control_files_name = ('control_files.tar')
+ def DownloadAutotestFiles(self, chromeos_root, build_id):
+        # Download autotest package files (3 files)
+ autotest_packages_name = "autotest_packages.tar"
+ autotest_server_package_name = "autotest_server_package.tar.bz2"
+ autotest_control_files_name = "control_files.tar"
- download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
- # Autotest directory relative path wrt chroot
- autotest_rel_path = os.path.join('/tmp', build_id, 'autotest_files')
- # Absolute Path to download files
- autotest_path = os.path.join(chromeos_root, 'chroot/tmp', build_id,
- 'autotest_files')
+ download_path = os.path.join(chromeos_root, "chroot/tmp", build_id)
+ # Autotest directory relative path wrt chroot
+ autotest_rel_path = os.path.join("/tmp", build_id, "autotest_files")
+ # Absolute Path to download files
+ autotest_path = os.path.join(
+ chromeos_root, "chroot/tmp", build_id, "autotest_files"
+ )
- if not os.path.exists(autotest_path):
- # Quickly verify if the files are present on server
- # If not, just exit with warning
- status = self.VerifyFileExists(chromeos_root, build_id,
- autotest_packages_name)
- if status != 0:
- default_autotest_dir = '/mnt/host/source/src/third_party/autotest/files'
- print('(Warning: Could not find autotest packages .)\n'
- '(Warning: Defaulting autotest path to %s .' %
- default_autotest_dir)
- return default_autotest_dir
+ if not os.path.exists(autotest_path):
+ # Quickly verify if the files are present on server
+ # If not, just exit with warning
+ status = self.VerifyFileExists(
+ chromeos_root, build_id, autotest_packages_name
+ )
+ if status != 0:
+ default_autotest_dir = (
+ "/mnt/host/source/src/third_party/autotest/files"
+ )
+ print(
+ "(Warning: Could not find autotest packages .)\n"
+ "(Warning: Defaulting autotest path to %s ."
+ % default_autotest_dir
+ )
+ return default_autotest_dir
- # Files exist on server, download and uncompress them
- self.DownloadSingleFile(chromeos_root, build_id, autotest_packages_name)
- self.DownloadSingleFile(chromeos_root, build_id,
- autotest_server_package_name)
- self.DownloadSingleFile(chromeos_root, build_id,
- autotest_control_files_name)
+ # Files exist on server, download and uncompress them
+ self.DownloadSingleFile(
+ chromeos_root, build_id, autotest_packages_name
+ )
+ self.DownloadSingleFile(
+ chromeos_root, build_id, autotest_server_package_name
+ )
+ self.DownloadSingleFile(
+ chromeos_root, build_id, autotest_control_files_name
+ )
- self.UncompressSingleFile(chromeos_root, build_id, autotest_packages_name,
- 'tar -xf ')
- self.UncompressSingleFile(chromeos_root, build_id,
- autotest_server_package_name, 'tar -jxf ')
- self.UncompressSingleFile(chromeos_root, build_id,
- autotest_control_files_name, 'tar -xf ')
- # Rename created autotest directory to autotest_files
- command = ('cd %s ; mv autotest autotest_files' % download_path)
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % command)
- print('(Moving downloaded autotest files to autotest_files)')
- retval = self._ce.RunCommand(command)
- if retval != 0:
- raise MissingFile('Could not create directory autotest_files')
+ self.UncompressSingleFile(
+ chromeos_root, build_id, autotest_packages_name, "tar -xf "
+ )
+ self.UncompressSingleFile(
+ chromeos_root,
+ build_id,
+ autotest_server_package_name,
+ "tar -jxf ",
+ )
+ self.UncompressSingleFile(
+ chromeos_root, build_id, autotest_control_files_name, "tar -xf "
+ )
+ # Rename created autotest directory to autotest_files
+ command = "cd %s ; mv autotest autotest_files" % download_path
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % command)
+ print("(Moving downloaded autotest files to autotest_files)")
+ retval = self._ce.RunCommand(command)
+ if retval != 0:
+ raise MissingFile("Could not create directory autotest_files")
- return autotest_rel_path
+ return autotest_rel_path
- def DownloadDebugFile(self, chromeos_root, build_id):
- # Download autest package files (3 files)
- debug_archive_name = 'debug.tgz'
+ def DownloadDebugFile(self, chromeos_root, build_id):
+        # Download the debug archive file (debug.tgz)
+ debug_archive_name = "debug.tgz"
- download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
- # Debug directory relative path wrt chroot
- debug_rel_path = os.path.join('/tmp', build_id, 'debug_files')
- # Debug path to download files
- debug_path = os.path.join(chromeos_root, 'chroot/tmp', build_id,
- 'debug_files')
+ download_path = os.path.join(chromeos_root, "chroot/tmp", build_id)
+ # Debug directory relative path wrt chroot
+ debug_rel_path = os.path.join("/tmp", build_id, "debug_files")
+ # Debug path to download files
+ debug_path = os.path.join(
+ chromeos_root, "chroot/tmp", build_id, "debug_files"
+ )
- if not os.path.exists(debug_path):
- # Quickly verify if the file is present on server
- # If not, just exit with warning
- status = self.VerifyFileExists(chromeos_root, build_id,
- debug_archive_name)
- if status != 0:
- self._logger.LogOutput('WARNING: Could not find debug archive on gs')
- return ''
+ if not os.path.exists(debug_path):
+ # Quickly verify if the file is present on server
+ # If not, just exit with warning
+ status = self.VerifyFileExists(
+ chromeos_root, build_id, debug_archive_name
+ )
+ if status != 0:
+ self._logger.LogOutput(
+ "WARNING: Could not find debug archive on gs"
+ )
+ return ""
- # File exists on server, download and uncompress it
- self.DownloadSingleFile(chromeos_root, build_id, debug_archive_name)
+ # File exists on server, download and uncompress it
+ self.DownloadSingleFile(chromeos_root, build_id, debug_archive_name)
- self.UncompressSingleFile(chromeos_root, build_id, debug_archive_name,
- 'tar -xf ')
- # Extract and move debug files into the proper location.
- debug_dir = 'debug_files/usr/lib'
- command = ('cd %s ; mkdir -p %s; mv debug %s' %
- (download_path, debug_dir, debug_dir))
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % command)
- print('Moving downloaded debug files to %s' % debug_dir)
- retval = self._ce.RunCommand(command)
- if retval != 0:
- raise MissingFile('Could not create directory %s' %
- os.path.join(debug_dir, 'debug'))
+ self.UncompressSingleFile(
+ chromeos_root, build_id, debug_archive_name, "tar -xf "
+ )
+ # Extract and move debug files into the proper location.
+ debug_dir = "debug_files/usr/lib"
+ command = "cd %s ; mkdir -p %s; mv debug %s" % (
+ download_path,
+ debug_dir,
+ debug_dir,
+ )
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % command)
+ print("Moving downloaded debug files to %s" % debug_dir)
+ retval = self._ce.RunCommand(command)
+ if retval != 0:
+ raise MissingFile(
+ "Could not create directory %s"
+ % os.path.join(debug_dir, "debug")
+ )
- return debug_rel_path
+ return debug_rel_path
- def Run(self, chromeos_root, xbuddy_label, autotest_path, debug_path,
- download_debug):
- build_id = self.GetBuildID(chromeos_root, xbuddy_label)
- image_name = (
- 'gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz' %
- build_id)
+ def Run(
+ self,
+ chromeos_root,
+ xbuddy_label,
+ autotest_path,
+ debug_path,
+ download_debug,
+ ):
+ build_id = self.GetBuildID(chromeos_root, xbuddy_label)
+ image_name = (
+ "gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz"
+ % build_id
+ )
- # Verify that image exists for build_id, before attempting to
- # download it.
- status = 0
- if not test_flag.GetTestMode():
- gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
- cmd = '%s ls %s' % (gsutil_cmd, image_name)
- status = self._ce.RunCommand(cmd)
- if status != 0:
- raise MissingImage('Cannot find official image: %s.' % image_name)
+ # Verify that image exists for build_id, before attempting to
+ # download it.
+ status = 0
+ if not test_flag.GetTestMode():
+ gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
+ cmd = "%s ls %s" % (gsutil_cmd, image_name)
+ status = self._ce.RunCommand(cmd)
+ if status != 0:
+ raise MissingImage("Cannot find official image: %s." % image_name)
- image_path = self.DownloadImage(chromeos_root, build_id, image_name)
- self.UncompressImage(chromeos_root, build_id)
+ image_path = self.DownloadImage(chromeos_root, build_id, image_name)
+ self.UncompressImage(chromeos_root, build_id)
- if self.log_level != 'quiet':
- self._logger.LogOutput('Using image from %s.' % image_path)
+ if self.log_level != "quiet":
+ self._logger.LogOutput("Using image from %s." % image_path)
- if autotest_path == '':
- autotest_path = self.DownloadAutotestFiles(chromeos_root, build_id)
+ if autotest_path == "":
+ autotest_path = self.DownloadAutotestFiles(chromeos_root, build_id)
- if debug_path == '' and download_debug:
- debug_path = self.DownloadDebugFile(chromeos_root, build_id)
+ if debug_path == "" and download_debug:
+ debug_path = self.DownloadDebugFile(chromeos_root, build_id)
- return image_path, autotest_path, debug_path
+ return image_path, autotest_path, debug_path
diff --git a/crosperf/download_images_buildid_test.py b/crosperf/download_images_buildid_test.py
index fc37f2c..20dd13c 100755
--- a/crosperf/download_images_buildid_test.py
+++ b/crosperf/download_images_buildid_test.py
@@ -1,18 +1,18 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test translation of xbuddy names."""
-from __future__ import print_function
import argparse
import sys
import download_images
+
# On May 1, 2014:
# latest : lumpy-release/R34-5500.132.0
# latest-beta : lumpy-release/R35-5712.43.0
@@ -22,93 +22,111 @@
class ImageDownloaderBuildIDTest(object):
- """Test translation of xbuddy names."""
+ """Test translation of xbuddy names."""
- def __init__(self):
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '-c',
- '--chromeos_root',
- dest='chromeos_root',
- help='Directory containing ChromeOS root.')
+ def __init__(self):
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "-c",
+ "--chromeos_root",
+ dest="chromeos_root",
+ help="Directory containing ChromeOS root.",
+ )
- options = parser.parse_known_args(sys.argv[1:])[0]
- if options.chromeos_root is None:
- self._usage(parser, '--chromeos_root must be set')
- self.chromeos_root = options.chromeos_root
- self.tests_passed = 0
- self.tests_run = 0
- self.tests_failed = 0
+ options = parser.parse_known_args(sys.argv[1:])[0]
+ if options.chromeos_root is None:
+ self._usage(parser, "--chromeos_root must be set")
+ self.chromeos_root = options.chromeos_root
+ self.tests_passed = 0
+ self.tests_run = 0
+ self.tests_failed = 0
- def _usage(self, parser, message):
- print('ERROR: ' + message)
- parser.print_help()
- sys.exit(0)
+ def _usage(self, parser, message):
+ print("ERROR: " + message)
+ parser.print_help()
+ sys.exit(0)
- def print_test_status(self):
- print('----------------------------------------\n')
- print('Tests attempted: %d' % self.tests_run)
- print('Tests passed: %d' % self.tests_passed)
- print('Tests failed: %d' % self.tests_failed)
- print('\n----------------------------------------')
+ def print_test_status(self):
+ print("----------------------------------------\n")
+ print("Tests attempted: %d" % self.tests_run)
+ print("Tests passed: %d" % self.tests_passed)
+ print("Tests failed: %d" % self.tests_failed)
+ print("\n----------------------------------------")
- def assert_failure(self, msg):
- print('Assert failure: %s' % msg)
- self.print_test_status()
- sys.exit(1)
+ def assert_failure(self, msg):
+ print("Assert failure: %s" % msg)
+ self.print_test_status()
+ sys.exit(1)
- def assertIsNotNone(self, arg, arg_name):
- if arg is None:
- self.tests_failed = self.tests_failed + 1
- self.assert_failure('%s is not None' % arg_name)
+ def assertIsNotNone(self, arg, arg_name):
+ if arg is None:
+ self.tests_failed = self.tests_failed + 1
+ self.assert_failure("%s is not None" % arg_name)
- def assertNotEqual(self, arg1, arg2, arg1_name, arg2_name):
- if arg1 == arg2:
- self.tests_failed = self.tests_failed + 1
- self.assert_failure('%s is not NotEqual to %s' % (arg1_name, arg2_name))
+ def assertNotEqual(self, arg1, arg2, arg1_name, arg2_name):
+ if arg1 == arg2:
+ self.tests_failed = self.tests_failed + 1
+ self.assert_failure(
+ "%s is not NotEqual to %s" % (arg1_name, arg2_name)
+ )
- def assertEqual(self, arg1, arg2, arg1_name, arg2_name):
- if arg1 != arg2:
- self.tests_failed = self.tests_failed + 1
- self.assert_failure('%s is not Equal to %s' % (arg1_name, arg2_name))
+ def assertEqual(self, arg1, arg2, arg1_name, arg2_name):
+ if arg1 != arg2:
+ self.tests_failed = self.tests_failed + 1
+ self.assert_failure(
+ "%s is not Equal to %s" % (arg1_name, arg2_name)
+ )
- def test_one_id(self, downloader, test_id, result_string, exact_match):
- print("Translating '%s'" % test_id)
- self.tests_run = self.tests_run + 1
+ def test_one_id(self, downloader, test_id, result_string, exact_match):
+ print("Translating '%s'" % test_id)
+ self.tests_run = self.tests_run + 1
- result = downloader.GetBuildID(self.chromeos_root, test_id)
- # Verify that we got a build id back.
- self.assertIsNotNone(result, 'result')
+ result = downloader.GetBuildID(self.chromeos_root, test_id)
+ # Verify that we got a build id back.
+ self.assertIsNotNone(result, "result")
- # Verify that the result either contains or exactly matches the
- # result_string, depending on the exact_match argument.
- if exact_match:
- self.assertEqual(result, result_string, 'result', result_string)
- else:
- self.assertNotEqual(result.find(result_string), -1, 'result.find', '-1')
- self.tests_passed = self.tests_passed + 1
+ # Verify that the result either contains or exactly matches the
+ # result_string, depending on the exact_match argument.
+ if exact_match:
+ self.assertEqual(result, result_string, "result", result_string)
+ else:
+ self.assertNotEqual(
+ result.find(result_string), -1, "result.find", "-1"
+ )
+ self.tests_passed = self.tests_passed + 1
- def test_get_build_id(self):
- """Test that the actual translating of xbuddy names is working properly."""
- downloader = download_images.ImageDownloader(log_level='quiet')
+ def test_get_build_id(self):
+ """Test that the actual translating of xbuddy names is working properly."""
+ downloader = download_images.ImageDownloader(log_level="quiet")
- self.test_one_id(downloader, 'remote/lumpy/latest-dev', 'lumpy-release/R',
- False)
- self.test_one_id(downloader,
- 'remote/trybot-lumpy-release-afdo-use/R35-5672.0.0-b86',
- 'trybot-lumpy-release-afdo-use/R35-5672.0.0-b86', True)
- self.test_one_id(downloader, 'remote/lumpy-release/R35-5672.0.0',
- 'lumpy-release/R35-5672.0.0', True)
- self.test_one_id(downloader, 'remote/lumpy/latest-dev', 'lumpy-release/R',
- False)
- self.test_one_id(downloader, 'remote/lumpy/latest-official',
- 'lumpy-release/R', False)
- self.test_one_id(downloader, 'remote/lumpy/latest-beta', 'lumpy-release/R',
- False)
+ self.test_one_id(
+ downloader, "remote/lumpy/latest-dev", "lumpy-release/R", False
+ )
+ self.test_one_id(
+ downloader,
+ "remote/trybot-lumpy-release-afdo-use/R35-5672.0.0-b86",
+ "trybot-lumpy-release-afdo-use/R35-5672.0.0-b86",
+ True,
+ )
+ self.test_one_id(
+ downloader,
+ "remote/lumpy-release/R35-5672.0.0",
+ "lumpy-release/R35-5672.0.0",
+ True,
+ )
+ self.test_one_id(
+ downloader, "remote/lumpy/latest-dev", "lumpy-release/R", False
+ )
+ self.test_one_id(
+ downloader, "remote/lumpy/latest-official", "lumpy-release/R", False
+ )
+ self.test_one_id(
+ downloader, "remote/lumpy/latest-beta", "lumpy-release/R", False
+ )
- self.print_test_status()
+ self.print_test_status()
-if __name__ == '__main__':
- tester = ImageDownloaderBuildIDTest()
- tester.test_get_build_id()
+if __name__ == "__main__":
+ tester = ImageDownloaderBuildIDTest()
+ tester.test_get_build_id()
diff --git a/crosperf/download_images_unittest.py b/crosperf/download_images_unittest.py
index 62b8d89..6a640f8 100755
--- a/crosperf/download_images_unittest.py
+++ b/crosperf/download_images_unittest.py
@@ -1,277 +1,316 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Download image unittest."""
-from __future__ import print_function
import os
import unittest
import unittest.mock as mock
-import download_images
from cros_utils import command_executer
from cros_utils import logger
-
+import download_images
import test_flag
-MOCK_LOGGER = logger.GetLogger(log_dir='', mock=True)
+
+MOCK_LOGGER = logger.GetLogger(log_dir="", mock=True)
class ImageDownloaderTestcast(unittest.TestCase):
- """The image downloader test class."""
+ """The image downloader test class."""
- def __init__(self, *args, **kwargs):
- super(ImageDownloaderTestcast, self).__init__(*args, **kwargs)
- self.called_download_image = False
- self.called_uncompress_image = False
- self.called_get_build_id = False
- self.called_download_autotest_files = False
- self.called_download_debug_file = False
+ def __init__(self, *args, **kwargs):
+ super(ImageDownloaderTestcast, self).__init__(*args, **kwargs)
+ self.called_download_image = False
+ self.called_uncompress_image = False
+ self.called_get_build_id = False
+ self.called_download_autotest_files = False
+ self.called_download_debug_file = False
- @mock.patch.object(os, 'makedirs')
- @mock.patch.object(os.path, 'exists')
- def test_download_image(self, mock_path_exists, mock_mkdirs):
+ @mock.patch.object(os, "makedirs")
+ @mock.patch.object(os.path, "exists")
+ def test_download_image(self, mock_path_exists, mock_mkdirs):
- # Set mock and test values.
- mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
- test_chroot = '/usr/local/home/chromeos'
- test_build_id = 'lumpy-release/R36-5814.0.0'
- image_path = ('gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz'
- % test_build_id)
+ # Set mock and test values.
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+ test_chroot = "/usr/local/home/chromeos"
+ test_build_id = "lumpy-release/R36-5814.0.0"
+ image_path = (
+ "gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz"
+ % test_build_id
+ )
- downloader = download_images.ImageDownloader(
- logger_to_use=MOCK_LOGGER, cmd_exec=mock_cmd_exec)
+ downloader = download_images.ImageDownloader(
+ logger_to_use=MOCK_LOGGER, cmd_exec=mock_cmd_exec
+ )
- # Set os.path.exists to always return False and run downloader
- mock_path_exists.return_value = False
- test_flag.SetTestMode(True)
- self.assertRaises(download_images.MissingImage, downloader.DownloadImage,
- test_chroot, test_build_id, image_path)
+ # Set os.path.exists to always return False and run downloader
+ mock_path_exists.return_value = False
+ test_flag.SetTestMode(True)
+ self.assertRaises(
+ download_images.MissingImage,
+ downloader.DownloadImage,
+ test_chroot,
+ test_build_id,
+ image_path,
+ )
- # Verify os.path.exists was called twice, with proper arguments.
- self.assertEqual(mock_path_exists.call_count, 2)
- mock_path_exists.assert_called_with(
- '/usr/local/home/chromeos/chroot/tmp/lumpy-release/'
- 'R36-5814.0.0/chromiumos_test_image.bin')
- mock_path_exists.assert_any_call(
- '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
+ # Verify os.path.exists was called twice, with proper arguments.
+ self.assertEqual(mock_path_exists.call_count, 2)
+ mock_path_exists.assert_called_with(
+ "/usr/local/home/chromeos/chroot/tmp/lumpy-release/"
+ "R36-5814.0.0/chromiumos_test_image.bin"
+ )
+ mock_path_exists.assert_any_call(
+ "/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0"
+ )
- # Verify we called os.mkdirs
- self.assertEqual(mock_mkdirs.call_count, 1)
- mock_mkdirs.assert_called_with(
- '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
+        # Verify we called os.makedirs
+ self.assertEqual(mock_mkdirs.call_count, 1)
+ mock_mkdirs.assert_called_with(
+ "/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0"
+ )
- # Verify we called RunCommand once, with proper arguments.
- self.assertEqual(mock_cmd_exec.RunCommand.call_count, 1)
- expected_args = (
- '/usr/local/home/chromeos/src/chromium/depot_tools/gsutil.py '
- 'cp gs://chromeos-image-archive/lumpy-release/R36-5814.0.0/'
- 'chromiumos_test_image.tar.xz '
- '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
+ # Verify we called RunCommand once, with proper arguments.
+ self.assertEqual(mock_cmd_exec.RunCommand.call_count, 1)
+ expected_args = (
+ "/usr/local/home/chromeos/src/chromium/depot_tools/gsutil.py "
+ "cp gs://chromeos-image-archive/lumpy-release/R36-5814.0.0/"
+ "chromiumos_test_image.tar.xz "
+ "/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0"
+ )
- mock_cmd_exec.RunCommand.assert_called_with(expected_args)
+ mock_cmd_exec.RunCommand.assert_called_with(expected_args)
- # Reset the velues in the mocks; set os.path.exists to always return True.
- mock_path_exists.reset_mock()
- mock_cmd_exec.reset_mock()
- mock_path_exists.return_value = True
+        # Reset the values in the mocks; set os.path.exists to always return True.
+ mock_path_exists.reset_mock()
+ mock_cmd_exec.reset_mock()
+ mock_path_exists.return_value = True
- # Run downloader
- downloader.DownloadImage(test_chroot, test_build_id, image_path)
+ # Run downloader
+ downloader.DownloadImage(test_chroot, test_build_id, image_path)
- # Verify os.path.exists was called twice, with proper arguments.
- self.assertEqual(mock_path_exists.call_count, 2)
- mock_path_exists.assert_called_with(
- '/usr/local/home/chromeos/chroot/tmp/lumpy-release/'
- 'R36-5814.0.0/chromiumos_test_image.bin')
- mock_path_exists.assert_any_call(
- '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
+ # Verify os.path.exists was called twice, with proper arguments.
+ self.assertEqual(mock_path_exists.call_count, 2)
+ mock_path_exists.assert_called_with(
+ "/usr/local/home/chromeos/chroot/tmp/lumpy-release/"
+ "R36-5814.0.0/chromiumos_test_image.bin"
+ )
+ mock_path_exists.assert_any_call(
+ "/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0"
+ )
- # Verify we made no RunCommand or ChrootRunCommand calls (since
- # os.path.exists returned True, there was no work do be done).
- self.assertEqual(mock_cmd_exec.RunCommand.call_count, 0)
- self.assertEqual(mock_cmd_exec.ChrootRunCommand.call_count, 0)
+ # Verify we made no RunCommand or ChrootRunCommand calls (since
+        # os.path.exists returned True, there was no work to be done).
+ self.assertEqual(mock_cmd_exec.RunCommand.call_count, 0)
+ self.assertEqual(mock_cmd_exec.ChrootRunCommand.call_count, 0)
- @mock.patch.object(os.path, 'exists')
- def test_uncompress_image(self, mock_path_exists):
+ @mock.patch.object(os.path, "exists")
+ def test_uncompress_image(self, mock_path_exists):
- # set mock and test values.
- mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
- test_chroot = '/usr/local/home/chromeos'
- test_build_id = 'lumpy-release/R36-5814.0.0'
+ # set mock and test values.
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+ test_chroot = "/usr/local/home/chromeos"
+ test_build_id = "lumpy-release/R36-5814.0.0"
- downloader = download_images.ImageDownloader(
- logger_to_use=MOCK_LOGGER, cmd_exec=mock_cmd_exec)
+ downloader = download_images.ImageDownloader(
+ logger_to_use=MOCK_LOGGER, cmd_exec=mock_cmd_exec
+ )
- # Set os.path.exists to always return False and run uncompress.
- mock_path_exists.return_value = False
- self.assertRaises(download_images.MissingImage, downloader.UncompressImage,
- test_chroot, test_build_id)
+ # Set os.path.exists to always return False and run uncompress.
+ mock_path_exists.return_value = False
+ self.assertRaises(
+ download_images.MissingImage,
+ downloader.UncompressImage,
+ test_chroot,
+ test_build_id,
+ )
- # Verify os.path.exists was called once, with correct arguments.
- self.assertEqual(mock_path_exists.call_count, 1)
- mock_path_exists.assert_called_with(
- '/usr/local/home/chromeos/chroot/tmp/lumpy-release/'
- 'R36-5814.0.0/chromiumos_test_image.bin')
+ # Verify os.path.exists was called once, with correct arguments.
+ self.assertEqual(mock_path_exists.call_count, 1)
+ mock_path_exists.assert_called_with(
+ "/usr/local/home/chromeos/chroot/tmp/lumpy-release/"
+ "R36-5814.0.0/chromiumos_test_image.bin"
+ )
- # Verify RunCommand was called twice with correct arguments.
- self.assertEqual(mock_cmd_exec.RunCommand.call_count, 2)
- # Call 1, should have 2 arguments
- self.assertEqual(len(mock_cmd_exec.RunCommand.call_args_list[0]), 2)
- actual_arg = mock_cmd_exec.RunCommand.call_args_list[0][0]
- expected_arg = (
- 'cd /usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0 ; '
- 'tar -Jxf chromiumos_test_image.tar.xz ',)
- self.assertEqual(expected_arg, actual_arg)
- # 2nd arg must be exception handler
- except_handler_string = 'RunCommandExceptionHandler.HandleException'
- self.assertTrue(
- except_handler_string in repr(mock_cmd_exec.RunCommand.call_args_list[0]
- [1]))
+ # Verify RunCommand was called twice with correct arguments.
+ self.assertEqual(mock_cmd_exec.RunCommand.call_count, 2)
+ # Call 1, should have 2 arguments
+ self.assertEqual(len(mock_cmd_exec.RunCommand.call_args_list[0]), 2)
+ actual_arg = mock_cmd_exec.RunCommand.call_args_list[0][0]
+ expected_arg = (
+ "cd /usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0 ; "
+ "tar -Jxf chromiumos_test_image.tar.xz ",
+ )
+ self.assertEqual(expected_arg, actual_arg)
+ # 2nd arg must be exception handler
+ except_handler_string = "RunCommandExceptionHandler.HandleException"
+ self.assertTrue(
+ except_handler_string
+ in repr(mock_cmd_exec.RunCommand.call_args_list[0][1])
+ )
- # Call 2, should have 2 arguments
- self.assertEqual(len(mock_cmd_exec.RunCommand.call_args_list[1]), 2)
- actual_arg = mock_cmd_exec.RunCommand.call_args_list[1][0]
- expected_arg = (
- 'cd /usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0 ; '
- 'rm -f chromiumos_test_image.bin ',)
- self.assertEqual(expected_arg, actual_arg)
- # 2nd arg must be empty
- self.assertTrue('{}' in repr(mock_cmd_exec.RunCommand.call_args_list[1][1]))
+ # Call 2, should have 2 arguments
+ self.assertEqual(len(mock_cmd_exec.RunCommand.call_args_list[1]), 2)
+ actual_arg = mock_cmd_exec.RunCommand.call_args_list[1][0]
+ expected_arg = (
+ "cd /usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0 ; "
+ "rm -f chromiumos_test_image.bin ",
+ )
+ self.assertEqual(expected_arg, actual_arg)
+ # 2nd arg must be empty
+ self.assertTrue(
+ "{}" in repr(mock_cmd_exec.RunCommand.call_args_list[1][1])
+ )
- # Set os.path.exists to always return True and run uncompress.
- mock_path_exists.reset_mock()
- mock_cmd_exec.reset_mock()
- mock_path_exists.return_value = True
- downloader.UncompressImage(test_chroot, test_build_id)
+ # Set os.path.exists to always return True and run uncompress.
+ mock_path_exists.reset_mock()
+ mock_cmd_exec.reset_mock()
+ mock_path_exists.return_value = True
+ downloader.UncompressImage(test_chroot, test_build_id)
- # Verify os.path.exists was called once, with correct arguments.
- self.assertEqual(mock_path_exists.call_count, 1)
- mock_path_exists.assert_called_with(
- '/usr/local/home/chromeos/chroot/tmp/lumpy-release/'
- 'R36-5814.0.0/chromiumos_test_image.bin')
+ # Verify os.path.exists was called once, with correct arguments.
+ self.assertEqual(mock_path_exists.call_count, 1)
+ mock_path_exists.assert_called_with(
+ "/usr/local/home/chromeos/chroot/tmp/lumpy-release/"
+ "R36-5814.0.0/chromiumos_test_image.bin"
+ )
- # Verify RunCommand was not called.
- self.assertEqual(mock_cmd_exec.RunCommand.call_count, 0)
+ # Verify RunCommand was not called.
+ self.assertEqual(mock_cmd_exec.RunCommand.call_count, 0)
- def test_run(self):
+ def test_run(self):
- # Set test arguments
- test_chroot = '/usr/local/home/chromeos'
- test_build_id = 'remote/lumpy/latest-dev'
- test_empty_autotest_path = ''
- test_empty_debug_path = ''
- test_autotest_path = '/tmp/autotest'
- test_debug_path = '/tmp/debug'
- download_debug = True
+ # Set test arguments
+ test_chroot = "/usr/local/home/chromeos"
+ test_build_id = "remote/lumpy/latest-dev"
+ test_empty_autotest_path = ""
+ test_empty_debug_path = ""
+ test_autotest_path = "/tmp/autotest"
+ test_debug_path = "/tmp/debug"
+ download_debug = True
- # Set values to test/check.
- self.called_download_image = False
- self.called_uncompress_image = False
- self.called_get_build_id = False
- self.called_download_autotest_files = False
- self.called_download_debug_file = False
+ # Set values to test/check.
+ self.called_download_image = False
+ self.called_uncompress_image = False
+ self.called_get_build_id = False
+ self.called_download_autotest_files = False
+ self.called_download_debug_file = False
- # Define fake stub functions for Run to call
- def FakeGetBuildID(unused_root, unused_xbuddy_label):
- self.called_get_build_id = True
- return 'lumpy-release/R36-5814.0.0'
+ # Define fake stub functions for Run to call
+ def FakeGetBuildID(unused_root, unused_xbuddy_label):
+ self.called_get_build_id = True
+ return "lumpy-release/R36-5814.0.0"
- def GoodDownloadImage(root, build_id, image_path):
- if root or build_id or image_path:
- pass
- self.called_download_image = True
- return 'chromiumos_test_image.bin'
+ def GoodDownloadImage(root, build_id, image_path):
+ if root or build_id or image_path:
+ pass
+ self.called_download_image = True
+ return "chromiumos_test_image.bin"
- def BadDownloadImage(root, build_id, image_path):
- if root or build_id or image_path:
- pass
- self.called_download_image = True
- raise download_images.MissingImage('Could not download image')
+ def BadDownloadImage(root, build_id, image_path):
+ if root or build_id or image_path:
+ pass
+ self.called_download_image = True
+ raise download_images.MissingImage("Could not download image")
- def FakeUncompressImage(root, build_id):
- if root or build_id:
- pass
- self.called_uncompress_image = True
- return 0
+ def FakeUncompressImage(root, build_id):
+ if root or build_id:
+ pass
+ self.called_uncompress_image = True
+ return 0
- def FakeDownloadAutotestFiles(root, build_id):
- if root or build_id:
- pass
- self.called_download_autotest_files = True
- return 'autotest'
+ def FakeDownloadAutotestFiles(root, build_id):
+ if root or build_id:
+ pass
+ self.called_download_autotest_files = True
+ return "autotest"
- def FakeDownloadDebugFile(root, build_id):
- if root or build_id:
- pass
- self.called_download_debug_file = True
- return 'debug'
+ def FakeDownloadDebugFile(root, build_id):
+ if root or build_id:
+ pass
+ self.called_download_debug_file = True
+ return "debug"
- # Initialize downloader
- downloader = download_images.ImageDownloader(logger_to_use=MOCK_LOGGER)
+ # Initialize downloader
+ downloader = download_images.ImageDownloader(logger_to_use=MOCK_LOGGER)
- # Set downloader to call fake stubs.
- downloader.GetBuildID = FakeGetBuildID
- downloader.UncompressImage = FakeUncompressImage
- downloader.DownloadImage = GoodDownloadImage
- downloader.DownloadAutotestFiles = FakeDownloadAutotestFiles
- downloader.DownloadDebugFile = FakeDownloadDebugFile
+ # Set downloader to call fake stubs.
+ downloader.GetBuildID = FakeGetBuildID
+ downloader.UncompressImage = FakeUncompressImage
+ downloader.DownloadImage = GoodDownloadImage
+ downloader.DownloadAutotestFiles = FakeDownloadAutotestFiles
+ downloader.DownloadDebugFile = FakeDownloadDebugFile
- # Call Run.
- image_path, autotest_path, debug_path = downloader.Run(
- test_chroot, test_build_id, test_empty_autotest_path,
- test_empty_debug_path, download_debug)
+ # Call Run.
+ image_path, autotest_path, debug_path = downloader.Run(
+ test_chroot,
+ test_build_id,
+ test_empty_autotest_path,
+ test_empty_debug_path,
+ download_debug,
+ )
- # Make sure it called both _DownloadImage and _UncompressImage
- self.assertTrue(self.called_download_image)
- self.assertTrue(self.called_uncompress_image)
- # Make sure it called DownloadAutotestFiles
- self.assertTrue(self.called_download_autotest_files)
- # Make sure it called DownloadDebugFile
- self.assertTrue(self.called_download_debug_file)
- # Make sure it returned an image and autotest path returned from this call
- self.assertTrue(image_path == 'chromiumos_test_image.bin')
- self.assertTrue(autotest_path == 'autotest')
- self.assertTrue(debug_path == 'debug')
+ # Make sure it called both _DownloadImage and _UncompressImage
+ self.assertTrue(self.called_download_image)
+ self.assertTrue(self.called_uncompress_image)
+ # Make sure it called DownloadAutotestFiles
+ self.assertTrue(self.called_download_autotest_files)
+ # Make sure it called DownloadDebugFile
+ self.assertTrue(self.called_download_debug_file)
+ # Make sure it returned an image and autotest path returned from this call
+ self.assertTrue(image_path == "chromiumos_test_image.bin")
+ self.assertTrue(autotest_path == "autotest")
+ self.assertTrue(debug_path == "debug")
- # Call Run with a non-empty autotest and debug path
- self.called_download_autotest_files = False
- self.called_download_debug_file = False
+ # Call Run with a non-empty autotest and debug path
+ self.called_download_autotest_files = False
+ self.called_download_debug_file = False
- image_path, autotest_path, debug_path = downloader.Run(
- test_chroot, test_build_id, test_autotest_path, test_debug_path,
- download_debug)
+ image_path, autotest_path, debug_path = downloader.Run(
+ test_chroot,
+ test_build_id,
+ test_autotest_path,
+ test_debug_path,
+ download_debug,
+ )
- # Verify that downloadAutotestFiles was not called
- self.assertFalse(self.called_download_autotest_files)
- # Make sure it returned the specified autotest path returned from this call
- self.assertTrue(autotest_path == test_autotest_path)
- # Make sure it returned the specified debug path returned from this call
- self.assertTrue(debug_path == test_debug_path)
+ # Verify that downloadAutotestFiles was not called
+ self.assertFalse(self.called_download_autotest_files)
+ # Make sure it returned the specified autotest path returned from this call
+ self.assertTrue(autotest_path == test_autotest_path)
+ # Make sure it returned the specified debug path returned from this call
+ self.assertTrue(debug_path == test_debug_path)
- # Reset values; Now use fake stub that simulates DownloadImage failing.
- self.called_download_image = False
- self.called_uncompress_image = False
- self.called_download_autotest_files = False
- self.called_download_debug_file = False
- downloader.DownloadImage = BadDownloadImage
+ # Reset values; Now use fake stub that simulates DownloadImage failing.
+ self.called_download_image = False
+ self.called_uncompress_image = False
+ self.called_download_autotest_files = False
+ self.called_download_debug_file = False
+ downloader.DownloadImage = BadDownloadImage
- # Call Run again.
- self.assertRaises(download_images.MissingImage, downloader.Run, test_chroot,
- test_autotest_path, test_debug_path, test_build_id,
- download_debug)
+ # Call Run again.
+ self.assertRaises(
+ download_images.MissingImage,
+ downloader.Run,
+ test_chroot,
+ test_autotest_path,
+ test_debug_path,
+ test_build_id,
+ download_debug,
+ )
- # Verify that UncompressImage and downloadAutotestFiles were not called,
- # since _DownloadImage "failed"
- self.assertTrue(self.called_download_image)
- self.assertFalse(self.called_uncompress_image)
- self.assertFalse(self.called_download_autotest_files)
- self.assertFalse(self.called_download_debug_file)
+ # Verify that UncompressImage and downloadAutotestFiles were not called,
+ # since _DownloadImage "failed"
+ self.assertTrue(self.called_download_image)
+ self.assertFalse(self.called_uncompress_image)
+ self.assertFalse(self.called_download_autotest_files)
+ self.assertFalse(self.called_download_debug_file)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/crosperf/experiment.py b/crosperf/experiment.py
index e919f6e..9973f7e 100644
--- a/crosperf/experiment.py
+++ b/crosperf/experiment.py
@@ -1,21 +1,18 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The experiment setting module."""
-from __future__ import print_function
import os
+from threading import Lock
import time
-from threading import Lock
-
+import benchmark_run
from cros_utils import logger
from cros_utils import misc
-
-import benchmark_run
from machine_manager import BadChecksum
from machine_manager import MachineManager
from machine_manager import MockMachineManager
@@ -23,208 +20,249 @@
class Experiment(object):
- """Class representing an Experiment to be run."""
+ """Class representing an Experiment to be run."""
- def __init__(self, name, remote, working_directory, chromeos_root,
- cache_conditions, labels, benchmarks, experiment_file, email_to,
- acquire_timeout, log_dir, log_level, share_cache,
- results_directory, compress_results, locks_directory, cwp_dso,
- ignore_min_max, crosfleet, dut_config, no_lock: bool):
- self.name = name
- self.working_directory = working_directory
- self.remote = remote
- self.chromeos_root = chromeos_root
- self.cache_conditions = cache_conditions
- self.experiment_file = experiment_file
- self.email_to = email_to
- if not results_directory:
- self.results_directory = os.path.join(self.working_directory,
- self.name + '_results')
- else:
- self.results_directory = misc.CanonicalizePath(results_directory)
- self.compress_results = compress_results
- self.log_dir = log_dir
- self.log_level = log_level
- self.labels = labels
- self.benchmarks = benchmarks
- self.num_complete = 0
- self.num_run_complete = 0
- self.share_cache = share_cache
- self.active_threads = []
- self.locks_dir = locks_directory
- self.locked_machines = []
- self.lock_mgr = None
- self.cwp_dso = cwp_dso
- self.ignore_min_max = ignore_min_max
- self.crosfleet = crosfleet
- self.no_lock = no_lock
- self.l = logger.GetLogger(log_dir)
+ def __init__(
+ self,
+ name,
+ remote,
+ working_directory,
+ chromeos_root,
+ cache_conditions,
+ labels,
+ benchmarks,
+ experiment_file,
+ email_to,
+ acquire_timeout,
+ log_dir,
+ log_level,
+ share_cache,
+ results_directory,
+ compress_results,
+ locks_directory,
+ cwp_dso,
+ ignore_min_max,
+ crosfleet,
+ dut_config,
+ no_lock: bool,
+ ):
+ self.name = name
+ self.working_directory = working_directory
+ self.remote = remote
+ self.chromeos_root = chromeos_root
+ self.cache_conditions = cache_conditions
+ self.experiment_file = experiment_file
+ self.email_to = email_to
+ if not results_directory:
+ self.results_directory = os.path.join(
+ self.working_directory, self.name + "_results"
+ )
+ else:
+ self.results_directory = misc.CanonicalizePath(results_directory)
+ self.compress_results = compress_results
+ self.log_dir = log_dir
+ self.log_level = log_level
+ self.labels = labels
+ self.benchmarks = benchmarks
+ self.num_complete = 0
+ self.num_run_complete = 0
+ self.share_cache = share_cache
+ self.active_threads = []
+ self.locks_dir = locks_directory
+ self.locked_machines = []
+ self.lock_mgr = None
+ self.cwp_dso = cwp_dso
+ self.ignore_min_max = ignore_min_max
+ self.crosfleet = crosfleet
+ self.no_lock = no_lock
+ self.l = logger.GetLogger(log_dir)
- if not self.benchmarks:
- raise RuntimeError('No benchmarks specified')
- if not self.labels:
- raise RuntimeError('No labels specified')
- if not remote and not self.crosfleet:
- raise RuntimeError('No remote hosts specified')
+ if not self.benchmarks:
+ raise RuntimeError("No benchmarks specified")
+ if not self.labels:
+ raise RuntimeError("No labels specified")
+ if not remote and not self.crosfleet:
+ raise RuntimeError("No remote hosts specified")
- # We need one chromeos_root to run the benchmarks in, but it doesn't
- # matter where it is, unless the ABIs are different.
- if not chromeos_root:
- for label in self.labels:
- if label.chromeos_root:
- chromeos_root = label.chromeos_root
- break
- if not chromeos_root:
- raise RuntimeError('No chromeos_root given and could not determine '
- 'one from the image path.')
+ # We need one chromeos_root to run the benchmarks in, but it doesn't
+ # matter where it is, unless the ABIs are different.
+ if not chromeos_root:
+ for label in self.labels:
+ if label.chromeos_root:
+ chromeos_root = label.chromeos_root
+ break
+ if not chromeos_root:
+ raise RuntimeError(
+ "No chromeos_root given and could not determine "
+ "one from the image path."
+ )
- machine_manager_fn = MachineManager
- if test_flag.GetTestMode():
- machine_manager_fn = MockMachineManager
- self.machine_manager = machine_manager_fn(chromeos_root, acquire_timeout,
- log_level, locks_directory)
- self.l = logger.GetLogger(log_dir)
+ machine_manager_fn = MachineManager
+ if test_flag.GetTestMode():
+ machine_manager_fn = MockMachineManager
+ self.machine_manager = machine_manager_fn(
+ chromeos_root, acquire_timeout, log_level, locks_directory
+ )
+ self.l = logger.GetLogger(log_dir)
- for machine in self.remote:
- # machine_manager.AddMachine only adds reachable machines.
- self.machine_manager.AddMachine(machine)
- # Now machine_manager._all_machines contains a list of reachable
- # machines. This is a subset of self.remote. We make both lists the same.
- self.remote = [m.name for m in self.machine_manager.GetAllMachines()]
- if not self.remote:
- raise RuntimeError('No machine available for running experiment.')
+ for machine in self.remote:
+ # machine_manager.AddMachine only adds reachable machines.
+ self.machine_manager.AddMachine(machine)
+ # Now machine_manager._all_machines contains a list of reachable
+ # machines. This is a subset of self.remote. We make both lists the same.
+ self.remote = [m.name for m in self.machine_manager.GetAllMachines()]
+ if not self.remote:
+ raise RuntimeError("No machine available for running experiment.")
- # Initialize checksums for all machines, ignore errors at this time.
- # The checksum will be double checked, and image will be flashed after
- # duts are locked/leased.
- self.SetCheckSums()
+ # Initialize checksums for all machines, ignore errors at this time.
+ # The checksum will be double checked, and image will be flashed after
+ # duts are locked/leased.
+ self.SetCheckSums()
- self.start_time = None
- self.benchmark_runs = self._GenerateBenchmarkRuns(dut_config)
+ self.start_time = None
+ self.benchmark_runs = self._GenerateBenchmarkRuns(dut_config)
- self._schedv2 = None
- self._internal_counter_lock = Lock()
+ self._schedv2 = None
+ self._internal_counter_lock = Lock()
- def set_schedv2(self, schedv2):
- self._schedv2 = schedv2
+ def set_schedv2(self, schedv2):
+ self._schedv2 = schedv2
- def schedv2(self):
- return self._schedv2
+ def schedv2(self):
+ return self._schedv2
- def _GenerateBenchmarkRuns(self, dut_config):
- """Generate benchmark runs from labels and benchmark defintions."""
- benchmark_runs = []
- for label in self.labels:
- for benchmark in self.benchmarks:
- for iteration in range(1, benchmark.iterations + 1):
+ def _GenerateBenchmarkRuns(self, dut_config):
+ """Generate benchmark runs from labels and benchmark defintions."""
+ benchmark_runs = []
+ for label in self.labels:
+ for benchmark in self.benchmarks:
+ for iteration in range(1, benchmark.iterations + 1):
- benchmark_run_name = '%s: %s (%s)' % (label.name, benchmark.name,
- iteration)
- full_name = '%s_%s_%s' % (label.name, benchmark.name, iteration)
- logger_to_use = logger.Logger(self.log_dir, 'run.%s' % (full_name),
- True)
- benchmark_runs.append(
- benchmark_run.BenchmarkRun(benchmark_run_name, benchmark, label,
- iteration, self.cache_conditions,
- self.machine_manager, logger_to_use,
- self.log_level, self.share_cache,
- dut_config))
+ benchmark_run_name = "%s: %s (%s)" % (
+ label.name,
+ benchmark.name,
+ iteration,
+ )
+ full_name = "%s_%s_%s" % (
+ label.name,
+ benchmark.name,
+ iteration,
+ )
+ logger_to_use = logger.Logger(
+ self.log_dir, "run.%s" % (full_name), True
+ )
+ benchmark_runs.append(
+ benchmark_run.BenchmarkRun(
+ benchmark_run_name,
+ benchmark,
+ label,
+ iteration,
+ self.cache_conditions,
+ self.machine_manager,
+ logger_to_use,
+ self.log_level,
+ self.share_cache,
+ dut_config,
+ )
+ )
- return benchmark_runs
+ return benchmark_runs
- def SetCheckSums(self, forceSameImage=False):
- for label in self.labels:
- # We filter out label remotes that are not reachable (not in
- # self.remote). So each label.remote is a sublist of experiment.remote.
- label.remote = [r for r in label.remote if r in self.remote]
- try:
- self.machine_manager.ComputeCommonCheckSum(label)
- except BadChecksum:
- # Force same image on all machines, then we do checksum again. No
- # bailout if checksums still do not match.
- # TODO (zhizhouy): Need to figure out how flashing image will influence
- # the new checksum.
- if forceSameImage:
- self.machine_manager.ForceSameImageToAllMachines(label)
- self.machine_manager.ComputeCommonCheckSum(label)
+ def SetCheckSums(self, forceSameImage=False):
+ for label in self.labels:
+ # We filter out label remotes that are not reachable (not in
+ # self.remote). So each label.remote is a sublist of experiment.remote.
+ label.remote = [r for r in label.remote if r in self.remote]
+ try:
+ self.machine_manager.ComputeCommonCheckSum(label)
+ except BadChecksum:
+ # Force same image on all machines, then we do checksum again. No
+ # bailout if checksums still do not match.
+ # TODO (zhizhouy): Need to figure out how flashing image will influence
+ # the new checksum.
+ if forceSameImage:
+ self.machine_manager.ForceSameImageToAllMachines(label)
+ self.machine_manager.ComputeCommonCheckSum(label)
- self.machine_manager.ComputeCommonCheckSumString(label)
+ self.machine_manager.ComputeCommonCheckSumString(label)
- def Build(self):
- pass
+ def Build(self):
+ pass
- def Terminate(self):
- if self._schedv2 is not None:
- self._schedv2.terminate()
- else:
- for t in self.benchmark_runs:
- if t.isAlive():
- self.l.LogError("Terminating run: '%s'." % t.name)
- t.Terminate()
+ def Terminate(self):
+ if self._schedv2 is not None:
+ self._schedv2.terminate()
+ else:
+ for t in self.benchmark_runs:
+ if t.isAlive():
+ self.l.LogError("Terminating run: '%s'." % t.name)
+ t.Terminate()
- def IsComplete(self):
- if self._schedv2:
- return self._schedv2.is_complete()
- if self.active_threads:
- for t in self.active_threads:
- if t.isAlive():
- t.join(0)
- if not t.isAlive():
- self.num_complete += 1
- if not t.cache_hit:
- self.num_run_complete += 1
- self.active_threads.remove(t)
- return False
- return True
+ def IsComplete(self):
+ if self._schedv2:
+ return self._schedv2.is_complete()
+ if self.active_threads:
+ for t in self.active_threads:
+ if t.isAlive():
+ t.join(0)
+ if not t.isAlive():
+ self.num_complete += 1
+ if not t.cache_hit:
+ self.num_run_complete += 1
+ self.active_threads.remove(t)
+ return False
+ return True
- def BenchmarkRunFinished(self, br):
- """Update internal counters after br finishes.
+ def BenchmarkRunFinished(self, br):
+ """Update internal counters after br finishes.
- Note this is only used by schedv2 and is called by multiple threads.
- Never throw any exception here.
- """
+ Note this is only used by schedv2 and is called by multiple threads.
+ Never throw any exception here.
+ """
- assert self._schedv2 is not None
- with self._internal_counter_lock:
- self.num_complete += 1
- if not br.cache_hit:
- self.num_run_complete += 1
+ assert self._schedv2 is not None
+ with self._internal_counter_lock:
+ self.num_complete += 1
+ if not br.cache_hit:
+ self.num_run_complete += 1
- def Run(self):
- self.start_time = time.time()
- if self._schedv2 is not None:
- self._schedv2.run_sched()
- else:
- self.active_threads = []
- for run in self.benchmark_runs:
- # Set threads to daemon so program exits when ctrl-c is pressed.
- run.daemon = True
- run.start()
- self.active_threads.append(run)
+ def Run(self):
+ self.start_time = time.time()
+ if self._schedv2 is not None:
+ self._schedv2.run_sched()
+ else:
+ self.active_threads = []
+ for run in self.benchmark_runs:
+ # Set threads to daemon so program exits when ctrl-c is pressed.
+ run.daemon = True
+ run.start()
+ self.active_threads.append(run)
- def SetCacheConditions(self, cache_conditions):
- for run in self.benchmark_runs:
- run.SetCacheConditions(cache_conditions)
+ def SetCacheConditions(self, cache_conditions):
+ for run in self.benchmark_runs:
+ run.SetCacheConditions(cache_conditions)
- def Cleanup(self):
- """Make sure all machines are unlocked."""
- if self.locks_dir:
- # We are using the file locks mechanism, so call machine_manager.Cleanup
- # to unlock everything.
- self.machine_manager.Cleanup()
+ def Cleanup(self):
+ """Make sure all machines are unlocked."""
+ if self.locks_dir:
+ # We are using the file locks mechanism, so call machine_manager.Cleanup
+ # to unlock everything.
+ self.machine_manager.Cleanup()
- if test_flag.GetTestMode() or not self.locked_machines:
- return
+ if test_flag.GetTestMode() or not self.locked_machines:
+ return
- # If we locked any machines earlier, make sure we unlock them now.
- if self.lock_mgr:
- machine_states = self.lock_mgr.GetMachineStates('unlock')
- self.lock_mgr.CheckMachineLocks(machine_states, 'unlock')
- unlocked_machines = self.lock_mgr.UpdateMachines(False)
- failed_machines = [
- m for m in self.locked_machines if m not in unlocked_machines
- ]
- if failed_machines:
- raise RuntimeError('These machines are not unlocked correctly: %s' %
- failed_machines)
- self.lock_mgr = None
+ # If we locked any machines earlier, make sure we unlock them now.
+ if self.lock_mgr:
+ machine_states = self.lock_mgr.GetMachineStates("unlock")
+ self.lock_mgr.CheckMachineLocks(machine_states, "unlock")
+ unlocked_machines = self.lock_mgr.UpdateMachines(False)
+ failed_machines = [
+ m for m in self.locked_machines if m not in unlocked_machines
+ ]
+ if failed_machines:
+ raise RuntimeError(
+ "These machines are not unlocked correctly: %s"
+ % failed_machines
+ )
+ self.lock_mgr = None
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index a9594a2..c71981a 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -1,81 +1,87 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module to generate experiments."""
-from __future__ import print_function
+
import os
import re
import socket
import sys
from benchmark import Benchmark
-import config
-from cros_utils import logger
from cros_utils import command_executer
+from cros_utils import logger
from experiment import Experiment
+import file_lock_machine
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag
-import file_lock_machine
+
+import config
+
# Users may want to run Telemetry tests either individually, or in
# specified sets. Here we define sets of tests that users may want
# to run together.
telemetry_perfv2_tests = [
- 'kraken',
- 'octane',
+ "kraken",
+ "octane",
]
telemetry_pagecycler_tests = [
- 'page_cycler_v2.intl_ar_fa_he',
- 'page_cycler_v2.intl_es_fr_pt-BR',
- 'page_cycler_v2.intl_hi_ru',
- 'page_cycler_v2.intl_ja_zh',
- 'page_cycler_v2.intl_ko_th_vi',
- 'page_cycler_v2.typical_25',
+ "page_cycler_v2.intl_ar_fa_he",
+ "page_cycler_v2.intl_es_fr_pt-BR",
+ "page_cycler_v2.intl_hi_ru",
+ "page_cycler_v2.intl_ja_zh",
+ "page_cycler_v2.intl_ko_th_vi",
+ "page_cycler_v2.typical_25",
]
telemetry_toolchain_old_perf_tests = [
- 'page_cycler_v2.intl_es_fr_pt-BR',
- 'page_cycler_v2.intl_hi_ru',
- 'page_cycler_v2.intl_ja_zh',
- 'page_cycler_v2.intl_ko_th_vi',
- 'page_cycler_v2.netsim.top_10',
- 'page_cycler_v2.typical_25',
- 'spaceport',
- 'tab_switching.top_10',
+ "page_cycler_v2.intl_es_fr_pt-BR",
+ "page_cycler_v2.intl_hi_ru",
+ "page_cycler_v2.intl_ja_zh",
+ "page_cycler_v2.intl_ko_th_vi",
+ "page_cycler_v2.netsim.top_10",
+ "page_cycler_v2.typical_25",
+ "spaceport",
+ "tab_switching.top_10",
]
telemetry_toolchain_perf_tests = [
- 'octane', 'kraken', 'speedometer', 'speedometer2', 'jetstream2'
+ "octane",
+ "kraken",
+ "speedometer",
+ "speedometer2",
+ "jetstream2",
]
graphics_perf_tests = [
- 'graphics_GLBench',
- 'graphics_GLMark2',
- 'graphics_SanAngeles',
- 'graphics_WebGLAquarium',
- 'graphics_WebGLPerformance',
+ "graphics_GLBench",
+ "graphics_GLMark2",
+ "graphics_SanAngeles",
+ "graphics_WebGLAquarium",
+ "graphics_WebGLPerformance",
]
# TODO: disable rendering.desktop by default as the benchmark is
# currently in a bad state
# page_cycler_v2.typical_25 is deprecated and the recommend replacement is
# loading.desktop@@typical (crbug.com/916340)
telemetry_crosbolt_perf_tests = [
- 'octane',
- 'kraken',
- 'speedometer2',
- 'jetstream',
- 'loading.desktop',
+ "octane",
+ "kraken",
+ "speedometer2",
+ "jetstream",
+ "loading.desktop",
# 'rendering.desktop',
]
crosbolt_perf_tests = [
- 'graphics_WebGLAquarium',
- 'tast.video.PlaybackPerfVP91080P30FPS',
+ "graphics_WebGLAquarium",
+ "tast.video.PlaybackPerfVP91080P30FPS",
]
# 'cheets_AntutuTest',
@@ -85,424 +91,582 @@
# ]
dso_list = [
- 'all',
- 'chrome',
- 'kallsyms',
+ "all",
+ "chrome",
+ "kallsyms",
]
class ExperimentFactory(object):
- """Factory class for building an Experiment, given an ExperimentFile as input.
+ """Factory class for building an Experiment, given an ExperimentFile as input.
- This factory is currently hardcoded to produce an experiment for running
- ChromeOS benchmarks, but the idea is that in the future, other types
- of experiments could be produced.
- """
+ This factory is currently hardcoded to produce an experiment for running
+ ChromeOS benchmarks, but the idea is that in the future, other types
+ of experiments could be produced.
+ """
- def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
- iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local, cwp_dso,
- weight):
- """Add all the tests in a set to the benchmarks list."""
- for test_name in benchmark_list:
- telemetry_benchmark = Benchmark(test_name, test_name, test_args,
- iterations, rm_chroot_tmp, perf_args,
- suite, show_all_results, retries,
- run_local, cwp_dso, weight)
- benchmarks.append(telemetry_benchmark)
+ def AppendBenchmarkSet(
+ self,
+ benchmarks,
+ benchmark_list,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ ):
+ """Add all the tests in a set to the benchmarks list."""
+ for test_name in benchmark_list:
+ telemetry_benchmark = Benchmark(
+ test_name,
+ test_name,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ )
+ benchmarks.append(telemetry_benchmark)
- def GetExperiment(self, experiment_file, working_directory, log_dir):
- """Construct an experiment from an experiment file."""
- global_settings = experiment_file.GetGlobalSettings()
- experiment_name = global_settings.GetField('name')
- board = global_settings.GetField('board')
- chromeos_root = global_settings.GetField('chromeos_root')
- log_level = global_settings.GetField('logging_level')
- if log_level not in ('quiet', 'average', 'verbose'):
- log_level = 'verbose'
+ def GetExperiment(self, experiment_file, working_directory, log_dir):
+ """Construct an experiment from an experiment file."""
+ global_settings = experiment_file.GetGlobalSettings()
+ experiment_name = global_settings.GetField("name")
+ board = global_settings.GetField("board")
+ chromeos_root = global_settings.GetField("chromeos_root")
+ log_level = global_settings.GetField("logging_level")
+ if log_level not in ("quiet", "average", "verbose"):
+ log_level = "verbose"
- crosfleet = global_settings.GetField('crosfleet')
- no_lock = bool(global_settings.GetField('no_lock'))
- # Check whether crosfleet tool is installed correctly for crosfleet mode.
- if crosfleet and not self.CheckCrosfleetTool(chromeos_root, log_level):
- sys.exit(0)
+ crosfleet = global_settings.GetField("crosfleet")
+ no_lock = bool(global_settings.GetField("no_lock"))
+ # Check whether crosfleet tool is installed correctly for crosfleet mode.
+ if crosfleet and not self.CheckCrosfleetTool(chromeos_root, log_level):
+ sys.exit(0)
- remote = global_settings.GetField('remote')
- # This is used to remove the ",' from the remote if user
- # add them to the remote string.
- new_remote = []
- if remote:
- for i in remote:
- c = re.sub('["\']', '', i)
- new_remote.append(c)
- remote = new_remote
- rm_chroot_tmp = global_settings.GetField('rm_chroot_tmp')
- perf_args = global_settings.GetField('perf_args')
- download_debug = global_settings.GetField('download_debug')
- # Do not download debug symbols when perf_args is not specified.
- if not perf_args and download_debug:
- download_debug = False
- acquire_timeout = global_settings.GetField('acquire_timeout')
- cache_dir = global_settings.GetField('cache_dir')
- cache_only = global_settings.GetField('cache_only')
- config.AddConfig('no_email', global_settings.GetField('no_email'))
- share_cache = global_settings.GetField('share_cache')
- results_dir = global_settings.GetField('results_dir')
- compress_results = global_settings.GetField('compress_results')
- # Warn user that option use_file_locks is deprecated.
- use_file_locks = global_settings.GetField('use_file_locks')
- if use_file_locks:
- l = logger.GetLogger()
- l.LogWarning('Option use_file_locks is deprecated, please remove it '
- 'from your experiment settings.')
- locks_dir = global_settings.GetField('locks_dir')
- # If not specified, set the locks dir to the default locks dir in
- # file_lock_machine.
- if not locks_dir:
- locks_dir = file_lock_machine.Machine.LOCKS_DIR
- if not os.path.exists(locks_dir):
- raise RuntimeError('Cannot access default lock directory. '
- 'Please run prodaccess or specify a local directory')
- chrome_src = global_settings.GetField('chrome_src')
- show_all_results = global_settings.GetField('show_all_results')
- cwp_dso = global_settings.GetField('cwp_dso')
- if cwp_dso and not cwp_dso in dso_list:
- raise RuntimeError('The DSO specified is not supported')
- ignore_min_max = global_settings.GetField('ignore_min_max')
- dut_config = {
- 'enable_aslr': global_settings.GetField('enable_aslr'),
- 'intel_pstate': global_settings.GetField('intel_pstate'),
- 'cooldown_time': global_settings.GetField('cooldown_time'),
- 'cooldown_temp': global_settings.GetField('cooldown_temp'),
- 'governor': global_settings.GetField('governor'),
- 'cpu_usage': global_settings.GetField('cpu_usage'),
- 'cpu_freq_pct': global_settings.GetField('cpu_freq_pct'),
- 'turbostat': global_settings.GetField('turbostat'),
- 'top_interval': global_settings.GetField('top_interval'),
- }
+ remote = global_settings.GetField("remote")
+ # This is used to remove the ",' from the remote if user
+ # add them to the remote string.
+ new_remote = []
+ if remote:
+ for i in remote:
+ c = re.sub("[\"']", "", i)
+ new_remote.append(c)
+ remote = new_remote
+ rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
+ perf_args = global_settings.GetField("perf_args")
+ download_debug = global_settings.GetField("download_debug")
+ # Do not download debug symbols when perf_args is not specified.
+ if not perf_args and download_debug:
+ download_debug = False
+ acquire_timeout = global_settings.GetField("acquire_timeout")
+ cache_dir = global_settings.GetField("cache_dir")
+ cache_only = global_settings.GetField("cache_only")
+ config.AddConfig("no_email", global_settings.GetField("no_email"))
+ share_cache = global_settings.GetField("share_cache")
+ results_dir = global_settings.GetField("results_dir")
+ compress_results = global_settings.GetField("compress_results")
+ # Warn user that option use_file_locks is deprecated.
+ use_file_locks = global_settings.GetField("use_file_locks")
+ if use_file_locks:
+ l = logger.GetLogger()
+ l.LogWarning(
+ "Option use_file_locks is deprecated, please remove it "
+ "from your experiment settings."
+ )
+ locks_dir = global_settings.GetField("locks_dir")
+ # If not specified, set the locks dir to the default locks dir in
+ # file_lock_machine.
+ if not locks_dir:
+ locks_dir = file_lock_machine.Machine.LOCKS_DIR
+ if not os.path.exists(locks_dir):
+ raise RuntimeError(
+ "Cannot access default lock directory. "
+ "Please run prodaccess or specify a local directory"
+ )
+ chrome_src = global_settings.GetField("chrome_src")
+ show_all_results = global_settings.GetField("show_all_results")
+ cwp_dso = global_settings.GetField("cwp_dso")
+ if cwp_dso and not cwp_dso in dso_list:
+ raise RuntimeError("The DSO specified is not supported")
+ ignore_min_max = global_settings.GetField("ignore_min_max")
+ dut_config = {
+ "enable_aslr": global_settings.GetField("enable_aslr"),
+ "intel_pstate": global_settings.GetField("intel_pstate"),
+ "cooldown_time": global_settings.GetField("cooldown_time"),
+ "cooldown_temp": global_settings.GetField("cooldown_temp"),
+ "governor": global_settings.GetField("governor"),
+ "cpu_usage": global_settings.GetField("cpu_usage"),
+ "cpu_freq_pct": global_settings.GetField("cpu_freq_pct"),
+ "turbostat": global_settings.GetField("turbostat"),
+ "top_interval": global_settings.GetField("top_interval"),
+ }
- # Default cache hit conditions. The image checksum in the cache and the
- # computed checksum of the image must match. Also a cache file must exist.
- cache_conditions = [
- CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
- ]
- if global_settings.GetField('rerun_if_failed'):
- cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
- if global_settings.GetField('rerun'):
- cache_conditions.append(CacheConditions.FALSE)
- if global_settings.GetField('same_machine'):
- cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
- if global_settings.GetField('same_specs'):
- cache_conditions.append(CacheConditions.MACHINES_MATCH)
+ # Default cache hit conditions. The image checksum in the cache and the
+ # computed checksum of the image must match. Also a cache file must exist.
+ cache_conditions = [
+ CacheConditions.CACHE_FILE_EXISTS,
+ CacheConditions.CHECKSUMS_MATCH,
+ ]
+ if global_settings.GetField("rerun_if_failed"):
+ cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
+ if global_settings.GetField("rerun"):
+ cache_conditions.append(CacheConditions.FALSE)
+ if global_settings.GetField("same_machine"):
+ cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
+ if global_settings.GetField("same_specs"):
+ cache_conditions.append(CacheConditions.MACHINES_MATCH)
- # Construct benchmarks.
- # Some fields are common with global settings. The values are
- # inherited and/or merged with the global settings values.
- benchmarks = []
- all_benchmark_settings = experiment_file.GetSettings('benchmark')
+ # Construct benchmarks.
+ # Some fields are common with global settings. The values are
+ # inherited and/or merged with the global settings values.
+ benchmarks = []
+ all_benchmark_settings = experiment_file.GetSettings("benchmark")
- # Check if there is duplicated benchmark name
- benchmark_names = {}
- # Check if in cwp_dso mode, all benchmarks should have same iterations
- cwp_dso_iterations = 0
+ # Check if there is duplicated benchmark name
+ benchmark_names = {}
+ # Check if in cwp_dso mode, all benchmarks should have same iterations
+ cwp_dso_iterations = 0
- for benchmark_settings in all_benchmark_settings:
- benchmark_name = benchmark_settings.name
- test_name = benchmark_settings.GetField('test_name')
- if not test_name:
- test_name = benchmark_name
- test_args = benchmark_settings.GetField('test_args')
+ for benchmark_settings in all_benchmark_settings:
+ benchmark_name = benchmark_settings.name
+ test_name = benchmark_settings.GetField("test_name")
+ if not test_name:
+ test_name = benchmark_name
+ test_args = benchmark_settings.GetField("test_args")
- # Rename benchmark name if 'story-filter' or 'story-tag-filter' specified
- # in test_args. Make sure these two tags only appear once.
- story_count = 0
- for arg in test_args.split():
- if '--story-filter=' in arg or '--story-tag-filter=' in arg:
- story_count += 1
- if story_count > 1:
- raise RuntimeError('Only one story or story-tag filter allowed in '
- 'a single benchmark run')
- # Rename benchmark name with an extension of 'story'-option
- benchmark_name = '%s@@%s' % (benchmark_name, arg.split('=')[-1])
+ # Rename benchmark name if 'story-filter' or 'story-tag-filter' specified
+ # in test_args. Make sure these two tags only appear once.
+ story_count = 0
+ for arg in test_args.split():
+ if "--story-filter=" in arg or "--story-tag-filter=" in arg:
+ story_count += 1
+ if story_count > 1:
+ raise RuntimeError(
+ "Only one story or story-tag filter allowed in "
+ "a single benchmark run"
+ )
+ # Rename benchmark name with an extension of 'story'-option
+ benchmark_name = "%s@@%s" % (
+ benchmark_name,
+ arg.split("=")[-1],
+ )
- # Check for duplicated benchmark name after renaming
- if not benchmark_name in benchmark_names:
- benchmark_names[benchmark_name] = True
- else:
- raise SyntaxError("Duplicate benchmark name: '%s'." % benchmark_name)
-
- iterations = benchmark_settings.GetField('iterations')
- if cwp_dso:
- if cwp_dso_iterations not in (0, iterations):
- raise RuntimeError('Iterations of each benchmark run are not the '
- 'same')
- cwp_dso_iterations = iterations
-
- suite = benchmark_settings.GetField('suite')
- retries = benchmark_settings.GetField('retries')
- run_local = benchmark_settings.GetField('run_local')
- weight = benchmark_settings.GetField('weight')
- if weight:
- if not cwp_dso:
- raise RuntimeError('Weight can only be set when DSO specified')
- if suite != 'telemetry_Crosperf':
- raise RuntimeError('CWP approximation weight only works with '
- 'telemetry_Crosperf suite')
- if run_local:
- raise RuntimeError('run_local must be set to False to use CWP '
- 'approximation')
- if weight < 0:
- raise RuntimeError('Weight should be a float >=0')
- elif cwp_dso:
- raise RuntimeError('With DSO specified, each benchmark should have a '
- 'weight')
-
- if suite == 'telemetry_Crosperf':
- if test_name == 'all_perfv2':
- self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
- test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results, retries,
- run_local, cwp_dso, weight)
- elif test_name == 'all_pagecyclers':
- self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
- test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results, retries,
- run_local, cwp_dso, weight)
- elif test_name == 'all_crosbolt_perf':
- self.AppendBenchmarkSet(benchmarks, telemetry_crosbolt_perf_tests,
- test_args, iterations, rm_chroot_tmp,
- perf_args, 'telemetry_Crosperf',
- show_all_results, retries, run_local,
- cwp_dso, weight)
- self.AppendBenchmarkSet(benchmarks,
- crosbolt_perf_tests,
- '',
- iterations,
- rm_chroot_tmp,
- perf_args,
- '',
- show_all_results,
- retries,
- run_local=False,
- cwp_dso=cwp_dso,
- weight=weight)
- elif test_name == 'all_toolchain_perf':
- self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
- test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results, retries,
- run_local, cwp_dso, weight)
- # Add non-telemetry toolchain-perf benchmarks:
-
- # Tast test platform.ReportDiskUsage for image size.
- benchmarks.append(
- Benchmark(
- 'platform.ReportDiskUsage',
- 'platform.ReportDiskUsage',
- '',
- 1, # This is not a performance benchmark, only run once.
- rm_chroot_tmp,
- '',
- 'tast', # Specify the suite to be 'tast'
- show_all_results,
- retries))
-
- # TODO: crbug.com/1057755 Do not enable graphics_WebGLAquarium until
- # it gets fixed.
- #
- # benchmarks.append(
- # Benchmark(
- # 'graphics_WebGLAquarium',
- # 'graphics_WebGLAquarium',
- # '',
- # iterations,
- # rm_chroot_tmp,
- # perf_args,
- # 'crosperf_Wrapper', # Use client wrapper in Autotest
- # show_all_results,
- # retries,
- # run_local=False,
- # cwp_dso=cwp_dso,
- # weight=weight))
- elif test_name == 'all_toolchain_perf_old':
- self.AppendBenchmarkSet(benchmarks,
- telemetry_toolchain_old_perf_tests,
- test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results, retries,
- run_local, cwp_dso, weight)
- else:
- benchmark = Benchmark(benchmark_name, test_name, test_args,
- iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local, cwp_dso,
- weight)
- benchmarks.append(benchmark)
- else:
- if test_name == 'all_graphics_perf':
- self.AppendBenchmarkSet(benchmarks,
- graphics_perf_tests,
- '',
- iterations,
- rm_chroot_tmp,
- perf_args,
- '',
- show_all_results,
- retries,
- run_local=False,
- cwp_dso=cwp_dso,
- weight=weight)
- else:
- # Add the single benchmark.
- benchmark = Benchmark(benchmark_name,
- test_name,
- test_args,
- iterations,
- rm_chroot_tmp,
- perf_args,
- suite,
- show_all_results,
- retries,
- run_local=False,
- cwp_dso=cwp_dso,
- weight=weight)
- benchmarks.append(benchmark)
-
- if not benchmarks:
- raise RuntimeError('No benchmarks specified')
-
- # Construct labels.
- # Some fields are common with global settings. The values are
- # inherited and/or merged with the global settings values.
- labels = []
- all_label_settings = experiment_file.GetSettings('label')
- all_remote = list(remote)
- for label_settings in all_label_settings:
- label_name = label_settings.name
- image = label_settings.GetField('chromeos_image')
- build = label_settings.GetField('build')
- autotest_path = label_settings.GetField('autotest_path')
- debug_path = label_settings.GetField('debug_path')
- chromeos_root = label_settings.GetField('chromeos_root')
- my_remote = label_settings.GetField('remote')
- compiler = label_settings.GetField('compiler')
- new_remote = []
- if my_remote:
- for i in my_remote:
- c = re.sub('["\']', '', i)
- new_remote.append(c)
- my_remote = new_remote
-
- if image:
- if crosfleet:
- raise RuntimeError(
- 'In crosfleet mode, local image should not be used.')
- if build:
- raise RuntimeError('Image path and build are provided at the same '
- 'time, please use only one of them.')
- else:
- if not build:
- raise RuntimeError("Can not have empty 'build' field!")
- image, autotest_path, debug_path = label_settings.GetXbuddyPath(
- build, autotest_path, debug_path, board, chromeos_root, log_level,
- download_debug)
-
- cache_dir = label_settings.GetField('cache_dir')
- chrome_src = label_settings.GetField('chrome_src')
-
- # TODO(yunlian): We should consolidate code in machine_manager.py
- # to derermine whether we are running from within google or not
- if ('corp.google.com' in socket.gethostname() and not my_remote
- and not crosfleet):
- my_remote = self.GetDefaultRemotes(board)
- if global_settings.GetField('same_machine') and len(my_remote) > 1:
- raise RuntimeError('Only one remote is allowed when same_machine '
- 'is turned on')
- all_remote += my_remote
- image_args = label_settings.GetField('image_args')
- if test_flag.GetTestMode():
- # pylint: disable=too-many-function-args
- label = MockLabel(label_name, build, image, autotest_path, debug_path,
- chromeos_root, board, my_remote, image_args,
- cache_dir, cache_only, log_level, compiler,
- crosfleet, chrome_src)
- else:
- label = Label(label_name, build, image, autotest_path, debug_path,
- chromeos_root, board, my_remote, image_args, cache_dir,
- cache_only, log_level, compiler, crosfleet, chrome_src)
- labels.append(label)
-
- if not labels:
- raise RuntimeError('No labels specified')
-
- email = global_settings.GetField('email')
- all_remote += list(set(my_remote))
- all_remote = list(set(all_remote))
- if crosfleet:
- for remote in all_remote:
- self.CheckRemotesInCrosfleet(remote)
- experiment = Experiment(experiment_name,
- all_remote,
- working_directory,
- chromeos_root,
- cache_conditions,
- labels,
- benchmarks,
- experiment_file.Canonicalize(),
- email,
- acquire_timeout,
- log_dir,
- log_level,
- share_cache,
- results_dir,
- compress_results,
- locks_dir,
- cwp_dso,
- ignore_min_max,
- crosfleet,
- dut_config,
- no_lock=no_lock)
-
- return experiment
-
- def GetDefaultRemotes(self, board):
- default_remotes_file = os.path.join(os.path.dirname(__file__),
- 'default_remotes')
- try:
- with open(default_remotes_file) as f:
- for line in f:
- key, v = line.split(':')
- if key.strip() == board:
- remotes = v.strip().split()
- if remotes:
- return remotes
+ # Check for duplicated benchmark name after renaming
+ if not benchmark_name in benchmark_names:
+ benchmark_names[benchmark_name] = True
else:
- raise RuntimeError('There is no remote for {0}'.format(board))
- except IOError:
- # TODO: rethrow instead of throwing different exception.
- raise RuntimeError(
- 'IOError while reading file {0}'.format(default_remotes_file))
- else:
- raise RuntimeError('There is no remote for {0}'.format(board))
+ raise SyntaxError(
+ "Duplicate benchmark name: '%s'." % benchmark_name
+ )
- def CheckRemotesInCrosfleet(self, remote):
- # TODO: (AI:zhizhouy) need to check whether a remote is a local or lab
- # machine. If not lab machine, raise an error.
- pass
+ iterations = benchmark_settings.GetField("iterations")
+ if cwp_dso:
+ if cwp_dso_iterations not in (0, iterations):
+ raise RuntimeError(
+ "Iterations of each benchmark run are not the " "same"
+ )
+ cwp_dso_iterations = iterations
- def CheckCrosfleetTool(self, chromeos_root, log_level):
- CROSFLEET_PATH = 'crosfleet'
- if os.path.exists(CROSFLEET_PATH):
- return True
- l = logger.GetLogger()
- l.LogOutput('Crosfleet tool not installed, trying to install it.')
- ce = command_executer.GetCommandExecuter(l, log_level=log_level)
- setup_lab_tools = os.path.join(chromeos_root, 'chromeos-admin',
- 'lab-tools', 'setup_lab_tools')
- cmd = '%s' % setup_lab_tools
- status = ce.RunCommand(cmd)
- if status != 0:
- raise RuntimeError(
- 'Crosfleet tool not installed correctly, please try to '
- 'manually install it from %s' % setup_lab_tools)
- l.LogOutput('Crosfleet is installed at %s, please login before first use. '
- 'Login by running "crosfleet login" and follow instructions.' %
- CROSFLEET_PATH)
- return False
+ suite = benchmark_settings.GetField("suite")
+ retries = benchmark_settings.GetField("retries")
+ run_local = benchmark_settings.GetField("run_local")
+ weight = benchmark_settings.GetField("weight")
+ if weight:
+ if not cwp_dso:
+ raise RuntimeError(
+ "Weight can only be set when DSO specified"
+ )
+ if suite != "telemetry_Crosperf":
+ raise RuntimeError(
+ "CWP approximation weight only works with "
+ "telemetry_Crosperf suite"
+ )
+ if run_local:
+ raise RuntimeError(
+ "run_local must be set to False to use CWP "
+ "approximation"
+ )
+ if weight < 0:
+ raise RuntimeError("Weight should be a float >=0")
+ elif cwp_dso:
+ raise RuntimeError(
+ "With DSO specified, each benchmark should have a " "weight"
+ )
+
+ if suite == "telemetry_Crosperf":
+ if test_name == "all_perfv2":
+ self.AppendBenchmarkSet(
+ benchmarks,
+ telemetry_perfv2_tests,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ )
+ elif test_name == "all_pagecyclers":
+ self.AppendBenchmarkSet(
+ benchmarks,
+ telemetry_pagecycler_tests,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ )
+ elif test_name == "all_crosbolt_perf":
+ self.AppendBenchmarkSet(
+ benchmarks,
+ telemetry_crosbolt_perf_tests,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ "telemetry_Crosperf",
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ )
+ self.AppendBenchmarkSet(
+ benchmarks,
+ crosbolt_perf_tests,
+ "",
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ "",
+ show_all_results,
+ retries,
+ run_local=False,
+ cwp_dso=cwp_dso,
+ weight=weight,
+ )
+ elif test_name == "all_toolchain_perf":
+ self.AppendBenchmarkSet(
+ benchmarks,
+ telemetry_toolchain_perf_tests,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ )
+ # Add non-telemetry toolchain-perf benchmarks:
+
+ # Tast test platform.ReportDiskUsage for image size.
+ benchmarks.append(
+ Benchmark(
+ "platform.ReportDiskUsage",
+ "platform.ReportDiskUsage",
+ "",
+ 1, # This is not a performance benchmark, only run once.
+ rm_chroot_tmp,
+ "",
+ "tast", # Specify the suite to be 'tast'
+ show_all_results,
+ retries,
+ )
+ )
+
+ # TODO: crbug.com/1057755 Do not enable graphics_WebGLAquarium until
+ # it gets fixed.
+ #
+ # benchmarks.append(
+ # Benchmark(
+ # 'graphics_WebGLAquarium',
+ # 'graphics_WebGLAquarium',
+ # '',
+ # iterations,
+ # rm_chroot_tmp,
+ # perf_args,
+ # 'crosperf_Wrapper', # Use client wrapper in Autotest
+ # show_all_results,
+ # retries,
+ # run_local=False,
+ # cwp_dso=cwp_dso,
+ # weight=weight))
+ elif test_name == "all_toolchain_perf_old":
+ self.AppendBenchmarkSet(
+ benchmarks,
+ telemetry_toolchain_old_perf_tests,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ )
+ else:
+ benchmark = Benchmark(
+ benchmark_name,
+ test_name,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ )
+ benchmarks.append(benchmark)
+ else:
+ if test_name == "all_graphics_perf":
+ self.AppendBenchmarkSet(
+ benchmarks,
+ graphics_perf_tests,
+ "",
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ "",
+ show_all_results,
+ retries,
+ run_local=False,
+ cwp_dso=cwp_dso,
+ weight=weight,
+ )
+ else:
+ # Add the single benchmark.
+ benchmark = Benchmark(
+ benchmark_name,
+ test_name,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local=False,
+ cwp_dso=cwp_dso,
+ weight=weight,
+ )
+ benchmarks.append(benchmark)
+
+ if not benchmarks:
+ raise RuntimeError("No benchmarks specified")
+
+ # Construct labels.
+ # Some fields are common with global settings. The values are
+ # inherited and/or merged with the global settings values.
+ labels = []
+ all_label_settings = experiment_file.GetSettings("label")
+ all_remote = list(remote)
+ for label_settings in all_label_settings:
+ label_name = label_settings.name
+ image = label_settings.GetField("chromeos_image")
+ build = label_settings.GetField("build")
+ autotest_path = label_settings.GetField("autotest_path")
+ debug_path = label_settings.GetField("debug_path")
+ chromeos_root = label_settings.GetField("chromeos_root")
+ my_remote = label_settings.GetField("remote")
+ compiler = label_settings.GetField("compiler")
+ new_remote = []
+ if my_remote:
+ for i in my_remote:
+ c = re.sub("[\"']", "", i)
+ new_remote.append(c)
+ my_remote = new_remote
+
+ if image:
+ if crosfleet:
+ raise RuntimeError(
+ "In crosfleet mode, local image should not be used."
+ )
+ if build:
+ raise RuntimeError(
+ "Image path and build are provided at the same "
+ "time, please use only one of them."
+ )
+ else:
+ if not build:
+ raise RuntimeError("Can not have empty 'build' field!")
+ image, autotest_path, debug_path = label_settings.GetXbuddyPath(
+ build,
+ autotest_path,
+ debug_path,
+ board,
+ chromeos_root,
+ log_level,
+ download_debug,
+ )
+
+ cache_dir = label_settings.GetField("cache_dir")
+ chrome_src = label_settings.GetField("chrome_src")
+
+ # TODO(yunlian): We should consolidate code in machine_manager.py
+ # to determine whether we are running from within google or not
+ if (
+ "corp.google.com" in socket.gethostname()
+ and not my_remote
+ and not crosfleet
+ ):
+ my_remote = self.GetDefaultRemotes(board)
+ if global_settings.GetField("same_machine") and len(my_remote) > 1:
+ raise RuntimeError(
+ "Only one remote is allowed when same_machine "
+ "is turned on"
+ )
+ all_remote += my_remote
+ image_args = label_settings.GetField("image_args")
+ if test_flag.GetTestMode():
+ # pylint: disable=too-many-function-args
+ label = MockLabel(
+ label_name,
+ build,
+ image,
+ autotest_path,
+ debug_path,
+ chromeos_root,
+ board,
+ my_remote,
+ image_args,
+ cache_dir,
+ cache_only,
+ log_level,
+ compiler,
+ crosfleet,
+ chrome_src,
+ )
+ else:
+ label = Label(
+ label_name,
+ build,
+ image,
+ autotest_path,
+ debug_path,
+ chromeos_root,
+ board,
+ my_remote,
+ image_args,
+ cache_dir,
+ cache_only,
+ log_level,
+ compiler,
+ crosfleet,
+ chrome_src,
+ )
+ labels.append(label)
+
+ if not labels:
+ raise RuntimeError("No labels specified")
+
+ email = global_settings.GetField("email")
+ all_remote += list(set(my_remote))
+ all_remote = list(set(all_remote))
+ if crosfleet:
+ for remote in all_remote:
+ self.CheckRemotesInCrosfleet(remote)
+ experiment = Experiment(
+ experiment_name,
+ all_remote,
+ working_directory,
+ chromeos_root,
+ cache_conditions,
+ labels,
+ benchmarks,
+ experiment_file.Canonicalize(),
+ email,
+ acquire_timeout,
+ log_dir,
+ log_level,
+ share_cache,
+ results_dir,
+ compress_results,
+ locks_dir,
+ cwp_dso,
+ ignore_min_max,
+ crosfleet,
+ dut_config,
+ no_lock=no_lock,
+ )
+
+ return experiment
+
+ def GetDefaultRemotes(self, board):
+ default_remotes_file = os.path.join(
+ os.path.dirname(__file__), "default_remotes"
+ )
+ try:
+ with open(default_remotes_file) as f:
+ for line in f:
+ key, v = line.split(":")
+ if key.strip() == board:
+ remotes = v.strip().split()
+ if remotes:
+ return remotes
+ else:
+ raise RuntimeError(
+ "There is no remote for {0}".format(board)
+ )
+ except IOError:
+ # TODO: rethrow instead of throwing a different exception.
+ raise RuntimeError(
+ "IOError while reading file {0}".format(default_remotes_file)
+ )
+ else:
+ raise RuntimeError("There is no remote for {0}".format(board))
+
+ def CheckRemotesInCrosfleet(self, remote):
+ # TODO: (AI:zhizhouy) need to check whether a remote is a local or lab
+ # machine. If not lab machine, raise an error.
+ pass
+
+ def CheckCrosfleetTool(self, chromeos_root, log_level):
+ CROSFLEET_PATH = "crosfleet"
+ if os.path.exists(CROSFLEET_PATH):
+ return True
+ l = logger.GetLogger()
+ l.LogOutput("Crosfleet tool not installed, trying to install it.")
+ ce = command_executer.GetCommandExecuter(l, log_level=log_level)
+ setup_lab_tools = os.path.join(
+ chromeos_root, "chromeos-admin", "lab-tools", "setup_lab_tools"
+ )
+ cmd = "%s" % setup_lab_tools
+ status = ce.RunCommand(cmd)
+ if status != 0:
+ raise RuntimeError(
+ "Crosfleet tool not installed correctly, please try to "
+ "manually install it from %s" % setup_lab_tools
+ )
+ l.LogOutput(
+ "Crosfleet is installed at %s, please login before first use. "
+ 'Login by running "crosfleet login" and follow instructions.'
+ % CROSFLEET_PATH
+ )
+ return False
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index 9637c10..0541bb9 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -1,13 +1,12 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit test for experiment_factory.py"""
-from __future__ import print_function
import io
import os
@@ -15,15 +14,15 @@
import unittest
import unittest.mock as mock
+import benchmark
from cros_utils import command_executer
from cros_utils.file_utils import FileUtils
-
-from experiment_file import ExperimentFile
-import test_flag
-import benchmark
import experiment_factory
from experiment_factory import ExperimentFactory
+from experiment_file import ExperimentFile
import settings_factory
+import test_flag
+
EXPERIMENT_FILE_1 = """
board: x86-alex
@@ -78,371 +77,454 @@
class ExperimentFactoryTest(unittest.TestCase):
- """Class for running experiment factory unittests."""
- def setUp(self):
- self.append_benchmark_call_args = []
+ """Class for running experiment factory unittests."""
- def testLoadExperimentFile1(self):
- experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1))
- exp = ExperimentFactory().GetExperiment(experiment_file,
- working_directory='',
- log_dir='')
- self.assertEqual(exp.remote, ['chromeos-alex3'])
+ def setUp(self):
+ self.append_benchmark_call_args = []
- self.assertEqual(len(exp.benchmarks), 2)
- self.assertEqual(exp.benchmarks[0].name, 'PageCycler')
- self.assertEqual(exp.benchmarks[0].test_name, 'PageCycler')
- self.assertEqual(exp.benchmarks[0].iterations, 3)
- self.assertEqual(exp.benchmarks[1].name, 'webrtc@@datachannel')
- self.assertEqual(exp.benchmarks[1].test_name, 'webrtc')
- self.assertEqual(exp.benchmarks[1].iterations, 1)
+ def testLoadExperimentFile1(self):
+ experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1))
+ exp = ExperimentFactory().GetExperiment(
+ experiment_file, working_directory="", log_dir=""
+ )
+ self.assertEqual(exp.remote, ["chromeos-alex3"])
- self.assertEqual(len(exp.labels), 2)
- self.assertEqual(exp.labels[0].chromeos_image,
- '/usr/local/google/cros_image1.bin')
- self.assertEqual(exp.labels[0].board, 'x86-alex')
+ self.assertEqual(len(exp.benchmarks), 2)
+ self.assertEqual(exp.benchmarks[0].name, "PageCycler")
+ self.assertEqual(exp.benchmarks[0].test_name, "PageCycler")
+ self.assertEqual(exp.benchmarks[0].iterations, 3)
+ self.assertEqual(exp.benchmarks[1].name, "webrtc@@datachannel")
+ self.assertEqual(exp.benchmarks[1].test_name, "webrtc")
+ self.assertEqual(exp.benchmarks[1].iterations, 1)
- def testLoadExperimentFile2CWP(self):
- experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_2))
- exp = ExperimentFactory().GetExperiment(experiment_file,
- working_directory='',
- log_dir='')
- self.assertEqual(exp.cwp_dso, 'kallsyms')
- self.assertEqual(len(exp.benchmarks), 2)
- self.assertEqual(exp.benchmarks[0].weight, 0.8)
- self.assertEqual(exp.benchmarks[1].weight, 0.2)
+ self.assertEqual(len(exp.labels), 2)
+ self.assertEqual(
+ exp.labels[0].chromeos_image, "/usr/local/google/cros_image1.bin"
+ )
+ self.assertEqual(exp.labels[0].board, "x86-alex")
- def testDuplecateBenchmark(self):
- mock_experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1))
- mock_experiment_file.all_settings = []
- benchmark_settings1 = settings_factory.BenchmarkSettings('name')
- mock_experiment_file.all_settings.append(benchmark_settings1)
- benchmark_settings2 = settings_factory.BenchmarkSettings('name')
- mock_experiment_file.all_settings.append(benchmark_settings2)
+ def testLoadExperimentFile2CWP(self):
+ experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_2))
+ exp = ExperimentFactory().GetExperiment(
+ experiment_file, working_directory="", log_dir=""
+ )
+ self.assertEqual(exp.cwp_dso, "kallsyms")
+ self.assertEqual(len(exp.benchmarks), 2)
+ self.assertEqual(exp.benchmarks[0].weight, 0.8)
+ self.assertEqual(exp.benchmarks[1].weight, 0.2)
- with self.assertRaises(SyntaxError):
- ef = ExperimentFactory()
- ef.GetExperiment(mock_experiment_file, '', '')
+ def testDuplecateBenchmark(self):
+ mock_experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1))
+ mock_experiment_file.all_settings = []
+ benchmark_settings1 = settings_factory.BenchmarkSettings("name")
+ mock_experiment_file.all_settings.append(benchmark_settings1)
+ benchmark_settings2 = settings_factory.BenchmarkSettings("name")
+ mock_experiment_file.all_settings.append(benchmark_settings2)
- def testCWPExceptions(self):
- mock_experiment_file = ExperimentFile(io.StringIO(''))
- mock_experiment_file.all_settings = []
- global_settings = settings_factory.GlobalSettings('test_name')
- global_settings.SetField('locks_dir', '/tmp')
+ with self.assertRaises(SyntaxError):
+ ef = ExperimentFactory()
+ ef.GetExperiment(mock_experiment_file, "", "")
- # Test 1: DSO type not supported
- global_settings.SetField('cwp_dso', 'test')
- self.assertEqual(global_settings.GetField('cwp_dso'), 'test')
- mock_experiment_file.global_settings = global_settings
- with self.assertRaises(RuntimeError) as msg:
- ef = ExperimentFactory()
- ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual('The DSO specified is not supported', str(msg.exception))
+ def testCWPExceptions(self):
+ mock_experiment_file = ExperimentFile(io.StringIO(""))
+ mock_experiment_file.all_settings = []
+ global_settings = settings_factory.GlobalSettings("test_name")
+ global_settings.SetField("locks_dir", "/tmp")
- # Test 2: No weight after DSO specified
- global_settings.SetField('cwp_dso', 'kallsyms')
- mock_experiment_file.global_settings = global_settings
- benchmark_settings = settings_factory.BenchmarkSettings('name')
- mock_experiment_file.all_settings.append(benchmark_settings)
- with self.assertRaises(RuntimeError) as msg:
- ef = ExperimentFactory()
- ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual('With DSO specified, each benchmark should have a weight',
- str(msg.exception))
+ # Test 1: DSO type not supported
+ global_settings.SetField("cwp_dso", "test")
+ self.assertEqual(global_settings.GetField("cwp_dso"), "test")
+ mock_experiment_file.global_settings = global_settings
+ with self.assertRaises(RuntimeError) as msg:
+ ef = ExperimentFactory()
+ ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(
+ "The DSO specified is not supported", str(msg.exception)
+ )
- # Test 3: Weight is set, but no dso specified
- global_settings.SetField('cwp_dso', '')
- mock_experiment_file.global_settings = global_settings
- benchmark_settings = settings_factory.BenchmarkSettings('name')
- benchmark_settings.SetField('weight', '0.8')
- mock_experiment_file.all_settings = []
- mock_experiment_file.all_settings.append(benchmark_settings)
- with self.assertRaises(RuntimeError) as msg:
- ef = ExperimentFactory()
- ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual('Weight can only be set when DSO specified',
- str(msg.exception))
+ # Test 2: No weight after DSO specified
+ global_settings.SetField("cwp_dso", "kallsyms")
+ mock_experiment_file.global_settings = global_settings
+ benchmark_settings = settings_factory.BenchmarkSettings("name")
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ with self.assertRaises(RuntimeError) as msg:
+ ef = ExperimentFactory()
+ ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(
+ "With DSO specified, each benchmark should have a weight",
+ str(msg.exception),
+ )
- # Test 4: cwp_dso only works for telemetry_Crosperf benchmarks
- global_settings.SetField('cwp_dso', 'kallsyms')
- mock_experiment_file.global_settings = global_settings
- benchmark_settings = settings_factory.BenchmarkSettings('name')
- benchmark_settings.SetField('weight', '0.8')
- mock_experiment_file.all_settings = []
- mock_experiment_file.all_settings.append(benchmark_settings)
- with self.assertRaises(RuntimeError) as msg:
- ef = ExperimentFactory()
- ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual(
- 'CWP approximation weight only works with '
- 'telemetry_Crosperf suite', str(msg.exception))
+ # Test 3: Weight is set, but no dso specified
+ global_settings.SetField("cwp_dso", "")
+ mock_experiment_file.global_settings = global_settings
+ benchmark_settings = settings_factory.BenchmarkSettings("name")
+ benchmark_settings.SetField("weight", "0.8")
+ mock_experiment_file.all_settings = []
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ with self.assertRaises(RuntimeError) as msg:
+ ef = ExperimentFactory()
+ ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(
+ "Weight can only be set when DSO specified", str(msg.exception)
+ )
- # Test 5: cwp_dso does not work for local run
- benchmark_settings = settings_factory.BenchmarkSettings('name')
- benchmark_settings.SetField('weight', '0.8')
- benchmark_settings.SetField('suite', 'telemetry_Crosperf')
- benchmark_settings.SetField('run_local', 'True')
- mock_experiment_file.all_settings = []
- mock_experiment_file.all_settings.append(benchmark_settings)
- with self.assertRaises(RuntimeError) as msg:
- ef = ExperimentFactory()
- ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual('run_local must be set to False to use CWP approximation',
- str(msg.exception))
+ # Test 4: cwp_dso only works for telemetry_Crosperf benchmarks
+ global_settings.SetField("cwp_dso", "kallsyms")
+ mock_experiment_file.global_settings = global_settings
+ benchmark_settings = settings_factory.BenchmarkSettings("name")
+ benchmark_settings.SetField("weight", "0.8")
+ mock_experiment_file.all_settings = []
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ with self.assertRaises(RuntimeError) as msg:
+ ef = ExperimentFactory()
+ ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(
+ "CWP approximation weight only works with "
+ "telemetry_Crosperf suite",
+ str(msg.exception),
+ )
- # Test 6: weight should be float >=0
- benchmark_settings = settings_factory.BenchmarkSettings('name')
- benchmark_settings.SetField('weight', '-1.2')
- benchmark_settings.SetField('suite', 'telemetry_Crosperf')
- benchmark_settings.SetField('run_local', 'False')
- mock_experiment_file.all_settings = []
- mock_experiment_file.all_settings.append(benchmark_settings)
- with self.assertRaises(RuntimeError) as msg:
- ef = ExperimentFactory()
- ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual('Weight should be a float >=0', str(msg.exception))
+ # Test 5: cwp_dso does not work for local run
+ benchmark_settings = settings_factory.BenchmarkSettings("name")
+ benchmark_settings.SetField("weight", "0.8")
+ benchmark_settings.SetField("suite", "telemetry_Crosperf")
+ benchmark_settings.SetField("run_local", "True")
+ mock_experiment_file.all_settings = []
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ with self.assertRaises(RuntimeError) as msg:
+ ef = ExperimentFactory()
+ ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(
+ "run_local must be set to False to use CWP approximation",
+ str(msg.exception),
+ )
- # Test 7: more than one story tag in test_args
- benchmark_settings = settings_factory.BenchmarkSettings('name')
- benchmark_settings.SetField('test_args',
- '--story-filter=a --story-tag-filter=b')
- benchmark_settings.SetField('weight', '1.2')
- benchmark_settings.SetField('suite', 'telemetry_Crosperf')
- mock_experiment_file.all_settings = []
- mock_experiment_file.all_settings.append(benchmark_settings)
- with self.assertRaises(RuntimeError) as msg:
- ef = ExperimentFactory()
- ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual(
- 'Only one story or story-tag filter allowed in a single '
- 'benchmark run', str(msg.exception))
+ # Test 6: weight should be float >=0
+ benchmark_settings = settings_factory.BenchmarkSettings("name")
+ benchmark_settings.SetField("weight", "-1.2")
+ benchmark_settings.SetField("suite", "telemetry_Crosperf")
+ benchmark_settings.SetField("run_local", "False")
+ mock_experiment_file.all_settings = []
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ with self.assertRaises(RuntimeError) as msg:
+ ef = ExperimentFactory()
+ ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual("Weight should be a float >=0", str(msg.exception))
- # Test 8: Iterations of each benchmark run are not same in cwp mode
- mock_experiment_file.all_settings = []
- benchmark_settings = settings_factory.BenchmarkSettings('name1')
- benchmark_settings.SetField('iterations', '4')
- benchmark_settings.SetField('weight', '1.2')
- benchmark_settings.SetField('suite', 'telemetry_Crosperf')
- benchmark_settings.SetField('run_local', 'False')
- mock_experiment_file.all_settings.append(benchmark_settings)
- benchmark_settings = settings_factory.BenchmarkSettings('name2')
- benchmark_settings.SetField('iterations', '3')
- benchmark_settings.SetField('weight', '1.2')
- benchmark_settings.SetField('suite', 'telemetry_Crosperf')
- benchmark_settings.SetField('run_local', 'False')
- mock_experiment_file.all_settings.append(benchmark_settings)
- with self.assertRaises(RuntimeError) as msg:
- ef = ExperimentFactory()
- ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual('Iterations of each benchmark run are not the same',
- str(msg.exception))
+ # Test 7: more than one story tag in test_args
+ benchmark_settings = settings_factory.BenchmarkSettings("name")
+ benchmark_settings.SetField(
+ "test_args", "--story-filter=a --story-tag-filter=b"
+ )
+ benchmark_settings.SetField("weight", "1.2")
+ benchmark_settings.SetField("suite", "telemetry_Crosperf")
+ mock_experiment_file.all_settings = []
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ with self.assertRaises(RuntimeError) as msg:
+ ef = ExperimentFactory()
+ ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(
+ "Only one story or story-tag filter allowed in a single "
+ "benchmark run",
+ str(msg.exception),
+ )
- def test_append_benchmark_set(self):
- ef = ExperimentFactory()
+ # Test 8: Iterations of each benchmark run are not same in cwp mode
+ mock_experiment_file.all_settings = []
+ benchmark_settings = settings_factory.BenchmarkSettings("name1")
+ benchmark_settings.SetField("iterations", "4")
+ benchmark_settings.SetField("weight", "1.2")
+ benchmark_settings.SetField("suite", "telemetry_Crosperf")
+ benchmark_settings.SetField("run_local", "False")
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ benchmark_settings = settings_factory.BenchmarkSettings("name2")
+ benchmark_settings.SetField("iterations", "3")
+ benchmark_settings.SetField("weight", "1.2")
+ benchmark_settings.SetField("suite", "telemetry_Crosperf")
+ benchmark_settings.SetField("run_local", "False")
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ with self.assertRaises(RuntimeError) as msg:
+ ef = ExperimentFactory()
+ ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(
+ "Iterations of each benchmark run are not the same",
+ str(msg.exception),
+ )
- bench_list = []
- ef.AppendBenchmarkSet(bench_list,
- experiment_factory.telemetry_perfv2_tests, '', 1,
- False, '', 'telemetry_Crosperf', False, 0, False, '',
- 0)
- self.assertEqual(len(bench_list),
- len(experiment_factory.telemetry_perfv2_tests))
- self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
+ def test_append_benchmark_set(self):
+ ef = ExperimentFactory()
- bench_list = []
- ef.AppendBenchmarkSet(bench_list,
- experiment_factory.telemetry_pagecycler_tests, '', 1,
- False, '', 'telemetry_Crosperf', False, 0, False, '',
- 0)
- self.assertEqual(len(bench_list),
- len(experiment_factory.telemetry_pagecycler_tests))
- self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
+ bench_list = []
+ ef.AppendBenchmarkSet(
+ bench_list,
+ experiment_factory.telemetry_perfv2_tests,
+ "",
+ 1,
+ False,
+ "",
+ "telemetry_Crosperf",
+ False,
+ 0,
+ False,
+ "",
+ 0,
+ )
+ self.assertEqual(
+ len(bench_list), len(experiment_factory.telemetry_perfv2_tests)
+ )
+ self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
- bench_list = []
- ef.AppendBenchmarkSet(bench_list,
- experiment_factory.telemetry_toolchain_perf_tests,
- '', 1, False, '', 'telemetry_Crosperf', False, 0,
- False, '', 0)
- self.assertEqual(len(bench_list),
- len(experiment_factory.telemetry_toolchain_perf_tests))
- self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
+ bench_list = []
+ ef.AppendBenchmarkSet(
+ bench_list,
+ experiment_factory.telemetry_pagecycler_tests,
+ "",
+ 1,
+ False,
+ "",
+ "telemetry_Crosperf",
+ False,
+ 0,
+ False,
+ "",
+ 0,
+ )
+ self.assertEqual(
+ len(bench_list), len(experiment_factory.telemetry_pagecycler_tests)
+ )
+ self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
- @mock.patch.object(socket, 'gethostname')
- def test_get_experiment(self, mock_socket):
+ bench_list = []
+ ef.AppendBenchmarkSet(
+ bench_list,
+ experiment_factory.telemetry_toolchain_perf_tests,
+ "",
+ 1,
+ False,
+ "",
+ "telemetry_Crosperf",
+ False,
+ 0,
+ False,
+ "",
+ 0,
+ )
+ self.assertEqual(
+ len(bench_list),
+ len(experiment_factory.telemetry_toolchain_perf_tests),
+ )
+ self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
- test_flag.SetTestMode(False)
- self.append_benchmark_call_args = []
+ @mock.patch.object(socket, "gethostname")
+ def test_get_experiment(self, mock_socket):
- def FakeAppendBenchmarkSet(bench_list, set_list, args, iters, rm_ch,
- perf_args, suite, show_all):
- 'Helper function for test_get_experiment'
- arg_list = [
- bench_list, set_list, args, iters, rm_ch, perf_args, suite, show_all
- ]
- self.append_benchmark_call_args.append(arg_list)
+ test_flag.SetTestMode(False)
+ self.append_benchmark_call_args = []
- def FakeGetDefaultRemotes(board):
- if not board:
- return []
- return ['fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros']
+ def FakeAppendBenchmarkSet(
+ bench_list, set_list, args, iters, rm_ch, perf_args, suite, show_all
+ ):
+ "Helper function for test_get_experiment"
+ arg_list = [
+ bench_list,
+ set_list,
+ args,
+ iters,
+ rm_ch,
+ perf_args,
+ suite,
+ show_all,
+ ]
+ self.append_benchmark_call_args.append(arg_list)
- def FakeGetXbuddyPath(build, autotest_dir, debug_dir, board, chroot,
- log_level, perf_args):
- autotest_path = autotest_dir
- if not autotest_path:
- autotest_path = 'fake_autotest_path'
- debug_path = debug_dir
- if not debug_path and perf_args:
- debug_path = 'fake_debug_path'
- if not build or not board or not chroot or not log_level:
- return '', autotest_path, debug_path
- return 'fake_image_path', autotest_path, debug_path
+ def FakeGetDefaultRemotes(board):
+ if not board:
+ return []
+ return [
+ "fake_chromeos_machine1.cros",
+ "fake_chromeos_machine2.cros",
+ ]
- ef = ExperimentFactory()
- ef.AppendBenchmarkSet = FakeAppendBenchmarkSet
- ef.GetDefaultRemotes = FakeGetDefaultRemotes
+ def FakeGetXbuddyPath(
+ build, autotest_dir, debug_dir, board, chroot, log_level, perf_args
+ ):
+ autotest_path = autotest_dir
+ if not autotest_path:
+ autotest_path = "fake_autotest_path"
+ debug_path = debug_dir
+ if not debug_path and perf_args:
+ debug_path = "fake_debug_path"
+ if not build or not board or not chroot or not log_level:
+ return "", autotest_path, debug_path
+ return "fake_image_path", autotest_path, debug_path
- label_settings = settings_factory.LabelSettings('image_label')
- benchmark_settings = settings_factory.BenchmarkSettings('bench_test')
- global_settings = settings_factory.GlobalSettings('test_name')
+ ef = ExperimentFactory()
+ ef.AppendBenchmarkSet = FakeAppendBenchmarkSet
+ ef.GetDefaultRemotes = FakeGetDefaultRemotes
- label_settings.GetXbuddyPath = FakeGetXbuddyPath
+ label_settings = settings_factory.LabelSettings("image_label")
+ benchmark_settings = settings_factory.BenchmarkSettings("bench_test")
+ global_settings = settings_factory.GlobalSettings("test_name")
- mock_experiment_file = ExperimentFile(io.StringIO(''))
- mock_experiment_file.all_settings = []
+ label_settings.GetXbuddyPath = FakeGetXbuddyPath
+ mock_experiment_file = ExperimentFile(io.StringIO(""))
+ mock_experiment_file.all_settings = []
+
+ test_flag.SetTestMode(True)
+ # Basic test.
+ global_settings.SetField("name", "unittest_test")
+ global_settings.SetField("board", "lumpy")
+ global_settings.SetField("locks_dir", "/tmp")
+ global_settings.SetField("remote", "123.45.67.89 123.45.76.80")
+ benchmark_settings.SetField("test_name", "kraken")
+ benchmark_settings.SetField("suite", "telemetry_Crosperf")
+ benchmark_settings.SetField("iterations", 1)
+ label_settings.SetField(
+ "chromeos_image",
+ "chromeos/src/build/images/lumpy/latest/chromiumos_test_image.bin",
+ )
+ label_settings.SetField(
+ "chrome_src", "/usr/local/google/home/chrome-top"
+ )
+ label_settings.SetField("autotest_path", "/tmp/autotest")
+
+ mock_experiment_file.global_settings = global_settings
+ mock_experiment_file.all_settings.append(label_settings)
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ mock_experiment_file.all_settings.append(global_settings)
+
+ mock_socket.return_value = ""
+
+ # First test. General test.
+ exp = ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertCountEqual(exp.remote, ["123.45.67.89", "123.45.76.80"])
+ self.assertEqual(exp.cache_conditions, [0, 2, 1])
+ self.assertEqual(exp.log_level, "average")
+
+ self.assertEqual(len(exp.benchmarks), 1)
+ self.assertEqual(exp.benchmarks[0].name, "bench_test")
+ self.assertEqual(exp.benchmarks[0].test_name, "kraken")
+ self.assertEqual(exp.benchmarks[0].iterations, 1)
+ self.assertEqual(exp.benchmarks[0].suite, "telemetry_Crosperf")
+ self.assertFalse(exp.benchmarks[0].show_all_results)
+
+ self.assertEqual(len(exp.labels), 1)
+ self.assertEqual(
+ exp.labels[0].chromeos_image,
+ "chromeos/src/build/images/lumpy/latest/"
+ "chromiumos_test_image.bin",
+ )
+ self.assertEqual(exp.labels[0].autotest_path, "/tmp/autotest")
+ self.assertEqual(exp.labels[0].board, "lumpy")
+
+ # Second test: Remotes listed in labels.
+ test_flag.SetTestMode(True)
+ label_settings.SetField("remote", "chromeos1.cros chromeos2.cros")
+ exp = ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertCountEqual(
+ exp.remote,
+ [
+ "123.45.67.89",
+ "123.45.76.80",
+ "chromeos1.cros",
+ "chromeos2.cros",
+ ],
+ )
+
+ # Third test: Automatic fixing of bad logging_level param:
+ global_settings.SetField("logging_level", "really loud!")
+ exp = ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(exp.log_level, "verbose")
+
+ # Fourth test: Setting cache conditions; only 1 remote with "same_machine"
+ global_settings.SetField("rerun_if_failed", "true")
+ global_settings.SetField("rerun", "true")
+ global_settings.SetField("same_machine", "true")
+ global_settings.SetField("same_specs", "true")
+
+ self.assertRaises(
+ Exception, ef.GetExperiment, mock_experiment_file, "", ""
+ )
+ label_settings.SetField("remote", "")
+ global_settings.SetField("remote", "123.45.67.89")
+ exp = ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(exp.cache_conditions, [0, 2, 3, 4, 6, 1])
+
+ # Fifth Test: Adding a second label; calling GetXbuddyPath; omitting all
+ # remotes (Call GetDefaultRemotes).
+ mock_socket.return_value = "test.corp.google.com"
+ global_settings.SetField("remote", "")
+ global_settings.SetField("same_machine", "false")
+
+ label_settings_2 = settings_factory.LabelSettings(
+ "official_image_label"
+ )
+ label_settings_2.SetField("chromeos_root", "chromeos")
+ label_settings_2.SetField("build", "official-dev")
+ label_settings_2.SetField("autotest_path", "")
+ label_settings_2.GetXbuddyPath = FakeGetXbuddyPath
+
+ mock_experiment_file.all_settings.append(label_settings_2)
+ exp = ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(len(exp.labels), 2)
+ self.assertEqual(exp.labels[1].chromeos_image, "fake_image_path")
+ self.assertEqual(exp.labels[1].autotest_path, "fake_autotest_path")
+ self.assertCountEqual(
+ exp.remote,
+ ["fake_chromeos_machine1.cros", "fake_chromeos_machine2.cros"],
+ )
+
+ def test_get_default_remotes(self):
+ board_list = [
+ "bob",
+ "chell",
+ "coral",
+ "elm",
+ "nautilus",
+ "snappy",
+ ]
+
+ ef = ExperimentFactory()
+ self.assertRaises(Exception, ef.GetDefaultRemotes, "bad-board")
+
+ # Verify that we have entries for every board
+ for b in board_list:
+ remotes = ef.GetDefaultRemotes(b)
+ self.assertGreaterEqual(len(remotes), 1)
+
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommand")
+ @mock.patch.object(os.path, "exists")
+ def test_check_crosfleet_tool(self, mock_exists, mock_runcmd):
+ ef = ExperimentFactory()
+ chromeos_root = "/tmp/chromeos"
+ log_level = "average"
+
+ mock_exists.return_value = True
+ ret = ef.CheckCrosfleetTool(chromeos_root, log_level)
+ self.assertTrue(ret)
+
+ mock_exists.return_value = False
+ mock_runcmd.return_value = 1
+ with self.assertRaises(RuntimeError) as err:
+ ef.CheckCrosfleetTool(chromeos_root, log_level)
+ self.assertEqual(mock_runcmd.call_count, 1)
+ self.assertEqual(
+ str(err.exception),
+ "Crosfleet tool not installed "
+ "correctly, please try to manually install it from "
+ "/tmp/chromeos/chromeos-admin/lab-tools/setup_lab_tools",
+ )
+
+ mock_runcmd.return_value = 0
+ mock_runcmd.call_count = 0
+ ret = ef.CheckCrosfleetTool(chromeos_root, log_level)
+ self.assertEqual(mock_runcmd.call_count, 1)
+ self.assertFalse(ret)
+
+
+if __name__ == "__main__":
+ FileUtils.Configure(True)
test_flag.SetTestMode(True)
- # Basic test.
- global_settings.SetField('name', 'unittest_test')
- global_settings.SetField('board', 'lumpy')
- global_settings.SetField('locks_dir', '/tmp')
- global_settings.SetField('remote', '123.45.67.89 123.45.76.80')
- benchmark_settings.SetField('test_name', 'kraken')
- benchmark_settings.SetField('suite', 'telemetry_Crosperf')
- benchmark_settings.SetField('iterations', 1)
- label_settings.SetField(
- 'chromeos_image',
- 'chromeos/src/build/images/lumpy/latest/chromiumos_test_image.bin')
- label_settings.SetField('chrome_src', '/usr/local/google/home/chrome-top')
- label_settings.SetField('autotest_path', '/tmp/autotest')
-
- mock_experiment_file.global_settings = global_settings
- mock_experiment_file.all_settings.append(label_settings)
- mock_experiment_file.all_settings.append(benchmark_settings)
- mock_experiment_file.all_settings.append(global_settings)
-
- mock_socket.return_value = ''
-
- # First test. General test.
- exp = ef.GetExperiment(mock_experiment_file, '', '')
- self.assertCountEqual(exp.remote, ['123.45.67.89', '123.45.76.80'])
- self.assertEqual(exp.cache_conditions, [0, 2, 1])
- self.assertEqual(exp.log_level, 'average')
-
- self.assertEqual(len(exp.benchmarks), 1)
- self.assertEqual(exp.benchmarks[0].name, 'bench_test')
- self.assertEqual(exp.benchmarks[0].test_name, 'kraken')
- self.assertEqual(exp.benchmarks[0].iterations, 1)
- self.assertEqual(exp.benchmarks[0].suite, 'telemetry_Crosperf')
- self.assertFalse(exp.benchmarks[0].show_all_results)
-
- self.assertEqual(len(exp.labels), 1)
- self.assertEqual(
- exp.labels[0].chromeos_image, 'chromeos/src/build/images/lumpy/latest/'
- 'chromiumos_test_image.bin')
- self.assertEqual(exp.labels[0].autotest_path, '/tmp/autotest')
- self.assertEqual(exp.labels[0].board, 'lumpy')
-
- # Second test: Remotes listed in labels.
- test_flag.SetTestMode(True)
- label_settings.SetField('remote', 'chromeos1.cros chromeos2.cros')
- exp = ef.GetExperiment(mock_experiment_file, '', '')
- self.assertCountEqual(
- exp.remote,
- ['123.45.67.89', '123.45.76.80', 'chromeos1.cros', 'chromeos2.cros'])
-
- # Third test: Automatic fixing of bad logging_level param:
- global_settings.SetField('logging_level', 'really loud!')
- exp = ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual(exp.log_level, 'verbose')
-
- # Fourth test: Setting cache conditions; only 1 remote with "same_machine"
- global_settings.SetField('rerun_if_failed', 'true')
- global_settings.SetField('rerun', 'true')
- global_settings.SetField('same_machine', 'true')
- global_settings.SetField('same_specs', 'true')
-
- self.assertRaises(Exception, ef.GetExperiment, mock_experiment_file, '',
- '')
- label_settings.SetField('remote', '')
- global_settings.SetField('remote', '123.45.67.89')
- exp = ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual(exp.cache_conditions, [0, 2, 3, 4, 6, 1])
-
- # Fifth Test: Adding a second label; calling GetXbuddyPath; omitting all
- # remotes (Call GetDefaultRemotes).
- mock_socket.return_value = 'test.corp.google.com'
- global_settings.SetField('remote', '')
- global_settings.SetField('same_machine', 'false')
-
- label_settings_2 = settings_factory.LabelSettings('official_image_label')
- label_settings_2.SetField('chromeos_root', 'chromeos')
- label_settings_2.SetField('build', 'official-dev')
- label_settings_2.SetField('autotest_path', '')
- label_settings_2.GetXbuddyPath = FakeGetXbuddyPath
-
- mock_experiment_file.all_settings.append(label_settings_2)
- exp = ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual(len(exp.labels), 2)
- self.assertEqual(exp.labels[1].chromeos_image, 'fake_image_path')
- self.assertEqual(exp.labels[1].autotest_path, 'fake_autotest_path')
- self.assertCountEqual(
- exp.remote,
- ['fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros'])
-
- def test_get_default_remotes(self):
- board_list = [
- 'bob', 'chell', 'coral', 'elm', 'kefka', 'nautilus', 'snappy',
- 'veyron_tiger'
- ]
-
- ef = ExperimentFactory()
- self.assertRaises(Exception, ef.GetDefaultRemotes, 'bad-board')
-
- # Verify that we have entries for every board
- for b in board_list:
- remotes = ef.GetDefaultRemotes(b)
- self.assertGreaterEqual(len(remotes), 1)
-
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
- @mock.patch.object(os.path, 'exists')
- def test_check_crosfleet_tool(self, mock_exists, mock_runcmd):
- ef = ExperimentFactory()
- chromeos_root = '/tmp/chromeos'
- log_level = 'average'
-
- mock_exists.return_value = True
- ret = ef.CheckCrosfleetTool(chromeos_root, log_level)
- self.assertTrue(ret)
-
- mock_exists.return_value = False
- mock_runcmd.return_value = 1
- with self.assertRaises(RuntimeError) as err:
- ef.CheckCrosfleetTool(chromeos_root, log_level)
- self.assertEqual(mock_runcmd.call_count, 1)
- self.assertEqual(
- str(err.exception), 'Crosfleet tool not installed '
- 'correctly, please try to manually install it from '
- '/tmp/chromeos/chromeos-admin/lab-tools/setup_lab_tools')
-
- mock_runcmd.return_value = 0
- mock_runcmd.call_count = 0
- ret = ef.CheckCrosfleetTool(chromeos_root, log_level)
- self.assertEqual(mock_runcmd.call_count, 1)
- self.assertFalse(ret)
-
-
-if __name__ == '__main__':
- FileUtils.Configure(True)
- test_flag.SetTestMode(True)
- unittest.main()
+ unittest.main()
diff --git a/crosperf/experiment_file.py b/crosperf/experiment_file.py
index d2831bd..70852a2 100644
--- a/crosperf/experiment_file.py
+++ b/crosperf/experiment_file.py
@@ -1,220 +1,241 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The experiment file module. It manages the input file of crosperf."""
-from __future__ import print_function
+
import os.path
import re
+
from settings_factory import SettingsFactory
class ExperimentFile(object):
- """Class for parsing the experiment file format.
+ """Class for parsing the experiment file format.
- The grammar for this format is:
+ The grammar for this format is:
- experiment = { _FIELD_VALUE_RE | settings }
- settings = _OPEN_SETTINGS_RE
- { _FIELD_VALUE_RE }
- _CLOSE_SETTINGS_RE
+ experiment = { _FIELD_VALUE_RE | settings }
+ settings = _OPEN_SETTINGS_RE
+ { _FIELD_VALUE_RE }
+ _CLOSE_SETTINGS_RE
- Where the regexes are terminals defined below. This results in an format
- which looks something like:
+ Where the regexes are terminals defined below. This results in a format
+ which looks something like:
- field_name: value
- settings_type: settings_name {
field_name: value
- field_name: value
- }
- """
-
- # Field regex, e.g. "iterations: 3"
- _FIELD_VALUE_RE = re.compile(r'(\+)?\s*(\w+?)(?:\.(\S+))?\s*:\s*(.*)')
- # Open settings regex, e.g. "label {"
- _OPEN_SETTINGS_RE = re.compile(r'(?:([\w.-]+):)?\s*([\w.-]+)\s*{')
- # Close settings regex.
- _CLOSE_SETTINGS_RE = re.compile(r'}')
-
- def __init__(self, experiment_file, overrides=None):
- """Construct object from file-like experiment_file.
-
- Args:
- experiment_file: file-like object with text description of experiment.
- overrides: A settings object that will override fields in other settings.
-
- Raises:
- Exception: if invalid build type or description is invalid.
+ settings_type: settings_name {
+ field_name: value
+ field_name: value
+ }
"""
- self.all_settings = []
- self.global_settings = SettingsFactory().GetSettings('global', 'global')
- self.all_settings.append(self.global_settings)
- self._Parse(experiment_file)
+ # Field regex, e.g. "iterations: 3"
+ _FIELD_VALUE_RE = re.compile(r"(\+)?\s*(\w+?)(?:\.(\S+))?\s*:\s*(.*)")
+ # Open settings regex, e.g. "label {"
+ _OPEN_SETTINGS_RE = re.compile(r"(?:([\w.-]+):)?\s*([\w.-]+)\s*{")
+ # Close settings regex.
+ _CLOSE_SETTINGS_RE = re.compile(r"}")
- for settings in self.all_settings:
- settings.Inherit()
- settings.Validate()
- if overrides:
- settings.Override(overrides)
+ def __init__(self, experiment_file, overrides=None):
+ """Construct object from file-like experiment_file.
- def GetSettings(self, settings_type):
- """Return nested fields from the experiment file."""
- res = []
- for settings in self.all_settings:
- if settings.settings_type == settings_type:
- res.append(settings)
- return res
+ Args:
+ experiment_file: file-like object with text description of experiment.
+ overrides: A settings object that will override fields in other settings.
- def GetGlobalSettings(self):
- """Return the global fields from the experiment file."""
- return self.global_settings
+ Raises:
+ Exception: if invalid build type or description is invalid.
+ """
+ self.all_settings = []
+ self.global_settings = SettingsFactory().GetSettings("global", "global")
+ self.all_settings.append(self.global_settings)
- def _ParseField(self, reader):
- """Parse a key/value field."""
- line = reader.CurrentLine().strip()
- match = ExperimentFile._FIELD_VALUE_RE.match(line)
- append, name, _, text_value = match.groups()
- return (name, text_value, append)
+ self._Parse(experiment_file)
- def _ParseSettings(self, reader):
- """Parse a settings block."""
- line = reader.CurrentLine().strip()
- match = ExperimentFile._OPEN_SETTINGS_RE.match(line)
- settings_type = match.group(1)
- if settings_type is None:
- settings_type = ''
- settings_name = match.group(2)
- settings = SettingsFactory().GetSettings(settings_name, settings_type)
- settings.SetParentSettings(self.global_settings)
+ for settings in self.all_settings:
+ settings.Inherit()
+ settings.Validate()
+ if overrides:
+ settings.Override(overrides)
- while reader.NextLine():
- line = reader.CurrentLine().strip()
+ def GetSettings(self, settings_type):
+ """Return nested fields from the experiment file."""
+ res = []
+ for settings in self.all_settings:
+ if settings.settings_type == settings_type:
+ res.append(settings)
+ return res
- if not line:
- continue
+ def GetGlobalSettings(self):
+ """Return the global fields from the experiment file."""
+ return self.global_settings
- if ExperimentFile._FIELD_VALUE_RE.match(line):
- field = self._ParseField(reader)
- settings.SetField(field[0], field[1], field[2])
- elif ExperimentFile._CLOSE_SETTINGS_RE.match(line):
- return settings, settings_type
-
- raise EOFError('Unexpected EOF while parsing settings block.')
-
- def _Parse(self, experiment_file):
- """Parse experiment file and create settings."""
- reader = ExperimentFileReader(experiment_file)
- settings_names = {}
- try:
- while reader.NextLine():
+ def _ParseField(self, reader):
+ """Parse a key/value field."""
line = reader.CurrentLine().strip()
+ match = ExperimentFile._FIELD_VALUE_RE.match(line)
+ append, name, _, text_value = match.groups()
+ return (name, text_value, append)
- if not line:
- continue
+ def _ParseSettings(self, reader):
+ """Parse a settings block."""
+ line = reader.CurrentLine().strip()
+ match = ExperimentFile._OPEN_SETTINGS_RE.match(line)
+ settings_type = match.group(1)
+ if settings_type is None:
+ settings_type = ""
+ settings_name = match.group(2)
+ settings = SettingsFactory().GetSettings(settings_name, settings_type)
+ settings.SetParentSettings(self.global_settings)
- if ExperimentFile._OPEN_SETTINGS_RE.match(line):
- new_settings, settings_type = self._ParseSettings(reader)
- # We will allow benchmarks with duplicated settings name for now.
- # Further decision will be made when parsing benchmark details in
- # ExperimentFactory.GetExperiment().
- if settings_type != 'benchmark':
- if new_settings.name in settings_names:
- raise SyntaxError(
- "Duplicate settings name: '%s'." % new_settings.name)
- settings_names[new_settings.name] = True
- self.all_settings.append(new_settings)
- elif ExperimentFile._FIELD_VALUE_RE.match(line):
- field = self._ParseField(reader)
- self.global_settings.SetField(field[0], field[1], field[2])
- else:
- raise IOError('Unexpected line.')
- except Exception as err:
- raise RuntimeError('Line %d: %s\n==> %s' % (reader.LineNo(), str(err),
- reader.CurrentLine(False)))
+ while reader.NextLine():
+ line = reader.CurrentLine().strip()
- def Canonicalize(self):
- """Convert parsed experiment file back into an experiment file."""
- res = ''
- board = ''
- for field_name in self.global_settings.fields:
- field = self.global_settings.fields[field_name]
- if field.assigned:
- res += '%s: %s\n' % (field.name, field.GetString())
- if field.name == 'board':
- board = field.GetString()
- res += '\n'
+ if not line:
+ continue
- for settings in self.all_settings:
- if settings.settings_type != 'global':
- res += '%s: %s {\n' % (settings.settings_type, settings.name)
- for field_name in settings.fields:
- field = settings.fields[field_name]
- if field.assigned:
- res += '\t%s: %s\n' % (field.name, field.GetString())
- if field.name == 'chromeos_image':
- real_file = (
- os.path.realpath(os.path.expanduser(field.GetString())))
- if real_file != field.GetString():
- res += '\t#actual_image: %s\n' % real_file
- if field.name == 'build':
- chromeos_root_field = settings.fields['chromeos_root']
- if chromeos_root_field:
- chromeos_root = chromeos_root_field.GetString()
- value = field.GetString()
- autotest_field = settings.fields['autotest_path']
- autotest_path = ''
- if autotest_field.assigned:
- autotest_path = autotest_field.GetString()
- debug_field = settings.fields['debug_path']
- debug_path = ''
- if debug_field.assigned:
- debug_path = autotest_field.GetString()
- # Do not download the debug symbols since this function is for
- # canonicalizing experiment file.
- downlad_debug = False
- image_path, autotest_path, debug_path = settings.GetXbuddyPath(
- value, autotest_path, debug_path, board, chromeos_root,
- 'quiet', downlad_debug)
- res += '\t#actual_image: %s\n' % image_path
- if not autotest_field.assigned:
- res += '\t#actual_autotest_path: %s\n' % autotest_path
- if not debug_field.assigned:
- res += '\t#actual_debug_path: %s\n' % debug_path
+ if ExperimentFile._FIELD_VALUE_RE.match(line):
+ field = self._ParseField(reader)
+ settings.SetField(field[0], field[1], field[2])
+ elif ExperimentFile._CLOSE_SETTINGS_RE.match(line):
+ return settings, settings_type
- res += '}\n\n'
+ raise EOFError("Unexpected EOF while parsing settings block.")
- return res
+ def _Parse(self, experiment_file):
+ """Parse experiment file and create settings."""
+ reader = ExperimentFileReader(experiment_file)
+ settings_names = {}
+ try:
+ while reader.NextLine():
+ line = reader.CurrentLine().strip()
+
+ if not line:
+ continue
+
+ if ExperimentFile._OPEN_SETTINGS_RE.match(line):
+ new_settings, settings_type = self._ParseSettings(reader)
+ # We will allow benchmarks with duplicated settings name for now.
+ # Further decision will be made when parsing benchmark details in
+ # ExperimentFactory.GetExperiment().
+ if settings_type != "benchmark":
+ if new_settings.name in settings_names:
+ raise SyntaxError(
+ "Duplicate settings name: '%s'."
+ % new_settings.name
+ )
+ settings_names[new_settings.name] = True
+ self.all_settings.append(new_settings)
+ elif ExperimentFile._FIELD_VALUE_RE.match(line):
+ field = self._ParseField(reader)
+ self.global_settings.SetField(field[0], field[1], field[2])
+ else:
+ raise IOError("Unexpected line.")
+ except Exception as err:
+ raise RuntimeError(
+ "Line %d: %s\n==> %s"
+ % (reader.LineNo(), str(err), reader.CurrentLine(False))
+ )
+
+ def Canonicalize(self):
+ """Convert parsed experiment file back into an experiment file."""
+ res = ""
+ board = ""
+ for field_name in self.global_settings.fields:
+ field = self.global_settings.fields[field_name]
+ if field.assigned:
+ res += "%s: %s\n" % (field.name, field.GetString())
+ if field.name == "board":
+ board = field.GetString()
+ res += "\n"
+
+ for settings in self.all_settings:
+ if settings.settings_type != "global":
+ res += "%s: %s {\n" % (settings.settings_type, settings.name)
+ for field_name in settings.fields:
+ field = settings.fields[field_name]
+ if field.assigned:
+ res += "\t%s: %s\n" % (field.name, field.GetString())
+ if field.name == "chromeos_image":
+ real_file = os.path.realpath(
+ os.path.expanduser(field.GetString())
+ )
+ if real_file != field.GetString():
+ res += "\t#actual_image: %s\n" % real_file
+ if field.name == "build":
+ chromeos_root_field = settings.fields[
+ "chromeos_root"
+ ]
+ if chromeos_root_field:
+ chromeos_root = chromeos_root_field.GetString()
+ value = field.GetString()
+ autotest_field = settings.fields["autotest_path"]
+ autotest_path = ""
+ if autotest_field.assigned:
+ autotest_path = autotest_field.GetString()
+ debug_field = settings.fields["debug_path"]
+ debug_path = ""
+ if debug_field.assigned:
+ debug_path = debug_field.GetString()
+ # Do not download the debug symbols since this function is for
+ # canonicalizing experiment file.
+ download_debug = False
+ (
+ image_path,
+ autotest_path,
+ debug_path,
+ ) = settings.GetXbuddyPath(
+ value,
+ autotest_path,
+ debug_path,
+ board,
+ chromeos_root,
+ "quiet",
+ download_debug,
+ )
+ res += "\t#actual_image: %s\n" % image_path
+ if not autotest_field.assigned:
+ res += (
+ "\t#actual_autotest_path: %s\n"
+ % autotest_path
+ )
+ if not debug_field.assigned:
+ res += "\t#actual_debug_path: %s\n" % debug_path
+
+ res += "}\n\n"
+
+ return res
class ExperimentFileReader(object):
- """Handle reading lines from an experiment file."""
+ """Handle reading lines from an experiment file."""
- def __init__(self, file_object):
- self.file_object = file_object
- self.current_line = None
- self.current_line_no = 0
+ def __init__(self, file_object):
+ self.file_object = file_object
+ self.current_line = None
+ self.current_line_no = 0
- def CurrentLine(self, strip_comment=True):
- """Return the next line from the file, without advancing the iterator."""
- if strip_comment:
- return self._StripComment(self.current_line)
- return self.current_line
+ def CurrentLine(self, strip_comment=True):
+ """Return the next line from the file, without advancing the iterator."""
+ if strip_comment:
+ return self._StripComment(self.current_line)
+ return self.current_line
- def NextLine(self, strip_comment=True):
- """Advance the iterator and return the next line of the file."""
- self.current_line_no += 1
- self.current_line = self.file_object.readline()
- return self.CurrentLine(strip_comment)
+ def NextLine(self, strip_comment=True):
+ """Advance the iterator and return the next line of the file."""
+ self.current_line_no += 1
+ self.current_line = self.file_object.readline()
+ return self.CurrentLine(strip_comment)
- def _StripComment(self, line):
- """Strip comments starting with # from a line."""
- if '#' in line:
- line = line[:line.find('#')] + line[-1]
- return line
+ def _StripComment(self, line):
+ """Strip comments starting with # from a line."""
+ if "#" in line:
+ line = line[: line.find("#")] + line[-1]
+ return line
- def LineNo(self):
- """Return the current line number."""
- return self.current_line_no
+ def LineNo(self):
+ """Return the current line number."""
+ return self.current_line_no
diff --git a/crosperf/experiment_file_unittest.py b/crosperf/experiment_file_unittest.py
index 0d4e1e6..5c09ee0 100755
--- a/crosperf/experiment_file_unittest.py
+++ b/crosperf/experiment_file_unittest.py
@@ -1,18 +1,18 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The unittest of experiment_file."""
-from __future__ import print_function
import io
import unittest
from experiment_file import ExperimentFile
+
EXPERIMENT_FILE_1 = """
board: x86-alex
remote: chromeos-alex3
@@ -158,94 +158,111 @@
class ExperimentFileTest(unittest.TestCase):
- """The main class for Experiment File test."""
+ """The main class for Experiment File test."""
- def testLoadExperimentFile1(self):
- input_file = io.StringIO(EXPERIMENT_FILE_1)
- experiment_file = ExperimentFile(input_file)
- global_settings = experiment_file.GetGlobalSettings()
- self.assertEqual(global_settings.GetField('remote'), ['chromeos-alex3'])
- self.assertEqual(
- global_settings.GetField('perf_args'), 'record -a -e cycles')
- benchmark_settings = experiment_file.GetSettings('benchmark')
- self.assertEqual(len(benchmark_settings), 1)
- self.assertEqual(benchmark_settings[0].name, 'PageCycler')
- self.assertEqual(benchmark_settings[0].GetField('iterations'), 3)
+ def testLoadExperimentFile1(self):
+ input_file = io.StringIO(EXPERIMENT_FILE_1)
+ experiment_file = ExperimentFile(input_file)
+ global_settings = experiment_file.GetGlobalSettings()
+ self.assertEqual(global_settings.GetField("remote"), ["chromeos-alex3"])
+ self.assertEqual(
+ global_settings.GetField("perf_args"), "record -a -e cycles"
+ )
+ benchmark_settings = experiment_file.GetSettings("benchmark")
+ self.assertEqual(len(benchmark_settings), 1)
+ self.assertEqual(benchmark_settings[0].name, "PageCycler")
+ self.assertEqual(benchmark_settings[0].GetField("iterations"), 3)
- label_settings = experiment_file.GetSettings('label')
- self.assertEqual(len(label_settings), 2)
- self.assertEqual(label_settings[0].name, 'image1')
- self.assertEqual(label_settings[0].GetField('chromeos_image'),
- '/usr/local/google/cros_image1.bin')
- self.assertEqual(label_settings[1].GetField('remote'), ['chromeos-lumpy1'])
- self.assertEqual(label_settings[0].GetField('remote'), ['chromeos-alex3'])
+ label_settings = experiment_file.GetSettings("label")
+ self.assertEqual(len(label_settings), 2)
+ self.assertEqual(label_settings[0].name, "image1")
+ self.assertEqual(
+ label_settings[0].GetField("chromeos_image"),
+ "/usr/local/google/cros_image1.bin",
+ )
+ self.assertEqual(
+ label_settings[1].GetField("remote"), ["chromeos-lumpy1"]
+ )
+ self.assertEqual(
+ label_settings[0].GetField("remote"), ["chromeos-alex3"]
+ )
- def testOverrideSetting(self):
- input_file = io.StringIO(EXPERIMENT_FILE_2)
- experiment_file = ExperimentFile(input_file)
- global_settings = experiment_file.GetGlobalSettings()
- self.assertEqual(global_settings.GetField('remote'), ['chromeos-alex3'])
+ def testOverrideSetting(self):
+ input_file = io.StringIO(EXPERIMENT_FILE_2)
+ experiment_file = ExperimentFile(input_file)
+ global_settings = experiment_file.GetGlobalSettings()
+ self.assertEqual(global_settings.GetField("remote"), ["chromeos-alex3"])
- benchmark_settings = experiment_file.GetSettings('benchmark')
- self.assertEqual(len(benchmark_settings), 2)
- self.assertEqual(benchmark_settings[0].name, 'PageCycler')
- self.assertEqual(benchmark_settings[0].GetField('iterations'), 3)
- self.assertEqual(benchmark_settings[1].name, 'AndroidBench')
- self.assertEqual(benchmark_settings[1].GetField('iterations'), 2)
+ benchmark_settings = experiment_file.GetSettings("benchmark")
+ self.assertEqual(len(benchmark_settings), 2)
+ self.assertEqual(benchmark_settings[0].name, "PageCycler")
+ self.assertEqual(benchmark_settings[0].GetField("iterations"), 3)
+ self.assertEqual(benchmark_settings[1].name, "AndroidBench")
+ self.assertEqual(benchmark_settings[1].GetField("iterations"), 2)
- def testDuplicateLabel(self):
- input_file = io.StringIO(EXPERIMENT_FILE_3)
- self.assertRaises(Exception, ExperimentFile, input_file)
+ def testDuplicateLabel(self):
+ input_file = io.StringIO(EXPERIMENT_FILE_3)
+ self.assertRaises(Exception, ExperimentFile, input_file)
- def testDuplicateBenchmark(self):
- input_file = io.StringIO(EXPERIMENT_FILE_4)
- experiment_file = ExperimentFile(input_file)
- benchmark_settings = experiment_file.GetSettings('benchmark')
- self.assertEqual(benchmark_settings[0].name, 'webrtc')
- self.assertEqual(benchmark_settings[0].GetField('test_args'),
- '--story-filter=datachannel')
- self.assertEqual(benchmark_settings[1].name, 'webrtc')
- self.assertEqual(benchmark_settings[1].GetField('test_args'),
- '--story-tag-filter=smoothness')
+ def testDuplicateBenchmark(self):
+ input_file = io.StringIO(EXPERIMENT_FILE_4)
+ experiment_file = ExperimentFile(input_file)
+ benchmark_settings = experiment_file.GetSettings("benchmark")
+ self.assertEqual(benchmark_settings[0].name, "webrtc")
+ self.assertEqual(
+ benchmark_settings[0].GetField("test_args"),
+ "--story-filter=datachannel",
+ )
+ self.assertEqual(benchmark_settings[1].name, "webrtc")
+ self.assertEqual(
+ benchmark_settings[1].GetField("test_args"),
+ "--story-tag-filter=smoothness",
+ )
- def testCanonicalize(self):
- input_file = io.StringIO(EXPERIMENT_FILE_1)
- experiment_file = ExperimentFile(input_file)
- res = experiment_file.Canonicalize()
- self.assertEqual(res, OUTPUT_FILE)
+ def testCanonicalize(self):
+ input_file = io.StringIO(EXPERIMENT_FILE_1)
+ experiment_file = ExperimentFile(input_file)
+ res = experiment_file.Canonicalize()
+ self.assertEqual(res, OUTPUT_FILE)
- def testLoadDutConfigExperimentFile_Good(self):
- input_file = io.StringIO(DUT_CONFIG_EXPERIMENT_FILE_GOOD)
- experiment_file = ExperimentFile(input_file)
- global_settings = experiment_file.GetGlobalSettings()
- self.assertEqual(global_settings.GetField('turbostat'), False)
- self.assertEqual(global_settings.GetField('intel_pstate'), 'no_hwp')
- self.assertEqual(global_settings.GetField('governor'), 'powersave')
- self.assertEqual(global_settings.GetField('cpu_usage'), 'exclusive_cores')
- self.assertEqual(global_settings.GetField('cpu_freq_pct'), 50)
- self.assertEqual(global_settings.GetField('cooldown_time'), 5)
- self.assertEqual(global_settings.GetField('cooldown_temp'), 38)
- self.assertEqual(global_settings.GetField('top_interval'), 5)
+ def testLoadDutConfigExperimentFile_Good(self):
+ input_file = io.StringIO(DUT_CONFIG_EXPERIMENT_FILE_GOOD)
+ experiment_file = ExperimentFile(input_file)
+ global_settings = experiment_file.GetGlobalSettings()
+ self.assertEqual(global_settings.GetField("turbostat"), False)
+ self.assertEqual(global_settings.GetField("intel_pstate"), "no_hwp")
+ self.assertEqual(global_settings.GetField("governor"), "powersave")
+ self.assertEqual(
+ global_settings.GetField("cpu_usage"), "exclusive_cores"
+ )
+ self.assertEqual(global_settings.GetField("cpu_freq_pct"), 50)
+ self.assertEqual(global_settings.GetField("cooldown_time"), 5)
+ self.assertEqual(global_settings.GetField("cooldown_temp"), 38)
+ self.assertEqual(global_settings.GetField("top_interval"), 5)
- def testLoadDutConfigExperimentFile_WrongGovernor(self):
- input_file = io.StringIO(DUT_CONFIG_EXPERIMENT_FILE_BAD_GOV)
- with self.assertRaises(RuntimeError) as msg:
- ExperimentFile(input_file)
- self.assertRegex(str(msg.exception), 'governor: misspelled_governor')
- self.assertRegex(
- str(msg.exception), "Invalid enum value for field 'governor'."
- r' Must be one of \(performance, powersave, userspace, ondemand,'
- r' conservative, schedutils, sched, interactive\)')
+ def testLoadDutConfigExperimentFile_WrongGovernor(self):
+ input_file = io.StringIO(DUT_CONFIG_EXPERIMENT_FILE_BAD_GOV)
+ with self.assertRaises(RuntimeError) as msg:
+ ExperimentFile(input_file)
+ self.assertRegex(str(msg.exception), "governor: misspelled_governor")
+ self.assertRegex(
+ str(msg.exception),
+ "Invalid enum value for field 'governor'."
+ r" Must be one of \(performance, powersave, userspace, ondemand,"
+ r" conservative, schedutils, sched, interactive\)",
+ )
- def testLoadDutConfigExperimentFile_WrongCpuUsage(self):
- input_file = io.StringIO(DUT_CONFIG_EXPERIMENT_FILE_BAD_CPUUSE)
- with self.assertRaises(RuntimeError) as msg:
- ExperimentFile(input_file)
- self.assertRegex(str(msg.exception), 'cpu_usage: unknown')
- self.assertRegex(
- str(msg.exception), "Invalid enum value for field 'cpu_usage'."
- r' Must be one of \(all, big_only, little_only, exclusive_cores\)')
+ def testLoadDutConfigExperimentFile_WrongCpuUsage(self):
+ input_file = io.StringIO(DUT_CONFIG_EXPERIMENT_FILE_BAD_CPUUSE)
+ with self.assertRaises(RuntimeError) as msg:
+ ExperimentFile(input_file)
+ self.assertRegex(str(msg.exception), "cpu_usage: unknown")
+ self.assertRegex(
+ str(msg.exception),
+ "Invalid enum value for field 'cpu_usage'."
+ r" Must be one of \(all, big_only, little_only, exclusive_cores\)",
+ )
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/crosperf/experiment_files/telemetry_perf_perf b/crosperf/experiment_files/telemetry_perf_perf
index acdf96d..e46fdc2 100755
--- a/crosperf/experiment_files/telemetry_perf_perf
+++ b/crosperf/experiment_files/telemetry_perf_perf
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Copyright 2016 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
@@ -12,7 +12,7 @@
# Perf will run for the entire benchmark run, so results should be interpreted
# in that context. i.e, if this shows a 3% overhead for a particular perf
# command, that overhead would only be seen during the 2 seconds of measurement
-# during a Chrome OS Wide Profiling collection.
+# during a ChromeOS Wide Profiling collection.
set -e
board=xxx #<you-board-here>
@@ -74,4 +74,3 @@
# overhead.
RunExperiment 'memory.bandwidth' \
'stat -e cycles -e instructions -e uncore_imc/data_reads/ -e uncore_imc/data_writes/ -e cpu/event=0xD0,umask=0x11,name=MEM_UOPS_RETIRED-STLB_MISS_LOADS/ -e cpu/event=0xD0,umask=0x12,name=MEM_UOPS_RETIRED-STLB_MISS_STORES/'
-
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index 6daef78..1f78dcc 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -1,363 +1,402 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The experiment runner module."""
-from __future__ import print_function
import getpass
import os
import shutil
import time
-import lock_machine
-import test_flag
-
from cros_utils import command_executer
from cros_utils import logger
from cros_utils.email_sender import EmailSender
from cros_utils.file_utils import FileUtils
-
-import config
from experiment_status import ExperimentStatus
+import lock_machine
from results_cache import CacheConditions
from results_cache import ResultsCache
from results_report import HTMLResultsReport
-from results_report import TextResultsReport
from results_report import JSONResultsReport
+from results_report import TextResultsReport
from schedv2 import Schedv2
+import test_flag
+
+import config
def _WriteJSONReportToFile(experiment, results_dir, json_report):
- """Writes a JSON report to a file in results_dir."""
- has_llvm = any('llvm' in l.compiler for l in experiment.labels)
- compiler_string = 'llvm' if has_llvm else 'gcc'
- board = experiment.labels[0].board
- filename = 'report_%s_%s_%s.%s.json' % (board, json_report.date,
- json_report.time.replace(
- ':', '.'), compiler_string)
- fullname = os.path.join(results_dir, filename)
- report_text = json_report.GetReport()
- with open(fullname, 'w') as out_file:
- out_file.write(report_text)
+ """Writes a JSON report to a file in results_dir."""
+ has_llvm = any("llvm" in l.compiler for l in experiment.labels)
+ compiler_string = "llvm" if has_llvm else "gcc"
+ board = experiment.labels[0].board
+ filename = "report_%s_%s_%s.%s.json" % (
+ board,
+ json_report.date,
+ json_report.time.replace(":", "."),
+ compiler_string,
+ )
+ fullname = os.path.join(results_dir, filename)
+ report_text = json_report.GetReport()
+ with open(fullname, "w") as out_file:
+ out_file.write(report_text)
class ExperimentRunner(object):
- """ExperimentRunner Class."""
+ """ExperimentRunner Class."""
- STATUS_TIME_DELAY = 30
- THREAD_MONITOR_DELAY = 2
+ STATUS_TIME_DELAY = 30
+ THREAD_MONITOR_DELAY = 2
- SUCCEEDED = 0
- HAS_FAILURE = 1
- ALL_FAILED = 2
+ SUCCEEDED = 0
+ HAS_FAILURE = 1
+ ALL_FAILED = 2
- def __init__(self,
- experiment,
- json_report,
- using_schedv2=False,
- log=None,
- cmd_exec=None):
- self._experiment = experiment
- self.l = log or logger.GetLogger(experiment.log_dir)
- self._ce = cmd_exec or command_executer.GetCommandExecuter(self.l)
- self._terminated = False
- self.json_report = json_report
- self.locked_machines = []
- if experiment.log_level != 'verbose':
- self.STATUS_TIME_DELAY = 10
+ def __init__(
+ self,
+ experiment,
+ json_report,
+ using_schedv2=False,
+ log=None,
+ cmd_exec=None,
+ ):
+ self._experiment = experiment
+ self.l = log or logger.GetLogger(experiment.log_dir)
+ self._ce = cmd_exec or command_executer.GetCommandExecuter(self.l)
+ self._terminated = False
+ self.json_report = json_report
+ self.locked_machines = []
+ if experiment.log_level != "verbose":
+ self.STATUS_TIME_DELAY = 10
- # Setting this to True will use crosperf sched v2 (feature in progress).
- self._using_schedv2 = using_schedv2
+ # Setting this to True will use crosperf sched v2 (feature in progress).
+ self._using_schedv2 = using_schedv2
- def _GetMachineList(self):
- """Return a list of all requested machines.
+ def _GetMachineList(self):
+ """Return a list of all requested machines.
- Create a list of all the requested machines, both global requests and
- label-specific requests, and return the list.
- """
- machines = self._experiment.remote
- # All Label.remote is a sublist of experiment.remote.
- for l in self._experiment.labels:
- for r in l.remote:
- assert r in machines
- return machines
+ Create a list of all the requested machines, both global requests and
+ label-specific requests, and return the list.
+ """
+ machines = self._experiment.remote
+ # All Label.remote is a sublist of experiment.remote.
+ for l in self._experiment.labels:
+ for r in l.remote:
+ assert r in machines
+ return machines
- def _UpdateMachineList(self, locked_machines):
- """Update machines lists to contain only locked machines.
+ def _UpdateMachineList(self, locked_machines):
+ """Update machines lists to contain only locked machines.
- Go through all the lists of requested machines, both global and
- label-specific requests, and remove any machine that we were not
- able to lock.
+ Go through all the lists of requested machines, both global and
+ label-specific requests, and remove any machine that we were not
+ able to lock.
- Args:
- locked_machines: A list of the machines we successfully locked.
- """
- for m in self._experiment.remote:
- if m not in locked_machines:
- self._experiment.remote.remove(m)
+ Args:
+ locked_machines: A list of the machines we successfully locked.
+ """
+ for m in self._experiment.remote:
+ if m not in locked_machines:
+ self._experiment.remote.remove(m)
- for l in self._experiment.labels:
- for m in l.remote:
- if m not in locked_machines:
- l.remote.remove(m)
+ for l in self._experiment.labels:
+ for m in l.remote:
+ if m not in locked_machines:
+ l.remote.remove(m)
- def _GetMachineType(self, lock_mgr, machine):
- """Get where is the machine from.
+ def _GetMachineType(self, lock_mgr, machine):
+ """Get where is the machine from.
- Returns:
- The location of the machine: local or crosfleet
- """
- # We assume that lab machine always starts with chromeos*, and local
- # machines are ip address.
- if 'chromeos' in machine:
- if lock_mgr.CheckMachineInCrosfleet(machine):
- return 'crosfleet'
- else:
- raise RuntimeError('Lab machine not in Crosfleet.')
- return 'local'
-
- def _LockAllMachines(self, experiment):
- """Attempt to globally lock all of the machines requested for run.
-
- This method tries to lock all machines requested for this crosperf run
- in three different modes automatically, to prevent any other crosperf runs
- from being able to update/use the machines while this experiment is
- running:
- - Crosfleet machines: Use crosfleet lease-dut mechanism to lease
- - Local machines: Use file lock mechanism to lock
- """
- if test_flag.GetTestMode():
- self.locked_machines = self._GetMachineList()
- experiment.locked_machines = self.locked_machines
- else:
- experiment.lock_mgr = lock_machine.LockManager(
- self._GetMachineList(),
- '',
- experiment.labels[0].chromeos_root,
- experiment.locks_dir,
- log=self.l,
- )
- for m in experiment.lock_mgr.machines:
- machine_type = self._GetMachineType(experiment.lock_mgr, m)
- if machine_type == 'local':
- experiment.lock_mgr.AddMachineToLocal(m)
- elif machine_type == 'crosfleet':
- experiment.lock_mgr.AddMachineToCrosfleet(m)
- machine_states = experiment.lock_mgr.GetMachineStates('lock')
- experiment.lock_mgr.CheckMachineLocks(machine_states, 'lock')
- self.locked_machines = experiment.lock_mgr.UpdateMachines(True)
- experiment.locked_machines = self.locked_machines
- self._UpdateMachineList(self.locked_machines)
- experiment.machine_manager.RemoveNonLockedMachines(self.locked_machines)
- if not self.locked_machines:
- raise RuntimeError('Unable to lock any machines.')
-
- def _ClearCacheEntries(self, experiment):
- for br in experiment.benchmark_runs:
- cache = ResultsCache()
- cache.Init(br.label.chromeos_image, br.label.chromeos_root,
- br.benchmark.test_name, br.iteration, br.test_args,
- br.profiler_args, br.machine_manager, br.machine,
- br.label.board, br.cache_conditions, br.logger(),
- br.log_level, br.label, br.share_cache, br.benchmark.suite,
- br.benchmark.show_all_results, br.benchmark.run_local,
- br.benchmark.cwp_dso)
- cache_dir = cache.GetCacheDirForWrite()
- if os.path.exists(cache_dir):
- self.l.LogOutput('Removing cache dir: %s' % cache_dir)
- shutil.rmtree(cache_dir)
-
- def _Run(self, experiment):
- try:
- # We should not lease machines if tests are launched via `crosfleet
- # create-test`. This is because leasing DUT in crosfleet will create a
- # no-op task on the DUT and new test created will be hanging there.
- # TODO(zhizhouy): Need to check whether machine is ready or not before
- # assigning a test to it.
- if not experiment.no_lock and not experiment.crosfleet:
- self._LockAllMachines(experiment)
- # Calculate all checksums of avaiable/locked machines, to ensure same
- # label has same machines for testing
- experiment.SetCheckSums(forceSameImage=True)
- if self._using_schedv2:
- schedv2 = Schedv2(experiment)
- experiment.set_schedv2(schedv2)
- if CacheConditions.FALSE in experiment.cache_conditions:
- self._ClearCacheEntries(experiment)
- status = ExperimentStatus(experiment)
- experiment.Run()
- last_status_time = 0
- last_status_string = ''
- try:
- if experiment.log_level != 'verbose':
- self.l.LogStartDots()
- while not experiment.IsComplete():
- if last_status_time + self.STATUS_TIME_DELAY < time.time():
- last_status_time = time.time()
- border = '=============================='
- if experiment.log_level == 'verbose':
- self.l.LogOutput(border)
- self.l.LogOutput(status.GetProgressString())
- self.l.LogOutput(status.GetStatusString())
- self.l.LogOutput(border)
+ Returns:
+ The location of the machine: local or crosfleet
+ """
+ # We assume that lab machine always starts with chromeos*, and local
+ # machines are ip address.
+ if "chromeos" in machine:
+ if lock_mgr.CheckMachineInCrosfleet(machine):
+ return "crosfleet"
else:
- current_status_string = status.GetStatusString()
- if current_status_string != last_status_string:
- self.l.LogEndDots()
- self.l.LogOutput(border)
- self.l.LogOutput(current_status_string)
- self.l.LogOutput(border)
- last_status_string = current_status_string
- else:
- self.l.LogAppendDot()
- time.sleep(self.THREAD_MONITOR_DELAY)
- except KeyboardInterrupt:
- self._terminated = True
- self.l.LogError('Ctrl-c pressed. Cleaning up...')
- experiment.Terminate()
- raise
- except SystemExit:
- self._terminated = True
- self.l.LogError('Unexpected exit. Cleaning up...')
- experiment.Terminate()
- raise
- finally:
- experiment.Cleanup()
+ raise RuntimeError("Lab machine not in Crosfleet.")
+ return "local"
- def _PrintTable(self, experiment):
- self.l.LogOutput(TextResultsReport.FromExperiment(experiment).GetReport())
+ def _LockAllMachines(self, experiment):
+ """Attempt to globally lock all of the machines requested for run.
- def _Email(self, experiment):
- # Only email by default if a new run was completed.
- send_mail = False
- for benchmark_run in experiment.benchmark_runs:
- if not benchmark_run.cache_hit:
- send_mail = True
- break
- if (not send_mail and not experiment.email_to
- or config.GetConfig('no_email')):
- return
-
- label_names = []
- for label in experiment.labels:
- label_names.append(label.name)
- subject = '%s: %s' % (experiment.name, ' vs. '.join(label_names))
-
- text_report = TextResultsReport.FromExperiment(experiment,
- True).GetReport()
- text_report += ('\nResults are stored in %s.\n' %
- experiment.results_directory)
- text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report
- html_report = HTMLResultsReport.FromExperiment(experiment).GetReport()
- attachment = EmailSender.Attachment('report.html', html_report)
- email_to = experiment.email_to or []
- email_to.append(getpass.getuser())
- EmailSender().SendEmail(email_to,
- subject,
- text_report,
- attachments=[attachment],
- msg_type='html')
-
- def _StoreResults(self, experiment):
- if self._terminated:
- return self.ALL_FAILED
-
- results_directory = experiment.results_directory
- FileUtils().RmDir(results_directory)
- FileUtils().MkDirP(results_directory)
- self.l.LogOutput('Storing experiment file in %s.' % results_directory)
- experiment_file_path = os.path.join(results_directory, 'experiment.exp')
- FileUtils().WriteFile(experiment_file_path, experiment.experiment_file)
-
- has_failure = False
- all_failed = True
-
- topstats_file = os.path.join(results_directory, 'topstats.log')
- self.l.LogOutput('Storing top statistics of each benchmark run into %s.' %
- topstats_file)
- with open(topstats_file, 'w') as top_fd:
- for benchmark_run in experiment.benchmark_runs:
- if benchmark_run.result:
- # FIXME: Pylint has a bug suggesting the following change, which
- # should be fixed in pylint 2.0. Resolve this after pylint >= 2.0.
- # Bug: https://github.com/PyCQA/pylint/issues/1984
- # pylint: disable=simplifiable-if-statement
- if benchmark_run.result.retval:
- has_failure = True
- else:
- all_failed = False
- # Header with benchmark run name.
- top_fd.write('%s\n' % str(benchmark_run))
- # Formatted string with top statistics.
- top_fd.write(benchmark_run.result.FormatStringTopCommands())
- top_fd.write('\n\n')
-
- if all_failed:
- return self.ALL_FAILED
-
- self.l.LogOutput('Storing results of each benchmark run.')
- for benchmark_run in experiment.benchmark_runs:
- if benchmark_run.result:
- benchmark_run_name = ''.join(ch for ch in benchmark_run.name
- if ch.isalnum())
- benchmark_run_path = os.path.join(results_directory,
- benchmark_run_name)
- if experiment.compress_results:
- benchmark_run.result.CompressResultsTo(benchmark_run_path)
+ This method tries to lock all machines requested for this crosperf run
+ in three different modes automatically, to prevent any other crosperf runs
+ from being able to update/use the machines while this experiment is
+ running:
+ - Crosfleet machines: Use crosfleet lease-dut mechanism to lease
+ - Local machines: Use file lock mechanism to lock
+ """
+ if test_flag.GetTestMode():
+ self.locked_machines = self._GetMachineList()
+ experiment.locked_machines = self.locked_machines
else:
- benchmark_run.result.CopyResultsTo(benchmark_run_path)
- benchmark_run.result.CleanUp(benchmark_run.benchmark.rm_chroot_tmp)
+ experiment.lock_mgr = lock_machine.LockManager(
+ self._GetMachineList(),
+ "",
+ experiment.labels[0].chromeos_root,
+ experiment.locks_dir,
+ log=self.l,
+ )
+ for m in experiment.lock_mgr.machines:
+ machine_type = self._GetMachineType(experiment.lock_mgr, m)
+ if machine_type == "local":
+ experiment.lock_mgr.AddMachineToLocal(m)
+ elif machine_type == "crosfleet":
+ experiment.lock_mgr.AddMachineToCrosfleet(m)
+ machine_states = experiment.lock_mgr.GetMachineStates("lock")
+ experiment.lock_mgr.CheckMachineLocks(machine_states, "lock")
+ self.locked_machines = experiment.lock_mgr.UpdateMachines(True)
+ experiment.locked_machines = self.locked_machines
+ self._UpdateMachineList(self.locked_machines)
+ experiment.machine_manager.RemoveNonLockedMachines(
+ self.locked_machines
+ )
+ if not self.locked_machines:
+ raise RuntimeError("Unable to lock any machines.")
- self.l.LogOutput('Storing results report in %s.' % results_directory)
- results_table_path = os.path.join(results_directory, 'results.html')
- report = HTMLResultsReport.FromExperiment(experiment).GetReport()
- if self.json_report:
- json_report = JSONResultsReport.FromExperiment(experiment,
- json_args={'indent': 2})
- _WriteJSONReportToFile(experiment, results_directory, json_report)
+ def _ClearCacheEntries(self, experiment):
+ for br in experiment.benchmark_runs:
+ cache = ResultsCache()
+ cache.Init(
+ br.label.chromeos_image,
+ br.label.chromeos_root,
+ br.benchmark.test_name,
+ br.iteration,
+ br.test_args,
+ br.profiler_args,
+ br.machine_manager,
+ br.machine,
+ br.label.board,
+ br.cache_conditions,
+ br.logger(),
+ br.log_level,
+ br.label,
+ br.share_cache,
+ br.benchmark.suite,
+ br.benchmark.show_all_results,
+ br.benchmark.run_local,
+ br.benchmark.cwp_dso,
+ )
+ cache_dir = cache.GetCacheDirForWrite()
+ if os.path.exists(cache_dir):
+ self.l.LogOutput("Removing cache dir: %s" % cache_dir)
+ shutil.rmtree(cache_dir)
- FileUtils().WriteFile(results_table_path, report)
+ def _Run(self, experiment):
+ try:
+ # We should not lease machines if tests are launched via `crosfleet
+ # create-test`. This is because leasing DUT in crosfleet will create a
+ # no-op task on the DUT and new test created will be hanging there.
+ # TODO(zhizhouy): Need to check whether machine is ready or not before
+ # assigning a test to it.
+ if not experiment.no_lock and not experiment.crosfleet:
+ self._LockAllMachines(experiment)
+            # Calculate all checksums of available/locked machines, to ensure same
+ # label has same machines for testing
+ experiment.SetCheckSums(forceSameImage=True)
+ if self._using_schedv2:
+ schedv2 = Schedv2(experiment)
+ experiment.set_schedv2(schedv2)
+ if CacheConditions.FALSE in experiment.cache_conditions:
+ self._ClearCacheEntries(experiment)
+ status = ExperimentStatus(experiment)
+ experiment.Run()
+ last_status_time = 0
+ last_status_string = ""
+ try:
+ if experiment.log_level != "verbose":
+ self.l.LogStartDots()
+ while not experiment.IsComplete():
+ if last_status_time + self.STATUS_TIME_DELAY < time.time():
+ last_status_time = time.time()
+ border = "=============================="
+ if experiment.log_level == "verbose":
+ self.l.LogOutput(border)
+ self.l.LogOutput(status.GetProgressString())
+ self.l.LogOutput(status.GetStatusString())
+ self.l.LogOutput(border)
+ else:
+ current_status_string = status.GetStatusString()
+ if current_status_string != last_status_string:
+ self.l.LogEndDots()
+ self.l.LogOutput(border)
+ self.l.LogOutput(current_status_string)
+ self.l.LogOutput(border)
+ last_status_string = current_status_string
+ else:
+ self.l.LogAppendDot()
+ time.sleep(self.THREAD_MONITOR_DELAY)
+ except KeyboardInterrupt:
+ self._terminated = True
+ self.l.LogError("Ctrl-c pressed. Cleaning up...")
+ experiment.Terminate()
+ raise
+ except SystemExit:
+ self._terminated = True
+ self.l.LogError("Unexpected exit. Cleaning up...")
+ experiment.Terminate()
+ raise
+ finally:
+ experiment.Cleanup()
- self.l.LogOutput('Storing email message body in %s.' % results_directory)
- msg_file_path = os.path.join(results_directory, 'msg_body.html')
- text_report = TextResultsReport.FromExperiment(experiment,
- True).GetReport()
- text_report += ('\nResults are stored in %s.\n' %
- experiment.results_directory)
- msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report
- FileUtils().WriteFile(msg_file_path, msg_body)
+ def _PrintTable(self, experiment):
+ self.l.LogOutput(
+ TextResultsReport.FromExperiment(experiment).GetReport()
+ )
- return self.SUCCEEDED if not has_failure else self.HAS_FAILURE
+ def _Email(self, experiment):
+ # Only email by default if a new run was completed.
+ send_mail = False
+ for benchmark_run in experiment.benchmark_runs:
+ if not benchmark_run.cache_hit:
+ send_mail = True
+ break
+ if (
+ not send_mail
+ and not experiment.email_to
+ or config.GetConfig("no_email")
+ ):
+ return
- def Run(self):
- try:
- self._Run(self._experiment)
- finally:
- # Always print the report at the end of the run.
- self._PrintTable(self._experiment)
- ret = self._StoreResults(self._experiment)
- if ret != self.ALL_FAILED:
- self._Email(self._experiment)
- return ret
+ label_names = []
+ for label in experiment.labels:
+ label_names.append(label.name)
+ subject = "%s: %s" % (experiment.name, " vs. ".join(label_names))
+
+ text_report = TextResultsReport.FromExperiment(
+ experiment, True
+ ).GetReport()
+ text_report += (
+ "\nResults are stored in %s.\n" % experiment.results_directory
+ )
+ text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report
+ html_report = HTMLResultsReport.FromExperiment(experiment).GetReport()
+ attachment = EmailSender.Attachment("report.html", html_report)
+ email_to = experiment.email_to or []
+ email_to.append(getpass.getuser())
+ EmailSender().SendEmail(
+ email_to,
+ subject,
+ text_report,
+ attachments=[attachment],
+ msg_type="html",
+ )
+
+ def _StoreResults(self, experiment):
+ if self._terminated:
+ return self.ALL_FAILED
+
+ results_directory = experiment.results_directory
+ FileUtils().RmDir(results_directory)
+ FileUtils().MkDirP(results_directory)
+ self.l.LogOutput("Storing experiment file in %s." % results_directory)
+ experiment_file_path = os.path.join(results_directory, "experiment.exp")
+ FileUtils().WriteFile(experiment_file_path, experiment.experiment_file)
+
+ has_failure = False
+ all_failed = True
+
+ topstats_file = os.path.join(results_directory, "topstats.log")
+ self.l.LogOutput(
+ "Storing top statistics of each benchmark run into %s."
+ % topstats_file
+ )
+ with open(topstats_file, "w") as top_fd:
+ for benchmark_run in experiment.benchmark_runs:
+ if benchmark_run.result:
+ # FIXME: Pylint has a bug suggesting the following change, which
+ # should be fixed in pylint 2.0. Resolve this after pylint >= 2.0.
+ # Bug: https://github.com/PyCQA/pylint/issues/1984
+ # pylint: disable=simplifiable-if-statement
+ if benchmark_run.result.retval:
+ has_failure = True
+ else:
+ all_failed = False
+ # Header with benchmark run name.
+ top_fd.write("%s\n" % str(benchmark_run))
+ # Formatted string with top statistics.
+ top_fd.write(benchmark_run.result.FormatStringTopCommands())
+ top_fd.write("\n\n")
+
+ if all_failed:
+ return self.ALL_FAILED
+
+ self.l.LogOutput("Storing results of each benchmark run.")
+ for benchmark_run in experiment.benchmark_runs:
+ if benchmark_run.result:
+ benchmark_run_name = "".join(
+ ch for ch in benchmark_run.name if ch.isalnum()
+ )
+ benchmark_run_path = os.path.join(
+ results_directory, benchmark_run_name
+ )
+ if experiment.compress_results:
+ benchmark_run.result.CompressResultsTo(benchmark_run_path)
+ else:
+ benchmark_run.result.CopyResultsTo(benchmark_run_path)
+ benchmark_run.result.CleanUp(
+ benchmark_run.benchmark.rm_chroot_tmp
+ )
+
+ self.l.LogOutput("Storing results report in %s." % results_directory)
+ results_table_path = os.path.join(results_directory, "results.html")
+ report = HTMLResultsReport.FromExperiment(experiment).GetReport()
+ if self.json_report:
+ json_report = JSONResultsReport.FromExperiment(
+ experiment, json_args={"indent": 2}
+ )
+ _WriteJSONReportToFile(experiment, results_directory, json_report)
+
+ FileUtils().WriteFile(results_table_path, report)
+
+ self.l.LogOutput(
+ "Storing email message body in %s." % results_directory
+ )
+ msg_file_path = os.path.join(results_directory, "msg_body.html")
+ text_report = TextResultsReport.FromExperiment(
+ experiment, True
+ ).GetReport()
+ text_report += (
+ "\nResults are stored in %s.\n" % experiment.results_directory
+ )
+ msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report
+ FileUtils().WriteFile(msg_file_path, msg_body)
+
+ return self.SUCCEEDED if not has_failure else self.HAS_FAILURE
+
+ def Run(self):
+ try:
+ self._Run(self._experiment)
+ finally:
+ # Always print the report at the end of the run.
+ self._PrintTable(self._experiment)
+ ret = self._StoreResults(self._experiment)
+ if ret != self.ALL_FAILED:
+ self._Email(self._experiment)
+ return ret
class MockExperimentRunner(ExperimentRunner):
- """Mocked ExperimentRunner for testing."""
+ """Mocked ExperimentRunner for testing."""
- def __init__(self, experiment, json_report):
- super(MockExperimentRunner, self).__init__(experiment, json_report)
+ def __init__(self, experiment, json_report):
+ super(MockExperimentRunner, self).__init__(experiment, json_report)
- def _Run(self, experiment):
- self.l.LogOutput("Would run the following experiment: '%s'." %
- experiment.name)
+ def _Run(self, experiment):
+ self.l.LogOutput(
+ "Would run the following experiment: '%s'." % experiment.name
+ )
- def _PrintTable(self, experiment):
- self.l.LogOutput('Would print the experiment table.')
+ def _PrintTable(self, experiment):
+ self.l.LogOutput("Would print the experiment table.")
- def _Email(self, experiment):
- self.l.LogOutput('Would send result email.')
+ def _Email(self, experiment):
+ self.l.LogOutput("Would send result email.")
- def _StoreResults(self, experiment):
- self.l.LogOutput('Would store the results.')
+ def _StoreResults(self, experiment):
+ self.l.LogOutput("Would store the results.")
diff --git a/crosperf/experiment_runner_unittest.py b/crosperf/experiment_runner_unittest.py
index 31d02e7..a9a1263 100755
--- a/crosperf/experiment_runner_unittest.py
+++ b/crosperf/experiment_runner_unittest.py
@@ -1,37 +1,35 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for the experiment runner module."""
-from __future__ import print_function
import getpass
import io
import os
import time
-
import unittest
import unittest.mock as mock
-import experiment_runner
-import experiment_status
-import machine_manager
-import config
-import test_flag
-
-from experiment_factory import ExperimentFactory
-from experiment_file import ExperimentFile
-from results_cache import Result
-from results_report import HTMLResultsReport
-from results_report import TextResultsReport
-
from cros_utils import command_executer
from cros_utils.email_sender import EmailSender
from cros_utils.file_utils import FileUtils
+from experiment_factory import ExperimentFactory
+from experiment_file import ExperimentFile
+import experiment_runner
+import experiment_status
+import machine_manager
+from results_cache import Result
+from results_report import HTMLResultsReport
+from results_report import TextResultsReport
+import test_flag
+
+import config
+
EXPERIMENT_FILE_1 = """
board: parrot
@@ -57,445 +55,513 @@
class FakeLogger(object):
- """Fake logger for tests."""
+ """Fake logger for tests."""
- def __init__(self):
- self.LogOutputCount = 0
- self.LogErrorCount = 0
- self.output_msgs = []
- self.error_msgs = []
- self.dot_count = 0
- self.LogStartDotsCount = 0
- self.LogEndDotsCount = 0
- self.LogAppendDotCount = 0
+ def __init__(self):
+ self.LogOutputCount = 0
+ self.LogErrorCount = 0
+ self.output_msgs = []
+ self.error_msgs = []
+ self.dot_count = 0
+ self.LogStartDotsCount = 0
+ self.LogEndDotsCount = 0
+ self.LogAppendDotCount = 0
- def LogOutput(self, msg):
- self.LogOutputCount += 1
- self.output_msgs.append(msg)
+ def LogOutput(self, msg):
+ self.LogOutputCount += 1
+ self.output_msgs.append(msg)
- def LogError(self, msg):
- self.LogErrorCount += 1
- self.error_msgs.append(msg)
+ def LogError(self, msg):
+ self.LogErrorCount += 1
+ self.error_msgs.append(msg)
- def LogStartDots(self):
- self.LogStartDotsCount += 1
- self.dot_count += 1
+ def LogStartDots(self):
+ self.LogStartDotsCount += 1
+ self.dot_count += 1
- def LogAppendDot(self):
- self.LogAppendDotCount += 1
- self.dot_count += 1
+ def LogAppendDot(self):
+ self.LogAppendDotCount += 1
+ self.dot_count += 1
- def LogEndDots(self):
- self.LogEndDotsCount += 1
+ def LogEndDots(self):
+ self.LogEndDotsCount += 1
- def Reset(self):
- self.LogOutputCount = 0
- self.LogErrorCount = 0
- self.output_msgs = []
- self.error_msgs = []
- self.dot_count = 0
- self.LogStartDotsCount = 0
- self.LogEndDotsCount = 0
- self.LogAppendDotCount = 0
+ def Reset(self):
+ self.LogOutputCount = 0
+ self.LogErrorCount = 0
+ self.output_msgs = []
+ self.error_msgs = []
+ self.dot_count = 0
+ self.LogStartDotsCount = 0
+ self.LogEndDotsCount = 0
+ self.LogAppendDotCount = 0
class ExperimentRunnerTest(unittest.TestCase):
- """Test for experiment runner class."""
+ """Test for experiment runner class."""
- run_count = 0
- is_complete_count = 0
- mock_logger = FakeLogger()
- mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+ run_count = 0
+ is_complete_count = 0
+ mock_logger = FakeLogger()
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
- def make_fake_experiment(self):
- test_flag.SetTestMode(True)
- experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1))
- experiment = ExperimentFactory().GetExperiment(
- experiment_file, working_directory='', log_dir='')
- return experiment
+ def make_fake_experiment(self):
+ test_flag.SetTestMode(True)
+ experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1))
+ experiment = ExperimentFactory().GetExperiment(
+ experiment_file, working_directory="", log_dir=""
+ )
+ return experiment
- @mock.patch.object(machine_manager.MachineManager, 'AddMachine')
- @mock.patch.object(os.path, 'isfile')
+ @mock.patch.object(machine_manager.MachineManager, "AddMachine")
+ @mock.patch.object(os.path, "isfile")
- # pylint: disable=arguments-differ
- def setUp(self, mock_isfile, _mock_addmachine):
- mock_isfile.return_value = True
- self.exp = self.make_fake_experiment()
+ # pylint: disable=arguments-differ
+ def setUp(self, mock_isfile, _mock_addmachine):
+ mock_isfile.return_value = True
+ self.exp = self.make_fake_experiment()
- def test_init(self):
- er = experiment_runner.ExperimentRunner(
- self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
- self.assertFalse(er._terminated)
- self.assertEqual(er.STATUS_TIME_DELAY, 10)
+ def test_init(self):
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec,
+ )
+ self.assertFalse(er._terminated)
+ self.assertEqual(er.STATUS_TIME_DELAY, 10)
- self.exp.log_level = 'verbose'
- er = experiment_runner.ExperimentRunner(
- self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
- self.assertEqual(er.STATUS_TIME_DELAY, 30)
+ self.exp.log_level = "verbose"
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec,
+ )
+ self.assertEqual(er.STATUS_TIME_DELAY, 30)
- @mock.patch.object(time, 'time')
- @mock.patch.object(time, 'sleep')
- @mock.patch.object(experiment_status.ExperimentStatus, 'GetStatusString')
- @mock.patch.object(experiment_status.ExperimentStatus, 'GetProgressString')
- def test_run(self, mock_progress_string, mock_status_string, mock_sleep,
- mock_time):
+ @mock.patch.object(time, "time")
+ @mock.patch.object(time, "sleep")
+ @mock.patch.object(experiment_status.ExperimentStatus, "GetStatusString")
+ @mock.patch.object(experiment_status.ExperimentStatus, "GetProgressString")
+ def test_run(
+ self, mock_progress_string, mock_status_string, mock_sleep, mock_time
+ ):
- self.run_count = 0
- self.is_complete_count = 0
- mock_sleep.return_value = None
- # pylint: disable=range-builtin-not-iterating
- mock_time.side_effect = range(1, 50, 1)
+ self.run_count = 0
+ self.is_complete_count = 0
+ mock_sleep.return_value = None
+ # pylint: disable=range-builtin-not-iterating
+ mock_time.side_effect = range(1, 50, 1)
- def reset():
- self.run_count = 0
- self.is_complete_count = 0
+ def reset():
+ self.run_count = 0
+ self.is_complete_count = 0
- def FakeRun():
- self.run_count += 1
- return 0
+ def FakeRun():
+ self.run_count += 1
+ return 0
- def FakeIsComplete():
- self.is_complete_count += 1
- if self.is_complete_count < 6:
- return False
- else:
- return True
+ def FakeIsComplete():
+ self.is_complete_count += 1
+ if self.is_complete_count < 6:
+ return False
+ else:
+ return True
- self.mock_logger.Reset()
- self.exp.Run = FakeRun
- self.exp.IsComplete = FakeIsComplete
+ self.mock_logger.Reset()
+ self.exp.Run = FakeRun
+ self.exp.IsComplete = FakeIsComplete
- # Test 1: log_level == "quiet"
- self.exp.log_level = 'quiet'
- er = experiment_runner.ExperimentRunner(
- self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
- er.STATUS_TIME_DELAY = 2
- mock_status_string.return_value = 'Fake status string'
- er._Run(self.exp)
- self.assertEqual(self.run_count, 1)
- self.assertTrue(self.is_complete_count > 0)
- self.assertEqual(self.mock_logger.LogStartDotsCount, 1)
- self.assertEqual(self.mock_logger.LogAppendDotCount, 1)
- self.assertEqual(self.mock_logger.LogEndDotsCount, 1)
- self.assertEqual(self.mock_logger.dot_count, 2)
- self.assertEqual(mock_progress_string.call_count, 0)
- self.assertEqual(mock_status_string.call_count, 2)
- self.assertEqual(self.mock_logger.output_msgs, [
- '==============================', 'Fake status string',
- '=============================='
- ])
- self.assertEqual(len(self.mock_logger.error_msgs), 0)
+ # Test 1: log_level == "quiet"
+ self.exp.log_level = "quiet"
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec,
+ )
+ er.STATUS_TIME_DELAY = 2
+ mock_status_string.return_value = "Fake status string"
+ er._Run(self.exp)
+ self.assertEqual(self.run_count, 1)
+ self.assertTrue(self.is_complete_count > 0)
+ self.assertEqual(self.mock_logger.LogStartDotsCount, 1)
+ self.assertEqual(self.mock_logger.LogAppendDotCount, 1)
+ self.assertEqual(self.mock_logger.LogEndDotsCount, 1)
+ self.assertEqual(self.mock_logger.dot_count, 2)
+ self.assertEqual(mock_progress_string.call_count, 0)
+ self.assertEqual(mock_status_string.call_count, 2)
+ self.assertEqual(
+ self.mock_logger.output_msgs,
+ [
+ "==============================",
+ "Fake status string",
+ "==============================",
+ ],
+ )
+ self.assertEqual(len(self.mock_logger.error_msgs), 0)
- # Test 2: log_level == "average"
- self.mock_logger.Reset()
- reset()
- self.exp.log_level = 'average'
- mock_status_string.call_count = 0
- er = experiment_runner.ExperimentRunner(
- self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
- er.STATUS_TIME_DELAY = 2
- mock_status_string.return_value = 'Fake status string'
- er._Run(self.exp)
- self.assertEqual(self.run_count, 1)
- self.assertTrue(self.is_complete_count > 0)
- self.assertEqual(self.mock_logger.LogStartDotsCount, 1)
- self.assertEqual(self.mock_logger.LogAppendDotCount, 1)
- self.assertEqual(self.mock_logger.LogEndDotsCount, 1)
- self.assertEqual(self.mock_logger.dot_count, 2)
- self.assertEqual(mock_progress_string.call_count, 0)
- self.assertEqual(mock_status_string.call_count, 2)
- self.assertEqual(self.mock_logger.output_msgs, [
- '==============================', 'Fake status string',
- '=============================='
- ])
- self.assertEqual(len(self.mock_logger.error_msgs), 0)
+ # Test 2: log_level == "average"
+ self.mock_logger.Reset()
+ reset()
+ self.exp.log_level = "average"
+ mock_status_string.call_count = 0
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec,
+ )
+ er.STATUS_TIME_DELAY = 2
+ mock_status_string.return_value = "Fake status string"
+ er._Run(self.exp)
+ self.assertEqual(self.run_count, 1)
+ self.assertTrue(self.is_complete_count > 0)
+ self.assertEqual(self.mock_logger.LogStartDotsCount, 1)
+ self.assertEqual(self.mock_logger.LogAppendDotCount, 1)
+ self.assertEqual(self.mock_logger.LogEndDotsCount, 1)
+ self.assertEqual(self.mock_logger.dot_count, 2)
+ self.assertEqual(mock_progress_string.call_count, 0)
+ self.assertEqual(mock_status_string.call_count, 2)
+ self.assertEqual(
+ self.mock_logger.output_msgs,
+ [
+ "==============================",
+ "Fake status string",
+ "==============================",
+ ],
+ )
+ self.assertEqual(len(self.mock_logger.error_msgs), 0)
- # Test 3: log_level == "verbose"
- self.mock_logger.Reset()
- reset()
- self.exp.log_level = 'verbose'
- mock_status_string.call_count = 0
- er = experiment_runner.ExperimentRunner(
- self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
- er.STATUS_TIME_DELAY = 2
- mock_status_string.return_value = 'Fake status string'
- mock_progress_string.return_value = 'Fake progress string'
- er._Run(self.exp)
- self.assertEqual(self.run_count, 1)
- self.assertTrue(self.is_complete_count > 0)
- self.assertEqual(self.mock_logger.LogStartDotsCount, 0)
- self.assertEqual(self.mock_logger.LogAppendDotCount, 0)
- self.assertEqual(self.mock_logger.LogEndDotsCount, 0)
- self.assertEqual(self.mock_logger.dot_count, 0)
- self.assertEqual(mock_progress_string.call_count, 2)
- self.assertEqual(mock_status_string.call_count, 2)
- self.assertEqual(self.mock_logger.output_msgs, [
- '==============================', 'Fake progress string',
- 'Fake status string', '==============================',
- '==============================', 'Fake progress string',
- 'Fake status string', '=============================='
- ])
- self.assertEqual(len(self.mock_logger.error_msgs), 0)
+ # Test 3: log_level == "verbose"
+ self.mock_logger.Reset()
+ reset()
+ self.exp.log_level = "verbose"
+ mock_status_string.call_count = 0
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec,
+ )
+ er.STATUS_TIME_DELAY = 2
+ mock_status_string.return_value = "Fake status string"
+ mock_progress_string.return_value = "Fake progress string"
+ er._Run(self.exp)
+ self.assertEqual(self.run_count, 1)
+ self.assertTrue(self.is_complete_count > 0)
+ self.assertEqual(self.mock_logger.LogStartDotsCount, 0)
+ self.assertEqual(self.mock_logger.LogAppendDotCount, 0)
+ self.assertEqual(self.mock_logger.LogEndDotsCount, 0)
+ self.assertEqual(self.mock_logger.dot_count, 0)
+ self.assertEqual(mock_progress_string.call_count, 2)
+ self.assertEqual(mock_status_string.call_count, 2)
+ self.assertEqual(
+ self.mock_logger.output_msgs,
+ [
+ "==============================",
+ "Fake progress string",
+ "Fake status string",
+ "==============================",
+ "==============================",
+ "Fake progress string",
+ "Fake status string",
+ "==============================",
+ ],
+ )
+ self.assertEqual(len(self.mock_logger.error_msgs), 0)
- @mock.patch.object(TextResultsReport, 'GetReport')
- def test_print_table(self, mock_report):
- self.mock_logger.Reset()
- mock_report.return_value = 'This is a fake experiment report.'
- er = experiment_runner.ExperimentRunner(
- self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
- er._PrintTable(self.exp)
- self.assertEqual(mock_report.call_count, 1)
- self.assertEqual(self.mock_logger.output_msgs,
- ['This is a fake experiment report.'])
+ @mock.patch.object(TextResultsReport, "GetReport")
+ def test_print_table(self, mock_report):
+ self.mock_logger.Reset()
+ mock_report.return_value = "This is a fake experiment report."
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec,
+ )
+ er._PrintTable(self.exp)
+ self.assertEqual(mock_report.call_count, 1)
+ self.assertEqual(
+ self.mock_logger.output_msgs, ["This is a fake experiment report."]
+ )
- @mock.patch.object(HTMLResultsReport, 'GetReport')
- @mock.patch.object(TextResultsReport, 'GetReport')
- @mock.patch.object(EmailSender, 'Attachment')
- @mock.patch.object(EmailSender, 'SendEmail')
- @mock.patch.object(getpass, 'getuser')
- def test_email(self, mock_getuser, mock_emailer, mock_attachment,
- mock_text_report, mock_html_report):
+ @mock.patch.object(HTMLResultsReport, "GetReport")
+ @mock.patch.object(TextResultsReport, "GetReport")
+ @mock.patch.object(EmailSender, "Attachment")
+ @mock.patch.object(EmailSender, "SendEmail")
+ @mock.patch.object(getpass, "getuser")
+ def test_email(
+ self,
+ mock_getuser,
+ mock_emailer,
+ mock_attachment,
+ mock_text_report,
+ mock_html_report,
+ ):
- mock_getuser.return_value = 'john.smith@google.com'
- mock_text_report.return_value = 'This is a fake text report.'
- mock_html_report.return_value = 'This is a fake html report.'
+ mock_getuser.return_value = "john.smith@google.com"
+ mock_text_report.return_value = "This is a fake text report."
+ mock_html_report.return_value = "This is a fake html report."
- self.mock_logger.Reset()
- config.AddConfig('no_email', True)
- self.exp.email_to = ['jane.doe@google.com']
- er = experiment_runner.ExperimentRunner(
- self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
- # Test 1. Config:no_email; exp.email_to set ==> no email sent
- er._Email(self.exp)
- self.assertEqual(mock_getuser.call_count, 0)
- self.assertEqual(mock_emailer.call_count, 0)
- self.assertEqual(mock_attachment.call_count, 0)
- self.assertEqual(mock_text_report.call_count, 0)
- self.assertEqual(mock_html_report.call_count, 0)
+ self.mock_logger.Reset()
+ config.AddConfig("no_email", True)
+ self.exp.email_to = ["jane.doe@google.com"]
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec,
+ )
+ # Test 1. Config:no_email; exp.email_to set ==> no email sent
+ er._Email(self.exp)
+ self.assertEqual(mock_getuser.call_count, 0)
+ self.assertEqual(mock_emailer.call_count, 0)
+ self.assertEqual(mock_attachment.call_count, 0)
+ self.assertEqual(mock_text_report.call_count, 0)
+ self.assertEqual(mock_html_report.call_count, 0)
- # Test 2. Config: email. exp.email_to set; cache hit. => send email
- self.mock_logger.Reset()
- config.AddConfig('no_email', False)
- for r in self.exp.benchmark_runs:
- r.cache_hit = True
- er._Email(self.exp)
- self.assertEqual(mock_getuser.call_count, 1)
- self.assertEqual(mock_emailer.call_count, 1)
- self.assertEqual(mock_attachment.call_count, 1)
- self.assertEqual(mock_text_report.call_count, 1)
- self.assertEqual(mock_html_report.call_count, 1)
- self.assertEqual(len(mock_emailer.call_args), 2)
- self.assertEqual(mock_emailer.call_args[0],
- (['jane.doe@google.com', 'john.smith@google.com'
- ], ': image1 vs. image2',
- "<pre style='font-size: 13px'>This is a fake text "
- 'report.\nResults are stored in _results.\n</pre>'))
- self.assertTrue(isinstance(mock_emailer.call_args[1], dict))
- self.assertEqual(len(mock_emailer.call_args[1]), 2)
- self.assertTrue('attachments' in mock_emailer.call_args[1].keys())
- self.assertEqual(mock_emailer.call_args[1]['msg_type'], 'html')
+ # Test 2. Config: email. exp.email_to set; cache hit. => send email
+ self.mock_logger.Reset()
+ config.AddConfig("no_email", False)
+ for r in self.exp.benchmark_runs:
+ r.cache_hit = True
+ er._Email(self.exp)
+ self.assertEqual(mock_getuser.call_count, 1)
+ self.assertEqual(mock_emailer.call_count, 1)
+ self.assertEqual(mock_attachment.call_count, 1)
+ self.assertEqual(mock_text_report.call_count, 1)
+ self.assertEqual(mock_html_report.call_count, 1)
+ self.assertEqual(len(mock_emailer.call_args), 2)
+ self.assertEqual(
+ mock_emailer.call_args[0],
+ (
+ ["jane.doe@google.com", "john.smith@google.com"],
+ ": image1 vs. image2",
+ "<pre style='font-size: 13px'>This is a fake text "
+ "report.\nResults are stored in _results.\n</pre>",
+ ),
+ )
+ self.assertTrue(isinstance(mock_emailer.call_args[1], dict))
+ self.assertEqual(len(mock_emailer.call_args[1]), 2)
+ self.assertTrue("attachments" in mock_emailer.call_args[1].keys())
+ self.assertEqual(mock_emailer.call_args[1]["msg_type"], "html")
- mock_attachment.assert_called_with('report.html',
- 'This is a fake html report.')
+ mock_attachment.assert_called_with(
+ "report.html", "This is a fake html report."
+ )
- # Test 3. Config: email; exp.mail_to set; no cache hit. => send email
- self.mock_logger.Reset()
- mock_getuser.reset_mock()
- mock_emailer.reset_mock()
- mock_attachment.reset_mock()
- mock_text_report.reset_mock()
- mock_html_report.reset_mock()
- config.AddConfig('no_email', False)
- for r in self.exp.benchmark_runs:
- r.cache_hit = False
- er._Email(self.exp)
- self.assertEqual(mock_getuser.call_count, 1)
- self.assertEqual(mock_emailer.call_count, 1)
- self.assertEqual(mock_attachment.call_count, 1)
- self.assertEqual(mock_text_report.call_count, 1)
- self.assertEqual(mock_html_report.call_count, 1)
- self.assertEqual(len(mock_emailer.call_args), 2)
- self.assertEqual(mock_emailer.call_args[0],
- ([
- 'jane.doe@google.com', 'john.smith@google.com',
- 'john.smith@google.com'
- ], ': image1 vs. image2',
- "<pre style='font-size: 13px'>This is a fake text "
- 'report.\nResults are stored in _results.\n</pre>'))
- self.assertTrue(isinstance(mock_emailer.call_args[1], dict))
- self.assertEqual(len(mock_emailer.call_args[1]), 2)
- self.assertTrue('attachments' in mock_emailer.call_args[1].keys())
- self.assertEqual(mock_emailer.call_args[1]['msg_type'], 'html')
+ # Test 3. Config: email; exp.mail_to set; no cache hit. => send email
+ self.mock_logger.Reset()
+ mock_getuser.reset_mock()
+ mock_emailer.reset_mock()
+ mock_attachment.reset_mock()
+ mock_text_report.reset_mock()
+ mock_html_report.reset_mock()
+ config.AddConfig("no_email", False)
+ for r in self.exp.benchmark_runs:
+ r.cache_hit = False
+ er._Email(self.exp)
+ self.assertEqual(mock_getuser.call_count, 1)
+ self.assertEqual(mock_emailer.call_count, 1)
+ self.assertEqual(mock_attachment.call_count, 1)
+ self.assertEqual(mock_text_report.call_count, 1)
+ self.assertEqual(mock_html_report.call_count, 1)
+ self.assertEqual(len(mock_emailer.call_args), 2)
+ self.assertEqual(
+ mock_emailer.call_args[0],
+ (
+ [
+ "jane.doe@google.com",
+ "john.smith@google.com",
+ "john.smith@google.com",
+ ],
+ ": image1 vs. image2",
+ "<pre style='font-size: 13px'>This is a fake text "
+ "report.\nResults are stored in _results.\n</pre>",
+ ),
+ )
+ self.assertTrue(isinstance(mock_emailer.call_args[1], dict))
+ self.assertEqual(len(mock_emailer.call_args[1]), 2)
+ self.assertTrue("attachments" in mock_emailer.call_args[1].keys())
+ self.assertEqual(mock_emailer.call_args[1]["msg_type"], "html")
- mock_attachment.assert_called_with('report.html',
- 'This is a fake html report.')
+ mock_attachment.assert_called_with(
+ "report.html", "This is a fake html report."
+ )
- # Test 4. Config: email; exp.mail_to = None; no cache hit. => send email
- self.mock_logger.Reset()
- mock_getuser.reset_mock()
- mock_emailer.reset_mock()
- mock_attachment.reset_mock()
- mock_text_report.reset_mock()
- mock_html_report.reset_mock()
- self.exp.email_to = []
- er._Email(self.exp)
- self.assertEqual(mock_getuser.call_count, 1)
- self.assertEqual(mock_emailer.call_count, 1)
- self.assertEqual(mock_attachment.call_count, 1)
- self.assertEqual(mock_text_report.call_count, 1)
- self.assertEqual(mock_html_report.call_count, 1)
- self.assertEqual(len(mock_emailer.call_args), 2)
- self.assertEqual(mock_emailer.call_args[0],
- (['john.smith@google.com'], ': image1 vs. image2',
- "<pre style='font-size: 13px'>This is a fake text "
- 'report.\nResults are stored in _results.\n</pre>'))
- self.assertTrue(isinstance(mock_emailer.call_args[1], dict))
- self.assertEqual(len(mock_emailer.call_args[1]), 2)
- self.assertTrue('attachments' in mock_emailer.call_args[1].keys())
- self.assertEqual(mock_emailer.call_args[1]['msg_type'], 'html')
+ # Test 4. Config: email; exp.mail_to = None; no cache hit. => send email
+ self.mock_logger.Reset()
+ mock_getuser.reset_mock()
+ mock_emailer.reset_mock()
+ mock_attachment.reset_mock()
+ mock_text_report.reset_mock()
+ mock_html_report.reset_mock()
+ self.exp.email_to = []
+ er._Email(self.exp)
+ self.assertEqual(mock_getuser.call_count, 1)
+ self.assertEqual(mock_emailer.call_count, 1)
+ self.assertEqual(mock_attachment.call_count, 1)
+ self.assertEqual(mock_text_report.call_count, 1)
+ self.assertEqual(mock_html_report.call_count, 1)
+ self.assertEqual(len(mock_emailer.call_args), 2)
+ self.assertEqual(
+ mock_emailer.call_args[0],
+ (
+ ["john.smith@google.com"],
+ ": image1 vs. image2",
+ "<pre style='font-size: 13px'>This is a fake text "
+ "report.\nResults are stored in _results.\n</pre>",
+ ),
+ )
+ self.assertTrue(isinstance(mock_emailer.call_args[1], dict))
+ self.assertEqual(len(mock_emailer.call_args[1]), 2)
+ self.assertTrue("attachments" in mock_emailer.call_args[1].keys())
+ self.assertEqual(mock_emailer.call_args[1]["msg_type"], "html")
- mock_attachment.assert_called_with('report.html',
- 'This is a fake html report.')
+ mock_attachment.assert_called_with(
+ "report.html", "This is a fake html report."
+ )
- # Test 5. Config: email; exp.mail_to = None; cache hit => no email sent
- self.mock_logger.Reset()
- mock_getuser.reset_mock()
- mock_emailer.reset_mock()
- mock_attachment.reset_mock()
- mock_text_report.reset_mock()
- mock_html_report.reset_mock()
- for r in self.exp.benchmark_runs:
- r.cache_hit = True
- er._Email(self.exp)
- self.assertEqual(mock_getuser.call_count, 0)
- self.assertEqual(mock_emailer.call_count, 0)
- self.assertEqual(mock_attachment.call_count, 0)
- self.assertEqual(mock_text_report.call_count, 0)
- self.assertEqual(mock_html_report.call_count, 0)
+ # Test 5. Config: email; exp.mail_to = None; cache hit => no email sent
+ self.mock_logger.Reset()
+ mock_getuser.reset_mock()
+ mock_emailer.reset_mock()
+ mock_attachment.reset_mock()
+ mock_text_report.reset_mock()
+ mock_html_report.reset_mock()
+ for r in self.exp.benchmark_runs:
+ r.cache_hit = True
+ er._Email(self.exp)
+ self.assertEqual(mock_getuser.call_count, 0)
+ self.assertEqual(mock_emailer.call_count, 0)
+ self.assertEqual(mock_attachment.call_count, 0)
+ self.assertEqual(mock_text_report.call_count, 0)
+ self.assertEqual(mock_html_report.call_count, 0)
- @mock.patch.object(FileUtils, 'RmDir')
- @mock.patch.object(FileUtils, 'MkDirP')
- @mock.patch.object(FileUtils, 'WriteFile')
- @mock.patch.object(HTMLResultsReport, 'FromExperiment')
- @mock.patch.object(TextResultsReport, 'FromExperiment')
- @mock.patch.object(Result, 'CompressResultsTo')
- @mock.patch.object(Result, 'CopyResultsTo')
- @mock.patch.object(Result, 'CleanUp')
- @mock.patch.object(Result, 'FormatStringTopCommands')
- @mock.patch('builtins.open', new_callable=mock.mock_open)
- def test_store_results(self, mock_open, mock_top_commands, mock_cleanup,
- mock_copy, mock_compress, _mock_text_report,
- mock_report, mock_writefile, mock_mkdir, mock_rmdir):
+ @mock.patch.object(FileUtils, "RmDir")
+ @mock.patch.object(FileUtils, "MkDirP")
+ @mock.patch.object(FileUtils, "WriteFile")
+ @mock.patch.object(HTMLResultsReport, "FromExperiment")
+ @mock.patch.object(TextResultsReport, "FromExperiment")
+ @mock.patch.object(Result, "CompressResultsTo")
+ @mock.patch.object(Result, "CopyResultsTo")
+ @mock.patch.object(Result, "CleanUp")
+ @mock.patch.object(Result, "FormatStringTopCommands")
+ @mock.patch("builtins.open", new_callable=mock.mock_open)
+ def test_store_results(
+ self,
+ mock_open,
+ mock_top_commands,
+ mock_cleanup,
+ mock_copy,
+ mock_compress,
+ _mock_text_report,
+ mock_report,
+ mock_writefile,
+ mock_mkdir,
+ mock_rmdir,
+ ):
- self.mock_logger.Reset()
- self.exp.results_directory = '/usr/local/crosperf-results'
- bench_run = self.exp.benchmark_runs[5]
- bench_path = '/usr/local/crosperf-results/' + ''.join(
- ch for ch in bench_run.name if ch.isalnum())
- self.assertEqual(len(self.exp.benchmark_runs), 6)
+ self.mock_logger.Reset()
+ self.exp.results_directory = "/usr/local/crosperf-results"
+ bench_run = self.exp.benchmark_runs[5]
+ bench_path = "/usr/local/crosperf-results/" + "".join(
+ ch for ch in bench_run.name if ch.isalnum()
+ )
+ self.assertEqual(len(self.exp.benchmark_runs), 6)
- er = experiment_runner.ExperimentRunner(
- self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec,
+ )
- # Test 1. Make sure nothing is done if _terminated is true.
- er._terminated = True
- er._StoreResults(self.exp)
- self.assertEqual(mock_cleanup.call_count, 0)
- self.assertEqual(mock_copy.call_count, 0)
- self.assertEqual(mock_compress.call_count, 0)
- self.assertEqual(mock_report.call_count, 0)
- self.assertEqual(mock_writefile.call_count, 0)
- self.assertEqual(mock_mkdir.call_count, 0)
- self.assertEqual(mock_rmdir.call_count, 0)
- self.assertEqual(self.mock_logger.LogOutputCount, 0)
- self.assertEqual(mock_open.call_count, 0)
- self.assertEqual(mock_top_commands.call_count, 0)
+ # Test 1. Make sure nothing is done if _terminated is true.
+ er._terminated = True
+ er._StoreResults(self.exp)
+ self.assertEqual(mock_cleanup.call_count, 0)
+ self.assertEqual(mock_copy.call_count, 0)
+ self.assertEqual(mock_compress.call_count, 0)
+ self.assertEqual(mock_report.call_count, 0)
+ self.assertEqual(mock_writefile.call_count, 0)
+ self.assertEqual(mock_mkdir.call_count, 0)
+ self.assertEqual(mock_rmdir.call_count, 0)
+ self.assertEqual(self.mock_logger.LogOutputCount, 0)
+ self.assertEqual(mock_open.call_count, 0)
+ self.assertEqual(mock_top_commands.call_count, 0)
- # Test 2. _terminated is false; everything works properly.
- fake_result = Result(self.mock_logger, self.exp.labels[0], 'average',
- 'daisy1')
- for r in self.exp.benchmark_runs:
- r.result = fake_result
- er._terminated = False
- self.exp.compress_results = False
- er._StoreResults(self.exp)
- self.assertEqual(mock_cleanup.call_count, 6)
- mock_cleanup.assert_called_with(bench_run.benchmark.rm_chroot_tmp)
- self.assertEqual(mock_copy.call_count, 6)
- mock_copy.assert_called_with(bench_path)
- self.assertEqual(mock_writefile.call_count, 3)
- self.assertEqual(len(mock_writefile.call_args_list), 3)
- first_args = mock_writefile.call_args_list[0]
- second_args = mock_writefile.call_args_list[1]
- self.assertEqual(first_args[0][0],
- '/usr/local/crosperf-results/experiment.exp')
- self.assertEqual(second_args[0][0],
- '/usr/local/crosperf-results/results.html')
- self.assertEqual(mock_mkdir.call_count, 1)
- mock_mkdir.assert_called_with('/usr/local/crosperf-results')
- self.assertEqual(mock_rmdir.call_count, 1)
- mock_rmdir.assert_called_with('/usr/local/crosperf-results')
- self.assertEqual(self.mock_logger.LogOutputCount, 5)
- self.assertEqual(self.mock_logger.output_msgs, [
- 'Storing experiment file in /usr/local/crosperf-results.',
- 'Storing top statistics of each benchmark run into'
- ' /usr/local/crosperf-results/topstats.log.',
- 'Storing results of each benchmark run.',
- 'Storing results report in /usr/local/crosperf-results.',
- 'Storing email message body in /usr/local/crosperf-results.',
- ])
- self.assertEqual(mock_open.call_count, 1)
- # Check write to a topstats.log file.
- mock_open.assert_called_with('/usr/local/crosperf-results/topstats.log',
- 'w')
- mock_open().write.assert_called()
+ # Test 2. _terminated is false; everything works properly.
+ fake_result = Result(
+ self.mock_logger, self.exp.labels[0], "average", "daisy1"
+ )
+ for r in self.exp.benchmark_runs:
+ r.result = fake_result
+ er._terminated = False
+ self.exp.compress_results = False
+ er._StoreResults(self.exp)
+ self.assertEqual(mock_cleanup.call_count, 6)
+ mock_cleanup.assert_called_with(bench_run.benchmark.rm_chroot_tmp)
+ self.assertEqual(mock_copy.call_count, 6)
+ mock_copy.assert_called_with(bench_path)
+ self.assertEqual(mock_writefile.call_count, 3)
+ self.assertEqual(len(mock_writefile.call_args_list), 3)
+ first_args = mock_writefile.call_args_list[0]
+ second_args = mock_writefile.call_args_list[1]
+ self.assertEqual(
+ first_args[0][0], "/usr/local/crosperf-results/experiment.exp"
+ )
+ self.assertEqual(
+ second_args[0][0], "/usr/local/crosperf-results/results.html"
+ )
+ self.assertEqual(mock_mkdir.call_count, 1)
+ mock_mkdir.assert_called_with("/usr/local/crosperf-results")
+ self.assertEqual(mock_rmdir.call_count, 1)
+ mock_rmdir.assert_called_with("/usr/local/crosperf-results")
+ self.assertEqual(self.mock_logger.LogOutputCount, 5)
+ self.assertEqual(
+ self.mock_logger.output_msgs,
+ [
+ "Storing experiment file in /usr/local/crosperf-results.",
+ "Storing top statistics of each benchmark run into"
+ " /usr/local/crosperf-results/topstats.log.",
+ "Storing results of each benchmark run.",
+ "Storing results report in /usr/local/crosperf-results.",
+ "Storing email message body in /usr/local/crosperf-results.",
+ ],
+ )
+ self.assertEqual(mock_open.call_count, 1)
+ # Check write to a topstats.log file.
+ mock_open.assert_called_with(
+ "/usr/local/crosperf-results/topstats.log", "w"
+ )
+ mock_open().write.assert_called()
- # Check top calls with no arguments.
- topcalls = [mock.call()] * 6
- self.assertEqual(mock_top_commands.call_args_list, topcalls)
+ # Check top calls with no arguments.
+ topcalls = [mock.call()] * 6
+ self.assertEqual(mock_top_commands.call_args_list, topcalls)
- # Test 3. Test compress_results.
- self.exp.compress_results = True
- mock_copy.call_count = 0
- mock_compress.call_count = 0
- er._StoreResults(self.exp)
- self.assertEqual(mock_copy.call_count, 0)
- mock_copy.assert_called_with(bench_path)
- self.assertEqual(mock_compress.call_count, 6)
- mock_compress.assert_called_with(bench_path)
+ # Test 3. Test compress_results.
+ self.exp.compress_results = True
+ mock_copy.call_count = 0
+ mock_compress.call_count = 0
+ er._StoreResults(self.exp)
+ self.assertEqual(mock_copy.call_count, 0)
+ mock_copy.assert_called_with(bench_path)
+ self.assertEqual(mock_compress.call_count, 6)
+ mock_compress.assert_called_with(bench_path)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/crosperf/experiment_status.py b/crosperf/experiment_status.py
index 2ac47c7..fa6b1ee 100644
--- a/crosperf/experiment_status.py
+++ b/crosperf/experiment_status.py
@@ -1,12 +1,10 @@
# -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The class to show the banner."""
-from __future__ import division
-from __future__ import print_function
import collections
import datetime
@@ -14,136 +12,156 @@
class ExperimentStatus(object):
- """The status class."""
+ """The status class."""
- def __init__(self, experiment):
- self.experiment = experiment
- self.num_total = len(self.experiment.benchmark_runs)
- self.completed = 0
- self.new_job_start_time = time.time()
- self.log_level = experiment.log_level
+ def __init__(self, experiment):
+ self.experiment = experiment
+ self.num_total = len(self.experiment.benchmark_runs)
+ self.completed = 0
+ self.new_job_start_time = time.time()
+ self.log_level = experiment.log_level
- def _GetProgressBar(self, num_complete, num_total):
- ret = 'Done: %s%%' % int(100.0 * num_complete / num_total)
- bar_length = 50
- done_char = '>'
- undone_char = ' '
- num_complete_chars = bar_length * num_complete // num_total
- num_undone_chars = bar_length - num_complete_chars
- ret += ' [%s%s]' % (num_complete_chars * done_char,
- num_undone_chars * undone_char)
- return ret
+ def _GetProgressBar(self, num_complete, num_total):
+ ret = "Done: %s%%" % int(100.0 * num_complete / num_total)
+ bar_length = 50
+ done_char = ">"
+ undone_char = " "
+ num_complete_chars = bar_length * num_complete // num_total
+ num_undone_chars = bar_length - num_complete_chars
+ ret += " [%s%s]" % (
+ num_complete_chars * done_char,
+ num_undone_chars * undone_char,
+ )
+ return ret
- def GetProgressString(self):
- """Get the elapsed_time, ETA."""
- current_time = time.time()
- if self.experiment.start_time:
- elapsed_time = current_time - self.experiment.start_time
- else:
- elapsed_time = 0
- try:
- if self.completed != self.experiment.num_complete:
- self.completed = self.experiment.num_complete
- self.new_job_start_time = current_time
- time_completed_jobs = (
- elapsed_time - (current_time - self.new_job_start_time))
- # eta is calculated as:
- # ETA = (num_jobs_not_yet_started * estimated_time_per_job)
- # + time_left_for_current_job
- #
- # where
- # num_jobs_not_yet_started = (num_total - num_complete - 1)
- #
- # estimated_time_per_job = time_completed_jobs / num_run_complete
- #
- # time_left_for_current_job = estimated_time_per_job -
- # time_spent_so_far_on_current_job
- #
- # The biggest problem with this calculation is its assumption that
- # all jobs have roughly the same running time (blatantly false!).
- #
- # ETA can come out negative if the time spent on the current job is
- # greater than the estimated time per job (e.g. you're running the
- # first long job, after a series of short jobs). For now, if that
- # happens, we set the ETA to "Unknown."
- #
- eta_seconds = (
- float(self.num_total - self.experiment.num_complete - 1) *
- time_completed_jobs / self.experiment.num_run_complete +
- (time_completed_jobs / self.experiment.num_run_complete -
- (current_time - self.new_job_start_time)))
+ def GetProgressString(self):
+ """Get the elapsed_time, ETA."""
+ current_time = time.time()
+ if self.experiment.start_time:
+ elapsed_time = current_time - self.experiment.start_time
+ else:
+ elapsed_time = 0
+ try:
+ if self.completed != self.experiment.num_complete:
+ self.completed = self.experiment.num_complete
+ self.new_job_start_time = current_time
+ time_completed_jobs = elapsed_time - (
+ current_time - self.new_job_start_time
+ )
+ # eta is calculated as:
+ # ETA = (num_jobs_not_yet_started * estimated_time_per_job)
+ # + time_left_for_current_job
+ #
+ # where
+ # num_jobs_not_yet_started = (num_total - num_complete - 1)
+ #
+ # estimated_time_per_job = time_completed_jobs / num_run_complete
+ #
+ # time_left_for_current_job = estimated_time_per_job -
+ # time_spent_so_far_on_current_job
+ #
+ # The biggest problem with this calculation is its assumption that
+ # all jobs have roughly the same running time (blatantly false!).
+ #
+ # ETA can come out negative if the time spent on the current job is
+ # greater than the estimated time per job (e.g. you're running the
+ # first long job, after a series of short jobs). For now, if that
+ # happens, we set the ETA to "Unknown."
+ #
+ eta_seconds = float(
+ self.num_total - self.experiment.num_complete - 1
+ ) * time_completed_jobs / self.experiment.num_run_complete + (
+ time_completed_jobs / self.experiment.num_run_complete
+ - (current_time - self.new_job_start_time)
+ )
- eta_seconds = int(eta_seconds)
- if eta_seconds > 0:
- eta = datetime.timedelta(seconds=eta_seconds)
- else:
- eta = 'Unknown'
- except ZeroDivisionError:
- eta = 'Unknown'
- strings = []
- strings.append('Current time: %s Elapsed: %s ETA: %s' %
- (datetime.datetime.now(),
- datetime.timedelta(seconds=int(elapsed_time)), eta))
- strings.append(
- self._GetProgressBar(self.experiment.num_complete, self.num_total))
- return '\n'.join(strings)
+ eta_seconds = int(eta_seconds)
+ if eta_seconds > 0:
+ eta = datetime.timedelta(seconds=eta_seconds)
+ else:
+ eta = "Unknown"
+ except ZeroDivisionError:
+ eta = "Unknown"
+ strings = []
+ strings.append(
+ "Current time: %s Elapsed: %s ETA: %s"
+ % (
+ datetime.datetime.now(),
+ datetime.timedelta(seconds=int(elapsed_time)),
+ eta,
+ )
+ )
+ strings.append(
+ self._GetProgressBar(self.experiment.num_complete, self.num_total)
+ )
+ return "\n".join(strings)
- def GetStatusString(self):
- """Get the status string of all the benchmark_runs."""
- status_bins = collections.defaultdict(list)
- for benchmark_run in self.experiment.benchmark_runs:
- status_bins[benchmark_run.timeline.GetLastEvent()].append(benchmark_run)
+ def GetStatusString(self):
+ """Get the status string of all the benchmark_runs."""
+ status_bins = collections.defaultdict(list)
+ for benchmark_run in self.experiment.benchmark_runs:
+ status_bins[benchmark_run.timeline.GetLastEvent()].append(
+ benchmark_run
+ )
- status_strings = []
- for key, val in status_bins.items():
- if key == 'RUNNING':
- get_description = self._GetNamesAndIterations
- else:
- get_description = self._GetCompactNamesAndIterations
- status_strings.append('%s: %s' % (key, get_description(val)))
+ status_strings = []
+ for key, val in status_bins.items():
+ if key == "RUNNING":
+ get_description = self._GetNamesAndIterations
+ else:
+ get_description = self._GetCompactNamesAndIterations
+ status_strings.append("%s: %s" % (key, get_description(val)))
- thread_status = ''
- thread_status_format = 'Thread Status: \n{}\n'
- if (self.experiment.schedv2() is None and
- self.experiment.log_level == 'verbose'):
- # Add the machine manager status.
- thread_status = thread_status_format.format(
- self.experiment.machine_manager.AsString())
- elif self.experiment.schedv2():
- # In schedv2 mode, we always print out thread status.
- thread_status = thread_status_format.format(
- self.experiment.schedv2().threads_status_as_string())
+ thread_status = ""
+ thread_status_format = "Thread Status: \n{}\n"
+ if (
+ self.experiment.schedv2() is None
+ and self.experiment.log_level == "verbose"
+ ):
+ # Add the machine manager status.
+ thread_status = thread_status_format.format(
+ self.experiment.machine_manager.AsString()
+ )
+ elif self.experiment.schedv2():
+ # In schedv2 mode, we always print out thread status.
+ thread_status = thread_status_format.format(
+ self.experiment.schedv2().threads_status_as_string()
+ )
- result = '{}{}'.format(thread_status, '\n'.join(status_strings))
+ result = "{}{}".format(thread_status, "\n".join(status_strings))
- return result
+ return result
- def _GetNamesAndIterations(self, benchmark_runs):
- strings = []
- t = time.time()
- for benchmark_run in benchmark_runs:
- t_last = benchmark_run.timeline.GetLastEventTime()
- elapsed = str(datetime.timedelta(seconds=int(t - t_last)))
- strings.append("'{0}' {1}".format(benchmark_run.name, elapsed))
- return ' %s (%s)' % (len(strings), ', '.join(strings))
+ def _GetNamesAndIterations(self, benchmark_runs):
+ strings = []
+ t = time.time()
+ for benchmark_run in benchmark_runs:
+ t_last = benchmark_run.timeline.GetLastEventTime()
+ elapsed = str(datetime.timedelta(seconds=int(t - t_last)))
+ strings.append("'{0}' {1}".format(benchmark_run.name, elapsed))
+ return " %s (%s)" % (len(strings), ", ".join(strings))
- def _GetCompactNamesAndIterations(self, benchmark_runs):
- grouped_benchmarks = collections.defaultdict(list)
- for benchmark_run in benchmark_runs:
- grouped_benchmarks[benchmark_run.label.name].append(benchmark_run)
+ def _GetCompactNamesAndIterations(self, benchmark_runs):
+ grouped_benchmarks = collections.defaultdict(list)
+ for benchmark_run in benchmark_runs:
+ grouped_benchmarks[benchmark_run.label.name].append(benchmark_run)
- output_segs = []
- for label_name, label_runs in grouped_benchmarks.items():
- strings = []
- benchmark_iterations = collections.defaultdict(list)
- for benchmark_run in label_runs:
- assert benchmark_run.label.name == label_name
- benchmark_name = benchmark_run.benchmark.name
- benchmark_iterations[benchmark_name].append(benchmark_run.iteration)
- for key, val in benchmark_iterations.items():
- val.sort()
- iterations = ','.join(str(v) for v in val)
- strings.append('{} [{}]'.format(key, iterations))
- output_segs.append(' ' + label_name + ': ' + ', '.join(strings) + '\n')
+ output_segs = []
+ for label_name, label_runs in grouped_benchmarks.items():
+ strings = []
+ benchmark_iterations = collections.defaultdict(list)
+ for benchmark_run in label_runs:
+ assert benchmark_run.label.name == label_name
+ benchmark_name = benchmark_run.benchmark.name
+ benchmark_iterations[benchmark_name].append(
+ benchmark_run.iteration
+ )
+ for key, val in benchmark_iterations.items():
+ val.sort()
+ iterations = ",".join(str(v) for v in val)
+ strings.append("{} [{}]".format(key, iterations))
+ output_segs.append(
+ " " + label_name + ": " + ", ".join(strings) + "\n"
+ )
- return ' %s \n%s' % (len(benchmark_runs), ''.join(output_segs))
+ return " %s \n%s" % (len(benchmark_runs), "".join(output_segs))
diff --git a/crosperf/field.py b/crosperf/field.py
index f6300f9..6b5ea11 100644
--- a/crosperf/field.py
+++ b/crosperf/field.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -7,150 +7,161 @@
class Field(object):
- """Class representing a Field in an experiment file."""
+ """Class representing a Field in an experiment file."""
- def __init__(self, name, required, default, inheritable, description):
- self.name = name
- self.required = required
- self.assigned = False
- self.default = default
- self._value = default
- self.inheritable = inheritable
- self.description = description
+ def __init__(self, name, required, default, inheritable, description):
+ self.name = name
+ self.required = required
+ self.assigned = False
+ self.default = default
+ self._value = default
+ self.inheritable = inheritable
+ self.description = description
- def Set(self, value, parse=True):
- if parse:
- self._value = self._Parse(value)
- else:
- self._value = value
- self.assigned = True
+ def Set(self, value, parse=True):
+ if parse:
+ self._value = self._Parse(value)
+ else:
+ self._value = value
+ self.assigned = True
- def Append(self, value):
- self._value += self._Parse(value)
- self.assigned = True
+ def Append(self, value):
+ self._value += self._Parse(value)
+ self.assigned = True
- def _Parse(self, value):
- return value
+ def _Parse(self, value):
+ return value
- def Get(self):
- return self._value
+ def Get(self):
+ return self._value
- def GetString(self):
- return str(self._value)
+ def GetString(self):
+ return str(self._value)
class TextField(Field):
- """Class of text field."""
+ """Class of text field."""
- def __init__(self,
- name,
- required=False,
- default='',
- inheritable=False,
- description=''):
- super(TextField, self).__init__(name, required, default, inheritable,
- description)
+ def __init__(
+ self,
+ name,
+ required=False,
+ default="",
+ inheritable=False,
+ description="",
+ ):
+ super(TextField, self).__init__(
+ name, required, default, inheritable, description
+ )
- def _Parse(self, value):
- return str(value)
+ def _Parse(self, value):
+ return str(value)
class BooleanField(Field):
- """Class of boolean field."""
+ """Class of boolean field."""
- def __init__(self,
- name,
- required=False,
- default=False,
- inheritable=False,
- description=''):
- super(BooleanField, self).__init__(name, required, default, inheritable,
- description)
+ def __init__(
+ self,
+ name,
+ required=False,
+ default=False,
+ inheritable=False,
+ description="",
+ ):
+ super(BooleanField, self).__init__(
+ name, required, default, inheritable, description
+ )
- def _Parse(self, value):
- if value.lower() == 'true':
- return True
- elif value.lower() == 'false':
- return False
- raise TypeError(
- "Invalid value for '%s'. Must be true or false." % self.name)
+ def _Parse(self, value):
+ if value.lower() == "true":
+ return True
+ elif value.lower() == "false":
+ return False
+ raise TypeError(
+ "Invalid value for '%s'. Must be true or false." % self.name
+ )
class IntegerField(Field):
- """Class of integer field."""
+ """Class of integer field."""
- def __init__(self,
- name,
- required=False,
- default=0,
- inheritable=False,
- description=''):
- super(IntegerField, self).__init__(name, required, default, inheritable,
- description)
+ def __init__(
+ self, name, required=False, default=0, inheritable=False, description=""
+ ):
+ super(IntegerField, self).__init__(
+ name, required, default, inheritable, description
+ )
- def _Parse(self, value):
- return int(value)
+ def _Parse(self, value):
+ return int(value)
class FloatField(Field):
- """Class of float field."""
+ """Class of float field."""
- def __init__(self,
- name,
- required=False,
- default=0,
- inheritable=False,
- description=''):
- super(FloatField, self).__init__(name, required, default, inheritable,
- description)
+ def __init__(
+ self, name, required=False, default=0, inheritable=False, description=""
+ ):
+ super(FloatField, self).__init__(
+ name, required, default, inheritable, description
+ )
- def _Parse(self, value):
- return float(value)
+ def _Parse(self, value):
+ return float(value)
class ListField(Field):
- """Class of list field."""
+ """Class of list field."""
- def __init__(self,
- name,
- required=False,
- default=None,
- inheritable=False,
- description=''):
- super(ListField, self).__init__(name, required, default, inheritable,
- description)
+ def __init__(
+ self,
+ name,
+ required=False,
+ default=None,
+ inheritable=False,
+ description="",
+ ):
+ super(ListField, self).__init__(
+ name, required, default, inheritable, description
+ )
- def _Parse(self, value):
- return value.split()
+ def _Parse(self, value):
+ return value.split()
- def GetString(self):
- return ' '.join(self._value)
+ def GetString(self):
+ return " ".join(self._value)
- def Append(self, value):
- v = self._Parse(value)
- if not self._value:
- self._value = v
- else:
- self._value += v
- self.assigned = True
+ def Append(self, value):
+ v = self._Parse(value)
+ if not self._value:
+ self._value = v
+ else:
+ self._value += v
+ self.assigned = True
class EnumField(Field):
- """Class of enum field."""
+ """Class of enum field."""
- def __init__(self,
- name,
- options,
- required=False,
- default='',
- inheritable=False,
- description=''):
- super(EnumField, self).__init__(name, required, default, inheritable,
- description)
- self.options = options
+ def __init__(
+ self,
+ name,
+ options,
+ required=False,
+ default="",
+ inheritable=False,
+ description="",
+ ):
+ super(EnumField, self).__init__(
+ name, required, default, inheritable, description
+ )
+ self.options = options
- def _Parse(self, value):
- if value not in self.options:
- raise TypeError("Invalid enum value for field '%s'. Must be one of (%s)" %
- (self.name, ', '.join(self.options)))
- return str(value)
+ def _Parse(self, value):
+ if value not in self.options:
+ raise TypeError(
+ "Invalid enum value for field '%s'. Must be one of (%s)"
+ % (self.name, ", ".join(self.options))
+ )
+ return str(value)
diff --git a/crosperf/flag_test_unittest.py b/crosperf/flag_test_unittest.py
index 1e77c8a..024849c 100755
--- a/crosperf/flag_test_unittest.py
+++ b/crosperf/flag_test_unittest.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The unittest of flags."""
-from __future__ import print_function
import unittest
@@ -14,32 +13,32 @@
class FlagTestCase(unittest.TestCase):
- """The unittest class."""
+ """The unittest class."""
- def test_test_flag(self):
- # Verify that test_flag.is_test exists, that it is a list,
- # and that it contains 1 element.
- self.assertTrue(isinstance(test_flag.is_test, list))
- self.assertEqual(len(test_flag.is_test), 1)
+ def test_test_flag(self):
+ # Verify that test_flag.is_test exists, that it is a list,
+ # and that it contains 1 element.
+ self.assertTrue(isinstance(test_flag.is_test, list))
+ self.assertEqual(len(test_flag.is_test), 1)
- # Verify that the getting the flag works and that the flag
- # contains False, its starting value.
- save_flag = test_flag.GetTestMode()
- self.assertFalse(save_flag)
+        # Verify that getting the flag works and that the flag
+ # contains False, its starting value.
+ save_flag = test_flag.GetTestMode()
+ self.assertFalse(save_flag)
- # Verify that setting the flat to True, then getting it, works.
- test_flag.SetTestMode(True)
- self.assertTrue(test_flag.GetTestMode())
+        # Verify that setting the flag to True, then getting it, works.
+ test_flag.SetTestMode(True)
+ self.assertTrue(test_flag.GetTestMode())
- # Verify that setting the flag to False, then getting it, works.
- test_flag.SetTestMode(save_flag)
- self.assertFalse(test_flag.GetTestMode())
+ # Verify that setting the flag to False, then getting it, works.
+ test_flag.SetTestMode(save_flag)
+ self.assertFalse(test_flag.GetTestMode())
- # Verify that test_flag.is_test still exists, that it still is a
- # list, and that it still contains 1 element.
- self.assertTrue(isinstance(test_flag.is_test, list))
- self.assertEqual(len(test_flag.is_test), 1)
+ # Verify that test_flag.is_test still exists, that it still is a
+ # list, and that it still contains 1 element.
+ self.assertTrue(isinstance(test_flag.is_test, list))
+ self.assertEqual(len(test_flag.is_test), 1)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/crosperf/generate_report.py b/crosperf/generate_report.py
index bae365d..55c1321 100755
--- a/crosperf/generate_report.py
+++ b/crosperf/generate_report.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Copyright 2016 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -44,8 +44,6 @@
and 1.423ms. None of the runs failed to complete.
"""
-from __future__ import division
-from __future__ import print_function
import argparse
import functools
@@ -61,223 +59,248 @@
def CountBenchmarks(benchmark_runs):
- """Counts the number of iterations for each benchmark in benchmark_runs."""
+ """Counts the number of iterations for each benchmark in benchmark_runs."""
- # Example input for benchmark_runs:
- # {"bench": [[run1, run2, run3], [run1, run2, run3, run4]]}
- def _MaxLen(results):
- return 0 if not results else max(len(r) for r in results)
+ # Example input for benchmark_runs:
+ # {"bench": [[run1, run2, run3], [run1, run2, run3, run4]]}
+ def _MaxLen(results):
+ return 0 if not results else max(len(r) for r in results)
- return [(name, _MaxLen(results)) for name, results in benchmark_runs.items()]
+ return [
+ (name, _MaxLen(results)) for name, results in benchmark_runs.items()
+ ]
def CutResultsInPlace(results, max_keys=50, complain_on_update=True):
- """Limits the given benchmark results to max_keys keys in-place.
+ """Limits the given benchmark results to max_keys keys in-place.
- This takes the `data` field from the benchmark input, and mutates each
- benchmark run to contain `max_keys` elements (ignoring special elements, like
- "retval"). At the moment, it just selects the first `max_keys` keyvals,
- alphabetically.
+ This takes the `data` field from the benchmark input, and mutates each
+ benchmark run to contain `max_keys` elements (ignoring special elements, like
+ "retval"). At the moment, it just selects the first `max_keys` keyvals,
+ alphabetically.
- If complain_on_update is true, this will print a message noting that a
- truncation occurred.
+ If complain_on_update is true, this will print a message noting that a
+ truncation occurred.
- This returns the `results` object that was passed in, for convenience.
+ This returns the `results` object that was passed in, for convenience.
- e.g.
- >>> benchmark_data = {
- ... "bench_draw_line": [
- ... [{"time (ms)": 1.321, "memory (mb)": 128.1, "retval": 0},
- ... {"time (ms)": 1.920, "memory (mb)": 128.4, "retval": 0}],
- ... [{"time (ms)": 1.221, "memory (mb)": 124.3, "retval": 0},
- ... {"time (ms)": 1.423, "memory (mb)": 123.9, "retval": 0}]
- ... ]
- ... }
- >>> CutResultsInPlace(benchmark_data, max_keys=1, complain_on_update=False)
- {
- 'bench_draw_line': [
- [{'memory (mb)': 128.1, 'retval': 0},
- {'memory (mb)': 128.4, 'retval': 0}],
- [{'memory (mb)': 124.3, 'retval': 0},
- {'memory (mb)': 123.9, 'retval': 0}]
- ]
- }
- """
- actually_updated = False
- for bench_results in results.values():
- for platform_results in bench_results:
- for i, result in enumerate(platform_results):
- # Keep the keys that come earliest when sorted alphabetically.
- # Forcing alphabetical order is arbitrary, but necessary; otherwise,
- # the keyvals we'd emit would depend on our iteration order through a
- # map.
- removable_keys = sorted(k for k in result if k != 'retval')
- retained_keys = removable_keys[:max_keys]
- platform_results[i] = {k: result[k] for k in retained_keys}
- # retval needs to be passed through all of the time.
- retval = result.get('retval')
- if retval is not None:
- platform_results[i]['retval'] = retval
- actually_updated = actually_updated or \
- len(retained_keys) != len(removable_keys)
+ e.g.
+ >>> benchmark_data = {
+ ... "bench_draw_line": [
+ ... [{"time (ms)": 1.321, "memory (mb)": 128.1, "retval": 0},
+ ... {"time (ms)": 1.920, "memory (mb)": 128.4, "retval": 0}],
+ ... [{"time (ms)": 1.221, "memory (mb)": 124.3, "retval": 0},
+ ... {"time (ms)": 1.423, "memory (mb)": 123.9, "retval": 0}]
+ ... ]
+ ... }
+ >>> CutResultsInPlace(benchmark_data, max_keys=1, complain_on_update=False)
+ {
+ 'bench_draw_line': [
+ [{'memory (mb)': 128.1, 'retval': 0},
+ {'memory (mb)': 128.4, 'retval': 0}],
+ [{'memory (mb)': 124.3, 'retval': 0},
+ {'memory (mb)': 123.9, 'retval': 0}]
+ ]
+ }
+ """
+ actually_updated = False
+ for bench_results in results.values():
+ for platform_results in bench_results:
+ for i, result in enumerate(platform_results):
+ # Keep the keys that come earliest when sorted alphabetically.
+ # Forcing alphabetical order is arbitrary, but necessary; otherwise,
+ # the keyvals we'd emit would depend on our iteration order through a
+ # map.
+ removable_keys = sorted(k for k in result if k != "retval")
+ retained_keys = removable_keys[:max_keys]
+ platform_results[i] = {k: result[k] for k in retained_keys}
+ # retval needs to be passed through all of the time.
+ retval = result.get("retval")
+ if retval is not None:
+ platform_results[i]["retval"] = retval
+ actually_updated = actually_updated or len(
+ retained_keys
+ ) != len(removable_keys)
- if actually_updated and complain_on_update:
- print(
- 'Warning: Some benchmark keyvals have been truncated.', file=sys.stderr)
- return results
+ if actually_updated and complain_on_update:
+ print(
+ "Warning: Some benchmark keyvals have been truncated.",
+ file=sys.stderr,
+ )
+ return results
def _PositiveInt(s):
- i = int(s)
- if i < 0:
- raise argparse.ArgumentTypeError('%d is not a positive integer.' % (i,))
- return i
+ i = int(s)
+ if i < 0:
+ raise argparse.ArgumentTypeError("%d is not a positive integer." % (i,))
+ return i
def _AccumulateActions(args):
- """Given program arguments, determines what actions we want to run.
+ """Given program arguments, determines what actions we want to run.
- Returns [(ResultsReportCtor, str)], where ResultsReportCtor can construct a
- ResultsReport, and the str is the file extension for the given report.
- """
- results = []
- # The order of these is arbitrary.
- if args.json:
- results.append((JSONResultsReport, 'json'))
- if args.text:
- results.append((TextResultsReport, 'txt'))
- if args.email:
- email_ctor = functools.partial(TextResultsReport, email=True)
- results.append((email_ctor, 'email'))
- # We emit HTML if nothing else was specified.
- if args.html or not results:
- results.append((HTMLResultsReport, 'html'))
- return results
+ Returns [(ResultsReportCtor, str)], where ResultsReportCtor can construct a
+ ResultsReport, and the str is the file extension for the given report.
+ """
+ results = []
+ # The order of these is arbitrary.
+ if args.json:
+ results.append((JSONResultsReport, "json"))
+ if args.text:
+ results.append((TextResultsReport, "txt"))
+ if args.email:
+ email_ctor = functools.partial(TextResultsReport, email=True)
+ results.append((email_ctor, "email"))
+ # We emit HTML if nothing else was specified.
+ if args.html or not results:
+ results.append((HTMLResultsReport, "html"))
+ return results
# Note: get_contents is a function, because it may be expensive (generating some
# HTML reports takes O(seconds) on my machine, depending on the size of the
# input data).
def WriteFile(output_prefix, extension, get_contents, overwrite, verbose):
- """Writes `contents` to a file named "${output_prefix}.${extension}".
+ """Writes `contents` to a file named "${output_prefix}.${extension}".
- get_contents should be a zero-args function that returns a string (of the
- contents to write).
- If output_prefix == '-', this writes to stdout.
- If overwrite is False, this will not overwrite files.
- """
- if output_prefix == '-':
- if verbose:
- print('Writing %s report to stdout' % (extension,), file=sys.stderr)
- sys.stdout.write(get_contents())
- return
+ get_contents should be a zero-args function that returns a string (of the
+ contents to write).
+ If output_prefix == '-', this writes to stdout.
+ If overwrite is False, this will not overwrite files.
+ """
+ if output_prefix == "-":
+ if verbose:
+ print("Writing %s report to stdout" % (extension,), file=sys.stderr)
+ sys.stdout.write(get_contents())
+ return
- file_name = '%s.%s' % (output_prefix, extension)
- if not overwrite and os.path.exists(file_name):
- raise IOError('Refusing to write %s -- it already exists' % (file_name,))
+ file_name = "%s.%s" % (output_prefix, extension)
+ if not overwrite and os.path.exists(file_name):
+ raise IOError(
+ "Refusing to write %s -- it already exists" % (file_name,)
+ )
- with open(file_name, 'w') as out_file:
- if verbose:
- print('Writing %s report to %s' % (extension, file_name), file=sys.stderr)
- out_file.write(get_contents())
+ with open(file_name, "w") as out_file:
+ if verbose:
+ print(
+ "Writing %s report to %s" % (extension, file_name),
+ file=sys.stderr,
+ )
+ out_file.write(get_contents())
def RunActions(actions, benchmark_results, output_prefix, overwrite, verbose):
- """Runs `actions`, returning True if all succeeded."""
- failed = False
+ """Runs `actions`, returning True if all succeeded."""
+ failed = False
- report_ctor = None # Make the linter happy
- for report_ctor, extension in actions:
- try:
- get_contents = lambda: report_ctor(benchmark_results).GetReport()
- WriteFile(output_prefix, extension, get_contents, overwrite, verbose)
- except Exception:
- # Complain and move along; we may have more actions that might complete
- # successfully.
- failed = True
- traceback.print_exc()
- return not failed
+ report_ctor = None # Make the linter happy
+ for report_ctor, extension in actions:
+ try:
+ get_contents = lambda: report_ctor(benchmark_results).GetReport()
+ WriteFile(
+ output_prefix, extension, get_contents, overwrite, verbose
+ )
+ except Exception:
+ # Complain and move along; we may have more actions that might complete
+ # successfully.
+ failed = True
+ traceback.print_exc()
+ return not failed
def PickInputFile(input_name):
- """Given program arguments, returns file to read for benchmark input."""
- return sys.stdin if input_name == '-' else open(input_name)
+ """Given program arguments, returns file to read for benchmark input."""
+ return sys.stdin if input_name == "-" else open(input_name)
def _NoPerfReport(_label_name, _benchmark_name, _benchmark_iteration):
- return {}
+ return {}
def _ParseArgs(argv):
- parser = argparse.ArgumentParser(description='Turns JSON into results '
- 'report(s).')
- parser.add_argument(
- '-v',
- '--verbose',
- action='store_true',
- help='Be a tiny bit more verbose.')
- parser.add_argument(
- '-f',
- '--force',
- action='store_true',
- help='Overwrite existing results files.')
- parser.add_argument(
- '-o',
- '--output',
- default='report',
- type=str,
- help='Prefix of the output filename (default: report). '
- '- means stdout.')
- parser.add_argument(
- '-i',
- '--input',
- required=True,
- type=str,
- help='Where to read the JSON from. - means stdin.')
- parser.add_argument(
- '-l',
- '--statistic-limit',
- default=0,
- type=_PositiveInt,
- help='The maximum number of benchmark statistics to '
- 'display from a single run. 0 implies unlimited.')
- parser.add_argument(
- '--json', action='store_true', help='Output a JSON report.')
- parser.add_argument(
- '--text', action='store_true', help='Output a text report.')
- parser.add_argument(
- '--email',
- action='store_true',
- help='Output a text report suitable for email.')
- parser.add_argument(
- '--html',
- action='store_true',
- help='Output an HTML report (this is the default if no '
- 'other output format is specified).')
- return parser.parse_args(argv)
+ parser = argparse.ArgumentParser(
+ description="Turns JSON into results " "report(s)."
+ )
+ parser.add_argument(
+ "-v",
+ "--verbose",
+ action="store_true",
+ help="Be a tiny bit more verbose.",
+ )
+ parser.add_argument(
+ "-f",
+ "--force",
+ action="store_true",
+ help="Overwrite existing results files.",
+ )
+ parser.add_argument(
+ "-o",
+ "--output",
+ default="report",
+ type=str,
+ help="Prefix of the output filename (default: report). "
+ "- means stdout.",
+ )
+ parser.add_argument(
+ "-i",
+ "--input",
+ required=True,
+ type=str,
+ help="Where to read the JSON from. - means stdin.",
+ )
+ parser.add_argument(
+ "-l",
+ "--statistic-limit",
+ default=0,
+ type=_PositiveInt,
+ help="The maximum number of benchmark statistics to "
+ "display from a single run. 0 implies unlimited.",
+ )
+ parser.add_argument(
+ "--json", action="store_true", help="Output a JSON report."
+ )
+ parser.add_argument(
+ "--text", action="store_true", help="Output a text report."
+ )
+ parser.add_argument(
+ "--email",
+ action="store_true",
+ help="Output a text report suitable for email.",
+ )
+ parser.add_argument(
+ "--html",
+ action="store_true",
+ help="Output an HTML report (this is the default if no "
+ "other output format is specified).",
+ )
+ return parser.parse_args(argv)
def Main(argv):
- args = _ParseArgs(argv)
- with PickInputFile(args.input) as in_file:
- raw_results = json.load(in_file)
+ args = _ParseArgs(argv)
+ with PickInputFile(args.input) as in_file:
+ raw_results = json.load(in_file)
- platform_names = raw_results['platforms']
- results = raw_results['data']
- if args.statistic_limit:
- results = CutResultsInPlace(results, max_keys=args.statistic_limit)
- benches = CountBenchmarks(results)
- # In crosperf, a label is essentially a platform+configuration. So, a name of
- # a label and a name of a platform are equivalent for our purposes.
- bench_results = BenchmarkResults(
- label_names=platform_names,
- benchmark_names_and_iterations=benches,
- run_keyvals=results,
- read_perf_report=_NoPerfReport)
- actions = _AccumulateActions(args)
- ok = RunActions(actions, bench_results, args.output, args.force, args.verbose)
- return 0 if ok else 1
+ platform_names = raw_results["platforms"]
+ results = raw_results["data"]
+ if args.statistic_limit:
+ results = CutResultsInPlace(results, max_keys=args.statistic_limit)
+ benches = CountBenchmarks(results)
+ # In crosperf, a label is essentially a platform+configuration. So, a name of
+ # a label and a name of a platform are equivalent for our purposes.
+ bench_results = BenchmarkResults(
+ label_names=platform_names,
+ benchmark_names_and_iterations=benches,
+ run_keyvals=results,
+ read_perf_report=_NoPerfReport,
+ )
+ actions = _AccumulateActions(args)
+ ok = RunActions(
+ actions, bench_results, args.output, args.force, args.verbose
+ )
+ return 0 if ok else 1
-if __name__ == '__main__':
- sys.exit(Main(sys.argv[1:]))
+if __name__ == "__main__":
+ sys.exit(Main(sys.argv[1:]))
diff --git a/crosperf/generate_report_unittest.py b/crosperf/generate_report_unittest.py
index 8c3510a..86bbc16 100755
--- a/crosperf/generate_report_unittest.py
+++ b/crosperf/generate_report_unittest.py
@@ -1,13 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Copyright 2016 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test for generate_report.py."""
-from __future__ import division
-from __future__ import print_function
import copy
import json
@@ -18,161 +16,159 @@
import results_report
import test_flag
+
# pylint: disable=deprecated-module
try:
- from StringIO import StringIO # for Python 2
+ from StringIO import StringIO # for Python 2
except ImportError:
- from io import StringIO # for Python 3
+ from io import StringIO # for Python 3
class _ContextualStringIO(StringIO):
- """StringIO that can be used in `with` statements."""
+ """StringIO that can be used in `with` statements."""
- def __init__(self, *args):
- StringIO.__init__(self, *args)
+ def __init__(self, *args):
+ StringIO.__init__(self, *args)
- def __enter__(self):
- return self
+ def __enter__(self):
+ return self
- def __exit__(self, _type, _value, _traceback):
- pass
+ def __exit__(self, _type, _value, _traceback):
+ pass
class GenerateReportTests(unittest.TestCase):
- """Tests for generate_report.py."""
+ """Tests for generate_report.py."""
- def testCountBenchmarks(self):
- runs = {
- 'foo': [[{}, {}, {}], [{}, {}, {}, {}]],
- 'bar': [],
- 'baz': [[], [{}], [{}, {}, {}]]
- }
- results = generate_report.CountBenchmarks(runs)
- expected_results = [('foo', 4), ('bar', 0), ('baz', 3)]
- self.assertCountEqual(expected_results, results)
+ def testCountBenchmarks(self):
+ runs = {
+ "foo": [[{}, {}, {}], [{}, {}, {}, {}]],
+ "bar": [],
+ "baz": [[], [{}], [{}, {}, {}]],
+ }
+ results = generate_report.CountBenchmarks(runs)
+ expected_results = [("foo", 4), ("bar", 0), ("baz", 3)]
+ self.assertCountEqual(expected_results, results)
- def testCutResultsInPlace(self):
- bench_data = {
- 'foo': [[{
- 'a': 1,
- 'b': 2,
- 'c': 3
- }, {
- 'a': 3,
- 'b': 2.5,
- 'c': 1
- }]],
- 'bar': [[{
- 'd': 11,
- 'e': 12,
- 'f': 13
- }]],
- 'baz': [[{
- 'g': 12,
- 'h': 13
- }]],
- 'qux': [[{
- 'i': 11
- }]],
- }
- original_bench_data = copy.deepcopy(bench_data)
+ def testCutResultsInPlace(self):
+ bench_data = {
+ "foo": [[{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 2.5, "c": 1}]],
+ "bar": [[{"d": 11, "e": 12, "f": 13}]],
+ "baz": [[{"g": 12, "h": 13}]],
+ "qux": [[{"i": 11}]],
+ }
+ original_bench_data = copy.deepcopy(bench_data)
- max_keys = 2
- results = generate_report.CutResultsInPlace(
- bench_data, max_keys=max_keys, complain_on_update=False)
- # Cuts should be in-place.
- self.assertIs(results, bench_data)
- self.assertCountEqual(
- list(original_bench_data.keys()), list(bench_data.keys()))
- for bench_name, original_runs in original_bench_data.items():
- bench_runs = bench_data[bench_name]
- self.assertEqual(len(original_runs), len(bench_runs))
- # Order of these sub-lists shouldn't have changed.
- for original_list, new_list in zip(original_runs, bench_runs):
- self.assertEqual(len(original_list), len(new_list))
- for original_keyvals, sub_keyvals in zip(original_list, new_list):
- # sub_keyvals must be a subset of original_keyvals
- self.assertDictContainsSubset(sub_keyvals, original_keyvals)
+ max_keys = 2
+ results = generate_report.CutResultsInPlace(
+ bench_data, max_keys=max_keys, complain_on_update=False
+ )
+ # Cuts should be in-place.
+ self.assertIs(results, bench_data)
+ self.assertCountEqual(
+ list(original_bench_data.keys()), list(bench_data.keys())
+ )
+ for bench_name, original_runs in original_bench_data.items():
+ bench_runs = bench_data[bench_name]
+ self.assertEqual(len(original_runs), len(bench_runs))
+ # Order of these sub-lists shouldn't have changed.
+ for original_list, new_list in zip(original_runs, bench_runs):
+ self.assertEqual(len(original_list), len(new_list))
+ for original_keyvals, sub_keyvals in zip(
+ original_list, new_list
+ ):
+ # sub_keyvals must be a subset of original_keyvals
+ self.assertDictContainsSubset(sub_keyvals, original_keyvals)
- def testCutResultsInPlaceLeavesRetval(self):
- bench_data = {
- 'foo': [[{
- 'retval': 0,
- 'a': 1
- }]],
- 'bar': [[{
- 'retval': 1
- }]],
- 'baz': [[{
- 'RETVAL': 1
- }]],
- }
- results = generate_report.CutResultsInPlace(
- bench_data, max_keys=0, complain_on_update=False)
- # Just reach into results assuming we know it otherwise outputs things in
- # the expected way. If it doesn't, testCutResultsInPlace should give an
- # indication as to what, exactly, is broken.
- self.assertEqual(list(results['foo'][0][0].items()), [('retval', 0)])
- self.assertEqual(list(results['bar'][0][0].items()), [('retval', 1)])
- self.assertEqual(list(results['baz'][0][0].items()), [])
+ def testCutResultsInPlaceLeavesRetval(self):
+ bench_data = {
+ "foo": [[{"retval": 0, "a": 1}]],
+ "bar": [[{"retval": 1}]],
+ "baz": [[{"RETVAL": 1}]],
+ }
+ results = generate_report.CutResultsInPlace(
+ bench_data, max_keys=0, complain_on_update=False
+ )
+ # Just reach into results assuming we know it otherwise outputs things in
+ # the expected way. If it doesn't, testCutResultsInPlace should give an
+ # indication as to what, exactly, is broken.
+ self.assertEqual(list(results["foo"][0][0].items()), [("retval", 0)])
+ self.assertEqual(list(results["bar"][0][0].items()), [("retval", 1)])
+ self.assertEqual(list(results["baz"][0][0].items()), [])
- def _RunMainWithInput(self, args, input_obj):
- assert '-i' not in args
- args += ['-i', '-']
- input_buf = _ContextualStringIO(json.dumps(input_obj))
- with mock.patch('generate_report.PickInputFile', return_value=input_buf) \
- as patched_pick:
- result = generate_report.Main(args)
- patched_pick.assert_called_once_with('-')
- return result
+ def _RunMainWithInput(self, args, input_obj):
+ assert "-i" not in args
+ args += ["-i", "-"]
+ input_buf = _ContextualStringIO(json.dumps(input_obj))
+ with mock.patch(
+ "generate_report.PickInputFile", return_value=input_buf
+ ) as patched_pick:
+ result = generate_report.Main(args)
+ patched_pick.assert_called_once_with("-")
+ return result
- @mock.patch('generate_report.RunActions')
- def testMain(self, mock_run_actions):
- # Email is left out because it's a bit more difficult to test, and it'll be
- # mildly obvious if it's failing.
- args = ['--json', '--html', '--text']
- return_code = self._RunMainWithInput(args, {'platforms': [], 'data': {}})
- self.assertEqual(0, return_code)
- self.assertEqual(mock_run_actions.call_count, 1)
- ctors = [ctor for ctor, _ in mock_run_actions.call_args[0][0]]
- self.assertEqual(ctors, [
- results_report.JSONResultsReport,
- results_report.TextResultsReport,
- results_report.HTMLResultsReport,
- ])
+ @mock.patch("generate_report.RunActions")
+ def testMain(self, mock_run_actions):
+ # Email is left out because it's a bit more difficult to test, and it'll be
+ # mildly obvious if it's failing.
+ args = ["--json", "--html", "--text"]
+ return_code = self._RunMainWithInput(
+ args, {"platforms": [], "data": {}}
+ )
+ self.assertEqual(0, return_code)
+ self.assertEqual(mock_run_actions.call_count, 1)
+ ctors = [ctor for ctor, _ in mock_run_actions.call_args[0][0]]
+ self.assertEqual(
+ ctors,
+ [
+ results_report.JSONResultsReport,
+ results_report.TextResultsReport,
+ results_report.HTMLResultsReport,
+ ],
+ )
- @mock.patch('generate_report.RunActions')
- def testMainSelectsHTMLIfNoReportsGiven(self, mock_run_actions):
- args = []
- return_code = self._RunMainWithInput(args, {'platforms': [], 'data': {}})
- self.assertEqual(0, return_code)
- self.assertEqual(mock_run_actions.call_count, 1)
- ctors = [ctor for ctor, _ in mock_run_actions.call_args[0][0]]
- self.assertEqual(ctors, [results_report.HTMLResultsReport])
+ @mock.patch("generate_report.RunActions")
+ def testMainSelectsHTMLIfNoReportsGiven(self, mock_run_actions):
+ args = []
+ return_code = self._RunMainWithInput(
+ args, {"platforms": [], "data": {}}
+ )
+ self.assertEqual(0, return_code)
+ self.assertEqual(mock_run_actions.call_count, 1)
+ ctors = [ctor for ctor, _ in mock_run_actions.call_args[0][0]]
+ self.assertEqual(ctors, [results_report.HTMLResultsReport])
- # We only mock print_exc so we don't have exception info printed to stdout.
- @mock.patch('generate_report.WriteFile', side_effect=ValueError('Oh noo'))
- @mock.patch('traceback.print_exc')
- def testRunActionsRunsAllActionsRegardlessOfExceptions(
- self, mock_print_exc, mock_write_file):
- actions = [(None, 'json'), (None, 'html'), (None, 'text'), (None, 'email')]
- output_prefix = '-'
- ok = generate_report.RunActions(
- actions, {}, output_prefix, overwrite=False, verbose=False)
- self.assertFalse(ok)
- self.assertEqual(mock_write_file.call_count, len(actions))
- self.assertEqual(mock_print_exc.call_count, len(actions))
+ # We only mock print_exc so we don't have exception info printed to stdout.
+ @mock.patch("generate_report.WriteFile", side_effect=ValueError("Oh noo"))
+ @mock.patch("traceback.print_exc")
+ def testRunActionsRunsAllActionsRegardlessOfExceptions(
+ self, mock_print_exc, mock_write_file
+ ):
+ actions = [
+ (None, "json"),
+ (None, "html"),
+ (None, "text"),
+ (None, "email"),
+ ]
+ output_prefix = "-"
+ ok = generate_report.RunActions(
+ actions, {}, output_prefix, overwrite=False, verbose=False
+ )
+ self.assertFalse(ok)
+ self.assertEqual(mock_write_file.call_count, len(actions))
+ self.assertEqual(mock_print_exc.call_count, len(actions))
- @mock.patch('generate_report.WriteFile')
- def testRunActionsReturnsTrueIfAllActionsSucceed(self, mock_write_file):
- actions = [(None, 'json'), (None, 'html'), (None, 'text')]
- output_prefix = '-'
- ok = generate_report.RunActions(
- actions, {}, output_prefix, overwrite=False, verbose=False)
- self.assertEqual(mock_write_file.call_count, len(actions))
- self.assertTrue(ok)
+ @mock.patch("generate_report.WriteFile")
+ def testRunActionsReturnsTrueIfAllActionsSucceed(self, mock_write_file):
+ actions = [(None, "json"), (None, "html"), (None, "text")]
+ output_prefix = "-"
+ ok = generate_report.RunActions(
+ actions, {}, output_prefix, overwrite=False, verbose=False
+ )
+ self.assertEqual(mock_write_file.call_count, len(actions))
+ self.assertTrue(ok)
-if __name__ == '__main__':
- test_flag.SetTestMode(True)
- unittest.main()
+if __name__ == "__main__":
+ test_flag.SetTestMode(True)
+ unittest.main()
diff --git a/crosperf/help.py b/crosperf/help.py
index 4409b77..db95fc6 100644
--- a/crosperf/help.py
+++ b/crosperf/help.py
@@ -1,47 +1,49 @@
# -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to print help message."""
-from __future__ import print_function
import sys
import textwrap
+
from settings_factory import BenchmarkSettings
from settings_factory import GlobalSettings
from settings_factory import LabelSettings
class Help(object):
- """The help class."""
+ """The help class."""
- def GetUsage(self):
- return """%s [OPTIONS] EXPERIMENT_FILE""" % (sys.argv[0])
+ def GetUsage(self):
+ return """%s [OPTIONS] EXPERIMENT_FILE""" % (sys.argv[0])
- def _WrapLine(self, line):
- return '\n'.join(textwrap.wrap(line, 80))
+ def _WrapLine(self, line):
+ return "\n".join(textwrap.wrap(line, 80))
- def _GetFieldDescriptions(self, fields):
- res = ''
- for field_name in fields:
- field = fields[field_name]
- res += 'Field:\t\t%s\n' % field.name
- res += self._WrapLine('Description:\t%s' % field.description) + '\n'
- res += 'Type:\t\t%s\n' % type(field).__name__.replace('Field', '')
- res += 'Required:\t%s\n' % field.required
- if field.default:
- res += 'Default:\t%s\n' % field.default
- res += '\n'
- return res
+ def _GetFieldDescriptions(self, fields):
+ res = ""
+ for field_name in fields:
+ field = fields[field_name]
+ res += "Field:\t\t%s\n" % field.name
+ res += self._WrapLine("Description:\t%s" % field.description) + "\n"
+ res += "Type:\t\t%s\n" % type(field).__name__.replace("Field", "")
+ res += "Required:\t%s\n" % field.required
+ if field.default:
+ res += "Default:\t%s\n" % field.default
+ res += "\n"
+ return res
- def GetHelp(self):
- global_fields = self._GetFieldDescriptions(GlobalSettings('').fields)
- benchmark_fields = self._GetFieldDescriptions(BenchmarkSettings('').fields)
- label_fields = self._GetFieldDescriptions(LabelSettings('').fields)
+ def GetHelp(self):
+ global_fields = self._GetFieldDescriptions(GlobalSettings("").fields)
+ benchmark_fields = self._GetFieldDescriptions(
+ BenchmarkSettings("").fields
+ )
+ label_fields = self._GetFieldDescriptions(LabelSettings("").fields)
- return """%s is a script for running performance experiments on
+ return """%s is a script for running performance experiments on
ChromeOS. It allows one to run ChromeOS Autotest benchmarks over
several images and compare the results to determine whether there
is a performance difference.
@@ -114,5 +116,11 @@
generates and displays a report based on the run, and emails the
report to the user. If the results were all read out of the cache,
then by default no email is generated.
-""" % (sys.argv[0], sys.argv[0], global_fields, benchmark_fields, label_fields,
- sys.argv[0])
+""" % (
+ sys.argv[0],
+ sys.argv[0],
+ global_fields,
+ benchmark_fields,
+ label_fields,
+ sys.argv[0],
+ )
diff --git a/crosperf/image_checksummer.py b/crosperf/image_checksummer.py
index 8ac5be2..87664e9 100644
--- a/crosperf/image_checksummer.py
+++ b/crosperf/image_checksummer.py
@@ -1,11 +1,10 @@
# -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compute image checksum."""
-from __future__ import print_function
import os
import threading
@@ -15,59 +14,71 @@
class ImageChecksummer(object):
- """Compute image checksum."""
+ """Compute image checksum."""
- class PerImageChecksummer(object):
- """Compute checksum for an image."""
+ class PerImageChecksummer(object):
+ """Compute checksum for an image."""
- def __init__(self, label, log_level):
- self._lock = threading.Lock()
- self.label = label
- self._checksum = None
- self.log_level = log_level
+ def __init__(self, label, log_level):
+ self._lock = threading.Lock()
+ self.label = label
+ self._checksum = None
+ self.log_level = log_level
- def Checksum(self):
- with self._lock:
- if not self._checksum:
- logger.GetLogger().LogOutput(
- "Acquiring checksum for '%s'." % self.label.name)
- self._checksum = None
- if self.label.image_type != 'local':
- raise RuntimeError('Called Checksum on non-local image!')
- if self.label.chromeos_image:
- if os.path.exists(self.label.chromeos_image):
- self._checksum = FileUtils().Md5File(
- self.label.chromeos_image, log_level=self.log_level)
- logger.GetLogger().LogOutput('Computed checksum is '
- ': %s' % self._checksum)
- if not self._checksum:
- raise RuntimeError('Checksum computing error.')
- logger.GetLogger().LogOutput('Checksum is: %s' % self._checksum)
- return self._checksum
+ def Checksum(self):
+ with self._lock:
+ if not self._checksum:
+ logger.GetLogger().LogOutput(
+ "Acquiring checksum for '%s'." % self.label.name
+ )
+ self._checksum = None
+ if self.label.image_type != "local":
+ raise RuntimeError(
+ "Called Checksum on non-local image!"
+ )
+ if self.label.chromeos_image:
+ if os.path.exists(self.label.chromeos_image):
+ self._checksum = FileUtils().Md5File(
+ self.label.chromeos_image,
+ log_level=self.log_level,
+ )
+ logger.GetLogger().LogOutput(
+ "Computed checksum is " ": %s" % self._checksum
+ )
+ if not self._checksum:
+ raise RuntimeError("Checksum computing error.")
+ logger.GetLogger().LogOutput(
+ "Checksum is: %s" % self._checksum
+ )
+ return self._checksum
- _instance = None
- _lock = threading.Lock()
- _per_image_checksummers = {}
+ _instance = None
+ _lock = threading.Lock()
+ _per_image_checksummers = {}
- def __new__(cls, *args, **kwargs):
- with cls._lock:
- if not cls._instance:
- cls._instance = super(ImageChecksummer, cls).__new__(
- cls, *args, **kwargs)
- return cls._instance
+ def __new__(cls, *args, **kwargs):
+ with cls._lock:
+ if not cls._instance:
+ cls._instance = super(ImageChecksummer, cls).__new__(
+ cls, *args, **kwargs
+ )
+ return cls._instance
- def Checksum(self, label, log_level):
- if label.image_type != 'local':
- raise RuntimeError('Attempt to call Checksum on non-local image.')
- with self._lock:
- if label.name not in self._per_image_checksummers:
- self._per_image_checksummers[label.name] = (
- ImageChecksummer.PerImageChecksummer(label, log_level))
- checksummer = self._per_image_checksummers[label.name]
+ def Checksum(self, label, log_level):
+ if label.image_type != "local":
+ raise RuntimeError("Attempt to call Checksum on non-local image.")
+ with self._lock:
+ if label.name not in self._per_image_checksummers:
+ self._per_image_checksummers[
+ label.name
+ ] = ImageChecksummer.PerImageChecksummer(label, log_level)
+ checksummer = self._per_image_checksummers[label.name]
- try:
- return checksummer.Checksum()
- except:
- logger.GetLogger().LogError('Could not compute checksum of image in label'
- " '%s'." % label.name)
- raise
+ try:
+ return checksummer.Checksum()
+ except:
+ logger.GetLogger().LogError(
+ "Could not compute checksum of image in label"
+ " '%s'." % label.name
+ )
+ raise
diff --git a/crosperf/label.py b/crosperf/label.py
index 30bf5f8..9aeff56 100644
--- a/crosperf/label.py
+++ b/crosperf/label.py
@@ -1,188 +1,203 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The label of benchamrks."""
-from __future__ import print_function
import hashlib
import os
-from image_checksummer import ImageChecksummer
-from cros_utils.file_utils import FileUtils
from cros_utils import misc
+from cros_utils.file_utils import FileUtils
+from image_checksummer import ImageChecksummer
class Label(object):
- """The label class."""
+ """The label class."""
- def __init__(self,
- name,
- build,
- chromeos_image,
- autotest_path,
- debug_path,
- chromeos_root,
- board,
- remote,
- image_args,
- cache_dir,
- cache_only,
- log_level,
- compiler,
- crosfleet=False,
- chrome_src=None):
+ def __init__(
+ self,
+ name,
+ build,
+ chromeos_image,
+ autotest_path,
+ debug_path,
+ chromeos_root,
+ board,
+ remote,
+ image_args,
+ cache_dir,
+ cache_only,
+ log_level,
+ compiler,
+ crosfleet=False,
+ chrome_src=None,
+ ):
- self.image_type = self._GetImageType(chromeos_image)
+ self.image_type = self._GetImageType(chromeos_image)
- # Expand ~
- chromeos_root = os.path.expanduser(chromeos_root)
- if self.image_type == 'local':
- chromeos_image = os.path.expanduser(chromeos_image)
+ # Expand ~
+ chromeos_root = os.path.expanduser(chromeos_root)
+ if self.image_type == "local":
+ chromeos_image = os.path.expanduser(chromeos_image)
- self.name = name
- self.build = build
- self.chromeos_image = chromeos_image
- self.autotest_path = autotest_path
- self.debug_path = debug_path
- self.board = board
- self.remote = remote
- self.image_args = image_args
- self.cache_dir = cache_dir
- self.cache_only = cache_only
- self.log_level = log_level
- self.chrome_version = ''
- self.compiler = compiler
- self.crosfleet = crosfleet
+ self.name = name
+ self.build = build
+ self.chromeos_image = chromeos_image
+ self.autotest_path = autotest_path
+ self.debug_path = debug_path
+ self.board = board
+ self.remote = remote
+ self.image_args = image_args
+ self.cache_dir = cache_dir
+ self.cache_only = cache_only
+ self.log_level = log_level
+ self.chrome_version = ""
+ self.compiler = compiler
+ self.crosfleet = crosfleet
- if not chromeos_root:
- if self.image_type == 'local':
- chromeos_root = FileUtils().ChromeOSRootFromImage(chromeos_image)
- if not chromeos_root:
- raise RuntimeError("No ChromeOS root given for label '%s' and could "
- "not determine one from image path: '%s'." %
- (name, chromeos_image))
- else:
- chromeos_root = FileUtils().CanonicalizeChromeOSRoot(chromeos_root)
- if not chromeos_root:
- raise RuntimeError("Invalid ChromeOS root given for label '%s': '%s'." %
- (name, chromeos_root))
+ if not chromeos_root:
+ if self.image_type == "local":
+ chromeos_root = FileUtils().ChromeOSRootFromImage(
+ chromeos_image
+ )
+ if not chromeos_root:
+ raise RuntimeError(
+ "No ChromeOS root given for label '%s' and could "
+ "not determine one from image path: '%s'."
+ % (name, chromeos_image)
+ )
+ else:
+ chromeos_root = FileUtils().CanonicalizeChromeOSRoot(chromeos_root)
+ if not chromeos_root:
+ raise RuntimeError(
+ "Invalid ChromeOS root given for label '%s': '%s'."
+ % (name, chromeos_root)
+ )
- self.chromeos_root = chromeos_root
- if not chrome_src:
- # Old and new chroots may have different chrome src locations.
- # The path also depends on the chrome build flags.
- # Give priority to chrome-src-internal.
- chrome_src_rel_paths = [
- '.cache/distfiles/target/chrome-src-internal',
- '.cache/distfiles/chrome-src-internal',
- '.cache/distfiles/target/chrome-src',
- '.cache/distfiles/chrome-src',
- ]
- for chrome_src_rel_path in chrome_src_rel_paths:
- chrome_src_abs_path = os.path.join(self.chromeos_root,
- chrome_src_rel_path)
- if os.path.exists(chrome_src_abs_path):
- chrome_src = chrome_src_abs_path
- break
- if not chrome_src:
- raise RuntimeError('Can not find location of Chrome sources.\n'
- f'Checked paths: {chrome_src_rel_paths}')
- else:
- chrome_src = misc.CanonicalizePath(chrome_src)
- # Make sure the path exists.
- if not os.path.exists(chrome_src):
- raise RuntimeError("Invalid Chrome src given for label '%s': '%s'." %
- (name, chrome_src))
- self.chrome_src = chrome_src
+ self.chromeos_root = chromeos_root
+ if not chrome_src:
+ # Old and new chroots may have different chrome src locations.
+ # The path also depends on the chrome build flags.
+ # Give priority to chrome-src-internal.
+ chrome_src_rel_paths = [
+ ".cache/distfiles/target/chrome-src-internal",
+ ".cache/distfiles/chrome-src-internal",
+ ".cache/distfiles/target/chrome-src",
+ ".cache/distfiles/chrome-src",
+ ]
+ for chrome_src_rel_path in chrome_src_rel_paths:
+ chrome_src_abs_path = os.path.join(
+ self.chromeos_root, chrome_src_rel_path
+ )
+ if os.path.exists(chrome_src_abs_path):
+ chrome_src = chrome_src_abs_path
+ break
+ if not chrome_src:
+ raise RuntimeError(
+ "Can not find location of Chrome sources.\n"
+ f"Checked paths: {chrome_src_rel_paths}"
+ )
+ else:
+ chrome_src = misc.CanonicalizePath(chrome_src)
+ # Make sure the path exists.
+ if not os.path.exists(chrome_src):
+ raise RuntimeError(
+ "Invalid Chrome src given for label '%s': '%s'."
+ % (name, chrome_src)
+ )
+ self.chrome_src = chrome_src
- self._SetupChecksum()
+ self._SetupChecksum()
- def _SetupChecksum(self):
- """Compute label checksum only once."""
+ def _SetupChecksum(self):
+ """Compute label checksum only once."""
- self.checksum = None
- if self.image_type == 'local':
- self.checksum = ImageChecksummer().Checksum(self, self.log_level)
- elif self.image_type == 'trybot':
- self.checksum = hashlib.md5(
- self.chromeos_image.encode('utf-8')).hexdigest()
+ self.checksum = None
+ if self.image_type == "local":
+ self.checksum = ImageChecksummer().Checksum(self, self.log_level)
+ elif self.image_type == "trybot":
+ self.checksum = hashlib.md5(
+ self.chromeos_image.encode("utf-8")
+ ).hexdigest()
- def _GetImageType(self, chromeos_image):
- image_type = None
- if chromeos_image.find('xbuddy://') < 0:
- image_type = 'local'
- elif chromeos_image.find('trybot') >= 0:
- image_type = 'trybot'
- else:
- image_type = 'official'
- return image_type
+ def _GetImageType(self, chromeos_image):
+ image_type = None
+ if chromeos_image.find("xbuddy://") < 0:
+ image_type = "local"
+ elif chromeos_image.find("trybot") >= 0:
+ image_type = "trybot"
+ else:
+ image_type = "official"
+ return image_type
- def __hash__(self):
- """Label objects are used in a map, so provide "hash" and "equal"."""
+ def __hash__(self):
+ """Label objects are used in a map, so provide "hash" and "equal"."""
- return hash(self.name)
+ return hash(self.name)
- def __eq__(self, other):
- """Label objects are used in a map, so provide "hash" and "equal"."""
+ def __eq__(self, other):
+ """Label objects are used in a map, so provide "hash" and "equal"."""
- return isinstance(other, Label) and other.name == self.name
+ return isinstance(other, Label) and other.name == self.name
- def __str__(self):
- """For better debugging."""
+ def __str__(self):
+ """For better debugging."""
- return 'label[name="{}"]'.format(self.name)
+ return 'label[name="{}"]'.format(self.name)
class MockLabel(object):
- """The mock label class."""
+ """The mock label class."""
- def __init__(self,
- name,
- build,
- chromeos_image,
- autotest_path,
- debug_path,
- chromeos_root,
- board,
- remote,
- image_args,
- cache_dir,
- cache_only,
- log_level,
- compiler,
- crosfleet=False,
- chrome_src=None):
- self.name = name
- self.build = build
- self.chromeos_image = chromeos_image
- self.autotest_path = autotest_path
- self.debug_path = debug_path
- self.board = board
- self.remote = remote
- self.cache_dir = cache_dir
- self.cache_only = cache_only
- if not chromeos_root:
- self.chromeos_root = '/tmp/chromeos_root'
- else:
- self.chromeos_root = chromeos_root
- self.image_args = image_args
- self.chrome_src = chrome_src
- self.image_type = self._GetImageType(chromeos_image)
- self.checksum = ''
- self.log_level = log_level
- self.compiler = compiler
- self.crosfleet = crosfleet
- self.chrome_version = 'Fake Chrome Version 50'
+ def __init__(
+ self,
+ name,
+ build,
+ chromeos_image,
+ autotest_path,
+ debug_path,
+ chromeos_root,
+ board,
+ remote,
+ image_args,
+ cache_dir,
+ cache_only,
+ log_level,
+ compiler,
+ crosfleet=False,
+ chrome_src=None,
+ ):
+ self.name = name
+ self.build = build
+ self.chromeos_image = chromeos_image
+ self.autotest_path = autotest_path
+ self.debug_path = debug_path
+ self.board = board
+ self.remote = remote
+ self.cache_dir = cache_dir
+ self.cache_only = cache_only
+ if not chromeos_root:
+ self.chromeos_root = "/tmp/chromeos_root"
+ else:
+ self.chromeos_root = chromeos_root
+ self.image_args = image_args
+ self.chrome_src = chrome_src
+ self.image_type = self._GetImageType(chromeos_image)
+ self.checksum = ""
+ self.log_level = log_level
+ self.compiler = compiler
+ self.crosfleet = crosfleet
+ self.chrome_version = "Fake Chrome Version 50"
- def _GetImageType(self, chromeos_image):
- image_type = None
- if chromeos_image.find('xbuddy://') < 0:
- image_type = 'local'
- elif chromeos_image.find('trybot') >= 0:
- image_type = 'trybot'
- else:
- image_type = 'official'
- return image_type
+ def _GetImageType(self, chromeos_image):
+ image_type = None
+ if chromeos_image.find("xbuddy://") < 0:
+ image_type = "local"
+ elif chromeos_image.find("trybot") >= 0:
+ image_type = "trybot"
+ else:
+ image_type = "official"
+ return image_type
diff --git a/crosperf/machine_image_manager.py b/crosperf/machine_image_manager.py
index ffdd643..74379bf 100644
--- a/crosperf/machine_image_manager.py
+++ b/crosperf/machine_image_manager.py
@@ -1,17 +1,16 @@
# -*- coding: utf-8 -*-
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""MachineImageManager allocates images to duts."""
-from __future__ import print_function
import functools
class MachineImageManager(object):
- """Management of allocating images to duts.
+ """Management of allocating images to duts.
* Data structure we have -
@@ -137,173 +136,180 @@
* Special / common case to handle seperately
We have only 1 dut or if we have only 1 label, that's simple enough.
- """
-
- def __init__(self, labels, duts):
- self.labels_ = labels
- self.duts_ = duts
- self.n_labels_ = len(labels)
- self.n_duts_ = len(duts)
- self.dut_name_ordinal_ = dict()
- for idx, dut in enumerate(self.duts_):
- self.dut_name_ordinal_[dut.name] = idx
-
- # Generate initial matrix containg 'X' or ' '.
- self.matrix_ = [['X' if l.remote else ' '
- for _ in range(self.n_duts_)]
- for l in self.labels_]
- for ol, l in enumerate(self.labels_):
- if l.remote:
- for r in l.remote:
- self.matrix_[ol][self.dut_name_ordinal_[r]] = ' '
-
- self.label_duts_ = [[] for _ in range(self.n_labels_)]
- self.allocate_log_ = []
-
- def compute_initial_allocation(self):
- """Compute the initial label-dut allocation.
-
- This method finds the most efficient way that every label gets imaged at
- least once.
-
- Returns:
- False, only if not all labels could be imaged to a certain machine,
- otherwise True.
"""
- if self.n_duts_ == 1:
- for i, v in self.matrix_vertical_generator(0):
- if v != 'X':
- self.matrix_[i][0] = 'Y'
- return
+ def __init__(self, labels, duts):
+ self.labels_ = labels
+ self.duts_ = duts
+ self.n_labels_ = len(labels)
+ self.n_duts_ = len(duts)
+ self.dut_name_ordinal_ = dict()
+ for idx, dut in enumerate(self.duts_):
+ self.dut_name_ordinal_[dut.name] = idx
- if self.n_labels_ == 1:
- for j, v in self.matrix_horizontal_generator(0):
- if v != 'X':
- self.matrix_[0][j] = 'Y'
- return
+ # Generate initial matrix containing 'X' or ' '.
+ self.matrix_ = [
+ ["X" if l.remote else " " for _ in range(self.n_duts_)]
+ for l in self.labels_
+ ]
+ for ol, l in enumerate(self.labels_):
+ if l.remote:
+ for r in l.remote:
+ self.matrix_[ol][self.dut_name_ordinal_[r]] = " "
- if self.n_duts_ >= self.n_labels_:
- n = 1
- else:
- n = self.n_labels_ - self.n_duts_ + 1
- while n <= self.n_labels_:
- if self._compute_initial_allocation_internal(0, n):
- break
- n += 1
+ self.label_duts_ = [[] for _ in range(self.n_labels_)]
+ self.allocate_log_ = []
- return n <= self.n_labels_
+ def compute_initial_allocation(self):
+ """Compute the initial label-dut allocation.
- def _record_allocate_log(self, label_i, dut_j):
- self.allocate_log_.append((label_i, dut_j))
- self.label_duts_[label_i].append(dut_j)
+ This method finds the most efficient way that every label gets imaged at
+ least once.
- def allocate(self, dut, schedv2=None):
- """Allocate a label for dut.
+ Returns:
+ False, only if not all labels could be imaged to a certain machine,
+ otherwise True.
+ """
- Args:
- dut: the dut that asks for a new image.
- schedv2: the scheduling instance, we need the benchmark run
- information with schedv2 for a better allocation.
+ if self.n_duts_ == 1:
+ for i, v in self.matrix_vertical_generator(0):
+ if v != "X":
+ self.matrix_[i][0] = "Y"
+ return
- Returns:
- a label to image onto the dut or None if no more available images for
- the dut.
- """
- j = self.dut_name_ordinal_[dut.name]
- # 'can_' prefix means candidate label's.
- can_reimage_number = 999
- can_i = 999
- can_label = None
- can_pending_br_num = 0
- for i, v in self.matrix_vertical_generator(j):
- label = self.labels_[i]
+ if self.n_labels_ == 1:
+ for j, v in self.matrix_horizontal_generator(0):
+ if v != "X":
+ self.matrix_[0][j] = "Y"
+ return
- # 2 optimizations here regarding allocating label to dut.
- # Note schedv2 might be None in case we do not need this
- # optimization or we are in testing mode.
- if schedv2 is not None:
- pending_br_num = len(schedv2.get_label_map()[label])
- if pending_br_num == 0:
- # (A) - we have finished all br of this label,
- # apparently, we do not want to reimaeg dut to
- # this label.
- continue
- else:
- # In case we do not have a schedv2 instance, mark
- # pending_br_num as 0, so pending_br_num >=
- # can_pending_br_num is always True.
- pending_br_num = 0
+ if self.n_duts_ >= self.n_labels_:
+ n = 1
+ else:
+ n = self.n_labels_ - self.n_duts_ + 1
+ while n <= self.n_labels_:
+ if self._compute_initial_allocation_internal(0, n):
+ break
+ n += 1
- # For this time being, I just comment this out until we have a
- # better estimation how long each benchmarkrun takes.
- # if (pending_br_num <= 5 and
- # len(self.label_duts_[i]) >= 1):
- # # (B) this is heuristic - if there are just a few test cases
- # # (say <5) left undone for this label, and there is at least
- # # 1 other machine working on this lable, we probably not want
- # # to bother to reimage this dut to help with these 5 test
- # # cases
- # continue
+ return n <= self.n_labels_
- if v == 'Y':
- self.matrix_[i][j] = '_'
- self._record_allocate_log(i, j)
- return label
- if v == ' ':
- label_reimage_number = len(self.label_duts_[i])
- if ((can_label is None) or
- (label_reimage_number < can_reimage_number or
- (label_reimage_number == can_reimage_number and
- pending_br_num >= can_pending_br_num))):
- can_reimage_number = label_reimage_number
- can_i = i
- can_label = label
- can_pending_br_num = pending_br_num
+ def _record_allocate_log(self, label_i, dut_j):
+ self.allocate_log_.append((label_i, dut_j))
+ self.label_duts_[label_i].append(dut_j)
- # All labels are marked either '_' (already taken) or 'X' (not
- # compatible), so return None to notify machine thread to quit.
- if can_label is None:
- return None
+ def allocate(self, dut, schedv2=None):
+ """Allocate a label for dut.
- # At this point, we don't find any 'Y' for the machine, so we go the
- # 'min' approach.
- self.matrix_[can_i][j] = '_'
- self._record_allocate_log(can_i, j)
- return can_label
+ Args:
+ dut: the dut that asks for a new image.
+ schedv2: the scheduling instance, we need the benchmark run
+ information with schedv2 for a better allocation.
- def matrix_vertical_generator(self, col):
- """Iterate matrix vertically at column 'col'.
+ Returns:
+ a label to image onto the dut or None if no more available images for
+ the dut.
+ """
+ j = self.dut_name_ordinal_[dut.name]
+ # 'can_' prefix means candidate label's.
+ can_reimage_number = 999
+ can_i = 999
+ can_label = None
+ can_pending_br_num = 0
+ for i, v in self.matrix_vertical_generator(j):
+ label = self.labels_[i]
- Yield row number i and value at matrix_[i][col].
- """
- for i, _ in enumerate(self.labels_):
- yield i, self.matrix_[i][col]
+ # 2 optimizations here regarding allocating label to dut.
+ # Note schedv2 might be None in case we do not need this
+ # optimization or we are in testing mode.
+ if schedv2 is not None:
+ pending_br_num = len(schedv2.get_label_map()[label])
+ if pending_br_num == 0:
+ # (A) - we have finished all br of this label,
+ # apparently, we do not want to reimage dut to
+ # this label.
+ continue
+ else:
+ # In case we do not have a schedv2 instance, mark
+ # pending_br_num as 0, so pending_br_num >=
+ # can_pending_br_num is always True.
+ pending_br_num = 0
- def matrix_horizontal_generator(self, row):
- """Iterate matrix horizontally at row 'row'.
+ # For this time being, I just comment this out until we have a
+ # better estimation how long each benchmarkrun takes.
+ # if (pending_br_num <= 5 and
+ # len(self.label_duts_[i]) >= 1):
+ # # (B) this is heuristic - if there are just a few test cases
+ # # (say <5) left undone for this label, and there is at least
+ # # 1 other machine working on this label, we probably not want
+ # # to bother to reimage this dut to help with these 5 test
+ # # cases
+ # continue
- Yield col number j and value at matrix_[row][j].
- """
- for j, _ in enumerate(self.duts_):
- yield j, self.matrix_[row][j]
+ if v == "Y":
+ self.matrix_[i][j] = "_"
+ self._record_allocate_log(i, j)
+ return label
+ if v == " ":
+ label_reimage_number = len(self.label_duts_[i])
+ if (can_label is None) or (
+ label_reimage_number < can_reimage_number
+ or (
+ label_reimage_number == can_reimage_number
+ and pending_br_num >= can_pending_br_num
+ )
+ ):
+ can_reimage_number = label_reimage_number
+ can_i = i
+ can_label = label
+ can_pending_br_num = pending_br_num
- def _compute_initial_allocation_internal(self, level, N):
- """Search matrix for d with N."""
+ # All labels are marked either '_' (already taken) or 'X' (not
+ # compatible), so return None to notify machine thread to quit.
+ if can_label is None:
+ return None
- if level == self.n_labels_:
- return True
+ # At this point, we don't find any 'Y' for the machine, so we go the
+ # 'min' approach.
+ self.matrix_[can_i][j] = "_"
+ self._record_allocate_log(can_i, j)
+ return can_label
- for j, v in self.matrix_horizontal_generator(level):
- if v == ' ':
- # Before we put a 'Y', we check how many Y column 'j' has.
- # Note y[0] is row idx, y[1] is the cell value.
- ny = functools.reduce(lambda x, y: x + 1 if (y[1] == 'Y') else x,
- self.matrix_vertical_generator(j), 0)
- if ny < N:
- self.matrix_[level][j] = 'Y'
- if self._compute_initial_allocation_internal(level + 1, N):
+ def matrix_vertical_generator(self, col):
+ """Iterate matrix vertically at column 'col'.
+
+ Yield row number i and value at matrix_[i][col].
+ """
+ for i, _ in enumerate(self.labels_):
+ yield i, self.matrix_[i][col]
+
+ def matrix_horizontal_generator(self, row):
+ """Iterate matrix horizontally at row 'row'.
+
+ Yield col number j and value at matrix_[row][j].
+ """
+ for j, _ in enumerate(self.duts_):
+ yield j, self.matrix_[row][j]
+
+ def _compute_initial_allocation_internal(self, level, N):
+ """Search matrix for d with N."""
+
+ if level == self.n_labels_:
return True
- self.matrix_[level][j] = ' '
- return False
+ for j, v in self.matrix_horizontal_generator(level):
+ if v == " ":
+ # Before we put a 'Y', we check how many Y column 'j' has.
+ # Note y[0] is row idx, y[1] is the cell value.
+ ny = functools.reduce(
+ lambda x, y: x + 1 if (y[1] == "Y") else x,
+ self.matrix_vertical_generator(j),
+ 0,
+ )
+ if ny < N:
+ self.matrix_[level][j] = "Y"
+ if self._compute_initial_allocation_internal(level + 1, N):
+ return True
+ self.matrix_[level][j] = " "
+
+ return False
diff --git a/crosperf/machine_image_manager_unittest.py b/crosperf/machine_image_manager_unittest.py
index fbbca7b..1ea63b1 100755
--- a/crosperf/machine_image_manager_unittest.py
+++ b/crosperf/machine_image_manager_unittest.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the MachineImageManager class."""
-from __future__ import print_function
import random
import unittest
@@ -15,251 +14,282 @@
class MockLabel(object):
- """Class for generating a mock Label."""
+ """Class for generating a mock Label."""
- def __init__(self, name, remotes=None):
- self.name = name
- self.remote = remotes
+ def __init__(self, name, remotes=None):
+ self.name = name
+ self.remote = remotes
- def __hash__(self):
- """Provide hash function for label.
+ def __hash__(self):
+ """Provide hash function for label.
- This is required because Label object is used inside a dict as key.
- """
- return hash(self.name)
+ This is required because Label object is used inside a dict as key.
+ """
+ return hash(self.name)
- def __eq__(self, other):
- """Provide eq function for label.
+ def __eq__(self, other):
+ """Provide eq function for label.
- This is required because Label object is used inside a dict as key.
- """
- return isinstance(other, MockLabel) and other.name == self.name
+ This is required because Label object is used inside a dict as key.
+ """
+ return isinstance(other, MockLabel) and other.name == self.name
class MockDut(object):
- """Class for creating a mock Device-Under-Test (DUT)."""
+ """Class for creating a mock Device-Under-Test (DUT)."""
- def __init__(self, name, label=None):
- self.name = name
- self.label_ = label
+ def __init__(self, name, label=None):
+ self.name = name
+ self.label_ = label
class MachineImageManagerTester(unittest.TestCase):
- """Class for testing MachineImageManager."""
+ """Class for testing MachineImageManager."""
- def gen_duts_by_name(self, *names):
- duts = []
- for n in names:
- duts.append(MockDut(n))
- return duts
+ def gen_duts_by_name(self, *names):
+ duts = []
+ for n in names:
+ duts.append(MockDut(n))
+ return duts
- def create_labels_and_duts_from_pattern(self, pattern):
- labels = []
- duts = []
- for i, r in enumerate(pattern):
- l = MockLabel('l{}'.format(i), [])
- for j, v in enumerate(r.split()):
- if v == '.':
- l.remote.append('m{}'.format(j))
- if i == 0:
- duts.append(MockDut('m{}'.format(j)))
- labels.append(l)
- return labels, duts
+ def create_labels_and_duts_from_pattern(self, pattern):
+ labels = []
+ duts = []
+ for i, r in enumerate(pattern):
+ l = MockLabel("l{}".format(i), [])
+ for j, v in enumerate(r.split()):
+ if v == ".":
+ l.remote.append("m{}".format(j))
+ if i == 0:
+ duts.append(MockDut("m{}".format(j)))
+ labels.append(l)
+ return labels, duts
- def check_matrix_against_pattern(self, matrix, pattern):
- for i, s in enumerate(pattern):
- for j, v in enumerate(s.split()):
- self.assertTrue(v == '.' and matrix[i][j] == ' ' or v == matrix[i][j])
+ def check_matrix_against_pattern(self, matrix, pattern):
+ for i, s in enumerate(pattern):
+ for j, v in enumerate(s.split()):
+ self.assertTrue(
+ v == "." and matrix[i][j] == " " or v == matrix[i][j]
+ )
- def pattern_based_test(self, inp, output):
- labels, duts = self.create_labels_and_duts_from_pattern(inp)
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
- self.check_matrix_against_pattern(mim.matrix_, output)
- return mim
+ def pattern_based_test(self, inp, output):
+ labels, duts = self.create_labels_and_duts_from_pattern(inp)
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ self.check_matrix_against_pattern(mim.matrix_, output)
+ return mim
- def test_single_dut(self):
- labels = [MockLabel('l1'), MockLabel('l2'), MockLabel('l3')]
- dut = MockDut('m1')
- mim = MachineImageManager(labels, [dut])
- mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [['Y'], ['Y'], ['Y']])
+ def test_single_dut(self):
+ labels = [MockLabel("l1"), MockLabel("l2"), MockLabel("l3")]
+ dut = MockDut("m1")
+ mim = MachineImageManager(labels, [dut])
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [["Y"], ["Y"], ["Y"]])
- def test_single_label(self):
- labels = [MockLabel('l1')]
- duts = self.gen_duts_by_name('m1', 'm2', 'm3')
- mim = MachineImageManager(labels, duts)
- mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [['Y', 'Y', 'Y']])
+ def test_single_label(self):
+ labels = [MockLabel("l1")]
+ duts = self.gen_duts_by_name("m1", "m2", "m3")
+ mim = MachineImageManager(labels, duts)
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [["Y", "Y", "Y"]])
- def test_case1(self):
- labels = [
- MockLabel('l1', ['m1', 'm2']),
- MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])
- ]
- duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
- mim = MachineImageManager(labels, duts)
- self.assertTrue(
- mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '], [' ', 'X', 'X']])
- mim.compute_initial_allocation()
- self.assertTrue(
- mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X', 'X']])
+ def test_case1(self):
+ labels = [
+ MockLabel("l1", ["m1", "m2"]),
+ MockLabel("l2", ["m2", "m3"]),
+ MockLabel("l3", ["m1"]),
+ ]
+ duts = [MockDut("m1"), MockDut("m2"), MockDut("m3")]
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(
+ mim.matrix_ == [[" ", " ", "X"], ["X", " ", " "], [" ", "X", "X"]]
+ )
+ mim.compute_initial_allocation()
+ self.assertTrue(
+ mim.matrix_ == [[" ", "Y", "X"], ["X", " ", "Y"], ["Y", "X", "X"]]
+ )
- def test_case2(self):
- labels = [
- MockLabel('l1', ['m1', 'm2']),
- MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])
- ]
- duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
- mim = MachineImageManager(labels, duts)
- self.assertTrue(
- mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '], [' ', 'X', 'X']])
- mim.compute_initial_allocation()
- self.assertTrue(
- mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X', 'X']])
+ def test_case2(self):
+ labels = [
+ MockLabel("l1", ["m1", "m2"]),
+ MockLabel("l2", ["m2", "m3"]),
+ MockLabel("l3", ["m1"]),
+ ]
+ duts = [MockDut("m1"), MockDut("m2"), MockDut("m3")]
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(
+ mim.matrix_ == [[" ", " ", "X"], ["X", " ", " "], [" ", "X", "X"]]
+ )
+ mim.compute_initial_allocation()
+ self.assertTrue(
+ mim.matrix_ == [[" ", "Y", "X"], ["X", " ", "Y"], ["Y", "X", "X"]]
+ )
- def test_case3(self):
- labels = [
- MockLabel('l1', ['m1', 'm2']),
- MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])
- ]
- duts = [MockDut('m1', labels[0]), MockDut('m2'), MockDut('m3')]
- mim = MachineImageManager(labels, duts)
- mim.compute_initial_allocation()
- self.assertTrue(
- mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X', 'X']])
+ def test_case3(self):
+ labels = [
+ MockLabel("l1", ["m1", "m2"]),
+ MockLabel("l2", ["m2", "m3"]),
+ MockLabel("l3", ["m1"]),
+ ]
+ duts = [MockDut("m1", labels[0]), MockDut("m2"), MockDut("m3")]
+ mim = MachineImageManager(labels, duts)
+ mim.compute_initial_allocation()
+ self.assertTrue(
+ mim.matrix_ == [[" ", "Y", "X"], ["X", " ", "Y"], ["Y", "X", "X"]]
+ )
- def test_case4(self):
- labels = [
- MockLabel('l1', ['m1', 'm2']),
- MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])
- ]
- duts = [MockDut('m1'), MockDut('m2', labels[0]), MockDut('m3')]
- mim = MachineImageManager(labels, duts)
- mim.compute_initial_allocation()
- self.assertTrue(
- mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X', 'X']])
+ def test_case4(self):
+ labels = [
+ MockLabel("l1", ["m1", "m2"]),
+ MockLabel("l2", ["m2", "m3"]),
+ MockLabel("l3", ["m1"]),
+ ]
+ duts = [MockDut("m1"), MockDut("m2", labels[0]), MockDut("m3")]
+ mim = MachineImageManager(labels, duts)
+ mim.compute_initial_allocation()
+ self.assertTrue(
+ mim.matrix_ == [[" ", "Y", "X"], ["X", " ", "Y"], ["Y", "X", "X"]]
+ )
- def test_case5(self):
- labels = [
- MockLabel('l1', ['m3']),
- MockLabel('l2', ['m3']),
- MockLabel('l3', ['m1'])
- ]
- duts = self.gen_duts_by_name('m1', 'm2', 'm3')
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
- self.assertTrue(
- mim.matrix_ == [['X', 'X', 'Y'], ['X', 'X', 'Y'], ['Y', 'X', 'X']])
+ def test_case5(self):
+ labels = [
+ MockLabel("l1", ["m3"]),
+ MockLabel("l2", ["m3"]),
+ MockLabel("l3", ["m1"]),
+ ]
+ duts = self.gen_duts_by_name("m1", "m2", "m3")
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ self.assertTrue(
+ mim.matrix_ == [["X", "X", "Y"], ["X", "X", "Y"], ["Y", "X", "X"]]
+ )
- def test_2x2_with_allocation(self):
- labels = [MockLabel('l0'), MockLabel('l1')]
- duts = [MockDut('m0'), MockDut('m1')]
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
- self.assertTrue(mim.allocate(duts[0]) == labels[0])
- self.assertTrue(mim.allocate(duts[0]) == labels[1])
- self.assertTrue(mim.allocate(duts[0]) is None)
- self.assertTrue(mim.matrix_[0][0] == '_')
- self.assertTrue(mim.matrix_[1][0] == '_')
- self.assertTrue(mim.allocate(duts[1]) == labels[1])
+ def test_2x2_with_allocation(self):
+ labels = [MockLabel("l0"), MockLabel("l1")]
+ duts = [MockDut("m0"), MockDut("m1")]
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ self.assertTrue(mim.allocate(duts[0]) == labels[0])
+ self.assertTrue(mim.allocate(duts[0]) == labels[1])
+ self.assertTrue(mim.allocate(duts[0]) is None)
+ self.assertTrue(mim.matrix_[0][0] == "_")
+ self.assertTrue(mim.matrix_[1][0] == "_")
+ self.assertTrue(mim.allocate(duts[1]) == labels[1])
- def test_10x10_general(self):
- """Gen 10x10 matrix."""
- n = 10
- labels = []
- duts = []
- for i in range(n):
- labels.append(MockLabel('l{}'.format(i)))
- duts.append(MockDut('m{}'.format(i)))
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
- for i in range(n):
- for j in range(n):
- if i == j:
- self.assertTrue(mim.matrix_[i][j] == 'Y')
- else:
- self.assertTrue(mim.matrix_[i][j] == ' ')
- self.assertTrue(mim.allocate(duts[3]).name == 'l3')
+ def test_10x10_general(self):
+ """Gen 10x10 matrix."""
+ n = 10
+ labels = []
+ duts = []
+ for i in range(n):
+ labels.append(MockLabel("l{}".format(i)))
+ duts.append(MockDut("m{}".format(i)))
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ for i in range(n):
+ for j in range(n):
+ if i == j:
+ self.assertTrue(mim.matrix_[i][j] == "Y")
+ else:
+ self.assertTrue(mim.matrix_[i][j] == " ")
+ self.assertTrue(mim.allocate(duts[3]).name == "l3")
- def test_random_generated(self):
- n = 10
- labels = []
- duts = []
- for i in range(10):
- # generate 3-5 machines that is compatible with this label
- l = MockLabel('l{}'.format(i), [])
- r = random.random()
- for _ in range(4):
- t = int(r * 10) % n
- r *= 10
- l.remote.append('m{}'.format(t))
- labels.append(l)
- duts.append(MockDut('m{}'.format(i)))
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
+ def test_random_generated(self):
+ n = 10
+ labels = []
+ duts = []
+ for i in range(10):
+            # generate 3-5 machines that are compatible with this label
+ l = MockLabel("l{}".format(i), [])
+ r = random.random()
+ for _ in range(4):
+ t = int(r * 10) % n
+ r *= 10
+ l.remote.append("m{}".format(t))
+ labels.append(l)
+ duts.append(MockDut("m{}".format(i)))
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
- def test_10x10_fully_random(self):
- inp = [
- 'X . . . X X . X X .', 'X X . X . X . X X .',
- 'X X X . . X . X . X', 'X . X X . . X X . X',
- 'X X X X . . . X . .', 'X X . X . X . . X .',
- '. X . X . X X X . .', '. X . X X . X X . .',
- 'X X . . . X X X . .', '. X X X X . . . . X'
- ]
- output = [
- 'X Y . . X X . X X .', 'X X Y X . X . X X .',
- 'X X X Y . X . X . X', 'X . X X Y . X X . X',
- 'X X X X . Y . X . .', 'X X . X . X Y . X .',
- 'Y X . X . X X X . .', '. X . X X . X X Y .',
- 'X X . . . X X X . Y', '. X X X X . . Y . X'
- ]
- self.pattern_based_test(inp, output)
+ def test_10x10_fully_random(self):
+ inp = [
+ "X . . . X X . X X .",
+ "X X . X . X . X X .",
+ "X X X . . X . X . X",
+ "X . X X . . X X . X",
+ "X X X X . . . X . .",
+ "X X . X . X . . X .",
+ ". X . X . X X X . .",
+ ". X . X X . X X . .",
+ "X X . . . X X X . .",
+ ". X X X X . . . . X",
+ ]
+ output = [
+ "X Y . . X X . X X .",
+ "X X Y X . X . X X .",
+ "X X X Y . X . X . X",
+ "X . X X Y . X X . X",
+ "X X X X . Y . X . .",
+ "X X . X . X Y . X .",
+ "Y X . X . X X X . .",
+ ". X . X X . X X Y .",
+ "X X . . . X X X . Y",
+ ". X X X X . . Y . X",
+ ]
+ self.pattern_based_test(inp, output)
- def test_10x10_fully_random2(self):
- inp = [
- 'X . X . . X . X X X', 'X X X X X X . . X .',
- 'X . X X X X X . . X', 'X X X . X . X X . .',
- '. X . X . X X X X X', 'X X X X X X X . . X',
- 'X . X X X X X . . X', 'X X X . X X X X . .',
- 'X X X . . . X X X X', '. X X . X X X . X X'
- ]
- output = [
- 'X . X Y . X . X X X', 'X X X X X X Y . X .',
- 'X Y X X X X X . . X', 'X X X . X Y X X . .',
- '. X Y X . X X X X X', 'X X X X X X X Y . X',
- 'X . X X X X X . Y X', 'X X X . X X X X . Y',
- 'X X X . Y . X X X X', 'Y X X . X X X . X X'
- ]
- self.pattern_based_test(inp, output)
+ def test_10x10_fully_random2(self):
+ inp = [
+ "X . X . . X . X X X",
+ "X X X X X X . . X .",
+ "X . X X X X X . . X",
+ "X X X . X . X X . .",
+ ". X . X . X X X X X",
+ "X X X X X X X . . X",
+ "X . X X X X X . . X",
+ "X X X . X X X X . .",
+ "X X X . . . X X X X",
+ ". X X . X X X . X X",
+ ]
+ output = [
+ "X . X Y . X . X X X",
+ "X X X X X X Y . X .",
+ "X Y X X X X X . . X",
+ "X X X . X Y X X . .",
+ ". X Y X . X X X X X",
+ "X X X X X X X Y . X",
+ "X . X X X X X . Y X",
+ "X X X . X X X X . Y",
+ "X X X . Y . X X X X",
+ "Y X X . X X X . X X",
+ ]
+ self.pattern_based_test(inp, output)
- def test_3x4_with_allocation(self):
- inp = ['X X . .', '. . X .', 'X . X .']
- output = ['X X Y .', 'Y . X .', 'X Y X .']
- mim = self.pattern_based_test(inp, output)
- self.assertTrue(mim.allocate(mim.duts_[2]) == mim.labels_[0])
- self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[2])
- self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1])
- self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[2])
- self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[1])
- self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[0])
- self.assertTrue(mim.allocate(mim.duts_[3]) is None)
- self.assertTrue(mim.allocate(mim.duts_[2]) is None)
- self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[1])
- self.assertTrue(mim.allocate(mim.duts_[1]) is None)
- self.assertTrue(mim.allocate(mim.duts_[0]) is None)
- self.assertTrue(mim.label_duts_[0] == [2, 3])
- self.assertTrue(mim.label_duts_[1] == [0, 3, 1])
- self.assertTrue(mim.label_duts_[2] == [3, 1])
- self.assertListEqual(mim.allocate_log_, [(0, 2), (2, 3), (1, 0), (2, 1),
- (1, 3), (0, 3), (1, 1)])
+ def test_3x4_with_allocation(self):
+ inp = ["X X . .", ". . X .", "X . X ."]
+ output = ["X X Y .", "Y . X .", "X Y X ."]
+ mim = self.pattern_based_test(inp, output)
+ self.assertTrue(mim.allocate(mim.duts_[2]) == mim.labels_[0])
+ self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[2])
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1])
+ self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[2])
+ self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[1])
+ self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[0])
+ self.assertTrue(mim.allocate(mim.duts_[3]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[2]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[1])
+ self.assertTrue(mim.allocate(mim.duts_[1]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[0]) is None)
+ self.assertTrue(mim.label_duts_[0] == [2, 3])
+ self.assertTrue(mim.label_duts_[1] == [0, 3, 1])
+ self.assertTrue(mim.label_duts_[2] == [3, 1])
+ self.assertListEqual(
+ mim.allocate_log_,
+ [(0, 2), (2, 3), (1, 0), (2, 1), (1, 3), (0, 3), (1, 1)],
+ )
- def test_cornercase_1(self):
- """This corner case is brought up by Caroline.
+ def test_cornercase_1(self):
+ """This corner case is brought up by Caroline.
The description is -
@@ -292,18 +322,18 @@
l1 Y X X
l2 Y X X
- """
+ """
- inp = ['. X X', '. X X', '. X X']
- output = ['Y X X', 'Y X X', 'Y X X']
- mim = self.pattern_based_test(inp, output)
- self.assertTrue(mim.allocate(mim.duts_[1]) is None)
- self.assertTrue(mim.allocate(mim.duts_[2]) is None)
- self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[0])
- self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1])
- self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[2])
- self.assertTrue(mim.allocate(mim.duts_[0]) is None)
+ inp = [". X X", ". X X", ". X X"]
+ output = ["Y X X", "Y X X", "Y X X"]
+ mim = self.pattern_based_test(inp, output)
+ self.assertTrue(mim.allocate(mim.duts_[1]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[2]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[0])
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1])
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[2])
+ self.assertTrue(mim.allocate(mim.duts_[0]) is None)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/crosperf/machine_manager.py b/crosperf/machine_manager.py
index aaf09bf..ffb0b5e 100644
--- a/crosperf/machine_manager.py
+++ b/crosperf/machine_manager.py
@@ -1,12 +1,10 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Machine Manager module."""
-from __future__ import division
-from __future__ import print_function
import collections
import hashlib
@@ -17,538 +15,600 @@
import threading
import time
+from cros_utils import command_executer
+from cros_utils import logger
import file_lock_machine
import image_chromeos
import test_flag
-from cros_utils import command_executer
-from cros_utils import logger
-CHECKSUM_FILE = '/usr/local/osimage_checksum_file'
+
+CHECKSUM_FILE = "/usr/local/osimage_checksum_file"
class BadChecksum(Exception):
- """Raised if all machines for a label don't have the same checksum."""
+ """Raised if all machines for a label don't have the same checksum."""
class BadChecksumString(Exception):
- """Raised if all machines for a label don't have the same checksum string."""
+ """Raised if all machines for a label don't have the same checksum string."""
class MissingLocksDirectory(Exception):
- """Raised when cannot find/access the machine locks directory."""
+ """Raised when cannot find/access the machine locks directory."""
class CrosCommandError(Exception):
- """Raised when an error occurs running command on DUT."""
+ """Raised when an error occurs running command on DUT."""
class CrosMachine(object):
- """The machine class."""
+ """The machine class."""
- def __init__(self, name, chromeos_root, log_level, cmd_exec=None):
- self.name = name
- self.image = None
- # We relate a dut with a label if we reimage the dut using label or we
- # detect at the very beginning that the dut is running this label.
- self.label = None
- self.checksum = None
- self.locked = False
- self.released_time = time.time()
- self.test_run = None
- self.chromeos_root = chromeos_root
- self.log_level = log_level
- self.cpuinfo = None
- self.machine_id = None
- self.checksum_string = None
- self.meminfo = None
- self.phys_kbytes = None
- self.cooldown_wait_time = 0
- self.ce = cmd_exec or command_executer.GetCommandExecuter(
- log_level=self.log_level)
- self.SetUpChecksumInfo()
+ def __init__(self, name, chromeos_root, log_level, cmd_exec=None):
+ self.name = name
+ self.image = None
+ # We relate a dut with a label if we reimage the dut using label or we
+ # detect at the very beginning that the dut is running this label.
+ self.label = None
+ self.checksum = None
+ self.locked = False
+ self.released_time = time.time()
+ self.test_run = None
+ self.chromeos_root = chromeos_root
+ self.log_level = log_level
+ self.cpuinfo = None
+ self.machine_id = None
+ self.checksum_string = None
+ self.meminfo = None
+ self.phys_kbytes = None
+ self.cooldown_wait_time = 0
+ self.ce = cmd_exec or command_executer.GetCommandExecuter(
+ log_level=self.log_level
+ )
+ self.SetUpChecksumInfo()
- def SetUpChecksumInfo(self):
- if not self.IsReachable():
- self.machine_checksum = None
- return
- self._GetMemoryInfo()
- self._GetCPUInfo()
- self._ComputeMachineChecksumString()
- self._GetMachineID()
- self.machine_checksum = self._GetMD5Checksum(self.checksum_string)
- self.machine_id_checksum = self._GetMD5Checksum(self.machine_id)
+ def SetUpChecksumInfo(self):
+ if not self.IsReachable():
+ self.machine_checksum = None
+ return
+ self._GetMemoryInfo()
+ self._GetCPUInfo()
+ self._ComputeMachineChecksumString()
+ self._GetMachineID()
+ self.machine_checksum = self._GetMD5Checksum(self.checksum_string)
+ self.machine_id_checksum = self._GetMD5Checksum(self.machine_id)
- def IsReachable(self):
- command = 'ls'
- ret = self.ce.CrosRunCommand(
- command, machine=self.name, chromeos_root=self.chromeos_root)
- if ret:
- return False
- return True
+ def IsReachable(self):
+ command = "ls"
+ ret = self.ce.CrosRunCommand(
+ command, machine=self.name, chromeos_root=self.chromeos_root
+ )
+ if ret:
+ return False
+ return True
- def AddCooldownWaitTime(self, wait_time):
- self.cooldown_wait_time += wait_time
+ def AddCooldownWaitTime(self, wait_time):
+ self.cooldown_wait_time += wait_time
- def GetCooldownWaitTime(self):
- return self.cooldown_wait_time
+ def GetCooldownWaitTime(self):
+ return self.cooldown_wait_time
- def _ParseMemoryInfo(self):
- line = self.meminfo.splitlines()[0]
- usable_kbytes = int(line.split()[1])
- # This code is from src/third_party/test/files/client/bin/base_utils.py
- # usable_kbytes is system's usable DRAM in kbytes,
- # as reported by memtotal() from device /proc/meminfo memtotal
- # after Linux deducts 1.5% to 9.5% for system table overhead
- # Undo the unknown actual deduction by rounding up
- # to next small multiple of a big power-of-two
- # eg 12GB - 5.1% gets rounded back up to 12GB
- mindeduct = 0.005 # 0.5 percent
- maxdeduct = 0.095 # 9.5 percent
- # deduction range 1.5% .. 9.5% supports physical mem sizes
- # 6GB .. 12GB in steps of .5GB
- # 12GB .. 24GB in steps of 1 GB
- # 24GB .. 48GB in steps of 2 GB ...
- # Finer granularity in physical mem sizes would require
- # tighter spread between min and max possible deductions
+ def _ParseMemoryInfo(self):
+ line = self.meminfo.splitlines()[0]
+ usable_kbytes = int(line.split()[1])
+ # This code is from src/third_party/test/files/client/bin/base_utils.py
+ # usable_kbytes is system's usable DRAM in kbytes,
+ # as reported by memtotal() from device /proc/meminfo memtotal
+ # after Linux deducts 1.5% to 9.5% for system table overhead
+ # Undo the unknown actual deduction by rounding up
+ # to next small multiple of a big power-of-two
+ # eg 12GB - 5.1% gets rounded back up to 12GB
+ mindeduct = 0.005 # 0.5 percent
+ maxdeduct = 0.095 # 9.5 percent
+ # deduction range 1.5% .. 9.5% supports physical mem sizes
+ # 6GB .. 12GB in steps of .5GB
+ # 12GB .. 24GB in steps of 1 GB
+ # 24GB .. 48GB in steps of 2 GB ...
+ # Finer granularity in physical mem sizes would require
+ # tighter spread between min and max possible deductions
- # increase mem size by at least min deduction, without rounding
- min_kbytes = int(usable_kbytes / (1.0 - mindeduct))
- # increase mem size further by 2**n rounding, by 0..roundKb or more
- round_kbytes = int(usable_kbytes / (1.0 - maxdeduct)) - min_kbytes
- # find least binary roundup 2**n that covers worst-cast roundKb
- mod2n = 1 << int(math.ceil(math.log(round_kbytes, 2)))
- # have round_kbytes <= mod2n < round_kbytes*2
- # round min_kbytes up to next multiple of mod2n
- phys_kbytes = min_kbytes + mod2n - 1
- phys_kbytes -= phys_kbytes % mod2n # clear low bits
- self.phys_kbytes = phys_kbytes
+ # increase mem size by at least min deduction, without rounding
+ min_kbytes = int(usable_kbytes / (1.0 - mindeduct))
+ # increase mem size further by 2**n rounding, by 0..roundKb or more
+ round_kbytes = int(usable_kbytes / (1.0 - maxdeduct)) - min_kbytes
+        # find least binary roundup 2**n that covers worst-case roundKb
+ mod2n = 1 << int(math.ceil(math.log(round_kbytes, 2)))
+ # have round_kbytes <= mod2n < round_kbytes*2
+ # round min_kbytes up to next multiple of mod2n
+ phys_kbytes = min_kbytes + mod2n - 1
+ phys_kbytes -= phys_kbytes % mod2n # clear low bits
+ self.phys_kbytes = phys_kbytes
- def _GetMemoryInfo(self):
- # TODO yunlian: when the machine in rebooting, it will not return
- # meminfo, the assert does not catch it either
- command = 'cat /proc/meminfo'
- ret, self.meminfo, _ = self.ce.CrosRunCommandWOutput(
- command, machine=self.name, chromeos_root=self.chromeos_root)
- assert ret == 0, 'Could not get meminfo from machine: %s' % self.name
- if ret == 0:
- self._ParseMemoryInfo()
+ def _GetMemoryInfo(self):
+        # TODO yunlian: when the machine is rebooting, it will not return
+ # meminfo, the assert does not catch it either
+ command = "cat /proc/meminfo"
+ ret, self.meminfo, _ = self.ce.CrosRunCommandWOutput(
+ command, machine=self.name, chromeos_root=self.chromeos_root
+ )
+ assert ret == 0, "Could not get meminfo from machine: %s" % self.name
+ if ret == 0:
+ self._ParseMemoryInfo()
- def _GetCPUInfo(self):
- command = 'cat /proc/cpuinfo'
- ret, self.cpuinfo, _ = self.ce.CrosRunCommandWOutput(
- command, machine=self.name, chromeos_root=self.chromeos_root)
- assert ret == 0, 'Could not get cpuinfo from machine: %s' % self.name
+ def _GetCPUInfo(self):
+ command = "cat /proc/cpuinfo"
+ ret, self.cpuinfo, _ = self.ce.CrosRunCommandWOutput(
+ command, machine=self.name, chromeos_root=self.chromeos_root
+ )
+ assert ret == 0, "Could not get cpuinfo from machine: %s" % self.name
- def _ComputeMachineChecksumString(self):
- self.checksum_string = ''
- # Some lines from cpuinfo have to be excluded because they are not
- # persistent across DUTs.
- # MHz, BogoMIPS are dynamically changing values.
- # core id, apicid are identifiers assigned on startup
- # and may differ on the same type of machine.
- exclude_lines_list = ['MHz', 'BogoMIPS', 'bogomips', 'core id', 'apicid']
- for line in self.cpuinfo.splitlines():
- if not any(e in line for e in exclude_lines_list):
- self.checksum_string += line
- self.checksum_string += ' ' + str(self.phys_kbytes)
+ def _ComputeMachineChecksumString(self):
+ self.checksum_string = ""
+ # Some lines from cpuinfo have to be excluded because they are not
+ # persistent across DUTs.
+ # MHz, BogoMIPS are dynamically changing values.
+ # core id, apicid are identifiers assigned on startup
+ # and may differ on the same type of machine.
+ exclude_lines_list = [
+ "MHz",
+ "BogoMIPS",
+ "bogomips",
+ "core id",
+ "apicid",
+ ]
+ for line in self.cpuinfo.splitlines():
+ if not any(e in line for e in exclude_lines_list):
+ self.checksum_string += line
+ self.checksum_string += " " + str(self.phys_kbytes)
- def _GetMD5Checksum(self, ss):
- if ss:
- return hashlib.md5(ss.encode('utf-8')).hexdigest()
- return ''
+ def _GetMD5Checksum(self, ss):
+ if ss:
+ return hashlib.md5(ss.encode("utf-8")).hexdigest()
+ return ""
- def _GetMachineID(self):
- command = 'dump_vpd_log --full --stdout'
- _, if_out, _ = self.ce.CrosRunCommandWOutput(
- command, machine=self.name, chromeos_root=self.chromeos_root)
- b = if_out.splitlines()
- a = [l for l in b if 'Product' in l]
- if a:
- self.machine_id = a[0]
- return
- command = 'ifconfig'
- _, if_out, _ = self.ce.CrosRunCommandWOutput(
- command, machine=self.name, chromeos_root=self.chromeos_root)
- b = if_out.splitlines()
- a = [l for l in b if 'HWaddr' in l]
- if a:
- self.machine_id = '_'.join(a)
- return
- a = [l for l in b if 'ether' in l]
- if a:
- self.machine_id = '_'.join(a)
- return
- assert 0, 'Could not get machine_id from machine: %s' % self.name
+ def _GetMachineID(self):
+ command = "dump_vpd_log --full --stdout"
+ _, if_out, _ = self.ce.CrosRunCommandWOutput(
+ command, machine=self.name, chromeos_root=self.chromeos_root
+ )
+ b = if_out.splitlines()
+ a = [l for l in b if "Product" in l]
+ if a:
+ self.machine_id = a[0]
+ return
+ command = "ifconfig"
+ _, if_out, _ = self.ce.CrosRunCommandWOutput(
+ command, machine=self.name, chromeos_root=self.chromeos_root
+ )
+ b = if_out.splitlines()
+ a = [l for l in b if "HWaddr" in l]
+ if a:
+ self.machine_id = "_".join(a)
+ return
+ a = [l for l in b if "ether" in l]
+ if a:
+ self.machine_id = "_".join(a)
+ return
+ assert 0, "Could not get machine_id from machine: %s" % self.name
- def __str__(self):
- l = []
- l.append(self.name)
- l.append(str(self.image))
- l.append(str(self.checksum))
- l.append(str(self.locked))
- l.append(str(self.released_time))
- return ', '.join(l)
+ def __str__(self):
+ l = []
+ l.append(self.name)
+ l.append(str(self.image))
+ l.append(str(self.checksum))
+ l.append(str(self.locked))
+ l.append(str(self.released_time))
+ return ", ".join(l)
class MachineManager(object):
- """Lock, image and unlock machines locally for benchmark runs.
+ """Lock, image and unlock machines locally for benchmark runs.
- This class contains methods and calls to lock, unlock and image
- machines and distribute machines to each benchmark run. The assumption is
- that all of the machines for the experiment have been globally locked
- in the ExperimentRunner, but the machines still need to be locally
- locked/unlocked (allocated to benchmark runs) to prevent multiple benchmark
- runs within the same experiment from trying to use the same machine at the
- same time.
- """
+ This class contains methods and calls to lock, unlock and image
+ machines and distribute machines to each benchmark run. The assumption is
+ that all of the machines for the experiment have been globally locked
+ in the ExperimentRunner, but the machines still need to be locally
+ locked/unlocked (allocated to benchmark runs) to prevent multiple benchmark
+ runs within the same experiment from trying to use the same machine at the
+ same time.
+ """
- def __init__(self,
- chromeos_root,
- acquire_timeout,
- log_level,
- locks_dir,
- cmd_exec=None,
- lgr=None):
- self._lock = threading.RLock()
- self._all_machines = []
- self._machines = []
- self.image_lock = threading.Lock()
- self.num_reimages = 0
- self.chromeos_root = None
- self.machine_checksum = {}
- self.machine_checksum_string = {}
- self.acquire_timeout = acquire_timeout
- self.log_level = log_level
- self.locks_dir = locks_dir
- self.ce = cmd_exec or command_executer.GetCommandExecuter(
- log_level=self.log_level)
- self.logger = lgr or logger.GetLogger()
+ def __init__(
+ self,
+ chromeos_root,
+ acquire_timeout,
+ log_level,
+ locks_dir,
+ cmd_exec=None,
+ lgr=None,
+ ):
+ self._lock = threading.RLock()
+ self._all_machines = []
+ self._machines = []
+ self.image_lock = threading.Lock()
+ self.num_reimages = 0
+ self.chromeos_root = None
+ self.machine_checksum = {}
+ self.machine_checksum_string = {}
+ self.acquire_timeout = acquire_timeout
+ self.log_level = log_level
+ self.locks_dir = locks_dir
+ self.ce = cmd_exec or command_executer.GetCommandExecuter(
+ log_level=self.log_level
+ )
+ self.logger = lgr or logger.GetLogger()
- if self.locks_dir and not os.path.isdir(self.locks_dir):
- raise MissingLocksDirectory('Cannot access locks directory: %s' %
- self.locks_dir)
+ if self.locks_dir and not os.path.isdir(self.locks_dir):
+ raise MissingLocksDirectory(
+ "Cannot access locks directory: %s" % self.locks_dir
+ )
- self._initialized_machines = []
- self.chromeos_root = chromeos_root
+ self._initialized_machines = []
+ self.chromeos_root = chromeos_root
- def RemoveNonLockedMachines(self, locked_machines):
- for m in self._all_machines:
- if m.name not in locked_machines:
- self._all_machines.remove(m)
+ def RemoveNonLockedMachines(self, locked_machines):
+ for m in self._all_machines:
+ if m.name not in locked_machines:
+ self._all_machines.remove(m)
- for m in self._machines:
- if m.name not in locked_machines:
- self._machines.remove(m)
+ for m in self._machines:
+ if m.name not in locked_machines:
+ self._machines.remove(m)
- def GetChromeVersion(self, machine):
- """Get the version of Chrome running on the DUT."""
+ def GetChromeVersion(self, machine):
+ """Get the version of Chrome running on the DUT."""
- cmd = '/opt/google/chrome/chrome --version'
- ret, version, _ = self.ce.CrosRunCommandWOutput(
- cmd, machine=machine.name, chromeos_root=self.chromeos_root)
- if ret != 0:
- raise CrosCommandError("Couldn't get Chrome version from %s." %
- machine.name)
+ cmd = "/opt/google/chrome/chrome --version"
+ ret, version, _ = self.ce.CrosRunCommandWOutput(
+ cmd, machine=machine.name, chromeos_root=self.chromeos_root
+ )
+ if ret != 0:
+ raise CrosCommandError(
+ "Couldn't get Chrome version from %s." % machine.name
+ )
- if ret != 0:
- version = ''
- return version.rstrip()
+ if ret != 0:
+ version = ""
+ return version.rstrip()
- def ImageMachine(self, machine, label):
- checksum = label.checksum
+ def ImageMachine(self, machine, label):
+ checksum = label.checksum
- if checksum and (machine.checksum == checksum):
- return
- chromeos_root = label.chromeos_root
- if not chromeos_root:
- chromeos_root = self.chromeos_root
- image_chromeos_args = [
- image_chromeos.__file__, '--no_lock',
- '--chromeos_root=%s' % chromeos_root,
- '--image=%s' % label.chromeos_image,
- '--image_args=%s' % label.image_args,
- '--remote=%s' % machine.name,
- '--logging_level=%s' % self.log_level
- ]
- if label.board:
- image_chromeos_args.append('--board=%s' % label.board)
+ if checksum and (machine.checksum == checksum):
+ return
+ chromeos_root = label.chromeos_root
+ if not chromeos_root:
+ chromeos_root = self.chromeos_root
+ image_chromeos_args = [
+ image_chromeos.__file__,
+ "--no_lock",
+ "--chromeos_root=%s" % chromeos_root,
+ "--image=%s" % label.chromeos_image,
+ "--image_args=%s" % label.image_args,
+ "--remote=%s" % machine.name,
+ "--logging_level=%s" % self.log_level,
+ ]
+ if label.board:
+ image_chromeos_args.append("--board=%s" % label.board)
- # Currently can't image two machines at once.
- # So have to serialized on this lock.
- save_ce_log_level = self.ce.log_level
- if self.log_level != 'verbose':
- self.ce.log_level = 'average'
+ # Currently can't image two machines at once.
+ # So have to serialized on this lock.
+ save_ce_log_level = self.ce.log_level
+ if self.log_level != "verbose":
+ self.ce.log_level = "average"
- with self.image_lock:
- if self.log_level != 'verbose':
- self.logger.LogOutput('Pushing image onto machine.')
- self.logger.LogOutput('Running image_chromeos.DoImage with %s' %
- ' '.join(image_chromeos_args))
- retval = 0
- if not test_flag.GetTestMode():
- retval = image_chromeos.DoImage(image_chromeos_args)
- if retval:
- cmd = 'reboot && exit'
- if self.log_level != 'verbose':
- self.logger.LogOutput('reboot & exit.')
- self.ce.CrosRunCommand(
- cmd, machine=machine.name, chromeos_root=self.chromeos_root)
- time.sleep(60)
- if self.log_level != 'verbose':
- self.logger.LogOutput('Pushing image onto machine.')
- self.logger.LogOutput('Running image_chromeos.DoImage with %s' %
- ' '.join(image_chromeos_args))
- retval = image_chromeos.DoImage(image_chromeos_args)
- if retval:
- raise RuntimeError("Could not image machine: '%s'." % machine.name)
+ with self.image_lock:
+ if self.log_level != "verbose":
+ self.logger.LogOutput("Pushing image onto machine.")
+ self.logger.LogOutput(
+ "Running image_chromeos.DoImage with %s"
+ % " ".join(image_chromeos_args)
+ )
+ retval = 0
+ if not test_flag.GetTestMode():
+ retval = image_chromeos.DoImage(image_chromeos_args)
+ if retval:
+ cmd = "reboot && exit"
+ if self.log_level != "verbose":
+ self.logger.LogOutput("reboot & exit.")
+ self.ce.CrosRunCommand(
+ cmd, machine=machine.name, chromeos_root=self.chromeos_root
+ )
+ time.sleep(60)
+ if self.log_level != "verbose":
+ self.logger.LogOutput("Pushing image onto machine.")
+ self.logger.LogOutput(
+ "Running image_chromeos.DoImage with %s"
+ % " ".join(image_chromeos_args)
+ )
+ retval = image_chromeos.DoImage(image_chromeos_args)
+ if retval:
+ raise RuntimeError(
+ "Could not image machine: '%s'." % machine.name
+ )
- self.num_reimages += 1
- machine.checksum = checksum
- machine.image = label.chromeos_image
- machine.label = label
+ self.num_reimages += 1
+ machine.checksum = checksum
+ machine.image = label.chromeos_image
+ machine.label = label
- if not label.chrome_version:
- label.chrome_version = self.GetChromeVersion(machine)
+ if not label.chrome_version:
+ label.chrome_version = self.GetChromeVersion(machine)
- self.ce.log_level = save_ce_log_level
- return retval
+ self.ce.log_level = save_ce_log_level
+ return retval
- def ComputeCommonCheckSum(self, label):
- # Since this is used for cache lookups before the machines have been
- # compared/verified, check here to make sure they all have the same
- # checksum (otherwise the cache lookup may not be valid).
- base = None
- for machine in self.GetMachines(label):
- # Make sure the machine's checksums are calculated.
- if not machine.machine_checksum:
- machine.SetUpChecksumInfo()
- # Use the first machine as the basis for comparison.
- if not base:
- base = machine
- # Make sure this machine's checksum matches our 'common' checksum.
- if base.machine_checksum != machine.machine_checksum:
- # Found a difference. Fatal error.
- # Extract non-matching part and report it.
- for mismatch_index in range(len(base.checksum_string)):
- if (mismatch_index >= len(machine.checksum_string) or
- base.checksum_string[mismatch_index] !=
- machine.checksum_string[mismatch_index]):
- break
- # We want to show some context after the mismatch.
- end_ind = mismatch_index + 8
- # Print a mismatching string.
- raise BadChecksum(
- 'Machine checksums do not match!\n'
- 'Diff:\n'
- f'{base.name}: {base.checksum_string[:end_ind]}\n'
- f'{machine.name}: {machine.checksum_string[:end_ind]}\n'
- '\nCheck for matching /proc/cpuinfo and /proc/meminfo on DUTs.\n')
- self.machine_checksum[label.name] = base.machine_checksum
+ def ComputeCommonCheckSum(self, label):
+ # Since this is used for cache lookups before the machines have been
+ # compared/verified, check here to make sure they all have the same
+ # checksum (otherwise the cache lookup may not be valid).
+ base = None
+ for machine in self.GetMachines(label):
+ # Make sure the machine's checksums are calculated.
+ if not machine.machine_checksum:
+ machine.SetUpChecksumInfo()
+ # Use the first machine as the basis for comparison.
+ if not base:
+ base = machine
+ # Make sure this machine's checksum matches our 'common' checksum.
+ if base.machine_checksum != machine.machine_checksum:
+ # Found a difference. Fatal error.
+ # Extract non-matching part and report it.
+ for mismatch_index in range(len(base.checksum_string)):
+ if (
+ mismatch_index >= len(machine.checksum_string)
+ or base.checksum_string[mismatch_index]
+ != machine.checksum_string[mismatch_index]
+ ):
+ break
+ # We want to show some context after the mismatch.
+ end_ind = mismatch_index + 8
+ # Print a mismatching string.
+ raise BadChecksum(
+ "Machine checksums do not match!\n"
+ "Diff:\n"
+ f"{base.name}: {base.checksum_string[:end_ind]}\n"
+ f"{machine.name}: {machine.checksum_string[:end_ind]}\n"
+ "\nCheck for matching /proc/cpuinfo and /proc/meminfo on DUTs.\n"
+ )
+ self.machine_checksum[label.name] = base.machine_checksum
- def ComputeCommonCheckSumString(self, label):
- # The assumption is that this function is only called AFTER
- # ComputeCommonCheckSum, so there is no need to verify the machines
- # are the same here. If this is ever changed, this function should be
- # modified to verify that all the machines for a given label are the
- # same.
- for machine in self.GetMachines(label):
- if machine.checksum_string:
- self.machine_checksum_string[label.name] = machine.checksum_string
- break
+ def ComputeCommonCheckSumString(self, label):
+ # The assumption is that this function is only called AFTER
+ # ComputeCommonCheckSum, so there is no need to verify the machines
+ # are the same here. If this is ever changed, this function should be
+ # modified to verify that all the machines for a given label are the
+ # same.
+ for machine in self.GetMachines(label):
+ if machine.checksum_string:
+ self.machine_checksum_string[
+ label.name
+ ] = machine.checksum_string
+ break
- def _TryToLockMachine(self, cros_machine):
- with self._lock:
- assert cros_machine, "Machine can't be None"
- for m in self._machines:
- if m.name == cros_machine.name:
- return
- locked = True
- if self.locks_dir:
- locked = file_lock_machine.Machine(cros_machine.name,
- self.locks_dir).Lock(
- True, sys.argv[0])
- if locked:
- self._machines.append(cros_machine)
- command = 'cat %s' % CHECKSUM_FILE
- ret, out, _ = self.ce.CrosRunCommandWOutput(
- command,
- chromeos_root=self.chromeos_root,
- machine=cros_machine.name)
- if ret == 0:
- cros_machine.checksum = out.strip()
- elif self.locks_dir:
- self.logger.LogOutput("Couldn't lock: %s" % cros_machine.name)
+ def _TryToLockMachine(self, cros_machine):
+ with self._lock:
+ assert cros_machine, "Machine can't be None"
+ for m in self._machines:
+ if m.name == cros_machine.name:
+ return
+ locked = True
+ if self.locks_dir:
+ locked = file_lock_machine.Machine(
+ cros_machine.name, self.locks_dir
+ ).Lock(True, sys.argv[0])
+ if locked:
+ self._machines.append(cros_machine)
+ command = "cat %s" % CHECKSUM_FILE
+ ret, out, _ = self.ce.CrosRunCommandWOutput(
+ command,
+ chromeos_root=self.chromeos_root,
+ machine=cros_machine.name,
+ )
+ if ret == 0:
+ cros_machine.checksum = out.strip()
+ elif self.locks_dir:
+ self.logger.LogOutput("Couldn't lock: %s" % cros_machine.name)
- # This is called from single threaded mode.
- def AddMachine(self, machine_name):
- with self._lock:
- for m in self._all_machines:
- assert m.name != machine_name, 'Tried to double-add %s' % machine_name
+ # This is called from single threaded mode.
+ def AddMachine(self, machine_name):
+ with self._lock:
+ for m in self._all_machines:
+ assert m.name != machine_name, (
+ "Tried to double-add %s" % machine_name
+ )
- if self.log_level != 'verbose':
- self.logger.LogOutput('Setting up remote access to %s' % machine_name)
- self.logger.LogOutput('Checking machine characteristics for %s' %
- machine_name)
- cm = CrosMachine(machine_name, self.chromeos_root, self.log_level)
- if cm.machine_checksum:
- self._all_machines.append(cm)
+ if self.log_level != "verbose":
+ self.logger.LogOutput(
+ "Setting up remote access to %s" % machine_name
+ )
+ self.logger.LogOutput(
+ "Checking machine characteristics for %s" % machine_name
+ )
+ cm = CrosMachine(machine_name, self.chromeos_root, self.log_level)
+ if cm.machine_checksum:
+ self._all_machines.append(cm)
- def RemoveMachine(self, machine_name):
- with self._lock:
- self._machines = [m for m in self._machines if m.name != machine_name]
- if self.locks_dir:
- res = file_lock_machine.Machine(machine_name,
- self.locks_dir).Unlock(True)
- if not res:
- self.logger.LogError("Could not unlock machine: '%s'." % machine_name)
+ def RemoveMachine(self, machine_name):
+ with self._lock:
+ self._machines = [
+ m for m in self._machines if m.name != machine_name
+ ]
+ if self.locks_dir:
+ res = file_lock_machine.Machine(
+ machine_name, self.locks_dir
+ ).Unlock(True)
+ if not res:
+ self.logger.LogError(
+ "Could not unlock machine: '%s'." % machine_name
+ )
- def ForceSameImageToAllMachines(self, label):
- machines = self.GetMachines(label)
- for m in machines:
- self.ImageMachine(m, label)
- m.SetUpChecksumInfo()
-
- def AcquireMachine(self, label):
- image_checksum = label.checksum
- machines = self.GetMachines(label)
- check_interval_time = 120
- with self._lock:
- # Lazily external lock machines
- while self.acquire_timeout >= 0:
+ def ForceSameImageToAllMachines(self, label):
+ machines = self.GetMachines(label)
for m in machines:
- new_machine = m not in self._all_machines
- self._TryToLockMachine(m)
- if new_machine:
- m.released_time = time.time()
- if self.GetAvailableMachines(label):
- break
- sleep_time = max(1, min(self.acquire_timeout, check_interval_time))
- time.sleep(sleep_time)
- self.acquire_timeout -= sleep_time
+ self.ImageMachine(m, label)
+ m.SetUpChecksumInfo()
- if self.acquire_timeout < 0:
- self.logger.LogFatal('Could not acquire any of the '
- "following machines: '%s'" %
- ', '.join(machine.name for machine in machines))
+ def AcquireMachine(self, label):
+ image_checksum = label.checksum
+ machines = self.GetMachines(label)
+ check_interval_time = 120
+ with self._lock:
+ # Lazily external lock machines
+ while self.acquire_timeout >= 0:
+ for m in machines:
+ new_machine = m not in self._all_machines
+ self._TryToLockMachine(m)
+ if new_machine:
+ m.released_time = time.time()
+ if self.GetAvailableMachines(label):
+ break
+ sleep_time = max(
+ 1, min(self.acquire_timeout, check_interval_time)
+ )
+ time.sleep(sleep_time)
+ self.acquire_timeout -= sleep_time
+ if self.acquire_timeout < 0:
+ self.logger.LogFatal(
+ "Could not acquire any of the "
+ "following machines: '%s'"
+ % ", ".join(machine.name for machine in machines)
+ )
-### for m in self._machines:
-### if (m.locked and time.time() - m.released_time < 10 and
-### m.checksum == image_checksum):
-### return None
- unlocked_machines = [
- machine for machine in self.GetAvailableMachines(label)
- if not machine.locked
- ]
- for m in unlocked_machines:
- if image_checksum and m.checksum == image_checksum:
- m.locked = True
- m.test_run = threading.current_thread()
- return m
- for m in unlocked_machines:
- if not m.checksum:
- m.locked = True
- m.test_run = threading.current_thread()
- return m
- # This logic ensures that threads waiting on a machine will get a machine
- # with a checksum equal to their image over other threads. This saves time
- # when crosperf initially assigns the machines to threads by minimizing
- # the number of re-images.
- # TODO(asharif): If we centralize the thread-scheduler, we wont need this
- # code and can implement minimal reimaging code more cleanly.
- for m in unlocked_machines:
- if time.time() - m.released_time > 15:
- # The release time gap is too large, so it is probably in the start
- # stage, we need to reset the released_time.
- m.released_time = time.time()
- elif time.time() - m.released_time > 8:
- m.locked = True
- m.test_run = threading.current_thread()
- return m
- return None
+ ### for m in self._machines:
+ ### if (m.locked and time.time() - m.released_time < 10 and
+ ### m.checksum == image_checksum):
+ ### return None
+ unlocked_machines = [
+ machine
+ for machine in self.GetAvailableMachines(label)
+ if not machine.locked
+ ]
+ for m in unlocked_machines:
+ if image_checksum and m.checksum == image_checksum:
+ m.locked = True
+ m.test_run = threading.current_thread()
+ return m
+ for m in unlocked_machines:
+ if not m.checksum:
+ m.locked = True
+ m.test_run = threading.current_thread()
+ return m
+ # This logic ensures that threads waiting on a machine will get a machine
+ # with a checksum equal to their image over other threads. This saves time
+ # when crosperf initially assigns the machines to threads by minimizing
+ # the number of re-images.
+ # TODO(asharif): If we centralize the thread-scheduler, we wont need this
+ # code and can implement minimal reimaging code more cleanly.
+ for m in unlocked_machines:
+ if time.time() - m.released_time > 15:
+ # The release time gap is too large, so it is probably in the start
+ # stage, we need to reset the released_time.
+ m.released_time = time.time()
+ elif time.time() - m.released_time > 8:
+ m.locked = True
+ m.test_run = threading.current_thread()
+ return m
+ return None
- def GetAvailableMachines(self, label=None):
- if not label:
- return self._machines
- return [m for m in self._machines if m.name in label.remote]
+ def GetAvailableMachines(self, label=None):
+ if not label:
+ return self._machines
+ return [m for m in self._machines if m.name in label.remote]
- def GetMachines(self, label=None):
- if not label:
- return self._all_machines
- return [m for m in self._all_machines if m.name in label.remote]
+ def GetMachines(self, label=None):
+ if not label:
+ return self._all_machines
+ return [m for m in self._all_machines if m.name in label.remote]
- def ReleaseMachine(self, machine):
- with self._lock:
- for m in self._machines:
- if machine.name == m.name:
- assert m.locked, 'Tried to double-release %s' % m.name
- m.released_time = time.time()
- m.locked = False
- m.status = 'Available'
- break
+ def ReleaseMachine(self, machine):
+ with self._lock:
+ for m in self._machines:
+ if machine.name == m.name:
+ assert m.locked, "Tried to double-release %s" % m.name
+ m.released_time = time.time()
+ m.locked = False
+ m.status = "Available"
+ break
- def Cleanup(self):
- with self._lock:
- # Unlock all machines (via file lock)
- for m in self._machines:
- res = file_lock_machine.Machine(m.name, self.locks_dir).Unlock(True)
+ def Cleanup(self):
+ with self._lock:
+ # Unlock all machines (via file lock)
+ for m in self._machines:
+ res = file_lock_machine.Machine(m.name, self.locks_dir).Unlock(
+ True
+ )
- if not res:
- self.logger.LogError("Could not unlock machine: '%s'." % m.name)
+ if not res:
+ self.logger.LogError(
+ "Could not unlock machine: '%s'." % m.name
+ )
- def __str__(self):
- with self._lock:
- l = ['MachineManager Status:'] + [str(m) for m in self._machines]
- return '\n'.join(l)
+ def __str__(self):
+ with self._lock:
+ l = ["MachineManager Status:"] + [str(m) for m in self._machines]
+ return "\n".join(l)
- def AsString(self):
- with self._lock:
- stringify_fmt = '%-30s %-10s %-4s %-25s %-32s'
- header = stringify_fmt % ('Machine', 'Thread', 'Lock', 'Status',
- 'Checksum')
- table = [header]
- for m in self._machines:
- if m.test_run:
- test_name = m.test_run.name
- test_status = m.test_run.timeline.GetLastEvent()
- else:
- test_name = ''
- test_status = ''
+ def AsString(self):
+ with self._lock:
+ stringify_fmt = "%-30s %-10s %-4s %-25s %-32s"
+ header = stringify_fmt % (
+ "Machine",
+ "Thread",
+ "Lock",
+ "Status",
+ "Checksum",
+ )
+ table = [header]
+ for m in self._machines:
+ if m.test_run:
+ test_name = m.test_run.name
+ test_status = m.test_run.timeline.GetLastEvent()
+ else:
+ test_name = ""
+ test_status = ""
- try:
- machine_string = stringify_fmt % (m.name, test_name, m.locked,
- test_status, m.checksum)
- except ValueError:
- machine_string = ''
- table.append(machine_string)
- return 'Machine Status:\n%s' % '\n'.join(table)
+ try:
+ machine_string = stringify_fmt % (
+ m.name,
+ test_name,
+ m.locked,
+ test_status,
+ m.checksum,
+ )
+ except ValueError:
+ machine_string = ""
+ table.append(machine_string)
+ return "Machine Status:\n%s" % "\n".join(table)
- def GetAllCPUInfo(self, labels):
- """Get cpuinfo for labels, merge them if their cpuinfo are the same."""
- dic = collections.defaultdict(list)
- for label in labels:
- for machine in self._all_machines:
- if machine.name in label.remote:
- dic[machine.cpuinfo].append(label.name)
- break
- output_segs = []
- for key, v in dic.items():
- output = ' '.join(v)
- output += '\n-------------------\n'
- output += key
- output += '\n\n\n'
- output_segs.append(output)
- return ''.join(output_segs)
+ def GetAllCPUInfo(self, labels):
+ """Get cpuinfo for labels, merge them if their cpuinfo are the same."""
+ dic = collections.defaultdict(list)
+ for label in labels:
+ for machine in self._all_machines:
+ if machine.name in label.remote:
+ dic[machine.cpuinfo].append(label.name)
+ break
+ output_segs = []
+ for key, v in dic.items():
+ output = " ".join(v)
+ output += "\n-------------------\n"
+ output += key
+ output += "\n\n\n"
+ output_segs.append(output)
+ return "".join(output_segs)
- def GetAllMachines(self):
- return self._all_machines
+ def GetAllMachines(self):
+ return self._all_machines
class MockCrosMachine(CrosMachine):
- """Mock cros machine class."""
- # pylint: disable=super-init-not-called
+ """Mock cros machine class."""
- MEMINFO_STRING = """MemTotal: 3990332 kB
+ # pylint: disable=super-init-not-called
+
+ MEMINFO_STRING = """MemTotal: 3990332 kB
MemFree: 2608396 kB
Buffers: 147168 kB
Cached: 811560 kB
@@ -585,7 +645,7 @@
DirectMap2M: 4096000 kB
"""
- CPUINFO_STRING = """processor: 0
+ CPUINFO_STRING = """processor: 0
vendor_id: GenuineIntel
cpu family: 6
model: 42
@@ -638,91 +698,97 @@
power management:
"""
- def __init__(self, name, chromeos_root, log_level):
- self.name = name
- self.image = None
- self.checksum = None
- self.locked = False
- self.released_time = time.time()
- self.test_run = None
- self.chromeos_root = chromeos_root
- self.checksum_string = re.sub(r'\d', '', name)
- # In test, we assume "lumpy1", "lumpy2" are the same machine.
- self.machine_checksum = self._GetMD5Checksum(self.checksum_string)
- self.log_level = log_level
- self.label = None
- self.cooldown_wait_time = 0
- self.ce = command_executer.GetCommandExecuter(log_level=self.log_level)
- self._GetCPUInfo()
+ def __init__(self, name, chromeos_root, log_level):
+ self.name = name
+ self.image = None
+ self.checksum = None
+ self.locked = False
+ self.released_time = time.time()
+ self.test_run = None
+ self.chromeos_root = chromeos_root
+ self.checksum_string = re.sub(r"\d", "", name)
+ # In test, we assume "lumpy1", "lumpy2" are the same machine.
+ self.machine_checksum = self._GetMD5Checksum(self.checksum_string)
+ self.log_level = log_level
+ self.label = None
+ self.cooldown_wait_time = 0
+ self.ce = command_executer.GetCommandExecuter(log_level=self.log_level)
+ self._GetCPUInfo()
- def IsReachable(self):
- return True
+ def IsReachable(self):
+ return True
- def _GetMemoryInfo(self):
- self.meminfo = self.MEMINFO_STRING
- self._ParseMemoryInfo()
+ def _GetMemoryInfo(self):
+ self.meminfo = self.MEMINFO_STRING
+ self._ParseMemoryInfo()
- def _GetCPUInfo(self):
- self.cpuinfo = self.CPUINFO_STRING
+ def _GetCPUInfo(self):
+ self.cpuinfo = self.CPUINFO_STRING
class MockMachineManager(MachineManager):
- """Mock machine manager class."""
+ """Mock machine manager class."""
- def __init__(self, chromeos_root, acquire_timeout, log_level, locks_dir):
- super(MockMachineManager, self).__init__(chromeos_root, acquire_timeout,
- log_level, locks_dir)
+ def __init__(self, chromeos_root, acquire_timeout, log_level, locks_dir):
+ super(MockMachineManager, self).__init__(
+ chromeos_root, acquire_timeout, log_level, locks_dir
+ )
- def _TryToLockMachine(self, cros_machine):
- self._machines.append(cros_machine)
- cros_machine.checksum = ''
+ def _TryToLockMachine(self, cros_machine):
+ self._machines.append(cros_machine)
+ cros_machine.checksum = ""
- def AddMachine(self, machine_name):
- with self._lock:
- for m in self._all_machines:
- assert m.name != machine_name, 'Tried to double-add %s' % machine_name
- cm = MockCrosMachine(machine_name, self.chromeos_root, self.log_level)
- assert cm.machine_checksum, ('Could not find checksum for machine %s' %
- machine_name)
- # In Original MachineManager, the test is 'if cm.machine_checksum:' - if a
- # machine is unreachable, then its machine_checksum is None. Here we
- # cannot do this, because machine_checksum is always faked, so we directly
- # test cm.IsReachable, which is properly mocked.
- if cm.IsReachable():
- self._all_machines.append(cm)
+ def AddMachine(self, machine_name):
+ with self._lock:
+ for m in self._all_machines:
+ assert m.name != machine_name, (
+ "Tried to double-add %s" % machine_name
+ )
+ cm = MockCrosMachine(
+ machine_name, self.chromeos_root, self.log_level
+ )
+ assert cm.machine_checksum, (
+ "Could not find checksum for machine %s" % machine_name
+ )
+ # In Original MachineManager, the test is 'if cm.machine_checksum:' - if a
+ # machine is unreachable, then its machine_checksum is None. Here we
+ # cannot do this, because machine_checksum is always faked, so we directly
+ # test cm.IsReachable, which is properly mocked.
+ if cm.IsReachable():
+ self._all_machines.append(cm)
- def GetChromeVersion(self, machine):
- return 'Mock Chrome Version R50'
+ def GetChromeVersion(self, machine):
+ return "Mock Chrome Version R50"
- def AcquireMachine(self, label):
- for machine in self._all_machines:
- if not machine.locked:
- machine.locked = True
- return machine
- return None
+ def AcquireMachine(self, label):
+ for machine in self._all_machines:
+ if not machine.locked:
+ machine.locked = True
+ return machine
+ return None
- def ImageMachine(self, machine, label):
- if machine or label:
- return 0
- return 1
+ def ImageMachine(self, machine, label):
+ if machine or label:
+ return 0
+ return 1
- def ReleaseMachine(self, machine):
- machine.locked = False
+ def ReleaseMachine(self, machine):
+ machine.locked = False
- def GetMachines(self, label=None):
- return self._all_machines
+ def GetMachines(self, label=None):
+ return self._all_machines
- def GetAvailableMachines(self, label=None):
- return self._all_machines
+ def GetAvailableMachines(self, label=None):
+ return self._all_machines
- def ForceSameImageToAllMachines(self, label=None):
- return 0
+ def ForceSameImageToAllMachines(self, label=None):
+ return 0
- def ComputeCommonCheckSum(self, label=None):
- common_checksum = 12345
- for machine in self.GetMachines(label):
- machine.machine_checksum = common_checksum
- self.machine_checksum[label.name] = common_checksum
+ def ComputeCommonCheckSum(self, label=None):
+ common_checksum = 12345
+ for machine in self.GetMachines(label):
+ machine.machine_checksum = common_checksum
+ self.machine_checksum[label.name] = common_checksum
- def GetAllMachines(self):
- return self._all_machines
+ def GetAllMachines(self):
+ return self._all_machines
diff --git a/crosperf/machine_manager_unittest.py b/crosperf/machine_manager_unittest.py
index f47cc88..6324a22 100755
--- a/crosperf/machine_manager_unittest.py
+++ b/crosperf/machine_manager_unittest.py
@@ -1,493 +1,574 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for machine_manager."""
-from __future__ import print_function
+import hashlib
import os.path
import time
-import hashlib
import unittest
import unittest.mock as mock
-import label
-import machine_manager
-import image_checksummer
-import test_flag
-
from benchmark import Benchmark
from benchmark_run import MockBenchmarkRun
from cros_utils import command_executer
from cros_utils import logger
+import image_checksummer
+import label
+import machine_manager
+import test_flag
+
# pylint: disable=protected-access
class MyMachineManager(machine_manager.MachineManager):
- """Machine manager for test."""
+ """Machine manager for test."""
- def __init__(self, chromeos_root):
- super(MyMachineManager, self).__init__(chromeos_root, 0, 'average', '')
+ def __init__(self, chromeos_root):
+ super(MyMachineManager, self).__init__(chromeos_root, 0, "average", "")
- def _TryToLockMachine(self, cros_machine):
- self._machines.append(cros_machine)
- cros_machine.checksum = ''
+ def _TryToLockMachine(self, cros_machine):
+ self._machines.append(cros_machine)
+ cros_machine.checksum = ""
- def AddMachine(self, machine_name):
- with self._lock:
- for m in self._all_machines:
- assert m.name != machine_name, 'Tried to double-add %s' % machine_name
- cm = machine_manager.MockCrosMachine(machine_name, self.chromeos_root,
- 'average')
- assert cm.machine_checksum, ('Could not find checksum for machine %s' %
- machine_name)
- self._all_machines.append(cm)
+ def AddMachine(self, machine_name):
+ with self._lock:
+ for m in self._all_machines:
+ assert m.name != machine_name, (
+ "Tried to double-add %s" % machine_name
+ )
+ cm = machine_manager.MockCrosMachine(
+ machine_name, self.chromeos_root, "average"
+ )
+ assert cm.machine_checksum, (
+ "Could not find checksum for machine %s" % machine_name
+ )
+ self._all_machines.append(cm)
-CHROMEOS_ROOT = '/tmp/chromeos-root'
-MACHINE_NAMES = ['lumpy1', 'lumpy2', 'lumpy3', 'daisy1', 'daisy2']
-LABEL_LUMPY = label.MockLabel('lumpy', 'build', 'lumpy_chromeos_image',
- 'autotest_dir', 'debug_dir', CHROMEOS_ROOT,
- 'lumpy', ['lumpy1', 'lumpy2', 'lumpy3', 'lumpy4'],
- '', '', False, 'average', 'gcc', False, None)
-LABEL_MIX = label.MockLabel('mix', 'build', 'chromeos_image', 'autotest_dir',
- 'debug_dir', CHROMEOS_ROOT, 'mix',
- ['daisy1', 'daisy2', 'lumpy3', 'lumpy4'], '', '',
- False, 'average', 'gcc', False, None)
+CHROMEOS_ROOT = "/tmp/chromeos-root"
+MACHINE_NAMES = ["lumpy1", "lumpy2", "lumpy3", "daisy1", "daisy2"]
+LABEL_LUMPY = label.MockLabel(
+ "lumpy",
+ "build",
+ "lumpy_chromeos_image",
+ "autotest_dir",
+ "debug_dir",
+ CHROMEOS_ROOT,
+ "lumpy",
+ ["lumpy1", "lumpy2", "lumpy3", "lumpy4"],
+ "",
+ "",
+ False,
+ "average",
+ "gcc",
+ False,
+ None,
+)
+LABEL_MIX = label.MockLabel(
+ "mix",
+ "build",
+ "chromeos_image",
+ "autotest_dir",
+ "debug_dir",
+ CHROMEOS_ROOT,
+ "mix",
+ ["daisy1", "daisy2", "lumpy3", "lumpy4"],
+ "",
+ "",
+ False,
+ "average",
+ "gcc",
+ False,
+ None,
+)
class MachineManagerTest(unittest.TestCase):
- """Test for machine manager class."""
+ """Test for machine manager class."""
- msgs = []
- image_log = []
- log_fatal_msgs = []
- fake_logger_count = 0
- fake_logger_msgs = []
+ msgs = []
+ image_log = []
+ log_fatal_msgs = []
+ fake_logger_count = 0
+ fake_logger_msgs = []
- mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
-
- mock_logger = mock.Mock(spec=logger.Logger)
-
- mock_lumpy1 = mock.Mock(spec=machine_manager.CrosMachine)
- mock_lumpy2 = mock.Mock(spec=machine_manager.CrosMachine)
- mock_lumpy3 = mock.Mock(spec=machine_manager.CrosMachine)
- mock_lumpy4 = mock.Mock(spec=machine_manager.CrosMachine)
- mock_daisy1 = mock.Mock(spec=machine_manager.CrosMachine)
- mock_daisy2 = mock.Mock(spec=machine_manager.CrosMachine)
-
- @mock.patch.object(os.path, 'isdir')
-
- # pylint: disable=arguments-differ
- def setUp(self, mock_isdir):
-
- mock_isdir.return_value = True
- self.mm = machine_manager.MachineManager('/usr/local/chromeos', 0,
- 'average', None,
- self.mock_cmd_exec,
- self.mock_logger)
-
- self.mock_lumpy1.name = 'lumpy1'
- self.mock_lumpy2.name = 'lumpy2'
- self.mock_lumpy3.name = 'lumpy3'
- self.mock_lumpy4.name = 'lumpy4'
- self.mock_daisy1.name = 'daisy1'
- self.mock_daisy2.name = 'daisy2'
- self.mock_lumpy1.machine_checksum = 'lumpy123'
- self.mock_lumpy2.machine_checksum = 'lumpy123'
- self.mock_lumpy3.machine_checksum = 'lumpy123'
- self.mock_lumpy4.machine_checksum = 'lumpy123'
- self.mock_daisy1.machine_checksum = 'daisy12'
- self.mock_daisy2.machine_checksum = 'daisy12'
- self.mock_lumpy1.checksum_string = 'lumpy_checksum_str'
- self.mock_lumpy2.checksum_string = 'lumpy_checksum_str'
- self.mock_lumpy3.checksum_string = 'lumpy_checksum_str'
- self.mock_lumpy4.checksum_string = 'lumpy_checksum_str'
- self.mock_daisy1.checksum_string = 'daisy_checksum_str'
- self.mock_daisy2.checksum_string = 'daisy_checksum_str'
- self.mock_lumpy1.cpuinfo = 'lumpy_cpu_info'
- self.mock_lumpy2.cpuinfo = 'lumpy_cpu_info'
- self.mock_lumpy3.cpuinfo = 'lumpy_cpu_info'
- self.mock_lumpy4.cpuinfo = 'lumpy_cpu_info'
- self.mock_daisy1.cpuinfo = 'daisy_cpu_info'
- self.mock_daisy2.cpuinfo = 'daisy_cpu_info'
- self.mm._all_machines.append(self.mock_daisy1)
- self.mm._all_machines.append(self.mock_daisy2)
- self.mm._all_machines.append(self.mock_lumpy1)
- self.mm._all_machines.append(self.mock_lumpy2)
- self.mm._all_machines.append(self.mock_lumpy3)
-
- def testGetMachines(self):
- manager = MyMachineManager(CHROMEOS_ROOT)
- for m in MACHINE_NAMES:
- manager.AddMachine(m)
- names = [m.name for m in manager.GetMachines(LABEL_LUMPY)]
- self.assertEqual(names, ['lumpy1', 'lumpy2', 'lumpy3'])
-
- def testGetAvailableMachines(self):
- manager = MyMachineManager(CHROMEOS_ROOT)
- for m in MACHINE_NAMES:
- manager.AddMachine(m)
- for m in manager._all_machines:
- if int(m.name[-1]) % 2:
- manager._TryToLockMachine(m)
- names = [m.name for m in manager.GetAvailableMachines(LABEL_LUMPY)]
- self.assertEqual(names, ['lumpy1', 'lumpy3'])
-
- @mock.patch.object(time, 'sleep')
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
- @mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
- def test_image_machine(self, mock_checksummer, mock_run_croscmd, mock_run_cmd,
- mock_sleep):
-
- def FakeMD5Checksum(_input_str):
- return 'machine_fake_md5_checksum'
-
- self.fake_logger_count = 0
- self.fake_logger_msgs = []
-
- def FakeLogOutput(msg):
- self.fake_logger_count += 1
- self.fake_logger_msgs.append(msg)
-
- def ResetValues():
- self.fake_logger_count = 0
- self.fake_logger_msgs = []
- mock_run_cmd.reset_mock()
- mock_run_croscmd.reset_mock()
- mock_checksummer.reset_mock()
- mock_sleep.reset_mock()
- machine.checksum = 'fake_md5_checksum'
- self.mm.checksum = None
- self.mm.num_reimages = 0
-
- self.mock_cmd_exec.CrosRunCommand = mock_run_croscmd
- self.mock_cmd_exec.RunCommand = mock_run_cmd
-
- self.mm.logger.LogOutput = FakeLogOutput
- machine = self.mock_lumpy1
- machine._GetMD5Checksum = FakeMD5Checksum
- machine.checksum = 'fake_md5_checksum'
- mock_checksummer.return_value = 'fake_md5_checksum'
- self.mock_cmd_exec.log_level = 'verbose'
-
- test_flag.SetTestMode(True)
- # Test 1: label.image_type == "local"
- LABEL_LUMPY.image_type = 'local'
- self.mm.ImageMachine(machine, LABEL_LUMPY)
- self.assertEqual(mock_run_cmd.call_count, 0)
- self.assertEqual(mock_run_croscmd.call_count, 0)
-
- # Test 2: label.image_type == "trybot"
- ResetValues()
- LABEL_LUMPY.image_type = 'trybot'
- mock_run_cmd.return_value = 0
- self.mm.ImageMachine(machine, LABEL_LUMPY)
- self.assertEqual(mock_run_croscmd.call_count, 0)
- self.assertEqual(mock_checksummer.call_count, 0)
-
- # Test 3: label.image_type is neither local nor trybot; retval from
- # RunCommand is 1, i.e. image_chromeos fails...
- ResetValues()
- LABEL_LUMPY.image_type = 'other'
- mock_run_cmd.return_value = 1
- try:
- self.mm.ImageMachine(machine, LABEL_LUMPY)
- except RuntimeError:
- self.assertEqual(mock_checksummer.call_count, 0)
- self.assertEqual(mock_run_cmd.call_count, 2)
- self.assertEqual(mock_run_croscmd.call_count, 1)
- self.assertEqual(mock_sleep.call_count, 1)
- image_call_args_str = mock_run_cmd.call_args[0][0]
- image_call_args = image_call_args_str.split(' ')
- self.assertEqual(image_call_args[0], 'python')
- self.assertEqual(image_call_args[1].split('/')[-1], 'image_chromeos.pyc')
- image_call_args = image_call_args[2:]
- self.assertEqual(image_call_args, [
- '--chromeos_root=/tmp/chromeos-root', '--image=lumpy_chromeos_image',
- '--image_args=', '--remote=lumpy1', '--logging_level=average',
- '--board=lumpy'
- ])
- self.assertEqual(mock_run_croscmd.call_args[0][0], 'reboot && exit')
-
- # Test 4: Everything works properly. Trybot image type.
- ResetValues()
- LABEL_LUMPY.image_type = 'trybot'
- mock_run_cmd.return_value = 0
- self.mm.ImageMachine(machine, LABEL_LUMPY)
- self.assertEqual(mock_checksummer.call_count, 0)
- self.assertEqual(mock_run_croscmd.call_count, 0)
- self.assertEqual(mock_sleep.call_count, 0)
-
- def test_compute_common_checksum(self):
- self.mm.machine_checksum = {}
- self.mm.ComputeCommonCheckSum(LABEL_LUMPY)
- self.assertEqual(self.mm.machine_checksum['lumpy'], 'lumpy123')
- self.assertEqual(len(self.mm.machine_checksum), 1)
-
- self.mm.machine_checksum = {}
- self.assertRaisesRegex(machine_manager.BadChecksum, r'daisy.*\n.*lumpy',
- self.mm.ComputeCommonCheckSum, LABEL_MIX)
-
- def test_compute_common_checksum_string(self):
- self.mm.machine_checksum_string = {}
- self.mm.ComputeCommonCheckSumString(LABEL_LUMPY)
- self.assertEqual(len(self.mm.machine_checksum_string), 1)
- self.assertEqual(self.mm.machine_checksum_string['lumpy'],
- 'lumpy_checksum_str')
-
- self.mm.machine_checksum_string = {}
- self.mm.ComputeCommonCheckSumString(LABEL_MIX)
- self.assertEqual(len(self.mm.machine_checksum_string), 1)
- self.assertEqual(self.mm.machine_checksum_string['mix'],
- 'daisy_checksum_str')
-
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
- def test_try_to_lock_machine(self, mock_cros_runcmd):
- mock_cros_runcmd.return_value = [0, 'false_lock_checksum', '']
- self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
- self.mm._machines = []
- self.mm._TryToLockMachine(self.mock_lumpy1)
- self.assertEqual(len(self.mm._machines), 1)
- self.assertEqual(self.mm._machines[0], self.mock_lumpy1)
- self.assertEqual(self.mock_lumpy1.checksum, 'false_lock_checksum')
- self.assertEqual(mock_cros_runcmd.call_count, 1)
- cmd_str = mock_cros_runcmd.call_args[0][0]
- self.assertEqual(cmd_str, 'cat /usr/local/osimage_checksum_file')
- args_dict = mock_cros_runcmd.call_args[1]
- self.assertEqual(len(args_dict), 2)
- self.assertEqual(args_dict['machine'], self.mock_lumpy1.name)
- self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
-
- @mock.patch.object(machine_manager, 'CrosMachine')
- def test_add_machine(self, mock_machine):
-
- mock_machine.machine_checksum = 'daisy123'
- self.assertEqual(len(self.mm._all_machines), 5)
- self.mm.AddMachine('daisy3')
- self.assertEqual(len(self.mm._all_machines), 6)
-
- self.assertRaises(Exception, self.mm.AddMachine, 'lumpy1')
-
- def test_remove_machine(self):
- self.mm._machines = self.mm._all_machines
- self.assertTrue(self.mock_lumpy2 in self.mm._machines)
- self.mm.RemoveMachine(self.mock_lumpy2.name)
- self.assertFalse(self.mock_lumpy2 in self.mm._machines)
-
- def test_force_same_image_to_all_machines(self):
- self.image_log = []
-
- def FakeImageMachine(machine, label_arg):
- image = label_arg.chromeos_image
- self.image_log.append('Pushed %s onto %s' % (image, machine.name))
-
- def FakeSetUpChecksumInfo():
- pass
-
- self.mm.ImageMachine = FakeImageMachine
- self.mock_lumpy1.SetUpChecksumInfo = FakeSetUpChecksumInfo
- self.mock_lumpy2.SetUpChecksumInfo = FakeSetUpChecksumInfo
- self.mock_lumpy3.SetUpChecksumInfo = FakeSetUpChecksumInfo
-
- self.mm.ForceSameImageToAllMachines(LABEL_LUMPY)
- self.assertEqual(len(self.image_log), 3)
- self.assertEqual(self.image_log[0],
- 'Pushed lumpy_chromeos_image onto lumpy1')
- self.assertEqual(self.image_log[1],
- 'Pushed lumpy_chromeos_image onto lumpy2')
- self.assertEqual(self.image_log[2],
- 'Pushed lumpy_chromeos_image onto lumpy3')
-
- @mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
- @mock.patch.object(hashlib, 'md5')
- def test_acquire_machine(self, mock_md5, mock_checksum):
-
- self.msgs = []
- self.log_fatal_msgs = []
-
- def FakeLock(machine):
- self.msgs.append('Tried to lock %s' % machine.name)
-
- def FakeLogFatal(msg):
- self.log_fatal_msgs.append(msg)
-
- self.mm._TryToLockMachine = FakeLock
- self.mm.logger.LogFatal = FakeLogFatal
-
- mock_md5.return_value = '123456'
- mock_checksum.return_value = 'fake_md5_checksum'
-
- self.mm._machines = self.mm._all_machines
- self.mock_lumpy1.locked = True
- self.mock_lumpy2.locked = True
- self.mock_lumpy3.locked = False
- self.mock_lumpy3.checksum = 'fake_md5_checksum'
- self.mock_daisy1.locked = True
- self.mock_daisy2.locked = False
- self.mock_daisy2.checksum = 'fake_md5_checksum'
-
- self.mock_lumpy1.released_time = time.time()
- self.mock_lumpy2.released_time = time.time()
- self.mock_lumpy3.released_time = time.time()
- self.mock_daisy1.released_time = time.time()
- self.mock_daisy2.released_time = time.time()
-
- # Test 1. Basic test. Acquire lumpy3.
- self.mm.AcquireMachine(LABEL_LUMPY)
- m = self.mock_lumpy1
- self.assertEqual(m, self.mock_lumpy1)
- self.assertTrue(self.mock_lumpy1.locked)
- self.assertEqual(mock_md5.call_count, 0)
- self.assertEqual(self.msgs, [
- 'Tried to lock lumpy1', 'Tried to lock lumpy2', 'Tried to lock lumpy3'
- ])
-
- # Test the second return statment (machine is unlocked, has no checksum)
- save_locked = self.mock_lumpy1.locked
- self.mock_lumpy1.locked = False
- self.mock_lumpy1.checksum = None
- m = self.mm.AcquireMachine(LABEL_LUMPY)
- self.assertEqual(m, self.mock_lumpy1)
- self.assertTrue(self.mock_lumpy1.locked)
-
- # Test the third return statement:
- # - machine is unlocked
- # - checksums don't match
- # - current time minus release time is > 20.
- self.mock_lumpy1.locked = False
- self.mock_lumpy1.checksum = '123'
- self.mock_lumpy1.released_time = time.time() - 8
- m = self.mm.AcquireMachine(LABEL_LUMPY)
- self.assertEqual(m, self.mock_lumpy1)
- self.assertTrue(self.mock_lumpy1.locked)
-
- # Test all machines are already locked.
- m = self.mm.AcquireMachine(LABEL_LUMPY)
- self.assertIsNone(m)
-
- # Restore values of mock_lumpy1, so other tests succeed.
- self.mock_lumpy1.locked = save_locked
- self.mock_lumpy1.checksum = '123'
-
- def test_get_available_machines(self):
- self.mm._machines = self.mm._all_machines
-
- machine_list = self.mm.GetAvailableMachines()
- self.assertEqual(machine_list, self.mm._all_machines)
-
- machine_list = self.mm.GetAvailableMachines(LABEL_MIX)
- self.assertEqual(machine_list,
- [self.mock_daisy1, self.mock_daisy2, self.mock_lumpy3])
-
- machine_list = self.mm.GetAvailableMachines(LABEL_LUMPY)
- self.assertEqual(machine_list,
- [self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3])
-
- def test_get_machines(self):
- machine_list = self.mm.GetMachines()
- self.assertEqual(machine_list, self.mm._all_machines)
-
- machine_list = self.mm.GetMachines(LABEL_MIX)
- self.assertEqual(machine_list,
- [self.mock_daisy1, self.mock_daisy2, self.mock_lumpy3])
-
- machine_list = self.mm.GetMachines(LABEL_LUMPY)
- self.assertEqual(machine_list,
- [self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3])
-
- def test_release_machines(self):
-
- self.mm._machines = [self.mock_lumpy1, self.mock_daisy2]
-
- self.mock_lumpy1.locked = True
- self.mock_daisy2.locked = True
-
- self.assertTrue(self.mock_lumpy1.locked)
- self.mm.ReleaseMachine(self.mock_lumpy1)
- self.assertFalse(self.mock_lumpy1.locked)
- self.assertEqual(self.mock_lumpy1.status, 'Available')
-
- self.assertTrue(self.mock_daisy2.locked)
- self.mm.ReleaseMachine(self.mock_daisy2)
- self.assertFalse(self.mock_daisy2.locked)
- self.assertEqual(self.mock_daisy2.status, 'Available')
-
- # Test double-relase...
- self.assertRaises(AssertionError, self.mm.ReleaseMachine, self.mock_lumpy1)
-
- def test_cleanup(self):
- self.mock_logger.reset_mock()
- self.mm.Cleanup()
- self.assertEqual(self.mock_logger.call_count, 0)
-
- OUTPUT_STR = ('Machine Status:\nMachine Thread '
- 'Lock Status Checksum'
- ' \nlumpy1 test '
- 'run True PENDING 123'
- ' \nlumpy2 '
- 'test run False PENDING 123'
- ' \nlumpy3 '
- 'test run False PENDING 123'
- ' \ndaisy1 '
- 'test run False PENDING 678'
- ' \ndaisy2 '
- 'test run True PENDING 678'
- ' ')
-
- def test_as_string(self):
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
mock_logger = mock.Mock(spec=logger.Logger)
- bench = Benchmark(
- 'page_cycler_v2.netsim.top_10', # name
- 'page_cycler_v2.netsim.top_10', # test_name
- '', # test_args
- 1, # iteratins
- False, # rm_chroot_tmp
- '', # perf_args
- suite='telemetry_Crosperf') # suite
+ mock_lumpy1 = mock.Mock(spec=machine_manager.CrosMachine)
+ mock_lumpy2 = mock.Mock(spec=machine_manager.CrosMachine)
+ mock_lumpy3 = mock.Mock(spec=machine_manager.CrosMachine)
+ mock_lumpy4 = mock.Mock(spec=machine_manager.CrosMachine)
+ mock_daisy1 = mock.Mock(spec=machine_manager.CrosMachine)
+ mock_daisy2 = mock.Mock(spec=machine_manager.CrosMachine)
- test_run = MockBenchmarkRun('test run', bench, LABEL_LUMPY, 1, [], self.mm,
- mock_logger, 'verbose', '', {})
+ @mock.patch.object(os.path, "isdir")
- self.mm._machines = [
- self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3, self.mock_daisy1,
- self.mock_daisy2
- ]
+ # pylint: disable=arguments-differ
+ def setUp(self, mock_isdir):
- self.mock_lumpy1.test_run = test_run
- self.mock_lumpy2.test_run = test_run
- self.mock_lumpy3.test_run = test_run
- self.mock_daisy1.test_run = test_run
- self.mock_daisy2.test_run = test_run
+ mock_isdir.return_value = True
+ self.mm = machine_manager.MachineManager(
+ "/usr/local/chromeos",
+ 0,
+ "average",
+ None,
+ self.mock_cmd_exec,
+ self.mock_logger,
+ )
- self.mock_lumpy1.locked = True
- self.mock_lumpy2.locked = False
- self.mock_lumpy3.locked = False
- self.mock_daisy1.locked = False
- self.mock_daisy2.locked = True
+ self.mock_lumpy1.name = "lumpy1"
+ self.mock_lumpy2.name = "lumpy2"
+ self.mock_lumpy3.name = "lumpy3"
+ self.mock_lumpy4.name = "lumpy4"
+ self.mock_daisy1.name = "daisy1"
+ self.mock_daisy2.name = "daisy2"
+ self.mock_lumpy1.machine_checksum = "lumpy123"
+ self.mock_lumpy2.machine_checksum = "lumpy123"
+ self.mock_lumpy3.machine_checksum = "lumpy123"
+ self.mock_lumpy4.machine_checksum = "lumpy123"
+ self.mock_daisy1.machine_checksum = "daisy12"
+ self.mock_daisy2.machine_checksum = "daisy12"
+ self.mock_lumpy1.checksum_string = "lumpy_checksum_str"
+ self.mock_lumpy2.checksum_string = "lumpy_checksum_str"
+ self.mock_lumpy3.checksum_string = "lumpy_checksum_str"
+ self.mock_lumpy4.checksum_string = "lumpy_checksum_str"
+ self.mock_daisy1.checksum_string = "daisy_checksum_str"
+ self.mock_daisy2.checksum_string = "daisy_checksum_str"
+ self.mock_lumpy1.cpuinfo = "lumpy_cpu_info"
+ self.mock_lumpy2.cpuinfo = "lumpy_cpu_info"
+ self.mock_lumpy3.cpuinfo = "lumpy_cpu_info"
+ self.mock_lumpy4.cpuinfo = "lumpy_cpu_info"
+ self.mock_daisy1.cpuinfo = "daisy_cpu_info"
+ self.mock_daisy2.cpuinfo = "daisy_cpu_info"
+ self.mm._all_machines.append(self.mock_daisy1)
+ self.mm._all_machines.append(self.mock_daisy2)
+ self.mm._all_machines.append(self.mock_lumpy1)
+ self.mm._all_machines.append(self.mock_lumpy2)
+ self.mm._all_machines.append(self.mock_lumpy3)
- self.mock_lumpy1.checksum = '123'
- self.mock_lumpy2.checksum = '123'
- self.mock_lumpy3.checksum = '123'
- self.mock_daisy1.checksum = '678'
- self.mock_daisy2.checksum = '678'
+ def testGetMachines(self):
+ manager = MyMachineManager(CHROMEOS_ROOT)
+ for m in MACHINE_NAMES:
+ manager.AddMachine(m)
+ names = [m.name for m in manager.GetMachines(LABEL_LUMPY)]
+ self.assertEqual(names, ["lumpy1", "lumpy2", "lumpy3"])
- output = self.mm.AsString()
- self.assertEqual(output, self.OUTPUT_STR)
+ def testGetAvailableMachines(self):
+ manager = MyMachineManager(CHROMEOS_ROOT)
+ for m in MACHINE_NAMES:
+ manager.AddMachine(m)
+ for m in manager._all_machines:
+ if int(m.name[-1]) % 2:
+ manager._TryToLockMachine(m)
+ names = [m.name for m in manager.GetAvailableMachines(LABEL_LUMPY)]
+ self.assertEqual(names, ["lumpy1", "lumpy3"])
- def test_get_all_cpu_info(self):
- info = self.mm.GetAllCPUInfo([LABEL_LUMPY, LABEL_MIX])
- self.assertEqual(
- info, 'lumpy\n-------------------\nlumpy_cpu_info\n\n\nmix\n-'
- '------------------\ndaisy_cpu_info\n\n\n')
+ @mock.patch.object(time, "sleep")
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommand")
+ @mock.patch.object(command_executer.CommandExecuter, "CrosRunCommand")
+ @mock.patch.object(image_checksummer.ImageChecksummer, "Checksum")
+ def test_image_machine(
+ self, mock_checksummer, mock_run_croscmd, mock_run_cmd, mock_sleep
+ ):
+ def FakeMD5Checksum(_input_str):
+ return "machine_fake_md5_checksum"
+
+ self.fake_logger_count = 0
+ self.fake_logger_msgs = []
+
+ def FakeLogOutput(msg):
+ self.fake_logger_count += 1
+ self.fake_logger_msgs.append(msg)
+
+ def ResetValues():
+ self.fake_logger_count = 0
+ self.fake_logger_msgs = []
+ mock_run_cmd.reset_mock()
+ mock_run_croscmd.reset_mock()
+ mock_checksummer.reset_mock()
+ mock_sleep.reset_mock()
+ machine.checksum = "fake_md5_checksum"
+ self.mm.checksum = None
+ self.mm.num_reimages = 0
+
+ self.mock_cmd_exec.CrosRunCommand = mock_run_croscmd
+ self.mock_cmd_exec.RunCommand = mock_run_cmd
+
+ self.mm.logger.LogOutput = FakeLogOutput
+ machine = self.mock_lumpy1
+ machine._GetMD5Checksum = FakeMD5Checksum
+ machine.checksum = "fake_md5_checksum"
+ mock_checksummer.return_value = "fake_md5_checksum"
+ self.mock_cmd_exec.log_level = "verbose"
+
+ test_flag.SetTestMode(True)
+ # Test 1: label.image_type == "local"
+ LABEL_LUMPY.image_type = "local"
+ self.mm.ImageMachine(machine, LABEL_LUMPY)
+ self.assertEqual(mock_run_cmd.call_count, 0)
+ self.assertEqual(mock_run_croscmd.call_count, 0)
+
+ # Test 2: label.image_type == "trybot"
+ ResetValues()
+ LABEL_LUMPY.image_type = "trybot"
+ mock_run_cmd.return_value = 0
+ self.mm.ImageMachine(machine, LABEL_LUMPY)
+ self.assertEqual(mock_run_croscmd.call_count, 0)
+ self.assertEqual(mock_checksummer.call_count, 0)
+
+ # Test 3: label.image_type is neither local nor trybot; retval from
+ # RunCommand is 1, i.e. image_chromeos fails...
+ ResetValues()
+ LABEL_LUMPY.image_type = "other"
+ mock_run_cmd.return_value = 1
+ try:
+ self.mm.ImageMachine(machine, LABEL_LUMPY)
+ except RuntimeError:
+ self.assertEqual(mock_checksummer.call_count, 0)
+ self.assertEqual(mock_run_cmd.call_count, 2)
+ self.assertEqual(mock_run_croscmd.call_count, 1)
+ self.assertEqual(mock_sleep.call_count, 1)
+ image_call_args_str = mock_run_cmd.call_args[0][0]
+ image_call_args = image_call_args_str.split(" ")
+ self.assertEqual(image_call_args[0], "python")
+ self.assertEqual(
+ image_call_args[1].split("/")[-1], "image_chromeos.pyc"
+ )
+ image_call_args = image_call_args[2:]
+ self.assertEqual(
+ image_call_args,
+ [
+ "--chromeos_root=/tmp/chromeos-root",
+ "--image=lumpy_chromeos_image",
+ "--image_args=",
+ "--remote=lumpy1",
+ "--logging_level=average",
+ "--board=lumpy",
+ ],
+ )
+ self.assertEqual(mock_run_croscmd.call_args[0][0], "reboot && exit")
+
+ # Test 4: Everything works properly. Trybot image type.
+ ResetValues()
+ LABEL_LUMPY.image_type = "trybot"
+ mock_run_cmd.return_value = 0
+ self.mm.ImageMachine(machine, LABEL_LUMPY)
+ self.assertEqual(mock_checksummer.call_count, 0)
+ self.assertEqual(mock_run_croscmd.call_count, 0)
+ self.assertEqual(mock_sleep.call_count, 0)
+
+ def test_compute_common_checksum(self):
+ self.mm.machine_checksum = {}
+ self.mm.ComputeCommonCheckSum(LABEL_LUMPY)
+ self.assertEqual(self.mm.machine_checksum["lumpy"], "lumpy123")
+ self.assertEqual(len(self.mm.machine_checksum), 1)
+
+ self.mm.machine_checksum = {}
+ self.assertRaisesRegex(
+ machine_manager.BadChecksum,
+ r"daisy.*\n.*lumpy",
+ self.mm.ComputeCommonCheckSum,
+ LABEL_MIX,
+ )
+
+ def test_compute_common_checksum_string(self):
+ self.mm.machine_checksum_string = {}
+ self.mm.ComputeCommonCheckSumString(LABEL_LUMPY)
+ self.assertEqual(len(self.mm.machine_checksum_string), 1)
+ self.assertEqual(
+ self.mm.machine_checksum_string["lumpy"], "lumpy_checksum_str"
+ )
+
+ self.mm.machine_checksum_string = {}
+ self.mm.ComputeCommonCheckSumString(LABEL_MIX)
+ self.assertEqual(len(self.mm.machine_checksum_string), 1)
+ self.assertEqual(
+ self.mm.machine_checksum_string["mix"], "daisy_checksum_str"
+ )
+
+ @mock.patch.object(
+ command_executer.CommandExecuter, "CrosRunCommandWOutput"
+ )
+ def test_try_to_lock_machine(self, mock_cros_runcmd):
+ mock_cros_runcmd.return_value = [0, "false_lock_checksum", ""]
+ self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
+ self.mm._machines = []
+ self.mm._TryToLockMachine(self.mock_lumpy1)
+ self.assertEqual(len(self.mm._machines), 1)
+ self.assertEqual(self.mm._machines[0], self.mock_lumpy1)
+ self.assertEqual(self.mock_lumpy1.checksum, "false_lock_checksum")
+ self.assertEqual(mock_cros_runcmd.call_count, 1)
+ cmd_str = mock_cros_runcmd.call_args[0][0]
+ self.assertEqual(cmd_str, "cat /usr/local/osimage_checksum_file")
+ args_dict = mock_cros_runcmd.call_args[1]
+ self.assertEqual(len(args_dict), 2)
+ self.assertEqual(args_dict["machine"], self.mock_lumpy1.name)
+ self.assertEqual(args_dict["chromeos_root"], "/usr/local/chromeos")
+
+ @mock.patch.object(machine_manager, "CrosMachine")
+ def test_add_machine(self, mock_machine):
+
+ mock_machine.machine_checksum = "daisy123"
+ self.assertEqual(len(self.mm._all_machines), 5)
+ self.mm.AddMachine("daisy3")
+ self.assertEqual(len(self.mm._all_machines), 6)
+
+ self.assertRaises(Exception, self.mm.AddMachine, "lumpy1")
+
+ def test_remove_machine(self):
+ self.mm._machines = self.mm._all_machines
+ self.assertTrue(self.mock_lumpy2 in self.mm._machines)
+ self.mm.RemoveMachine(self.mock_lumpy2.name)
+ self.assertFalse(self.mock_lumpy2 in self.mm._machines)
+
+ def test_force_same_image_to_all_machines(self):
+ self.image_log = []
+
+ def FakeImageMachine(machine, label_arg):
+ image = label_arg.chromeos_image
+ self.image_log.append("Pushed %s onto %s" % (image, machine.name))
+
+ def FakeSetUpChecksumInfo():
+ pass
+
+ self.mm.ImageMachine = FakeImageMachine
+ self.mock_lumpy1.SetUpChecksumInfo = FakeSetUpChecksumInfo
+ self.mock_lumpy2.SetUpChecksumInfo = FakeSetUpChecksumInfo
+ self.mock_lumpy3.SetUpChecksumInfo = FakeSetUpChecksumInfo
+
+ self.mm.ForceSameImageToAllMachines(LABEL_LUMPY)
+ self.assertEqual(len(self.image_log), 3)
+ self.assertEqual(
+ self.image_log[0], "Pushed lumpy_chromeos_image onto lumpy1"
+ )
+ self.assertEqual(
+ self.image_log[1], "Pushed lumpy_chromeos_image onto lumpy2"
+ )
+ self.assertEqual(
+ self.image_log[2], "Pushed lumpy_chromeos_image onto lumpy3"
+ )
+
+ @mock.patch.object(image_checksummer.ImageChecksummer, "Checksum")
+ @mock.patch.object(hashlib, "md5")
+ def test_acquire_machine(self, mock_md5, mock_checksum):
+
+ self.msgs = []
+ self.log_fatal_msgs = []
+
+ def FakeLock(machine):
+ self.msgs.append("Tried to lock %s" % machine.name)
+
+ def FakeLogFatal(msg):
+ self.log_fatal_msgs.append(msg)
+
+ self.mm._TryToLockMachine = FakeLock
+ self.mm.logger.LogFatal = FakeLogFatal
+
+ mock_md5.return_value = "123456"
+ mock_checksum.return_value = "fake_md5_checksum"
+
+ self.mm._machines = self.mm._all_machines
+ self.mock_lumpy1.locked = True
+ self.mock_lumpy2.locked = True
+ self.mock_lumpy3.locked = False
+ self.mock_lumpy3.checksum = "fake_md5_checksum"
+ self.mock_daisy1.locked = True
+ self.mock_daisy2.locked = False
+ self.mock_daisy2.checksum = "fake_md5_checksum"
+
+ self.mock_lumpy1.released_time = time.time()
+ self.mock_lumpy2.released_time = time.time()
+ self.mock_lumpy3.released_time = time.time()
+ self.mock_daisy1.released_time = time.time()
+ self.mock_daisy2.released_time = time.time()
+
+ # Test 1. Basic test. Acquire lumpy3.
+ self.mm.AcquireMachine(LABEL_LUMPY)
+ m = self.mock_lumpy1
+ self.assertEqual(m, self.mock_lumpy1)
+ self.assertTrue(self.mock_lumpy1.locked)
+ self.assertEqual(mock_md5.call_count, 0)
+ self.assertEqual(
+ self.msgs,
+ [
+ "Tried to lock lumpy1",
+ "Tried to lock lumpy2",
+ "Tried to lock lumpy3",
+ ],
+ )
+
+ # Test the second return statement (machine is unlocked, has no checksum)
+ save_locked = self.mock_lumpy1.locked
+ self.mock_lumpy1.locked = False
+ self.mock_lumpy1.checksum = None
+ m = self.mm.AcquireMachine(LABEL_LUMPY)
+ self.assertEqual(m, self.mock_lumpy1)
+ self.assertTrue(self.mock_lumpy1.locked)
+
+ # Test the third return statement:
+ # - machine is unlocked
+ # - checksums don't match
+ # - current time minus release time is > 20.
+ self.mock_lumpy1.locked = False
+ self.mock_lumpy1.checksum = "123"
+ self.mock_lumpy1.released_time = time.time() - 8
+ m = self.mm.AcquireMachine(LABEL_LUMPY)
+ self.assertEqual(m, self.mock_lumpy1)
+ self.assertTrue(self.mock_lumpy1.locked)
+
+ # Test all machines are already locked.
+ m = self.mm.AcquireMachine(LABEL_LUMPY)
+ self.assertIsNone(m)
+
+ # Restore values of mock_lumpy1, so other tests succeed.
+ self.mock_lumpy1.locked = save_locked
+ self.mock_lumpy1.checksum = "123"
+
+ def test_get_available_machines(self):
+ self.mm._machines = self.mm._all_machines
+
+ machine_list = self.mm.GetAvailableMachines()
+ self.assertEqual(machine_list, self.mm._all_machines)
+
+ machine_list = self.mm.GetAvailableMachines(LABEL_MIX)
+ self.assertEqual(
+ machine_list, [self.mock_daisy1, self.mock_daisy2, self.mock_lumpy3]
+ )
+
+ machine_list = self.mm.GetAvailableMachines(LABEL_LUMPY)
+ self.assertEqual(
+ machine_list, [self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3]
+ )
+
+ def test_get_machines(self):
+ machine_list = self.mm.GetMachines()
+ self.assertEqual(machine_list, self.mm._all_machines)
+
+ machine_list = self.mm.GetMachines(LABEL_MIX)
+ self.assertEqual(
+ machine_list, [self.mock_daisy1, self.mock_daisy2, self.mock_lumpy3]
+ )
+
+ machine_list = self.mm.GetMachines(LABEL_LUMPY)
+ self.assertEqual(
+ machine_list, [self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3]
+ )
+
+ def test_release_machines(self):
+
+ self.mm._machines = [self.mock_lumpy1, self.mock_daisy2]
+
+ self.mock_lumpy1.locked = True
+ self.mock_daisy2.locked = True
+
+ self.assertTrue(self.mock_lumpy1.locked)
+ self.mm.ReleaseMachine(self.mock_lumpy1)
+ self.assertFalse(self.mock_lumpy1.locked)
+ self.assertEqual(self.mock_lumpy1.status, "Available")
+
+ self.assertTrue(self.mock_daisy2.locked)
+ self.mm.ReleaseMachine(self.mock_daisy2)
+ self.assertFalse(self.mock_daisy2.locked)
+ self.assertEqual(self.mock_daisy2.status, "Available")
+
+ # Test double-release...
+ self.assertRaises(
+ AssertionError, self.mm.ReleaseMachine, self.mock_lumpy1
+ )
+
+ def test_cleanup(self):
+ self.mock_logger.reset_mock()
+ self.mm.Cleanup()
+ self.assertEqual(self.mock_logger.call_count, 0)
+
+ OUTPUT_STR = (
+ "Machine Status:\nMachine Thread "
+ "Lock Status Checksum"
+ " \nlumpy1 test "
+ "run True PENDING 123"
+ " \nlumpy2 "
+ "test run False PENDING 123"
+ " \nlumpy3 "
+ "test run False PENDING 123"
+ " \ndaisy1 "
+ "test run False PENDING 678"
+ " \ndaisy2 "
+ "test run True PENDING 678"
+ " "
+ )
+
+ def test_as_string(self):
+
+ mock_logger = mock.Mock(spec=logger.Logger)
+
+ bench = Benchmark(
+ "page_cycler_v2.netsim.top_10", # name
+ "page_cycler_v2.netsim.top_10", # test_name
+ "", # test_args
+ 1, # iterations
+ False, # rm_chroot_tmp
+ "", # perf_args
+ suite="telemetry_Crosperf",
+ ) # suite
+
+ test_run = MockBenchmarkRun(
+ "test run",
+ bench,
+ LABEL_LUMPY,
+ 1,
+ [],
+ self.mm,
+ mock_logger,
+ "verbose",
+ "",
+ {},
+ )
+
+ self.mm._machines = [
+ self.mock_lumpy1,
+ self.mock_lumpy2,
+ self.mock_lumpy3,
+ self.mock_daisy1,
+ self.mock_daisy2,
+ ]
+
+ self.mock_lumpy1.test_run = test_run
+ self.mock_lumpy2.test_run = test_run
+ self.mock_lumpy3.test_run = test_run
+ self.mock_daisy1.test_run = test_run
+ self.mock_daisy2.test_run = test_run
+
+ self.mock_lumpy1.locked = True
+ self.mock_lumpy2.locked = False
+ self.mock_lumpy3.locked = False
+ self.mock_daisy1.locked = False
+ self.mock_daisy2.locked = True
+
+ self.mock_lumpy1.checksum = "123"
+ self.mock_lumpy2.checksum = "123"
+ self.mock_lumpy3.checksum = "123"
+ self.mock_daisy1.checksum = "678"
+ self.mock_daisy2.checksum = "678"
+
+ output = self.mm.AsString()
+ self.assertEqual(output, self.OUTPUT_STR)
+
+ def test_get_all_cpu_info(self):
+ info = self.mm.GetAllCPUInfo([LABEL_LUMPY, LABEL_MIX])
+ self.assertEqual(
+ info,
+ "lumpy\n-------------------\nlumpy_cpu_info\n\n\nmix\n-"
+ "------------------\ndaisy_cpu_info\n\n\n",
+ )
MEMINFO_STRING = """MemTotal: 3990332 kB
@@ -580,35 +661,37 @@
power management:
"""
-CHECKSUM_STRING = ('processor: 0vendor_id: GenuineIntelcpu family: 6model: '
- '42model name: Intel(R) Celeron(R) CPU 867 @ '
- '1.30GHzstepping: 7microcode: 0x25cache size: 2048 '
- 'KBphysical id: 0siblings: 2cpu cores: 2'
- 'fpu: yesfpu_exception: yescpuid level: '
- '13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 apic sep'
- ' mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse '
- 'sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc '
- 'arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc '
- 'aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 '
- 'ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt '
- 'tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts '
- 'dts tpr_shadow vnmi flexpriority ept vpidclflush size: '
- '64cache_alignment: 64address sizes: 36 bits physical, 48 '
- 'bits virtualpower management:processor: 1vendor_id: '
- 'GenuineIntelcpu family: 6model: 42model name: Intel(R) '
- 'Celeron(R) CPU 867 @ 1.30GHzstepping: 7microcode: 0x25cache'
- ' size: 2048 KBphysical id: 0siblings: 2cpu cores:'
- ' 2fpu: yesfpu_exception: yescpuid'
- ' level: 13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 '
- 'apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx '
- 'fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm '
- 'constant_tsc arch_perfmon pebs bts rep_good nopl xtopology '
- 'nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl '
- 'vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic '
- 'popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt '
- 'pln pts dts tpr_shadow vnmi flexpriority ept vpidclflush '
- 'size: 64cache_alignment: 64address sizes: 36 bits physical,'
- ' 48 bits virtualpower management: 4194304')
+CHECKSUM_STRING = (
+ "processor: 0vendor_id: GenuineIntelcpu family: 6model: "
+ "42model name: Intel(R) Celeron(R) CPU 867 @ "
+ "1.30GHzstepping: 7microcode: 0x25cache size: 2048 "
+ "KBphysical id: 0siblings: 2cpu cores: 2"
+ "fpu: yesfpu_exception: yescpuid level: "
+ "13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 apic sep"
+ " mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse "
+ "sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc "
+ "arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc "
+ "aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 "
+ "ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt "
+ "tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts "
+ "dts tpr_shadow vnmi flexpriority ept vpidclflush size: "
+ "64cache_alignment: 64address sizes: 36 bits physical, 48 "
+ "bits virtualpower management:processor: 1vendor_id: "
+ "GenuineIntelcpu family: 6model: 42model name: Intel(R) "
+ "Celeron(R) CPU 867 @ 1.30GHzstepping: 7microcode: 0x25cache"
+ " size: 2048 KBphysical id: 0siblings: 2cpu cores:"
+ " 2fpu: yesfpu_exception: yescpuid"
+ " level: 13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 "
+ "apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx "
+ "fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm "
+ "constant_tsc arch_perfmon pebs bts rep_good nopl xtopology "
+ "nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl "
+ "vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic "
+ "popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt "
+ "pln pts dts tpr_shadow vnmi flexpriority ept vpidclflush "
+ "size: 64cache_alignment: 64address sizes: 36 bits physical,"
+ " 48 bits virtualpower management: 4194304"
+)
DUMP_VPD_STRING = """
"PBA_SN"="Pba.txt"
@@ -667,187 +750,212 @@
class CrosMachineTest(unittest.TestCase):
- """Test for CrosMachine class."""
+ """Test for CrosMachine class."""
- mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
- @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
- def test_init(self, mock_setup):
+ @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ def test_init(self, mock_setup):
- cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
- 'average', self.mock_cmd_exec)
- self.assertEqual(mock_setup.call_count, 1)
- self.assertEqual(cm.chromeos_root, '/usr/local/chromeos')
- self.assertEqual(cm.log_level, 'average')
+ cm = machine_manager.CrosMachine(
+ "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec
+ )
+ self.assertEqual(mock_setup.call_count, 1)
+ self.assertEqual(cm.chromeos_root, "/usr/local/chromeos")
+ self.assertEqual(cm.log_level, "average")
- @mock.patch.object(machine_manager.CrosMachine, 'IsReachable')
- @mock.patch.object(machine_manager.CrosMachine, '_GetMemoryInfo')
- @mock.patch.object(machine_manager.CrosMachine, '_GetCPUInfo')
- @mock.patch.object(machine_manager.CrosMachine,
- '_ComputeMachineChecksumString')
- @mock.patch.object(machine_manager.CrosMachine, '_GetMachineID')
- @mock.patch.object(machine_manager.CrosMachine, '_GetMD5Checksum')
- def test_setup_checksum_info(self, mock_md5sum, mock_machineid,
- mock_checkstring, mock_cpuinfo, mock_meminfo,
- mock_isreachable):
+ @mock.patch.object(machine_manager.CrosMachine, "IsReachable")
+ @mock.patch.object(machine_manager.CrosMachine, "_GetMemoryInfo")
+ @mock.patch.object(machine_manager.CrosMachine, "_GetCPUInfo")
+ @mock.patch.object(
+ machine_manager.CrosMachine, "_ComputeMachineChecksumString"
+ )
+ @mock.patch.object(machine_manager.CrosMachine, "_GetMachineID")
+ @mock.patch.object(machine_manager.CrosMachine, "_GetMD5Checksum")
+ def test_setup_checksum_info(
+ self,
+ mock_md5sum,
+ mock_machineid,
+ mock_checkstring,
+ mock_cpuinfo,
+ mock_meminfo,
+ mock_isreachable,
+ ):
- # Test 1. Machine is not reachable; SetUpChecksumInfo is called via
- # __init__.
- mock_isreachable.return_value = False
- mock_md5sum.return_value = 'md5_checksum'
- cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
- 'average', self.mock_cmd_exec)
- cm.checksum_string = 'This is a checksum string.'
- cm.machine_id = 'machine_id1'
- self.assertEqual(mock_isreachable.call_count, 1)
- self.assertIsNone(cm.machine_checksum)
- self.assertEqual(mock_meminfo.call_count, 0)
+ # Test 1. Machine is not reachable; SetUpChecksumInfo is called via
+ # __init__.
+ mock_isreachable.return_value = False
+ mock_md5sum.return_value = "md5_checksum"
+ cm = machine_manager.CrosMachine(
+ "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec
+ )
+ cm.checksum_string = "This is a checksum string."
+ cm.machine_id = "machine_id1"
+ self.assertEqual(mock_isreachable.call_count, 1)
+ self.assertIsNone(cm.machine_checksum)
+ self.assertEqual(mock_meminfo.call_count, 0)
- # Test 2. Machine is reachable. Call explicitly.
- mock_isreachable.return_value = True
- cm.checksum_string = 'This is a checksum string.'
- cm.machine_id = 'machine_id1'
- cm.SetUpChecksumInfo()
- self.assertEqual(mock_isreachable.call_count, 2)
- self.assertEqual(mock_meminfo.call_count, 1)
- self.assertEqual(mock_cpuinfo.call_count, 1)
- self.assertEqual(mock_checkstring.call_count, 1)
- self.assertEqual(mock_machineid.call_count, 1)
- self.assertEqual(mock_md5sum.call_count, 2)
- self.assertEqual(cm.machine_checksum, 'md5_checksum')
- self.assertEqual(cm.machine_id_checksum, 'md5_checksum')
- self.assertEqual(mock_md5sum.call_args_list[0][0][0],
- 'This is a checksum string.')
- self.assertEqual(mock_md5sum.call_args_list[1][0][0], 'machine_id1')
+ # Test 2. Machine is reachable. Call explicitly.
+ mock_isreachable.return_value = True
+ cm.checksum_string = "This is a checksum string."
+ cm.machine_id = "machine_id1"
+ cm.SetUpChecksumInfo()
+ self.assertEqual(mock_isreachable.call_count, 2)
+ self.assertEqual(mock_meminfo.call_count, 1)
+ self.assertEqual(mock_cpuinfo.call_count, 1)
+ self.assertEqual(mock_checkstring.call_count, 1)
+ self.assertEqual(mock_machineid.call_count, 1)
+ self.assertEqual(mock_md5sum.call_count, 2)
+ self.assertEqual(cm.machine_checksum, "md5_checksum")
+ self.assertEqual(cm.machine_id_checksum, "md5_checksum")
+ self.assertEqual(
+ mock_md5sum.call_args_list[0][0][0], "This is a checksum string."
+ )
+ self.assertEqual(mock_md5sum.call_args_list[1][0][0], "machine_id1")
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
- @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
- def test_is_reachable(self, mock_setup, mock_run_cmd):
+ @mock.patch.object(command_executer.CommandExecuter, "CrosRunCommand")
+ @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ def test_is_reachable(self, mock_setup, mock_run_cmd):
- cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
- 'average', self.mock_cmd_exec)
- self.mock_cmd_exec.CrosRunCommand = mock_run_cmd
+ cm = machine_manager.CrosMachine(
+ "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec
+ )
+ self.mock_cmd_exec.CrosRunCommand = mock_run_cmd
- # Test 1. CrosRunCommand returns 1 (fail)
- mock_run_cmd.return_value = 1
- result = cm.IsReachable()
- self.assertFalse(result)
- self.assertEqual(mock_setup.call_count, 1)
- self.assertEqual(mock_run_cmd.call_count, 1)
+ # Test 1. CrosRunCommand returns 1 (fail)
+ mock_run_cmd.return_value = 1
+ result = cm.IsReachable()
+ self.assertFalse(result)
+ self.assertEqual(mock_setup.call_count, 1)
+ self.assertEqual(mock_run_cmd.call_count, 1)
- # Test 2. CrosRunCommand returns 0 (success)
- mock_run_cmd.return_value = 0
- result = cm.IsReachable()
- self.assertTrue(result)
- self.assertEqual(mock_run_cmd.call_count, 2)
- first_args = mock_run_cmd.call_args_list[0]
- second_args = mock_run_cmd.call_args_list[1]
- self.assertEqual(first_args[0], second_args[0])
- self.assertEqual(first_args[1], second_args[1])
- self.assertEqual(len(first_args[0]), 1)
- self.assertEqual(len(first_args[1]), 2)
- self.assertEqual(first_args[0][0], 'ls')
- args_dict = first_args[1]
- self.assertEqual(args_dict['machine'], 'daisy.cros')
- self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
+ # Test 2. CrosRunCommand returns 0 (success)
+ mock_run_cmd.return_value = 0
+ result = cm.IsReachable()
+ self.assertTrue(result)
+ self.assertEqual(mock_run_cmd.call_count, 2)
+ first_args = mock_run_cmd.call_args_list[0]
+ second_args = mock_run_cmd.call_args_list[1]
+ self.assertEqual(first_args[0], second_args[0])
+ self.assertEqual(first_args[1], second_args[1])
+ self.assertEqual(len(first_args[0]), 1)
+ self.assertEqual(len(first_args[1]), 2)
+ self.assertEqual(first_args[0][0], "ls")
+ args_dict = first_args[1]
+ self.assertEqual(args_dict["machine"], "daisy.cros")
+ self.assertEqual(args_dict["chromeos_root"], "/usr/local/chromeos")
- @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
- def test_parse_memory_info(self, _mock_setup):
- cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
- 'average', self.mock_cmd_exec)
- cm.meminfo = MEMINFO_STRING
- cm._ParseMemoryInfo()
- self.assertEqual(cm.phys_kbytes, 4194304)
+ @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ def test_parse_memory_info(self, _mock_setup):
+ cm = machine_manager.CrosMachine(
+ "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec
+ )
+ cm.meminfo = MEMINFO_STRING
+ cm._ParseMemoryInfo()
+ self.assertEqual(cm.phys_kbytes, 4194304)
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
- @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
- def test_get_memory_info(self, _mock_setup, mock_run_cmd):
- cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
- 'average', self.mock_cmd_exec)
- self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd
- mock_run_cmd.return_value = [0, MEMINFO_STRING, '']
- cm._GetMemoryInfo()
- self.assertEqual(mock_run_cmd.call_count, 1)
- call_args = mock_run_cmd.call_args_list[0]
- self.assertEqual(call_args[0][0], 'cat /proc/meminfo')
- args_dict = call_args[1]
- self.assertEqual(args_dict['machine'], 'daisy.cros')
- self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
- self.assertEqual(cm.meminfo, MEMINFO_STRING)
- self.assertEqual(cm.phys_kbytes, 4194304)
+ @mock.patch.object(
+ command_executer.CommandExecuter, "CrosRunCommandWOutput"
+ )
+ @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ def test_get_memory_info(self, _mock_setup, mock_run_cmd):
+ cm = machine_manager.CrosMachine(
+ "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec
+ )
+ self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd
+ mock_run_cmd.return_value = [0, MEMINFO_STRING, ""]
+ cm._GetMemoryInfo()
+ self.assertEqual(mock_run_cmd.call_count, 1)
+ call_args = mock_run_cmd.call_args_list[0]
+ self.assertEqual(call_args[0][0], "cat /proc/meminfo")
+ args_dict = call_args[1]
+ self.assertEqual(args_dict["machine"], "daisy.cros")
+ self.assertEqual(args_dict["chromeos_root"], "/usr/local/chromeos")
+ self.assertEqual(cm.meminfo, MEMINFO_STRING)
+ self.assertEqual(cm.phys_kbytes, 4194304)
- mock_run_cmd.return_value = [1, MEMINFO_STRING, '']
- self.assertRaises(Exception, cm._GetMemoryInfo)
+ mock_run_cmd.return_value = [1, MEMINFO_STRING, ""]
+ self.assertRaises(Exception, cm._GetMemoryInfo)
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
- @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
- def test_get_cpu_info(self, _mock_setup, mock_run_cmd):
- cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
- 'average', self.mock_cmd_exec)
- self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd
- mock_run_cmd.return_value = [0, CPUINFO_STRING, '']
- cm._GetCPUInfo()
- self.assertEqual(mock_run_cmd.call_count, 1)
- call_args = mock_run_cmd.call_args_list[0]
- self.assertEqual(call_args[0][0], 'cat /proc/cpuinfo')
- args_dict = call_args[1]
- self.assertEqual(args_dict['machine'], 'daisy.cros')
- self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
- self.assertEqual(cm.cpuinfo, CPUINFO_STRING)
+ @mock.patch.object(
+ command_executer.CommandExecuter, "CrosRunCommandWOutput"
+ )
+ @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ def test_get_cpu_info(self, _mock_setup, mock_run_cmd):
+ cm = machine_manager.CrosMachine(
+ "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec
+ )
+ self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd
+ mock_run_cmd.return_value = [0, CPUINFO_STRING, ""]
+ cm._GetCPUInfo()
+ self.assertEqual(mock_run_cmd.call_count, 1)
+ call_args = mock_run_cmd.call_args_list[0]
+ self.assertEqual(call_args[0][0], "cat /proc/cpuinfo")
+ args_dict = call_args[1]
+ self.assertEqual(args_dict["machine"], "daisy.cros")
+ self.assertEqual(args_dict["chromeos_root"], "/usr/local/chromeos")
+ self.assertEqual(cm.cpuinfo, CPUINFO_STRING)
- @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
- def test_compute_machine_checksum_string(self, _mock_setup):
- cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
- 'average', self.mock_cmd_exec)
- cm.cpuinfo = CPUINFO_STRING
- cm.meminfo = MEMINFO_STRING
- cm._ParseMemoryInfo()
- cm._ComputeMachineChecksumString()
- self.assertEqual(cm.checksum_string, CHECKSUM_STRING)
+ @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ def test_compute_machine_checksum_string(self, _mock_setup):
+ cm = machine_manager.CrosMachine(
+ "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec
+ )
+ cm.cpuinfo = CPUINFO_STRING
+ cm.meminfo = MEMINFO_STRING
+ cm._ParseMemoryInfo()
+ cm._ComputeMachineChecksumString()
+ self.assertEqual(cm.checksum_string, CHECKSUM_STRING)
- @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
- def test_get_md5_checksum(self, _mock_setup):
- cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
- 'average', self.mock_cmd_exec)
- temp_str = 'abcde'
- checksum_str = cm._GetMD5Checksum(temp_str)
- self.assertEqual(checksum_str, 'ab56b4d92b40713acc5af89985d4b786')
+ @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ def test_get_md5_checksum(self, _mock_setup):
+ cm = machine_manager.CrosMachine(
+ "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec
+ )
+ temp_str = "abcde"
+ checksum_str = cm._GetMD5Checksum(temp_str)
+ self.assertEqual(checksum_str, "ab56b4d92b40713acc5af89985d4b786")
- temp_str = ''
- checksum_str = cm._GetMD5Checksum(temp_str)
- self.assertEqual(checksum_str, '')
+ temp_str = ""
+ checksum_str = cm._GetMD5Checksum(temp_str)
+ self.assertEqual(checksum_str, "")
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
- @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
- def test_get_machine_id(self, _mock_setup, mock_run_cmd):
- cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
- 'average', self.mock_cmd_exec)
- self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd
- mock_run_cmd.return_value = [0, DUMP_VPD_STRING, '']
+ @mock.patch.object(
+ command_executer.CommandExecuter, "CrosRunCommandWOutput"
+ )
+ @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ def test_get_machine_id(self, _mock_setup, mock_run_cmd):
+ cm = machine_manager.CrosMachine(
+ "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec
+ )
+ self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd
+ mock_run_cmd.return_value = [0, DUMP_VPD_STRING, ""]
- cm._GetMachineID()
- self.assertEqual(cm.machine_id, '"Product_S/N"="HT4L91SC300208"')
+ cm._GetMachineID()
+ self.assertEqual(cm.machine_id, '"Product_S/N"="HT4L91SC300208"')
- mock_run_cmd.return_value = [0, IFCONFIG_STRING, '']
- cm._GetMachineID()
- self.assertEqual(
- cm.machine_id,
- ' ether 00:50:b6:63:db:65 txqueuelen 1000 (Ethernet)_ '
- 'ether e8:03:9a:9c:50:3d txqueuelen 1000 (Ethernet)_ ether '
- '44:6d:57:20:4a:c5 txqueuelen 1000 (Ethernet)')
+ mock_run_cmd.return_value = [0, IFCONFIG_STRING, ""]
+ cm._GetMachineID()
+ self.assertEqual(
+ cm.machine_id,
+ " ether 00:50:b6:63:db:65 txqueuelen 1000 (Ethernet)_ "
+ "ether e8:03:9a:9c:50:3d txqueuelen 1000 (Ethernet)_ ether "
+ "44:6d:57:20:4a:c5 txqueuelen 1000 (Ethernet)",
+ )
- mock_run_cmd.return_value = [0, 'invalid hardware config', '']
- self.assertRaises(Exception, cm._GetMachineID)
+ mock_run_cmd.return_value = [0, "invalid hardware config", ""]
+ self.assertRaises(Exception, cm._GetMachineID)
- def test_add_cooldown_waittime(self):
- cm = machine_manager.CrosMachine('1.2.3.4.cros', '/usr/local/chromeos',
- 'average')
- self.assertEqual(cm.GetCooldownWaitTime(), 0)
- cm.AddCooldownWaitTime(250)
- self.assertEqual(cm.GetCooldownWaitTime(), 250)
- cm.AddCooldownWaitTime(1)
- self.assertEqual(cm.GetCooldownWaitTime(), 251)
+ def test_add_cooldown_waittime(self):
+ cm = machine_manager.CrosMachine(
+ "1.2.3.4.cros", "/usr/local/chromeos", "average"
+ )
+ self.assertEqual(cm.GetCooldownWaitTime(), 0)
+ cm.AddCooldownWaitTime(250)
+ self.assertEqual(cm.GetCooldownWaitTime(), 250)
+ cm.AddCooldownWaitTime(1)
+ self.assertEqual(cm.GetCooldownWaitTime(), 251)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/crosperf/mock_instance.py b/crosperf/mock_instance.py
index f44ed87..4a3f9a7 100644
--- a/crosperf/mock_instance.py
+++ b/crosperf/mock_instance.py
@@ -1,153 +1,171 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This contains some mock instances for testing."""
-from __future__ import print_function
from benchmark import Benchmark
from label import MockLabel
-perf_args = 'record -a -e cycles'
+
+perf_args = "record -a -e cycles"
label1 = MockLabel(
- 'test1',
- 'build1',
- 'image1',
- 'autotest_dir',
- 'debug_dir',
- '/tmp/test_benchmark_run',
- 'x86-alex',
- 'chromeos-alex1',
- image_args='',
- cache_dir='',
+ "test1",
+ "build1",
+ "image1",
+ "autotest_dir",
+ "debug_dir",
+ "/tmp/test_benchmark_run",
+ "x86-alex",
+ "chromeos-alex1",
+ image_args="",
+ cache_dir="",
cache_only=False,
- log_level='average',
- compiler='gcc',
+ log_level="average",
+ compiler="gcc",
crosfleet=False,
- chrome_src=None)
+ chrome_src=None,
+)
label2 = MockLabel(
- 'test2',
- 'build2',
- 'image2',
- 'autotest_dir',
- 'debug_dir',
- '/tmp/test_benchmark_run_2',
- 'x86-alex',
- 'chromeos-alex2',
- image_args='',
- cache_dir='',
+ "test2",
+ "build2",
+ "image2",
+ "autotest_dir",
+ "debug_dir",
+ "/tmp/test_benchmark_run_2",
+ "x86-alex",
+ "chromeos-alex2",
+ image_args="",
+ cache_dir="",
cache_only=False,
- log_level='average',
- compiler='gcc',
+ log_level="average",
+ compiler="gcc",
crosfleet=False,
- chrome_src=None)
+ chrome_src=None,
+)
-benchmark1 = Benchmark('benchmark1', 'autotest_name_1', 'autotest_args', 2, '',
- perf_args, 'telemetry_Crosperf', '')
+benchmark1 = Benchmark(
+ "benchmark1",
+ "autotest_name_1",
+ "autotest_args",
+ 2,
+ "",
+ perf_args,
+ "telemetry_Crosperf",
+ "",
+)
-benchmark2 = Benchmark('benchmark2', 'autotest_name_2', 'autotest_args', 2, '',
- perf_args, 'telemetry_Crosperf', '')
+benchmark2 = Benchmark(
+ "benchmark2",
+ "autotest_name_2",
+ "autotest_args",
+ 2,
+ "",
+ perf_args,
+ "telemetry_Crosperf",
+ "",
+)
keyval = {}
keyval[0] = {
- '': 'PASS',
- 'milliseconds_1': '1',
- 'milliseconds_2': '8',
- 'milliseconds_3': '9.2',
- 'test{1}': '2',
- 'test{2}': '4',
- 'ms_1': '2.1',
- 'total': '5',
- 'bool': 'True'
+ "": "PASS",
+ "milliseconds_1": "1",
+ "milliseconds_2": "8",
+ "milliseconds_3": "9.2",
+ "test{1}": "2",
+ "test{2}": "4",
+ "ms_1": "2.1",
+ "total": "5",
+ "bool": "True",
}
keyval[1] = {
- '': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_2': '5',
- 'ms_1': '2.2',
- 'total': '6',
- 'test{1}': '3',
- 'test{2}': '4',
- 'bool': 'FALSE'
+ "": "PASS",
+ "milliseconds_1": "3",
+ "milliseconds_2": "5",
+ "ms_1": "2.2",
+ "total": "6",
+ "test{1}": "3",
+ "test{2}": "4",
+ "bool": "FALSE",
}
keyval[2] = {
- '': 'PASS',
- 'milliseconds_4': '30',
- 'milliseconds_5': '50',
- 'ms_1': '2.23',
- 'total': '6',
- 'test{1}': '5',
- 'test{2}': '4',
- 'bool': 'FALSE'
+ "": "PASS",
+ "milliseconds_4": "30",
+ "milliseconds_5": "50",
+ "ms_1": "2.23",
+ "total": "6",
+ "test{1}": "5",
+ "test{2}": "4",
+ "bool": "FALSE",
}
keyval[3] = {
- '': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_6': '7',
- 'ms_1': '2.3',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '6',
- 'bool': 'FALSE'
+ "": "PASS",
+ "milliseconds_1": "3",
+ "milliseconds_6": "7",
+ "ms_1": "2.3",
+ "total": "7",
+ "test{1}": "2",
+ "test{2}": "6",
+ "bool": "FALSE",
}
keyval[4] = {
- '': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.3',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '6',
- 'bool': 'TRUE'
+ "": "PASS",
+ "milliseconds_1": "3",
+ "milliseconds_8": "6",
+ "ms_1": "2.3",
+ "total": "7",
+ "test{1}": "2",
+ "test{2}": "6",
+ "bool": "TRUE",
}
keyval[5] = {
- '': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.2',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '2',
- 'bool': 'TRUE'
+ "": "PASS",
+ "milliseconds_1": "3",
+ "milliseconds_8": "6",
+ "ms_1": "2.2",
+ "total": "7",
+ "test{1}": "2",
+ "test{2}": "2",
+ "bool": "TRUE",
}
keyval[6] = {
- '': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '4',
- 'bool': 'TRUE'
+ "": "PASS",
+ "milliseconds_1": "3",
+ "milliseconds_8": "6",
+ "ms_1": "2",
+ "total": "7",
+ "test{1}": "2",
+ "test{2}": "4",
+ "bool": "TRUE",
}
keyval[7] = {
- '': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '1',
- 'total': '7',
- 'test{1}': '1',
- 'test{2}': '6',
- 'bool': 'TRUE'
+ "": "PASS",
+ "milliseconds_1": "3",
+ "milliseconds_8": "6",
+ "ms_1": "1",
+ "total": "7",
+ "test{1}": "1",
+ "test{2}": "6",
+ "bool": "TRUE",
}
keyval[8] = {
- '': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '3.3',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '8',
- 'bool': 'TRUE'
+ "": "PASS",
+ "milliseconds_1": "3",
+ "milliseconds_8": "6",
+ "ms_1": "3.3",
+ "total": "7",
+ "test{1}": "2",
+ "test{2}": "8",
+ "bool": "TRUE",
}
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index 5525858..043da99 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -1,12 +1,10 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to deal with result cache."""
-from __future__ import division
-from __future__ import print_function
import collections
import glob
@@ -20,642 +18,729 @@
from cros_utils import command_executer
from cros_utils import misc
-
from image_checksummer import ImageChecksummer
-
import results_report
import test_flag
-SCRATCH_DIR = os.path.expanduser('~/cros_scratch')
-RESULTS_FILE = 'results.pickle'
-MACHINE_FILE = 'machine.txt'
-AUTOTEST_TARBALL = 'autotest.tbz2'
-RESULTS_TARBALL = 'results.tbz2'
-PERF_RESULTS_FILE = 'perf-results.txt'
-CACHE_KEYS_FILE = 'cache_keys.txt'
+
+SCRATCH_DIR = os.path.expanduser("~/cros_scratch")
+RESULTS_FILE = "results.pickle"
+MACHINE_FILE = "machine.txt"
+AUTOTEST_TARBALL = "autotest.tbz2"
+RESULTS_TARBALL = "results.tbz2"
+PERF_RESULTS_FILE = "perf-results.txt"
+CACHE_KEYS_FILE = "cache_keys.txt"
class PidVerificationError(Exception):
- """Error of perf PID verification in per-process mode."""
+ """Error of perf PID verification in per-process mode."""
class PerfDataReadError(Exception):
- """Error of reading a perf.data header."""
+ """Error of reading a perf.data header."""
class Result(object):
- """Class for holding the results of a single test run.
+ """Class for holding the results of a single test run.
- This class manages what exactly is stored inside the cache without knowing
- what the key of the cache is. For runs with perf, it stores perf.data,
- perf.report, etc. The key generation is handled by the ResultsCache class.
- """
-
- def __init__(self, logger, label, log_level, machine, cmd_exec=None):
- self.chromeos_root = label.chromeos_root
- self._logger = logger
- self.ce = cmd_exec or command_executer.GetCommandExecuter(
- self._logger, log_level=log_level)
- self.temp_dir = None
- self.label = label
- self.results_dir = None
- self.log_level = log_level
- self.machine = machine
- self.perf_data_files = []
- self.perf_report_files = []
- self.results_file = []
- self.turbostat_log_file = ''
- self.cpustats_log_file = ''
- self.cpuinfo_file = ''
- self.top_log_file = ''
- self.wait_time_log_file = ''
- self.chrome_version = ''
- self.err = None
- self.chroot_results_dir = ''
- self.test_name = ''
- self.keyvals = None
- self.board = None
- self.suite = None
- self.cwp_dso = ''
- self.retval = None
- self.out = None
- self.top_cmds = []
-
- def GetTopCmds(self):
- """Get the list of top commands consuming CPU on the machine."""
- return self.top_cmds
-
- def FormatStringTopCommands(self):
- """Get formatted string of top commands.
-
- Get the formatted string with top commands consuming CPU on DUT machine.
- Number of "non-chrome" processes in the list is limited to 5.
- """
- format_list = [
- 'Top commands with highest CPU usage:',
- # Header.
- '%20s %9s %6s %s' % ('COMMAND', 'AVG CPU%', 'COUNT', 'HIGHEST 5'),
- '-' * 50,
- ]
- if self.top_cmds:
- # After switching to top processes we have to expand the list since there
- # will be a lot of 'chrome' processes (up to 10, sometimes more) in the
- # top.
- # Let's limit the list size by the number of non-chrome processes.
- limit_of_non_chrome_procs = 5
- num_of_non_chrome_procs = 0
- for topcmd in self.top_cmds:
- print_line = '%20s %9.2f %6s %s' % (
- topcmd['cmd'], topcmd['cpu_use_avg'], topcmd['count'],
- topcmd['top5_cpu_use'])
- format_list.append(print_line)
- if not topcmd['cmd'].startswith('chrome'):
- num_of_non_chrome_procs += 1
- if num_of_non_chrome_procs >= limit_of_non_chrome_procs:
- break
- else:
- format_list.append('[NO DATA FROM THE TOP LOG]')
- format_list.append('-' * 50)
- return '\n'.join(format_list)
-
- def CopyFilesTo(self, dest_dir, files_to_copy):
- file_index = 0
- for file_to_copy in files_to_copy:
- if not os.path.isdir(dest_dir):
- command = 'mkdir -p %s' % dest_dir
- self.ce.RunCommand(command)
- dest_file = os.path.join(
- dest_dir, ('%s.%s' % (os.path.basename(file_to_copy), file_index)))
- ret = self.ce.CopyFiles(file_to_copy, dest_file, recursive=False)
- if ret:
- raise IOError('Could not copy results file: %s' % file_to_copy)
- file_index += 1
-
- def CopyResultsTo(self, dest_dir):
- self.CopyFilesTo(dest_dir, self.results_file)
- self.CopyFilesTo(dest_dir, self.perf_data_files)
- self.CopyFilesTo(dest_dir, self.perf_report_files)
- extra_files = []
- if self.top_log_file:
- extra_files.append(self.top_log_file)
- if self.cpuinfo_file:
- extra_files.append(self.cpuinfo_file)
- if extra_files:
- self.CopyFilesTo(dest_dir, extra_files)
- if self.results_file or self.perf_data_files or self.perf_report_files:
- self._logger.LogOutput('Results files stored in %s.' % dest_dir)
-
- def CompressResultsTo(self, dest_dir):
- tarball = os.path.join(self.results_dir, RESULTS_TARBALL)
- # Test_that runs hold all output under TEST_NAME_HASHTAG/results/,
- # while tast runs hold output under TEST_NAME/.
- # Both ensure to be unique.
- result_dir_name = self.test_name if self.suite == 'tast' else 'results'
- results_dir = self.FindFilesInResultsDir('-name %s' %
- result_dir_name).split('\n')[0]
-
- if not results_dir:
- self._logger.LogOutput('WARNING: No results dir matching %r found' %
- result_dir_name)
- return
-
- self.CreateTarball(results_dir, tarball)
- self.CopyFilesTo(dest_dir, [tarball])
- if results_dir:
- self._logger.LogOutput('Results files compressed into %s.' % dest_dir)
-
- def GetNewKeyvals(self, keyvals_dict):
- # Initialize 'units' dictionary.
- units_dict = {}
- for k in keyvals_dict:
- units_dict[k] = ''
- results_files = self.GetDataMeasurementsFiles()
- for f in results_files:
- # Make sure we can find the results file
- if os.path.exists(f):
- data_filename = f
- else:
- # Otherwise get the base filename and create the correct
- # path for it.
- _, f_base = misc.GetRoot(f)
- data_filename = os.path.join(self.chromeos_root, 'chroot/tmp',
- self.temp_dir, f_base)
- if data_filename.find('.json') > 0:
- raw_dict = dict()
- if os.path.exists(data_filename):
- with open(data_filename, 'r') as data_file:
- raw_dict = json.load(data_file)
-
- if 'charts' in raw_dict:
- raw_dict = raw_dict['charts']
- for k1 in raw_dict:
- field_dict = raw_dict[k1]
- for k2 in field_dict:
- result_dict = field_dict[k2]
- key = k1 + '__' + k2
- if 'value' in result_dict:
- keyvals_dict[key] = result_dict['value']
- elif 'values' in result_dict:
- values = result_dict['values']
- if ('type' in result_dict
- and result_dict['type'] == 'list_of_scalar_values' and values
- and values != 'null'):
- keyvals_dict[key] = sum(values) / float(len(values))
- else:
- keyvals_dict[key] = values
- units_dict[key] = result_dict['units']
- else:
- if os.path.exists(data_filename):
- with open(data_filename, 'r') as data_file:
- lines = data_file.readlines()
- for line in lines:
- tmp_dict = json.loads(line)
- graph_name = tmp_dict['graph']
- graph_str = (graph_name + '__') if graph_name else ''
- key = graph_str + tmp_dict['description']
- keyvals_dict[key] = tmp_dict['value']
- units_dict[key] = tmp_dict['units']
-
- return keyvals_dict, units_dict
-
- def AppendTelemetryUnits(self, keyvals_dict, units_dict):
- """keyvals_dict is the dict of key-value used to generate Crosperf reports.
-
- units_dict is a dictionary of the units for the return values in
- keyvals_dict. We need to associate the units with the return values,
- for Telemetry tests, so that we can include the units in the reports.
- This function takes each value in keyvals_dict, finds the corresponding
- unit in the units_dict, and replaces the old value with a list of the
- old value and the units. This later gets properly parsed in the
- ResultOrganizer class, for generating the reports.
+ This class manages what exactly is stored inside the cache without knowing
+ what the key of the cache is. For runs with perf, it stores perf.data,
+ perf.report, etc. The key generation is handled by the ResultsCache class.
"""
- results_dict = {}
- for k in keyvals_dict:
- # We don't want these lines in our reports; they add no useful data.
- if not k or k == 'telemetry_Crosperf':
- continue
- val = keyvals_dict[k]
- units = units_dict[k]
- new_val = [val, units]
- results_dict[k] = new_val
- return results_dict
+ def __init__(self, logger, label, log_level, machine, cmd_exec=None):
+ self.chromeos_root = label.chromeos_root
+ self._logger = logger
+ self.ce = cmd_exec or command_executer.GetCommandExecuter(
+ self._logger, log_level=log_level
+ )
+ self.temp_dir = None
+ self.label = label
+ self.results_dir = None
+ self.log_level = log_level
+ self.machine = machine
+ self.perf_data_files = []
+ self.perf_report_files = []
+ self.results_file = []
+ self.turbostat_log_file = ""
+ self.cpustats_log_file = ""
+ self.cpuinfo_file = ""
+ self.top_log_file = ""
+ self.wait_time_log_file = ""
+ self.chrome_version = ""
+ self.err = None
+ self.chroot_results_dir = ""
+ self.test_name = ""
+ self.keyvals = None
+ self.board = None
+ self.suite = None
+ self.cwp_dso = ""
+ self.retval = None
+ self.out = None
+ self.top_cmds = []
- def GetKeyvals(self):
- results_in_chroot = os.path.join(self.chromeos_root, 'chroot', 'tmp')
- if not self.temp_dir:
- self.temp_dir = tempfile.mkdtemp(dir=results_in_chroot)
- command = f'cp -r {self.results_dir}/* {self.temp_dir}'
- self.ce.RunCommand(command, print_to_console=False)
+ def GetTopCmds(self):
+ """Get the list of top commands consuming CPU on the machine."""
+ return self.top_cmds
- command = ('./generate_test_report --no-color --csv %s' %
- (os.path.join('/tmp', os.path.basename(self.temp_dir))))
- _, out, _ = self.ce.ChrootRunCommandWOutput(self.chromeos_root,
- command,
- print_to_console=False)
- keyvals_dict = {}
- tmp_dir_in_chroot = misc.GetInsideChrootPath(self.chromeos_root,
- self.temp_dir)
- for line in out.splitlines():
- tokens = re.split('=|,', line)
- key = tokens[-2]
- if key.startswith(tmp_dir_in_chroot):
- key = key[len(tmp_dir_in_chroot) + 1:]
- value = tokens[-1]
- keyvals_dict[key] = value
+ def FormatStringTopCommands(self):
+ """Get formatted string of top commands.
- # Check to see if there is a perf_measurements file and get the
- # data from it if so.
- keyvals_dict, units_dict = self.GetNewKeyvals(keyvals_dict)
- if self.suite == 'telemetry_Crosperf':
- # For telemtry_Crosperf results, append the units to the return
- # results, for use in generating the reports.
- keyvals_dict = self.AppendTelemetryUnits(keyvals_dict, units_dict)
- return keyvals_dict
+ Get the formatted string with top commands consuming CPU on DUT machine.
+ Number of "non-chrome" processes in the list is limited to 5.
+ """
+ format_list = [
+ "Top commands with highest CPU usage:",
+ # Header.
+ "%20s %9s %6s %s" % ("COMMAND", "AVG CPU%", "COUNT", "HIGHEST 5"),
+ "-" * 50,
+ ]
+ if self.top_cmds:
+ # After switching to top processes we have to expand the list since there
+ # will be a lot of 'chrome' processes (up to 10, sometimes more) in the
+ # top.
+ # Let's limit the list size by the number of non-chrome processes.
+ limit_of_non_chrome_procs = 5
+ num_of_non_chrome_procs = 0
+ for topcmd in self.top_cmds:
+ print_line = "%20s %9.2f %6s %s" % (
+ topcmd["cmd"],
+ topcmd["cpu_use_avg"],
+ topcmd["count"],
+ topcmd["top5_cpu_use"],
+ )
+ format_list.append(print_line)
+ if not topcmd["cmd"].startswith("chrome"):
+ num_of_non_chrome_procs += 1
+ if num_of_non_chrome_procs >= limit_of_non_chrome_procs:
+ break
+ else:
+ format_list.append("[NO DATA FROM THE TOP LOG]")
+ format_list.append("-" * 50)
+ return "\n".join(format_list)
- def GetSamples(self):
- actual_samples = 0
- for perf_data_file in self.perf_data_files:
- chroot_perf_data_file = misc.GetInsideChrootPath(self.chromeos_root,
- perf_data_file)
- perf_path = os.path.join(self.chromeos_root, 'chroot', 'usr/bin/perf')
- perf_file = '/usr/sbin/perf'
- if os.path.exists(perf_path):
- perf_file = '/usr/bin/perf'
+ def CopyFilesTo(self, dest_dir, files_to_copy):
+ file_index = 0
+ for file_to_copy in files_to_copy:
+ if not os.path.isdir(dest_dir):
+ command = "mkdir -p %s" % dest_dir
+ self.ce.RunCommand(command)
+ dest_file = os.path.join(
+ dest_dir,
+ ("%s.%s" % (os.path.basename(file_to_copy), file_index)),
+ )
+ ret = self.ce.CopyFiles(file_to_copy, dest_file, recursive=False)
+ if ret:
+ raise IOError("Could not copy results file: %s" % file_to_copy)
+ file_index += 1
- # For each perf.data, we want to collect sample count for specific DSO.
- # We specify exact match for known DSO type, and every sample for `all`.
- exact_match = ''
- if self.cwp_dso == 'all':
- exact_match = '""'
- elif self.cwp_dso == 'chrome':
- exact_match = '" chrome "'
- elif self.cwp_dso == 'kallsyms':
- exact_match = '"[kernel.kallsyms]"'
- else:
- # This will need to be updated once there are more DSO types supported,
- # if user want an exact match for the field they want.
- exact_match = '"%s"' % self.cwp_dso
+ def CopyResultsTo(self, dest_dir):
+ self.CopyFilesTo(dest_dir, self.results_file)
+ self.CopyFilesTo(dest_dir, self.perf_data_files)
+ self.CopyFilesTo(dest_dir, self.perf_report_files)
+ extra_files = []
+ if self.top_log_file:
+ extra_files.append(self.top_log_file)
+ if self.cpuinfo_file:
+ extra_files.append(self.cpuinfo_file)
+ if extra_files:
+ self.CopyFilesTo(dest_dir, extra_files)
+ if self.results_file or self.perf_data_files or self.perf_report_files:
+ self._logger.LogOutput("Results files stored in %s." % dest_dir)
- command = ('%s report -n -s dso -i %s 2> /dev/null | grep %s' %
- (perf_file, chroot_perf_data_file, exact_match))
- _, result, _ = self.ce.ChrootRunCommandWOutput(self.chromeos_root,
- command)
- # Accumulate the sample count for all matched fields.
- # Each line looks like this:
- # 45.42% 237210 chrome
- # And we want the second number which is the sample count.
- samples = 0
- try:
- for line in result.split('\n'):
- attr = line.split()
- if len(attr) == 3 and '%' in attr[0]:
- samples += int(attr[1])
- except:
- raise RuntimeError('Cannot parse perf dso result')
+ def CompressResultsTo(self, dest_dir):
+ tarball = os.path.join(self.results_dir, RESULTS_TARBALL)
+ # Test_that runs hold all output under TEST_NAME_HASHTAG/results/,
+ # while tast runs hold output under TEST_NAME/.
+ # Both ensure to be unique.
+ result_dir_name = self.test_name if self.suite == "tast" else "results"
+ results_dir = self.FindFilesInResultsDir(
+ "-name %s" % result_dir_name
+ ).split("\n")[0]
- actual_samples += samples
+ if not results_dir:
+ self._logger.LogOutput(
+ "WARNING: No results dir matching %r found" % result_dir_name
+ )
+ return
- # Remove idle cycles from the accumulated sample count.
- perf_report_file = f'{perf_data_file}.report'
- if not os.path.exists(perf_report_file):
- raise RuntimeError(f'Missing perf report file: {perf_report_file}')
+ self.CreateTarball(results_dir, tarball)
+ self.CopyFilesTo(dest_dir, [tarball])
+ if results_dir:
+ self._logger.LogOutput(
+ "Results files compressed into %s." % dest_dir
+ )
- idle_functions = {
- '[kernel.kallsyms]':
- ('intel_idle', 'arch_cpu_idle', 'intel_idle', 'cpu_startup_entry',
- 'default_idle', 'cpu_idle_loop', 'do_idle'),
- }
- idle_samples = 0
-
- with open(perf_report_file) as f:
- try:
- for line in f:
- line = line.strip()
- if not line or line[0] == '#':
- continue
- # Each line has the following fields,
- # pylint: disable=line-too-long
- # Overhead Samples Command Shared Object Symbol
- # pylint: disable=line-too-long
- # 1.48% 60 swapper [kernel.kallsyms] [k] intel_idle
- # pylint: disable=line-too-long
- # 0.00% 1 shill libshill-net.so [.] std::__1::vector<unsigned char, std::__1::allocator<unsigned char> >::vector<unsigned char const*>
- _, samples, _, dso, _, function = line.split(None, 5)
-
- if dso in idle_functions and function in idle_functions[dso]:
- if self.log_level != 'verbose':
- self._logger.LogOutput('Removing %s samples from %s in %s' %
- (samples, function, dso))
- idle_samples += int(samples)
- except:
- raise RuntimeError('Cannot parse perf report')
- actual_samples -= idle_samples
- return [actual_samples, u'samples']
-
- def GetResultsDir(self):
- if self.suite == 'tast':
- mo = re.search(r'Writing results to (\S+)', self.out)
- else:
- mo = re.search(r'Results placed in (\S+)', self.out)
- if mo:
- result = mo.group(1)
- return result
- raise RuntimeError('Could not find results directory.')
-
- def FindFilesInResultsDir(self, find_args):
- if not self.results_dir:
- return ''
-
- command = 'find %s %s' % (self.results_dir, find_args)
- ret, out, _ = self.ce.RunCommandWOutput(command, print_to_console=False)
- if ret:
- raise RuntimeError('Could not run find command!')
- return out
-
- def GetResultsFile(self):
- if self.suite == 'telemetry_Crosperf':
- return self.FindFilesInResultsDir('-name histograms.json').splitlines()
- return self.FindFilesInResultsDir('-name results-chart.json').splitlines()
-
- def GetPerfDataFiles(self):
- return self.FindFilesInResultsDir('-name perf.data').splitlines()
-
- def GetPerfReportFiles(self):
- return self.FindFilesInResultsDir('-name perf.data.report').splitlines()
-
- def GetDataMeasurementsFiles(self):
- result = self.FindFilesInResultsDir('-name perf_measurements').splitlines()
- if not result:
- if self.suite == 'telemetry_Crosperf':
- result = (
- self.FindFilesInResultsDir('-name histograms.json').splitlines())
- else:
- result = (self.FindFilesInResultsDir(
- '-name results-chart.json').splitlines())
- return result
-
- def GetTurbostatFile(self):
- """Get turbostat log path string."""
- return self.FindFilesInResultsDir('-name turbostat.log').split('\n')[0]
-
- def GetCpustatsFile(self):
- """Get cpustats log path string."""
- return self.FindFilesInResultsDir('-name cpustats.log').split('\n')[0]
-
- def GetCpuinfoFile(self):
- """Get cpustats log path string."""
- return self.FindFilesInResultsDir('-name cpuinfo.log').split('\n')[0]
-
- def GetTopFile(self):
- """Get cpustats log path string."""
- return self.FindFilesInResultsDir('-name top.log').split('\n')[0]
-
- def GetWaitTimeFile(self):
- """Get wait time log path string."""
- return self.FindFilesInResultsDir('-name wait_time.log').split('\n')[0]
-
- def _CheckDebugPath(self, option, path):
- relative_path = path[1:]
- out_chroot_path = os.path.join(self.chromeos_root, 'chroot', relative_path)
- if os.path.exists(out_chroot_path):
- if option == 'kallsyms':
- path = os.path.join(path, 'System.map-*')
- return '--' + option + ' ' + path
- else:
- print('** WARNING **: --%s option not applied, %s does not exist' %
- (option, out_chroot_path))
- return ''
-
- def GeneratePerfReportFiles(self):
- perf_report_files = []
- for perf_data_file in self.perf_data_files:
- # Generate a perf.report and store it side-by-side with the perf.data
- # file.
- chroot_perf_data_file = misc.GetInsideChrootPath(self.chromeos_root,
- perf_data_file)
- perf_report_file = '%s.report' % perf_data_file
- if os.path.exists(perf_report_file):
- raise RuntimeError('Perf report file already exists: %s' %
- perf_report_file)
- chroot_perf_report_file = misc.GetInsideChrootPath(
- self.chromeos_root, perf_report_file)
- perf_path = os.path.join(self.chromeos_root, 'chroot', 'usr/bin/perf')
-
- perf_file = '/usr/sbin/perf'
- if os.path.exists(perf_path):
- perf_file = '/usr/bin/perf'
-
- debug_path = self.label.debug_path
-
- if debug_path:
- symfs = '--symfs ' + debug_path
- vmlinux = '--vmlinux ' + os.path.join(debug_path, 'usr', 'lib',
- 'debug', 'boot', 'vmlinux')
- kallsyms = ''
- print('** WARNING **: --kallsyms option not applied, no System.map-* '
- 'for downloaded image.')
- else:
- if self.label.image_type != 'local':
- print('** WARNING **: Using local debug info in /build, this may '
- 'not match the downloaded image.')
- build_path = os.path.join('/build', self.board)
- symfs = self._CheckDebugPath('symfs', build_path)
- vmlinux_path = os.path.join(build_path, 'usr/lib/debug/boot/vmlinux')
- vmlinux = self._CheckDebugPath('vmlinux', vmlinux_path)
- kallsyms_path = os.path.join(build_path, 'boot')
- kallsyms = self._CheckDebugPath('kallsyms', kallsyms_path)
-
- command = ('%s report -n %s %s %s -i %s --stdio > %s' %
- (perf_file, symfs, vmlinux, kallsyms, chroot_perf_data_file,
- chroot_perf_report_file))
- if self.log_level != 'verbose':
- self._logger.LogOutput('Generating perf report...\nCMD: %s' % command)
- exit_code = self.ce.ChrootRunCommand(self.chromeos_root, command)
- if exit_code == 0:
- if self.log_level != 'verbose':
- self._logger.LogOutput('Perf report generated successfully.')
- else:
- raise RuntimeError('Perf report not generated correctly. CMD: %s' %
- command)
-
- # Add a keyval to the dictionary for the events captured.
- perf_report_files.append(
- misc.GetOutsideChrootPath(self.chromeos_root,
- chroot_perf_report_file))
- return perf_report_files
-
- def GatherPerfResults(self):
- report_id = 0
- for perf_report_file in self.perf_report_files:
- with open(perf_report_file, 'r') as f:
- report_contents = f.read()
- for group in re.findall(r'Events: (\S+) (\S+)', report_contents):
- num_events = group[0]
- event_name = group[1]
- key = 'perf_%s_%s' % (report_id, event_name)
- value = str(misc.UnitToNumber(num_events))
- self.keyvals[key] = value
-
- def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso):
- self.board = self.label.board
- self.out = out
- self.err = err
- self.retval = retval
- self.test_name = test
- self.suite = suite
- self.cwp_dso = cwp_dso
- self.chroot_results_dir = self.GetResultsDir()
- self.results_dir = misc.GetOutsideChrootPath(self.chromeos_root,
- self.chroot_results_dir)
- self.results_file = self.GetResultsFile()
- self.perf_data_files = self.GetPerfDataFiles()
- # Include all perf.report data in table.
- self.perf_report_files = self.GeneratePerfReportFiles()
- self.turbostat_log_file = self.GetTurbostatFile()
- self.cpustats_log_file = self.GetCpustatsFile()
- self.cpuinfo_file = self.GetCpuinfoFile()
- self.top_log_file = self.GetTopFile()
- self.wait_time_log_file = self.GetWaitTimeFile()
- # TODO(asharif): Do something similar with perf stat.
-
- # Grab keyvals from the directory.
- self.ProcessResults()
-
- def ProcessChartResults(self):
- # Open and parse the json results file generated by telemetry/test_that.
- if not self.results_file:
- raise IOError('No results file found.')
- filename = self.results_file[0]
- if not filename.endswith('.json'):
- raise IOError('Attempt to call json on non-json file: %s' % filename)
- if not os.path.exists(filename):
- raise IOError('%s does not exist' % filename)
-
- keyvals = {}
- with open(filename, 'r') as f:
- raw_dict = json.load(f)
- if 'charts' in raw_dict:
- raw_dict = raw_dict['charts']
- for k, field_dict in raw_dict.items():
- for item in field_dict:
- keyname = k + '__' + item
- value_dict = field_dict[item]
- if 'value' in value_dict:
- result = value_dict['value']
- elif 'values' in value_dict:
- values = value_dict['values']
- if not values:
- continue
- if ('type' in value_dict
- and value_dict['type'] == 'list_of_scalar_values'
- and values != 'null'):
- result = sum(values) / float(len(values))
+ def GetNewKeyvals(self, keyvals_dict):
+ # Initialize 'units' dictionary.
+ units_dict = {}
+ for k in keyvals_dict:
+ units_dict[k] = ""
+ results_files = self.GetDataMeasurementsFiles()
+ for f in results_files:
+ # Make sure we can find the results file
+ if os.path.exists(f):
+ data_filename = f
else:
- result = values
- else:
- continue
- units = value_dict['units']
- new_value = [result, units]
- keyvals[keyname] = new_value
- return keyvals
+ # Otherwise get the base filename and create the correct
+ # path for it.
+ _, f_base = misc.GetRoot(f)
+ data_filename = os.path.join(
+ self.chromeos_root, "chroot/tmp", self.temp_dir, f_base
+ )
+ if data_filename.find(".json") > 0:
+ raw_dict = dict()
+ if os.path.exists(data_filename):
+ with open(data_filename, "r") as data_file:
+ raw_dict = json.load(data_file)
- def ProcessTurbostatResults(self):
- """Given turbostat_log_file non-null parse cpu stats from file.
+ if "charts" in raw_dict:
+ raw_dict = raw_dict["charts"]
+ for k1 in raw_dict:
+ field_dict = raw_dict[k1]
+ for k2 in field_dict:
+ result_dict = field_dict[k2]
+ key = k1 + "__" + k2
+ if "value" in result_dict:
+ keyvals_dict[key] = result_dict["value"]
+ elif "values" in result_dict:
+ values = result_dict["values"]
+ if (
+ "type" in result_dict
+ and result_dict["type"]
+ == "list_of_scalar_values"
+ and values
+ and values != "null"
+ ):
+ keyvals_dict[key] = sum(values) / float(
+ len(values)
+ )
+ else:
+ keyvals_dict[key] = values
+ units_dict[key] = result_dict["units"]
+ else:
+ if os.path.exists(data_filename):
+ with open(data_filename, "r") as data_file:
+ lines = data_file.readlines()
+ for line in lines:
+ tmp_dict = json.loads(line)
+ graph_name = tmp_dict["graph"]
+ graph_str = (
+ (graph_name + "__") if graph_name else ""
+ )
+ key = graph_str + tmp_dict["description"]
+ keyvals_dict[key] = tmp_dict["value"]
+ units_dict[key] = tmp_dict["units"]
- Returns:
- Dictionary of 'cpufreq', 'cputemp' where each
- includes dictionary 'all': [list_of_values]
+ return keyvals_dict, units_dict
- Example of the output of turbostat_log.
- ----------------------
- CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
- - 329 12.13 2723 2393 10975 77
- 0 336 12.41 2715 2393 6328 77
- 2 323 11.86 2731 2393 4647 69
- CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
- - 1940 67.46 2884 2393 39920 83
- 0 1827 63.70 2877 2393 21184 83
- """
- cpustats = {}
- read_data = ''
- with open(self.turbostat_log_file) as f:
- read_data = f.readlines()
+ def AppendTelemetryUnits(self, keyvals_dict, units_dict):
+ """keyvals_dict is the dict of key-value used to generate Crosperf reports.
- if not read_data:
- self._logger.LogOutput('WARNING: Turbostat output file is empty.')
- return {}
+ units_dict is a dictionary of the units for the return values in
+ keyvals_dict. We need to associate the units with the return values,
+ for Telemetry tests, so that we can include the units in the reports.
+ This function takes each value in keyvals_dict, finds the corresponding
+ unit in the units_dict, and replaces the old value with a list of the
+ old value and the units. This later gets properly parsed in the
+ ResultOrganizer class, for generating the reports.
+ """
- # First line always contains the header.
- stats = read_data[0].split()
+ results_dict = {}
+ for k in keyvals_dict:
+ # We don't want these lines in our reports; they add no useful data.
+ if not k or k == "telemetry_Crosperf":
+ continue
+ val = keyvals_dict[k]
+ units = units_dict[k]
+ new_val = [val, units]
+ results_dict[k] = new_val
+ return results_dict
- # Mandatory parameters.
- if 'CPU' not in stats:
- self._logger.LogOutput(
- 'WARNING: Missing data for CPU# in Turbostat output.')
- return {}
- if 'Bzy_MHz' not in stats:
- self._logger.LogOutput(
- 'WARNING: Missing data for Bzy_MHz in Turbostat output.')
- return {}
- cpu_index = stats.index('CPU')
- cpufreq_index = stats.index('Bzy_MHz')
- cpufreq = cpustats.setdefault('cpufreq', {'all': []})
+ def GetKeyvals(self):
+ results_in_chroot = os.path.join(self.chromeos_root, "chroot", "tmp")
+ if not self.temp_dir:
+ self.temp_dir = tempfile.mkdtemp(dir=results_in_chroot)
+ command = f"cp -r {self.results_dir}/* {self.temp_dir}"
+ self.ce.RunCommand(command, print_to_console=False)
- # Optional parameters.
- cputemp_index = -1
- if 'CoreTmp' in stats:
- cputemp_index = stats.index('CoreTmp')
- cputemp = cpustats.setdefault('cputemp', {'all': []})
+ command = "./generate_test_report --no-color --csv %s" % (
+ os.path.join("/tmp", os.path.basename(self.temp_dir))
+ )
+ _, out, _ = self.ce.ChrootRunCommandWOutput(
+ self.chromeos_root, command, print_to_console=False
+ )
+ keyvals_dict = {}
+ tmp_dir_in_chroot = misc.GetInsideChrootPath(
+ self.chromeos_root, self.temp_dir
+ )
+ for line in out.splitlines():
+ tokens = re.split("=|,", line)
+ key = tokens[-2]
+ if key.startswith(tmp_dir_in_chroot):
+ key = key[len(tmp_dir_in_chroot) + 1 :]
+ value = tokens[-1]
+ keyvals_dict[key] = value
- # Parse data starting from the second line ignoring repeating headers.
- for st in read_data[1:]:
- # Data represented by int or float separated by spaces.
- numbers = st.split()
- if not all(word.replace('.', '', 1).isdigit() for word in numbers[1:]):
- # Skip the line if data mismatch.
- continue
- if numbers[cpu_index] != '-':
- # Ignore Core-specific statistics which starts with Core number.
- # Combined statistics for all core has "-" CPU identifier.
- continue
+ # Check to see if there is a perf_measurements file and get the
+ # data from it if so.
+ keyvals_dict, units_dict = self.GetNewKeyvals(keyvals_dict)
+ if self.suite == "telemetry_Crosperf":
+ # For telemtry_Crosperf results, append the units to the return
+ # results, for use in generating the reports.
+ keyvals_dict = self.AppendTelemetryUnits(keyvals_dict, units_dict)
+ return keyvals_dict
- cpufreq['all'].append(int(numbers[cpufreq_index]))
- if cputemp_index != -1:
- cputemp['all'].append(int(numbers[cputemp_index]))
- return cpustats
+ def GetSamples(self):
+ actual_samples = 0
+ for perf_data_file in self.perf_data_files:
+ chroot_perf_data_file = misc.GetInsideChrootPath(
+ self.chromeos_root, perf_data_file
+ )
+ perf_path = os.path.join(
+ self.chromeos_root, "chroot", "usr/bin/perf"
+ )
+ perf_file = "/usr/sbin/perf"
+ if os.path.exists(perf_path):
+ perf_file = "/usr/bin/perf"
- def ProcessTopResults(self):
- """Given self.top_log_file process top log data.
+ # For each perf.data, we want to collect sample count for specific DSO.
+ # We specify exact match for known DSO type, and every sample for `all`.
+ exact_match = ""
+ if self.cwp_dso == "all":
+ exact_match = '""'
+ elif self.cwp_dso == "chrome":
+ exact_match = '" chrome "'
+ elif self.cwp_dso == "kallsyms":
+ exact_match = '"[kernel.kallsyms]"'
+ else:
+ # This will need to be updated once there are more DSO types supported,
+ # if user want an exact match for the field they want.
+ exact_match = '"%s"' % self.cwp_dso
- Returns:
- List of dictionaries with the following keyvals:
- 'cmd': command name (string),
- 'cpu_use_avg': average cpu usage (float),
- 'count': number of occurrences (int),
- 'top5_cpu_use': up to 5 highest cpu usages (descending list of floats)
+ command = "%s report -n -s dso -i %s 2> /dev/null | grep %s" % (
+ perf_file,
+ chroot_perf_data_file,
+ exact_match,
+ )
+ _, result, _ = self.ce.ChrootRunCommandWOutput(
+ self.chromeos_root, command
+ )
+ # Accumulate the sample count for all matched fields.
+ # Each line looks like this:
+ # 45.42% 237210 chrome
+ # And we want the second number which is the sample count.
+ samples = 0
+ try:
+ for line in result.split("\n"):
+ attr = line.split()
+ if len(attr) == 3 and "%" in attr[0]:
+ samples += int(attr[1])
+ except:
+ raise RuntimeError("Cannot parse perf dso result")
- Example of the top log:
- PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
- 4102 chronos 12 -8 3454472 238300 118188 R 41.8 6.1 0:08.37 chrome
- 375 root 0 -20 0 0 0 S 5.9 0.0 0:00.17 kworker
- 617 syslog 20 0 25332 8372 7888 S 5.9 0.2 0:00.77 systemd
+ actual_samples += samples
- PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
- 5745 chronos 20 0 5438580 139328 67988 R 122.8 3.6 0:04.26 chrome
- 912 root -51 0 0 0 0 S 2.0 0.0 0:01.04 irq/cro
- 121 root 20 0 0 0 0 S 1.0 0.0 0:00.45 spi5
- """
- all_data = ''
- with open(self.top_log_file) as f:
- all_data = f.read()
+ # Remove idle cycles from the accumulated sample count.
+ perf_report_file = f"{perf_data_file}.report"
+ if not os.path.exists(perf_report_file):
+ raise RuntimeError(
+ f"Missing perf report file: {perf_report_file}"
+ )
- if not all_data:
- self._logger.LogOutput('WARNING: Top log file is empty.')
- return []
+ idle_functions = {
+ "[kernel.kallsyms]": (
+ "intel_idle",
+ "arch_cpu_idle",
+ "intel_idle",
+ "cpu_startup_entry",
+ "default_idle",
+ "cpu_idle_loop",
+ "do_idle",
+ ),
+ }
+ idle_samples = 0
- top_line_regex = re.compile(
- r"""
+ with open(perf_report_file) as f:
+ try:
+ for line in f:
+ line = line.strip()
+ if not line or line[0] == "#":
+ continue
+ # Each line has the following fields,
+ # pylint: disable=line-too-long
+ # Overhead Samples Command Shared Object Symbol
+ # pylint: disable=line-too-long
+ # 1.48% 60 swapper [kernel.kallsyms] [k] intel_idle
+ # pylint: disable=line-too-long
+ # 0.00% 1 shill libshill-net.so [.] std::__1::vector<unsigned char, std::__1::allocator<unsigned char> >::vector<unsigned char const*>
+ _, samples, _, dso, _, function = line.split(None, 5)
+
+ if (
+ dso in idle_functions
+ and function in idle_functions[dso]
+ ):
+ if self.log_level != "verbose":
+ self._logger.LogOutput(
+ "Removing %s samples from %s in %s"
+ % (samples, function, dso)
+ )
+ idle_samples += int(samples)
+ except:
+ raise RuntimeError("Cannot parse perf report")
+ actual_samples -= idle_samples
+ return [actual_samples, "samples"]
+
+ def GetResultsDir(self):
+ if self.suite == "tast":
+ mo = re.search(r"Writing results to (\S+)", self.out)
+ else:
+ mo = re.search(r"Results placed in (\S+)", self.out)
+ if mo:
+ result = mo.group(1)
+ return result
+ raise RuntimeError("Could not find results directory.")
+
+ def FindFilesInResultsDir(self, find_args):
+ if not self.results_dir:
+ return ""
+
+ command = "find %s %s" % (self.results_dir, find_args)
+ ret, out, _ = self.ce.RunCommandWOutput(command, print_to_console=False)
+ if ret:
+ raise RuntimeError("Could not run find command!")
+ return out
+
+ def GetResultsFile(self):
+ if self.suite == "telemetry_Crosperf":
+ return self.FindFilesInResultsDir(
+ "-name histograms.json"
+ ).splitlines()
+ return self.FindFilesInResultsDir(
+ "-name results-chart.json"
+ ).splitlines()
+
+ def GetPerfDataFiles(self):
+ return self.FindFilesInResultsDir("-name perf.data").splitlines()
+
+ def GetPerfReportFiles(self):
+ return self.FindFilesInResultsDir("-name perf.data.report").splitlines()
+
+ def GetDataMeasurementsFiles(self):
+ result = self.FindFilesInResultsDir(
+ "-name perf_measurements"
+ ).splitlines()
+ if not result:
+ if self.suite == "telemetry_Crosperf":
+ result = self.FindFilesInResultsDir(
+ "-name histograms.json"
+ ).splitlines()
+ else:
+ result = self.FindFilesInResultsDir(
+ "-name results-chart.json"
+ ).splitlines()
+ return result
+
+ def GetTurbostatFile(self):
+ """Get turbostat log path string."""
+ return self.FindFilesInResultsDir("-name turbostat.log").split("\n")[0]
+
+ def GetCpustatsFile(self):
+ """Get cpustats log path string."""
+ return self.FindFilesInResultsDir("-name cpustats.log").split("\n")[0]
+
+ def GetCpuinfoFile(self):
+ """Get cpustats log path string."""
+ return self.FindFilesInResultsDir("-name cpuinfo.log").split("\n")[0]
+
+ def GetTopFile(self):
+ """Get cpustats log path string."""
+ return self.FindFilesInResultsDir("-name top.log").split("\n")[0]
+
+ def GetWaitTimeFile(self):
+ """Get wait time log path string."""
+ return self.FindFilesInResultsDir("-name wait_time.log").split("\n")[0]
+
+ def _CheckDebugPath(self, option, path):
+ relative_path = path[1:]
+ out_chroot_path = os.path.join(
+ self.chromeos_root, "chroot", relative_path
+ )
+ if os.path.exists(out_chroot_path):
+ if option == "kallsyms":
+ path = os.path.join(path, "System.map-*")
+ return "--" + option + " " + path
+ else:
+ print(
+ "** WARNING **: --%s option not applied, %s does not exist"
+ % (option, out_chroot_path)
+ )
+ return ""
+
+ def GeneratePerfReportFiles(self):
+ perf_report_files = []
+ for perf_data_file in self.perf_data_files:
+ # Generate a perf.report and store it side-by-side with the perf.data
+ # file.
+ chroot_perf_data_file = misc.GetInsideChrootPath(
+ self.chromeos_root, perf_data_file
+ )
+ perf_report_file = "%s.report" % perf_data_file
+ if os.path.exists(perf_report_file):
+ raise RuntimeError(
+ "Perf report file already exists: %s" % perf_report_file
+ )
+ chroot_perf_report_file = misc.GetInsideChrootPath(
+ self.chromeos_root, perf_report_file
+ )
+ perf_path = os.path.join(
+ self.chromeos_root, "chroot", "usr/bin/perf"
+ )
+
+ perf_file = "/usr/sbin/perf"
+ if os.path.exists(perf_path):
+ perf_file = "/usr/bin/perf"
+
+ debug_path = self.label.debug_path
+
+ if debug_path:
+ symfs = "--symfs " + debug_path
+ vmlinux = "--vmlinux " + os.path.join(
+ debug_path, "usr", "lib", "debug", "boot", "vmlinux"
+ )
+ kallsyms = ""
+ print(
+ "** WARNING **: --kallsyms option not applied, no System.map-* "
+ "for downloaded image."
+ )
+ else:
+ if self.label.image_type != "local":
+ print(
+ "** WARNING **: Using local debug info in /build, this may "
+ "not match the downloaded image."
+ )
+ build_path = os.path.join("/build", self.board)
+ symfs = self._CheckDebugPath("symfs", build_path)
+ vmlinux_path = os.path.join(
+ build_path, "usr/lib/debug/boot/vmlinux"
+ )
+ vmlinux = self._CheckDebugPath("vmlinux", vmlinux_path)
+ kallsyms_path = os.path.join(build_path, "boot")
+ kallsyms = self._CheckDebugPath("kallsyms", kallsyms_path)
+
+ command = "%s report -n %s %s %s -i %s --stdio > %s" % (
+ perf_file,
+ symfs,
+ vmlinux,
+ kallsyms,
+ chroot_perf_data_file,
+ chroot_perf_report_file,
+ )
+ if self.log_level != "verbose":
+ self._logger.LogOutput(
+ "Generating perf report...\nCMD: %s" % command
+ )
+ exit_code = self.ce.ChrootRunCommand(self.chromeos_root, command)
+ if exit_code == 0:
+ if self.log_level != "verbose":
+ self._logger.LogOutput(
+ "Perf report generated successfully."
+ )
+ else:
+ raise RuntimeError(
+ "Perf report not generated correctly. CMD: %s" % command
+ )
+
+ # Add a keyval to the dictionary for the events captured.
+ perf_report_files.append(
+ misc.GetOutsideChrootPath(
+ self.chromeos_root, chroot_perf_report_file
+ )
+ )
+ return perf_report_files
+
+ def GatherPerfResults(self):
+ report_id = 0
+ for perf_report_file in self.perf_report_files:
+ with open(perf_report_file, "r") as f:
+ report_contents = f.read()
+ for group in re.findall(
+ r"Events: (\S+) (\S+)", report_contents
+ ):
+ num_events = group[0]
+ event_name = group[1]
+ key = "perf_%s_%s" % (report_id, event_name)
+ value = str(misc.UnitToNumber(num_events))
+ self.keyvals[key] = value
+
+ def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso):
+ self.board = self.label.board
+ self.out = out
+ self.err = err
+ self.retval = retval
+ self.test_name = test
+ self.suite = suite
+ self.cwp_dso = cwp_dso
+ self.chroot_results_dir = self.GetResultsDir()
+ self.results_dir = misc.GetOutsideChrootPath(
+ self.chromeos_root, self.chroot_results_dir
+ )
+ self.results_file = self.GetResultsFile()
+ self.perf_data_files = self.GetPerfDataFiles()
+ # Include all perf.report data in table.
+ self.perf_report_files = self.GeneratePerfReportFiles()
+ self.turbostat_log_file = self.GetTurbostatFile()
+ self.cpustats_log_file = self.GetCpustatsFile()
+ self.cpuinfo_file = self.GetCpuinfoFile()
+ self.top_log_file = self.GetTopFile()
+ self.wait_time_log_file = self.GetWaitTimeFile()
+ # TODO(asharif): Do something similar with perf stat.
+
+ # Grab keyvals from the directory.
+ self.ProcessResults()
+
+ def ProcessChartResults(self):
+ # Open and parse the json results file generated by telemetry/test_that.
+ if not self.results_file:
+ raise IOError("No results file found.")
+ filename = self.results_file[0]
+ if not filename.endswith(".json"):
+ raise IOError(
+ "Attempt to call json on non-json file: %s" % filename
+ )
+ if not os.path.exists(filename):
+ raise IOError("%s does not exist" % filename)
+
+ keyvals = {}
+ with open(filename, "r") as f:
+ raw_dict = json.load(f)
+ if "charts" in raw_dict:
+ raw_dict = raw_dict["charts"]
+ for k, field_dict in raw_dict.items():
+ for item in field_dict:
+ keyname = k + "__" + item
+ value_dict = field_dict[item]
+ if "value" in value_dict:
+ result = value_dict["value"]
+ elif "values" in value_dict:
+ values = value_dict["values"]
+ if not values:
+ continue
+ if (
+ "type" in value_dict
+ and value_dict["type"] == "list_of_scalar_values"
+ and values != "null"
+ ):
+ result = sum(values) / float(len(values))
+ else:
+ result = values
+ else:
+ continue
+ units = value_dict["units"]
+ new_value = [result, units]
+ keyvals[keyname] = new_value
+ return keyvals
+
+ def ProcessTurbostatResults(self):
+ """Given turbostat_log_file non-null parse cpu stats from file.
+
+ Returns:
+ Dictionary of 'cpufreq', 'cputemp' where each
+ includes dictionary 'all': [list_of_values]
+
+ Example of the output of turbostat_log.
+ ----------------------
+ CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
+ - 329 12.13 2723 2393 10975 77
+ 0 336 12.41 2715 2393 6328 77
+ 2 323 11.86 2731 2393 4647 69
+ CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
+ - 1940 67.46 2884 2393 39920 83
+ 0 1827 63.70 2877 2393 21184 83
+ """
+ cpustats = {}
+ read_data = ""
+ with open(self.turbostat_log_file) as f:
+ read_data = f.readlines()
+
+ if not read_data:
+ self._logger.LogOutput("WARNING: Turbostat output file is empty.")
+ return {}
+
+ # First line always contains the header.
+ stats = read_data[0].split()
+
+ # Mandatory parameters.
+ if "CPU" not in stats:
+ self._logger.LogOutput(
+ "WARNING: Missing data for CPU# in Turbostat output."
+ )
+ return {}
+ if "Bzy_MHz" not in stats:
+ self._logger.LogOutput(
+ "WARNING: Missing data for Bzy_MHz in Turbostat output."
+ )
+ return {}
+ cpu_index = stats.index("CPU")
+ cpufreq_index = stats.index("Bzy_MHz")
+ cpufreq = cpustats.setdefault("cpufreq", {"all": []})
+
+ # Optional parameters.
+ cputemp_index = -1
+ if "CoreTmp" in stats:
+ cputemp_index = stats.index("CoreTmp")
+ cputemp = cpustats.setdefault("cputemp", {"all": []})
+
+ # Parse data starting from the second line ignoring repeating headers.
+ for st in read_data[1:]:
+ # Data represented by int or float separated by spaces.
+ numbers = st.split()
+ if not all(
+ word.replace(".", "", 1).isdigit() for word in numbers[1:]
+ ):
+ # Skip the line if data mismatch.
+ continue
+ if numbers[cpu_index] != "-":
+ # Ignore Core-specific statistics which starts with Core number.
+ # Combined statistics for all core has "-" CPU identifier.
+ continue
+
+ cpufreq["all"].append(int(numbers[cpufreq_index]))
+ if cputemp_index != -1:
+ cputemp["all"].append(int(numbers[cputemp_index]))
+ return cpustats
+
+ def ProcessTopResults(self):
+ """Given self.top_log_file process top log data.
+
+ Returns:
+ List of dictionaries with the following keyvals:
+ 'cmd': command name (string),
+ 'cpu_use_avg': average cpu usage (float),
+ 'count': number of occurrences (int),
+ 'top5_cpu_use': up to 5 highest cpu usages (descending list of floats)
+
+ Example of the top log:
+ PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+ 4102 chronos 12 -8 3454472 238300 118188 R 41.8 6.1 0:08.37 chrome
+ 375 root 0 -20 0 0 0 S 5.9 0.0 0:00.17 kworker
+ 617 syslog 20 0 25332 8372 7888 S 5.9 0.2 0:00.77 systemd
+
+ PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+ 5745 chronos 20 0 5438580 139328 67988 R 122.8 3.6 0:04.26 chrome
+ 912 root -51 0 0 0 0 S 2.0 0.0 0:01.04 irq/cro
+ 121 root 20 0 0 0 0 S 1.0 0.0 0:00.45 spi5
+ """
+ all_data = ""
+ with open(self.top_log_file) as f:
+ all_data = f.read()
+
+ if not all_data:
+ self._logger.LogOutput("WARNING: Top log file is empty.")
+ return []
+
+ top_line_regex = re.compile(
+ r"""
^\s*(?P<pid>\d+)\s+ # Group 1: PID
\S+\s+\S+\s+-?\d+\s+ # Ignore: user, prio, nice
\d+\s+\d+\s+\d+\s+ # Ignore: virt/res/shared mem
@@ -663,814 +748,922 @@
(?P<cpu_use>\d+\.\d+)\s+ # Group 2: CPU usage
\d+\.\d+\s+\d+:\d+\.\d+\s+ # Ignore: mem usage, time
(?P<cmd>\S+)$ # Group 3: command
- """, re.VERBOSE)
- # Page represents top log data per one measurement within time interval
- # 'top_interval'.
- # Pages separated by empty line.
- pages = all_data.split('\n\n')
- # Snapshots are structured representation of the pages.
- snapshots = []
- for page in pages:
- if not page:
- continue
+ """,
+ re.VERBOSE,
+ )
+ # Page represents top log data per one measurement within time interval
+ # 'top_interval'.
+ # Pages separated by empty line.
+ pages = all_data.split("\n\n")
+ # Snapshots are structured representation of the pages.
+ snapshots = []
+ for page in pages:
+ if not page:
+ continue
- # Snapshot list will contain all processes (command duplicates are
- # allowed).
- snapshot = []
- for line in page.splitlines():
- match = top_line_regex.match(line)
- if match:
- # Top line is valid, collect data.
- process = {
- # NOTE: One command may be represented by multiple processes.
- 'cmd': match.group('cmd'),
- 'pid': match.group('pid'),
- 'cpu_use': float(match.group('cpu_use')),
- }
+ # Snapshot list will contain all processes (command duplicates are
+ # allowed).
+ snapshot = []
+ for line in page.splitlines():
+ match = top_line_regex.match(line)
+ if match:
+ # Top line is valid, collect data.
+ process = {
+ # NOTE: One command may be represented by multiple processes.
+ "cmd": match.group("cmd"),
+ "pid": match.group("pid"),
+ "cpu_use": float(match.group("cpu_use")),
+ }
- # Filter out processes with 0 CPU usage and top command.
- if process['cpu_use'] > 0 and process['cmd'] != 'top':
- snapshot.append(process)
+ # Filter out processes with 0 CPU usage and top command.
+ if process["cpu_use"] > 0 and process["cmd"] != "top":
+ snapshot.append(process)
- # If page contained meaningful data add snapshot to the list.
- if snapshot:
- snapshots.append(snapshot)
+ # If page contained meaningful data add snapshot to the list.
+ if snapshot:
+ snapshots.append(snapshot)
- # Define threshold of CPU usage when Chrome is busy, i.e. benchmark is
- # running.
- # Ideally it should be 100% but it will be hardly reachable with 1 core.
- # Statistics on DUT with 2-6 cores shows that chrome load of 100%, 95% and
- # 90% equally occurs in 72-74% of all top log snapshots.
- # Further decreasing of load threshold leads to a shifting percent of
- # "high load" snapshots which might include snapshots when benchmark is
- # not running.
- # On 1-core DUT 90% chrome cpu load occurs in 55%, 95% in 33% and 100% in 2%
- # of snapshots accordingly.
- # Threshold of "high load" is reduced to 70% (from 90) when we switched to
- # topstats per process. From experiment data the rest 20% are distributed
- # among other chrome processes.
- CHROME_HIGH_CPU_LOAD = 70
- # Number of snapshots where chrome is heavily used.
- high_load_snapshots = 0
- # Total CPU use per process in ALL active snapshots.
- cmd_total_cpu_use = collections.defaultdict(float)
- # Top CPU usages per command.
- cmd_top5_cpu_use = collections.defaultdict(list)
- # List of Top Commands to be returned.
- topcmds = []
+ # Define threshold of CPU usage when Chrome is busy, i.e. benchmark is
+ # running.
+ # Ideally it should be 100% but it will be hardly reachable with 1 core.
+ # Statistics on DUT with 2-6 cores shows that chrome load of 100%, 95% and
+ # 90% equally occurs in 72-74% of all top log snapshots.
+ # Further decreasing of load threshold leads to a shifting percent of
+ # "high load" snapshots which might include snapshots when benchmark is
+ # not running.
+ # On 1-core DUT 90% chrome cpu load occurs in 55%, 95% in 33% and 100% in 2%
+ # of snapshots accordingly.
+ # Threshold of "high load" is reduced to 70% (from 90) when we switched to
+ # topstats per process. From experiment data the rest 20% are distributed
+ # among other chrome processes.
+ CHROME_HIGH_CPU_LOAD = 70
+ # Number of snapshots where chrome is heavily used.
+ high_load_snapshots = 0
+ # Total CPU use per process in ALL active snapshots.
+ cmd_total_cpu_use = collections.defaultdict(float)
+ # Top CPU usages per command.
+ cmd_top5_cpu_use = collections.defaultdict(list)
+ # List of Top Commands to be returned.
+ topcmds = []
- for snapshot_processes in snapshots:
- # CPU usage per command, per PID in one snapshot.
- cmd_cpu_use_per_snapshot = collections.defaultdict(dict)
- for process in snapshot_processes:
- cmd = process['cmd']
- cpu_use = process['cpu_use']
- pid = process['pid']
- cmd_cpu_use_per_snapshot[cmd][pid] = cpu_use
+ for snapshot_processes in snapshots:
+ # CPU usage per command, per PID in one snapshot.
+ cmd_cpu_use_per_snapshot = collections.defaultdict(dict)
+ for process in snapshot_processes:
+ cmd = process["cmd"]
+ cpu_use = process["cpu_use"]
+ pid = process["pid"]
+ cmd_cpu_use_per_snapshot[cmd][pid] = cpu_use
- # Chrome processes, pid: cpu_usage.
- chrome_processes = cmd_cpu_use_per_snapshot.get('chrome', {})
- chrome_cpu_use_list = chrome_processes.values()
+ # Chrome processes, pid: cpu_usage.
+ chrome_processes = cmd_cpu_use_per_snapshot.get("chrome", {})
+ chrome_cpu_use_list = chrome_processes.values()
- if chrome_cpu_use_list and max(
- chrome_cpu_use_list) > CHROME_HIGH_CPU_LOAD:
- # CPU usage of any of the "chrome" processes exceeds "High load"
- # threshold which means DUT is busy running a benchmark.
- high_load_snapshots += 1
- for cmd, cpu_use_per_pid in cmd_cpu_use_per_snapshot.items():
- for pid, cpu_use in cpu_use_per_pid.items():
- # Append PID to the name of the command.
- cmd_with_pid = cmd + '-' + pid
- cmd_total_cpu_use[cmd_with_pid] += cpu_use
+ if (
+ chrome_cpu_use_list
+ and max(chrome_cpu_use_list) > CHROME_HIGH_CPU_LOAD
+ ):
+ # CPU usage of any of the "chrome" processes exceeds "High load"
+ # threshold which means DUT is busy running a benchmark.
+ high_load_snapshots += 1
+ for cmd, cpu_use_per_pid in cmd_cpu_use_per_snapshot.items():
+ for pid, cpu_use in cpu_use_per_pid.items():
+ # Append PID to the name of the command.
+ cmd_with_pid = cmd + "-" + pid
+ cmd_total_cpu_use[cmd_with_pid] += cpu_use
- # Add cpu_use into command top cpu usages, sorted in descending
- # order.
- heapq.heappush(cmd_top5_cpu_use[cmd_with_pid], round(cpu_use, 1))
+ # Add cpu_use into command top cpu usages, sorted in descending
+ # order.
+ heapq.heappush(
+ cmd_top5_cpu_use[cmd_with_pid], round(cpu_use, 1)
+ )
- for consumer, usage in sorted(cmd_total_cpu_use.items(),
- key=lambda x: x[1],
- reverse=True):
- # Iterate through commands by descending order of total CPU usage.
- topcmd = {
- 'cmd': consumer,
- 'cpu_use_avg': usage / high_load_snapshots,
- 'count': len(cmd_top5_cpu_use[consumer]),
- 'top5_cpu_use': heapq.nlargest(5, cmd_top5_cpu_use[consumer]),
- }
- topcmds.append(topcmd)
+ for consumer, usage in sorted(
+ cmd_total_cpu_use.items(), key=lambda x: x[1], reverse=True
+ ):
+ # Iterate through commands by descending order of total CPU usage.
+ topcmd = {
+ "cmd": consumer,
+ "cpu_use_avg": usage / high_load_snapshots,
+ "count": len(cmd_top5_cpu_use[consumer]),
+ "top5_cpu_use": heapq.nlargest(5, cmd_top5_cpu_use[consumer]),
+ }
+ topcmds.append(topcmd)
- return topcmds
+ return topcmds
- def ProcessCpustatsResults(self):
- """Given cpustats_log_file non-null parse cpu data from file.
+ def ProcessCpustatsResults(self):
+ """Given cpustats_log_file non-null parse cpu data from file.
- Returns:
- Dictionary of 'cpufreq', 'cputemp' where each
- includes dictionary of parameter: [list_of_values]
+ Returns:
+ Dictionary of 'cpufreq', 'cputemp' where each
+ includes dictionary of parameter: [list_of_values]
- Example of cpustats.log output.
- ----------------------
- /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq 1512000
- /sys/devices/system/cpu/cpu2/cpufreq/cpuinfo_cur_freq 2016000
- little-cpu 41234
- big-cpu 51234
+ Example of cpustats.log output.
+ ----------------------
+ /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq 1512000
+ /sys/devices/system/cpu/cpu2/cpufreq/cpuinfo_cur_freq 2016000
+ little-cpu 41234
+ big-cpu 51234
- If cores share the same policy their frequencies may always match
- on some devices.
- To make report concise we should eliminate redundancy in the output.
- Function removes cpuN data if it duplicates data from other cores.
- """
+ If cores share the same policy their frequencies may always match
+ on some devices.
+ To make report concise we should eliminate redundancy in the output.
+ Function removes cpuN data if it duplicates data from other cores.
+ """
- cpustats = {}
- read_data = ''
- with open(self.cpustats_log_file) as f:
- read_data = f.readlines()
+ cpustats = {}
+ read_data = ""
+ with open(self.cpustats_log_file) as f:
+ read_data = f.readlines()
- if not read_data:
- self._logger.LogOutput('WARNING: Cpustats output file is empty.')
- return {}
+ if not read_data:
+ self._logger.LogOutput("WARNING: Cpustats output file is empty.")
+ return {}
- cpufreq_regex = re.compile(r'^[/\S]+/(cpu\d+)/[/\S]+\s+(\d+)$')
- cputemp_regex = re.compile(r'^([^/\s]+)\s+(\d+)$')
+ cpufreq_regex = re.compile(r"^[/\S]+/(cpu\d+)/[/\S]+\s+(\d+)$")
+ cputemp_regex = re.compile(r"^([^/\s]+)\s+(\d+)$")
- for st in read_data:
- match = cpufreq_regex.match(st)
- if match:
- cpu = match.group(1)
- # CPU frequency comes in kHz.
- freq_khz = int(match.group(2))
- freq_mhz = freq_khz / 1000
- # cpufreq represents a dictionary with CPU frequency-related
- # data from cpustats.log.
- cpufreq = cpustats.setdefault('cpufreq', {})
- cpu_n_freq = cpufreq.setdefault(cpu, [])
- cpu_n_freq.append(freq_mhz)
- else:
- match = cputemp_regex.match(st)
- if match:
- therm_type = match.group(1)
- # The value is int, uCelsius unit.
- temp_uc = float(match.group(2))
- # Round to XX.X float.
- temp_c = round(temp_uc / 1000, 1)
- # cputemp represents a dictionary with temperature measurements
- # from cpustats.log.
- cputemp = cpustats.setdefault('cputemp', {})
- therm_type = cputemp.setdefault(therm_type, [])
- therm_type.append(temp_c)
-
- # Remove duplicate statistics from cpustats.
- pruned_stats = {}
- for cpukey, cpuparam in cpustats.items():
- # Copy 'cpufreq' and 'cputemp'.
- pruned_params = pruned_stats.setdefault(cpukey, {})
- for paramkey, paramvalue in sorted(cpuparam.items()):
- # paramvalue is list of all measured data.
- if paramvalue not in pruned_params.values():
- pruned_params[paramkey] = paramvalue
-
- return pruned_stats
-
- def ProcessHistogramsResults(self):
- # Open and parse the json results file generated by telemetry/test_that.
- if not self.results_file:
- raise IOError('No results file found.')
- filename = self.results_file[0]
- if not filename.endswith('.json'):
- raise IOError('Attempt to call json on non-json file: %s' % filename)
- if not os.path.exists(filename):
- raise IOError('%s does not exist' % filename)
-
- keyvals = {}
- with open(filename) as f:
- histograms = json.load(f)
- value_map = {}
- # Gets generic set values.
- for obj in histograms:
- if 'type' in obj and obj['type'] == 'GenericSet':
- value_map[obj['guid']] = obj['values']
-
- for obj in histograms:
- if 'name' not in obj or 'sampleValues' not in obj:
- continue
- metric_name = obj['name']
- vals = obj['sampleValues']
- if isinstance(vals, list):
- # Remove None elements from the list
- vals = [val for val in vals if val is not None]
- if vals:
- result = float(sum(vals)) / len(vals)
- else:
- result = 0
- else:
- result = vals
- unit = obj['unit']
- diagnostics = obj['diagnostics']
- # for summaries of benchmarks
- key = metric_name
- if key not in keyvals:
- keyvals[key] = [[result], unit]
- else:
- keyvals[key][0].append(result)
- # TODO: do we need summaries of stories?
- # for summaries of story tags
- if 'storyTags' in diagnostics:
- guid = diagnostics['storyTags']
- if guid not in value_map:
- raise RuntimeError('Unrecognized storyTags in %s ' % (obj))
- for story_tag in value_map[guid]:
- key = metric_name + '__' + story_tag
- if key not in keyvals:
- keyvals[key] = [[result], unit]
+ for st in read_data:
+ match = cpufreq_regex.match(st)
+ if match:
+ cpu = match.group(1)
+ # CPU frequency comes in kHz.
+ freq_khz = int(match.group(2))
+ freq_mhz = freq_khz / 1000
+ # cpufreq represents a dictionary with CPU frequency-related
+ # data from cpustats.log.
+ cpufreq = cpustats.setdefault("cpufreq", {})
+ cpu_n_freq = cpufreq.setdefault(cpu, [])
+ cpu_n_freq.append(freq_mhz)
else:
- keyvals[key][0].append(result)
- # calculate summary
- for key in keyvals:
- vals = keyvals[key][0]
- unit = keyvals[key][1]
- result = float(sum(vals)) / len(vals)
- keyvals[key] = [result, unit]
- return keyvals
+ match = cputemp_regex.match(st)
+ if match:
+ therm_type = match.group(1)
+ # The value is int, uCelsius unit.
+ temp_uc = float(match.group(2))
+ # Round to XX.X float.
+ temp_c = round(temp_uc / 1000, 1)
+ # cputemp represents a dictionary with temperature measurements
+ # from cpustats.log.
+ cputemp = cpustats.setdefault("cputemp", {})
+ therm_type = cputemp.setdefault(therm_type, [])
+ therm_type.append(temp_c)
- def ReadPidFromPerfData(self):
- """Read PIDs from perf.data files.
+ # Remove duplicate statistics from cpustats.
+ pruned_stats = {}
+ for cpukey, cpuparam in cpustats.items():
+ # Copy 'cpufreq' and 'cputemp'.
+ pruned_params = pruned_stats.setdefault(cpukey, {})
+ for paramkey, paramvalue in sorted(cpuparam.items()):
+ # paramvalue is list of all measured data.
+ if paramvalue not in pruned_params.values():
+ pruned_params[paramkey] = paramvalue
- Extract PID from perf.data if "perf record" was running per process,
- i.e. with "-p <PID>" and no "-a".
+ return pruned_stats
- Returns:
- pids: list of PIDs.
+ def ProcessHistogramsResults(self):
+ # Open and parse the json results file generated by telemetry/test_that.
+ if not self.results_file:
+ raise IOError("No results file found.")
+ filename = self.results_file[0]
+ if not filename.endswith(".json"):
+ raise IOError(
+ "Attempt to call json on non-json file: %s" % filename
+ )
+ if not os.path.exists(filename):
+ raise IOError("%s does not exist" % filename)
- Raises:
- PerfDataReadError when perf.data header reading fails.
- """
- cmd = ['/usr/bin/perf', 'report', '--header-only', '-i']
- pids = []
+ keyvals = {}
+ with open(filename) as f:
+ histograms = json.load(f)
+ value_map = {}
+ # Gets generic set values.
+ for obj in histograms:
+ if "type" in obj and obj["type"] == "GenericSet":
+ value_map[obj["guid"]] = obj["values"]
- for perf_data_path in self.perf_data_files:
- perf_data_path_in_chroot = misc.GetInsideChrootPath(
- self.chromeos_root, perf_data_path)
- path_str = ' '.join(cmd + [perf_data_path_in_chroot])
- status, output, _ = self.ce.ChrootRunCommandWOutput(
- self.chromeos_root, path_str)
- if status:
- # Error of reading a perf.data profile is fatal.
- raise PerfDataReadError(
- f'Failed to read perf.data profile: {path_str}')
+ for obj in histograms:
+ if "name" not in obj or "sampleValues" not in obj:
+ continue
+ metric_name = obj["name"]
+ vals = obj["sampleValues"]
+ if isinstance(vals, list):
+ # Remove None elements from the list
+ vals = [val for val in vals if val is not None]
+ if vals:
+ result = float(sum(vals)) / len(vals)
+ else:
+ result = 0
+ else:
+ result = vals
+ unit = obj["unit"]
+ diagnostics = obj["diagnostics"]
+ # for summaries of benchmarks
+ key = metric_name
+ if key not in keyvals:
+ keyvals[key] = [[result], unit]
+ else:
+ keyvals[key][0].append(result)
+ # TODO: do we need summaries of stories?
+ # for summaries of story tags
+ if "storyTags" in diagnostics:
+ guid = diagnostics["storyTags"]
+ if guid not in value_map:
+ raise RuntimeError(
+ "Unrecognized storyTags in %s " % (obj)
+ )
+ for story_tag in value_map[guid]:
+ key = metric_name + "__" + story_tag
+ if key not in keyvals:
+ keyvals[key] = [[result], unit]
+ else:
+ keyvals[key][0].append(result)
+ # calculate summary
+ for key in keyvals:
+ vals = keyvals[key][0]
+ unit = keyvals[key][1]
+ result = float(sum(vals)) / len(vals)
+ keyvals[key] = [result, unit]
+ return keyvals
- # Pattern to search a line with "perf record" command line:
- # # cmdline : /usr/bin/perf record -e instructions -p 123"
- cmdline_regex = re.compile(
- r'^\#\scmdline\s:\s+(?P<cmd>.*perf\s+record\s+.*)$')
- # Pattern to search PID in a command line.
- pid_regex = re.compile(r'^.*\s-p\s(?P<pid>\d+)\s*.*$')
- for line in output.splitlines():
- cmd_match = cmdline_regex.match(line)
- if cmd_match:
- # Found a perf command line.
- cmdline = cmd_match.group('cmd')
- # '-a' is a system-wide mode argument.
- if '-a' not in cmdline.split():
- # It can be that perf was attached to PID and was still running in
- # system-wide mode.
- # We filter out this case here since it's not per-process.
- pid_match = pid_regex.match(cmdline)
- if pid_match:
- pids.append(pid_match.group('pid'))
- # Stop the search and move to the next perf.data file.
- break
- else:
- # cmdline wasn't found in the header. It's a fatal error.
- raise PerfDataReadError(
- f'Perf command line is not found in {path_str}')
- return pids
+ def ReadPidFromPerfData(self):
+ """Read PIDs from perf.data files.
- def VerifyPerfDataPID(self):
- """Verify PIDs in per-process perf.data profiles.
+ Extract PID from perf.data if "perf record" was running per process,
+ i.e. with "-p <PID>" and no "-a".
- Check that at list one top process is profiled if perf was running in
- per-process mode.
+ Returns:
+ pids: list of PIDs.
- Raises:
- PidVerificationError if PID verification of per-process perf.data profiles
- fail.
- """
- perf_data_pids = self.ReadPidFromPerfData()
- if not perf_data_pids:
- # In system-wide mode there are no PIDs.
- self._logger.LogOutput('System-wide perf mode. Skip verification.')
- return
+ Raises:
+ PerfDataReadError when perf.data header reading fails.
+ """
+ cmd = ["/usr/bin/perf", "report", "--header-only", "-i"]
+ pids = []
- # PIDs will be present only in per-process profiles.
- # In this case we need to verify that profiles are collected on the
- # hottest processes.
- top_processes = [top_cmd['cmd'] for top_cmd in self.top_cmds]
- # top_process structure: <cmd>-<pid>
- top_pids = [top_process.split('-')[-1] for top_process in top_processes]
- for top_pid in top_pids:
- if top_pid in perf_data_pids:
- self._logger.LogOutput('PID verification passed! '
- f'Top process {top_pid} is profiled.')
- return
- raise PidVerificationError(
- f'top processes {top_processes} are missing in perf.data traces with'
- f' PID: {perf_data_pids}.')
+ for perf_data_path in self.perf_data_files:
+ perf_data_path_in_chroot = misc.GetInsideChrootPath(
+ self.chromeos_root, perf_data_path
+ )
+ path_str = " ".join(cmd + [perf_data_path_in_chroot])
+ status, output, _ = self.ce.ChrootRunCommandWOutput(
+ self.chromeos_root, path_str
+ )
+ if status:
+ # Error of reading a perf.data profile is fatal.
+ raise PerfDataReadError(
+ f"Failed to read perf.data profile: {path_str}"
+ )
- def ProcessResults(self, use_cache=False):
- # Note that this function doesn't know anything about whether there is a
- # cache hit or miss. It should process results agnostic of the cache hit
- # state.
- if (self.results_file and self.suite == 'telemetry_Crosperf'
- and 'histograms.json' in self.results_file[0]):
- self.keyvals = self.ProcessHistogramsResults()
- elif (self.results_file and self.suite != 'telemetry_Crosperf'
- and 'results-chart.json' in self.results_file[0]):
- self.keyvals = self.ProcessChartResults()
- else:
- if not use_cache:
- print('\n ** WARNING **: Had to use deprecated output-method to '
- 'collect results.\n')
- self.keyvals = self.GetKeyvals()
- self.keyvals['retval'] = self.retval
- # If we are in CWP approximation mode, we want to collect DSO samples
- # for each perf.data file
- if self.cwp_dso and self.retval == 0:
- self.keyvals['samples'] = self.GetSamples()
- # If the samples count collected from perf file is 0, we will treat
- # it as a failed run.
- if self.keyvals['samples'][0] == 0:
- del self.keyvals['samples']
- self.keyvals['retval'] = 1
- # Generate report from all perf.data files.
- # Now parse all perf report files and include them in keyvals.
- self.GatherPerfResults()
+ # Pattern to search a line with "perf record" command line:
+ # # cmdline : /usr/bin/perf record -e instructions -p 123"
+ cmdline_regex = re.compile(
+ r"^\#\scmdline\s:\s+(?P<cmd>.*perf\s+record\s+.*)$"
+ )
+ # Pattern to search PID in a command line.
+ pid_regex = re.compile(r"^.*\s-p\s(?P<pid>\d+)\s*.*$")
+ for line in output.splitlines():
+ cmd_match = cmdline_regex.match(line)
+ if cmd_match:
+ # Found a perf command line.
+ cmdline = cmd_match.group("cmd")
+ # '-a' is a system-wide mode argument.
+ if "-a" not in cmdline.split():
+ # It can be that perf was attached to PID and was still running in
+ # system-wide mode.
+ # We filter out this case here since it's not per-process.
+ pid_match = pid_regex.match(cmdline)
+ if pid_match:
+ pids.append(pid_match.group("pid"))
+ # Stop the search and move to the next perf.data file.
+ break
+ else:
+ # cmdline wasn't found in the header. It's a fatal error.
+ raise PerfDataReadError(
+ f"Perf command line is not found in {path_str}"
+ )
+ return pids
- cpustats = {}
- # Turbostat output has higher priority of processing.
- if self.turbostat_log_file:
- cpustats = self.ProcessTurbostatResults()
- # Process cpustats output only if turbostat has no data.
- if not cpustats and self.cpustats_log_file:
- cpustats = self.ProcessCpustatsResults()
- if self.top_log_file:
- self.top_cmds = self.ProcessTopResults()
- # Verify that PID in non system-wide perf.data and top_cmds are matching.
- if self.perf_data_files and self.top_cmds:
- self.VerifyPerfDataPID()
- if self.wait_time_log_file:
- with open(self.wait_time_log_file) as f:
- wait_time = f.readline().strip()
+ def VerifyPerfDataPID(self):
+ """Verify PIDs in per-process perf.data profiles.
+
+ Check that at list one top process is profiled if perf was running in
+ per-process mode.
+
+ Raises:
+ PidVerificationError if PID verification of per-process perf.data profiles
+ fail.
+ """
+ perf_data_pids = self.ReadPidFromPerfData()
+ if not perf_data_pids:
+ # In system-wide mode there are no PIDs.
+ self._logger.LogOutput("System-wide perf mode. Skip verification.")
+ return
+
+ # PIDs will be present only in per-process profiles.
+ # In this case we need to verify that profiles are collected on the
+ # hottest processes.
+ top_processes = [top_cmd["cmd"] for top_cmd in self.top_cmds]
+ # top_process structure: <cmd>-<pid>
+ top_pids = [top_process.split("-")[-1] for top_process in top_processes]
+ for top_pid in top_pids:
+ if top_pid in perf_data_pids:
+ self._logger.LogOutput(
+ "PID verification passed! "
+ f"Top process {top_pid} is profiled."
+ )
+ return
+ raise PidVerificationError(
+ f"top processes {top_processes} are missing in perf.data traces with"
+ f" PID: {perf_data_pids}."
+ )
+
+ def ProcessResults(self, use_cache=False):
+ # Note that this function doesn't know anything about whether there is a
+ # cache hit or miss. It should process results agnostic of the cache hit
+ # state.
+ if (
+ self.results_file
+ and self.suite == "telemetry_Crosperf"
+ and "histograms.json" in self.results_file[0]
+ ):
+ self.keyvals = self.ProcessHistogramsResults()
+ elif (
+ self.results_file
+ and self.suite != "telemetry_Crosperf"
+ and "results-chart.json" in self.results_file[0]
+ ):
+ self.keyvals = self.ProcessChartResults()
+ else:
+ if not use_cache:
+ print(
+ "\n ** WARNING **: Had to use deprecated output-method to "
+ "collect results.\n"
+ )
+ self.keyvals = self.GetKeyvals()
+ self.keyvals["retval"] = self.retval
+ # If we are in CWP approximation mode, we want to collect DSO samples
+ # for each perf.data file
+ if self.cwp_dso and self.retval == 0:
+ self.keyvals["samples"] = self.GetSamples()
+ # If the samples count collected from perf file is 0, we will treat
+ # it as a failed run.
+ if self.keyvals["samples"][0] == 0:
+ del self.keyvals["samples"]
+ self.keyvals["retval"] = 1
+ # Generate report from all perf.data files.
+ # Now parse all perf report files and include them in keyvals.
+ self.GatherPerfResults()
+
+ cpustats = {}
+ # Turbostat output has higher priority of processing.
+ if self.turbostat_log_file:
+ cpustats = self.ProcessTurbostatResults()
+ # Process cpustats output only if turbostat has no data.
+ if not cpustats and self.cpustats_log_file:
+ cpustats = self.ProcessCpustatsResults()
+ if self.top_log_file:
+ self.top_cmds = self.ProcessTopResults()
+ # Verify that PID in non system-wide perf.data and top_cmds are matching.
+ if self.perf_data_files and self.top_cmds:
+ self.VerifyPerfDataPID()
+ if self.wait_time_log_file:
+ with open(self.wait_time_log_file) as f:
+ wait_time = f.readline().strip()
+ try:
+ wait_time = float(wait_time)
+ except ValueError:
+ raise ValueError("Wait time in log file is not a number.")
+ # This is for accumulating wait time for telemtry_Crosperf runs only,
+ # for test_that runs, please refer to suite_runner.
+ self.machine.AddCooldownWaitTime(wait_time)
+
+ for param_key, param in cpustats.items():
+ for param_type, param_values in param.items():
+ val_avg = sum(param_values) / len(param_values)
+ val_min = min(param_values)
+ val_max = max(param_values)
+ # Average data is always included.
+ self.keyvals["_".join([param_key, param_type, "avg"])] = val_avg
+ # Insert min/max results only if they deviate
+ # from average.
+ if val_min != val_avg:
+ self.keyvals[
+ "_".join([param_key, param_type, "min"])
+ ] = val_min
+ if val_max != val_avg:
+ self.keyvals[
+ "_".join([param_key, param_type, "max"])
+ ] = val_max
+
+ def GetChromeVersionFromCache(self, cache_dir):
+ # Read chrome_version from keys file, if present.
+ chrome_version = ""
+ keys_file = os.path.join(cache_dir, CACHE_KEYS_FILE)
+ if os.path.exists(keys_file):
+ with open(keys_file, "r") as f:
+ lines = f.readlines()
+ for l in lines:
+ if l.startswith("Google Chrome "):
+ chrome_version = l
+ if chrome_version.endswith("\n"):
+ chrome_version = chrome_version[:-1]
+ break
+ return chrome_version
+
+ def PopulateFromCacheDir(self, cache_dir, test, suite, cwp_dso):
+ self.test_name = test
+ self.suite = suite
+ self.cwp_dso = cwp_dso
+ # Read in everything from the cache directory.
+ with open(os.path.join(cache_dir, RESULTS_FILE), "rb") as f:
+ self.out = pickle.load(f)
+ self.err = pickle.load(f)
+ self.retval = pickle.load(f)
+
+ # Untar the tarball to a temporary directory
+ self.temp_dir = tempfile.mkdtemp(
+ dir=os.path.join(self.chromeos_root, "chroot", "tmp")
+ )
+
+ command = "cd %s && tar xf %s" % (
+ self.temp_dir,
+ os.path.join(cache_dir, AUTOTEST_TARBALL),
+ )
+ ret = self.ce.RunCommand(command, print_to_console=False)
+ if ret:
+ raise RuntimeError("Could not untar cached tarball")
+ self.results_dir = self.temp_dir
+ self.results_file = self.GetDataMeasurementsFiles()
+ self.perf_data_files = self.GetPerfDataFiles()
+ self.perf_report_files = self.GetPerfReportFiles()
+ self.chrome_version = self.GetChromeVersionFromCache(cache_dir)
+ self.ProcessResults(use_cache=True)
+
+ def CleanUp(self, rm_chroot_tmp):
+ if rm_chroot_tmp and self.results_dir:
+ dirname, basename = misc.GetRoot(self.results_dir)
+ if basename.find("test_that_results_") != -1:
+ command = "rm -rf %s" % self.results_dir
+ else:
+ command = "rm -rf %s" % dirname
+ self.ce.RunCommand(command)
+ if self.temp_dir:
+ command = "rm -rf %s" % self.temp_dir
+ self.ce.RunCommand(command)
+
+ def CreateTarball(self, results_dir, tarball):
+ if not results_dir.strip():
+ raise ValueError(
+ "Refusing to `tar` an empty results_dir: %r" % results_dir
+ )
+
+ ret = self.ce.RunCommand(
+ "cd %s && "
+ "tar "
+ "--exclude=var/spool "
+ "--exclude=var/log "
+ "-cjf %s ." % (results_dir, tarball)
+ )
+ if ret:
+ raise RuntimeError("Couldn't compress test output directory.")
+
+ def StoreToCacheDir(self, cache_dir, machine_manager, key_list):
+ # Create the dir if it doesn't exist.
+ temp_dir = tempfile.mkdtemp()
+
+ # Store to the temp directory.
+ with open(os.path.join(temp_dir, RESULTS_FILE), "wb") as f:
+ pickle.dump(self.out, f)
+ pickle.dump(self.err, f)
+ pickle.dump(self.retval, f)
+
+ if not test_flag.GetTestMode():
+ with open(os.path.join(temp_dir, CACHE_KEYS_FILE), "w") as f:
+ f.write("%s\n" % self.label.name)
+ f.write("%s\n" % self.label.chrome_version)
+ f.write("%s\n" % self.machine.checksum_string)
+ for k in key_list:
+ f.write(k)
+ f.write("\n")
+
+ if self.results_dir:
+ tarball = os.path.join(temp_dir, AUTOTEST_TARBALL)
+ self.CreateTarball(self.results_dir, tarball)
+
+ # Store machine info.
+ # TODO(asharif): Make machine_manager a singleton, and don't pass it into
+ # this function.
+ with open(os.path.join(temp_dir, MACHINE_FILE), "w") as f:
+ f.write(machine_manager.machine_checksum_string[self.label.name])
+
+ if os.path.exists(cache_dir):
+ command = f"rm -rf {cache_dir}"
+ self.ce.RunCommand(command)
+
+ parent_dir = os.path.dirname(cache_dir)
+ command = f"mkdir -p {parent_dir} && "
+ command += f"chmod g+x {temp_dir} && "
+ command += f"mv {temp_dir} {cache_dir}"
+ ret = self.ce.RunCommand(command)
+ if ret:
+ command = f"rm -rf {temp_dir}"
+ self.ce.RunCommand(command)
+ raise RuntimeError(
+ "Could not move dir %s to dir %s" % (temp_dir, cache_dir)
+ )
+
+ @classmethod
+ def CreateFromRun(
+ cls,
+ logger,
+ log_level,
+ label,
+ machine,
+ out,
+ err,
+ retval,
+ test,
+ suite="telemetry_Crosperf",
+ cwp_dso="",
+ ):
+ if suite == "telemetry":
+ result = TelemetryResult(logger, label, log_level, machine)
+ else:
+ result = cls(logger, label, log_level, machine)
+ result.PopulateFromRun(out, err, retval, test, suite, cwp_dso)
+ return result
+
+ @classmethod
+ def CreateFromCacheHit(
+ cls,
+ logger,
+ log_level,
+ label,
+ machine,
+ cache_dir,
+ test,
+ suite="telemetry_Crosperf",
+ cwp_dso="",
+ ):
+ if suite == "telemetry":
+ result = TelemetryResult(logger, label, log_level, machine)
+ else:
+ result = cls(logger, label, log_level, machine)
try:
- wait_time = float(wait_time)
- except ValueError:
- raise ValueError('Wait time in log file is not a number.')
- # This is for accumulating wait time for telemtry_Crosperf runs only,
- # for test_that runs, please refer to suite_runner.
- self.machine.AddCooldownWaitTime(wait_time)
+ result.PopulateFromCacheDir(cache_dir, test, suite, cwp_dso)
- for param_key, param in cpustats.items():
- for param_type, param_values in param.items():
- val_avg = sum(param_values) / len(param_values)
- val_min = min(param_values)
- val_max = max(param_values)
- # Average data is always included.
- self.keyvals['_'.join([param_key, param_type, 'avg'])] = val_avg
- # Insert min/max results only if they deviate
- # from average.
- if val_min != val_avg:
- self.keyvals['_'.join([param_key, param_type, 'min'])] = val_min
- if val_max != val_avg:
- self.keyvals['_'.join([param_key, param_type, 'max'])] = val_max
-
- def GetChromeVersionFromCache(self, cache_dir):
- # Read chrome_version from keys file, if present.
- chrome_version = ''
- keys_file = os.path.join(cache_dir, CACHE_KEYS_FILE)
- if os.path.exists(keys_file):
- with open(keys_file, 'r') as f:
- lines = f.readlines()
- for l in lines:
- if l.startswith('Google Chrome '):
- chrome_version = l
- if chrome_version.endswith('\n'):
- chrome_version = chrome_version[:-1]
- break
- return chrome_version
-
- def PopulateFromCacheDir(self, cache_dir, test, suite, cwp_dso):
- self.test_name = test
- self.suite = suite
- self.cwp_dso = cwp_dso
- # Read in everything from the cache directory.
- with open(os.path.join(cache_dir, RESULTS_FILE), 'rb') as f:
- self.out = pickle.load(f)
- self.err = pickle.load(f)
- self.retval = pickle.load(f)
-
- # Untar the tarball to a temporary directory
- self.temp_dir = tempfile.mkdtemp(
- dir=os.path.join(self.chromeos_root, 'chroot', 'tmp'))
-
- command = ('cd %s && tar xf %s' %
- (self.temp_dir, os.path.join(cache_dir, AUTOTEST_TARBALL)))
- ret = self.ce.RunCommand(command, print_to_console=False)
- if ret:
- raise RuntimeError('Could not untar cached tarball')
- self.results_dir = self.temp_dir
- self.results_file = self.GetDataMeasurementsFiles()
- self.perf_data_files = self.GetPerfDataFiles()
- self.perf_report_files = self.GetPerfReportFiles()
- self.chrome_version = self.GetChromeVersionFromCache(cache_dir)
- self.ProcessResults(use_cache=True)
-
- def CleanUp(self, rm_chroot_tmp):
- if rm_chroot_tmp and self.results_dir:
- dirname, basename = misc.GetRoot(self.results_dir)
- if basename.find('test_that_results_') != -1:
- command = 'rm -rf %s' % self.results_dir
- else:
- command = 'rm -rf %s' % dirname
- self.ce.RunCommand(command)
- if self.temp_dir:
- command = 'rm -rf %s' % self.temp_dir
- self.ce.RunCommand(command)
-
- def CreateTarball(self, results_dir, tarball):
- if not results_dir.strip():
- raise ValueError('Refusing to `tar` an empty results_dir: %r' %
- results_dir)
-
- ret = self.ce.RunCommand('cd %s && '
- 'tar '
- '--exclude=var/spool '
- '--exclude=var/log '
- '-cjf %s .' % (results_dir, tarball))
- if ret:
- raise RuntimeError("Couldn't compress test output directory.")
-
- def StoreToCacheDir(self, cache_dir, machine_manager, key_list):
- # Create the dir if it doesn't exist.
- temp_dir = tempfile.mkdtemp()
-
- # Store to the temp directory.
- with open(os.path.join(temp_dir, RESULTS_FILE), 'wb') as f:
- pickle.dump(self.out, f)
- pickle.dump(self.err, f)
- pickle.dump(self.retval, f)
-
- if not test_flag.GetTestMode():
- with open(os.path.join(temp_dir, CACHE_KEYS_FILE), 'w') as f:
- f.write('%s\n' % self.label.name)
- f.write('%s\n' % self.label.chrome_version)
- f.write('%s\n' % self.machine.checksum_string)
- for k in key_list:
- f.write(k)
- f.write('\n')
-
- if self.results_dir:
- tarball = os.path.join(temp_dir, AUTOTEST_TARBALL)
- self.CreateTarball(self.results_dir, tarball)
-
- # Store machine info.
- # TODO(asharif): Make machine_manager a singleton, and don't pass it into
- # this function.
- with open(os.path.join(temp_dir, MACHINE_FILE), 'w') as f:
- f.write(machine_manager.machine_checksum_string[self.label.name])
-
- if os.path.exists(cache_dir):
- command = f'rm -rf {cache_dir}'
- self.ce.RunCommand(command)
-
- parent_dir = os.path.dirname(cache_dir)
- command = f'mkdir -p {parent_dir} && '
- command += f'chmod g+x {temp_dir} && '
- command += f'mv {temp_dir} {cache_dir}'
- ret = self.ce.RunCommand(command)
- if ret:
- command = f'rm -rf {temp_dir}'
- self.ce.RunCommand(command)
- raise RuntimeError('Could not move dir %s to dir %s' %
- (temp_dir, cache_dir))
-
- @classmethod
- def CreateFromRun(cls,
- logger,
- log_level,
- label,
- machine,
- out,
- err,
- retval,
- test,
- suite='telemetry_Crosperf',
- cwp_dso=''):
- if suite == 'telemetry':
- result = TelemetryResult(logger, label, log_level, machine)
- else:
- result = cls(logger, label, log_level, machine)
- result.PopulateFromRun(out, err, retval, test, suite, cwp_dso)
- return result
-
- @classmethod
- def CreateFromCacheHit(cls,
- logger,
- log_level,
- label,
- machine,
- cache_dir,
- test,
- suite='telemetry_Crosperf',
- cwp_dso=''):
- if suite == 'telemetry':
- result = TelemetryResult(logger, label, log_level, machine)
- else:
- result = cls(logger, label, log_level, machine)
- try:
- result.PopulateFromCacheDir(cache_dir, test, suite, cwp_dso)
-
- except RuntimeError as e:
- logger.LogError('Exception while using cache: %s' % e)
- return None
- return result
+ except RuntimeError as e:
+ logger.LogError("Exception while using cache: %s" % e)
+ return None
+ return result
class TelemetryResult(Result):
- """Class to hold the results of a single Telemetry run."""
+ """Class to hold the results of a single Telemetry run."""
- def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso):
- self.out = out
- self.err = err
- self.retval = retval
+ def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso):
+ self.out = out
+ self.err = err
+ self.retval = retval
- self.ProcessResults()
+ self.ProcessResults()
- # pylint: disable=arguments-differ
- def ProcessResults(self):
- # The output is:
- # url,average_commit_time (ms),...
- # www.google.com,33.4,21.2,...
- # We need to convert to this format:
- # {"www.google.com:average_commit_time (ms)": "33.4",
- # "www.google.com:...": "21.2"}
- # Added note: Occasionally the output comes back
- # with "JSON.stringify(window.automation.GetResults())" on
- # the first line, and then the rest of the output as
- # described above.
+ # pylint: disable=arguments-differ
+ def ProcessResults(self):
+ # The output is:
+ # url,average_commit_time (ms),...
+ # www.google.com,33.4,21.2,...
+ # We need to convert to this format:
+ # {"www.google.com:average_commit_time (ms)": "33.4",
+ # "www.google.com:...": "21.2"}
+ # Added note: Occasionally the output comes back
+ # with "JSON.stringify(window.automation.GetResults())" on
+ # the first line, and then the rest of the output as
+ # described above.
- lines = self.out.splitlines()
- self.keyvals = {}
+ lines = self.out.splitlines()
+ self.keyvals = {}
- if lines:
- if lines[0].startswith('JSON.stringify'):
- lines = lines[1:]
+ if lines:
+ if lines[0].startswith("JSON.stringify"):
+ lines = lines[1:]
- if not lines:
- return
- labels = lines[0].split(',')
- for line in lines[1:]:
- fields = line.split(',')
- if len(fields) != len(labels):
- continue
- for i in range(1, len(labels)):
- key = '%s %s' % (fields[0], labels[i])
- value = fields[i]
- self.keyvals[key] = value
- self.keyvals['retval'] = self.retval
+ if not lines:
+ return
+ labels = lines[0].split(",")
+ for line in lines[1:]:
+ fields = line.split(",")
+ if len(fields) != len(labels):
+ continue
+ for i in range(1, len(labels)):
+ key = "%s %s" % (fields[0], labels[i])
+ value = fields[i]
+ self.keyvals[key] = value
+ self.keyvals["retval"] = self.retval
- def PopulateFromCacheDir(self, cache_dir, test, suite, cwp_dso):
- self.test_name = test
- self.suite = suite
- self.cwp_dso = cwp_dso
- with open(os.path.join(cache_dir, RESULTS_FILE), 'rb') as f:
- self.out = pickle.load(f)
- self.err = pickle.load(f)
- self.retval = pickle.load(f)
+ def PopulateFromCacheDir(self, cache_dir, test, suite, cwp_dso):
+ self.test_name = test
+ self.suite = suite
+ self.cwp_dso = cwp_dso
+ with open(os.path.join(cache_dir, RESULTS_FILE), "rb") as f:
+ self.out = pickle.load(f)
+ self.err = pickle.load(f)
+ self.retval = pickle.load(f)
- self.chrome_version = (super(TelemetryResult,
- self).GetChromeVersionFromCache(cache_dir))
- self.ProcessResults()
+ self.chrome_version = super(
+ TelemetryResult, self
+ ).GetChromeVersionFromCache(cache_dir)
+ self.ProcessResults()
class CacheConditions(object):
- """Various Cache condition values, for export."""
+ """Various Cache condition values, for export."""
- # Cache hit only if the result file exists.
- CACHE_FILE_EXISTS = 0
+ # Cache hit only if the result file exists.
+ CACHE_FILE_EXISTS = 0
- # Cache hit if the checksum of cpuinfo and totalmem of
- # the cached result and the new run match.
- MACHINES_MATCH = 1
+ # Cache hit if the checksum of cpuinfo and totalmem of
+ # the cached result and the new run match.
+ MACHINES_MATCH = 1
- # Cache hit if the image checksum of the cached result and the new run match.
- CHECKSUMS_MATCH = 2
+ # Cache hit if the image checksum of the cached result and the new run match.
+ CHECKSUMS_MATCH = 2
- # Cache hit only if the cached result was successful
- RUN_SUCCEEDED = 3
+ # Cache hit only if the cached result was successful
+ RUN_SUCCEEDED = 3
- # Never a cache hit.
- FALSE = 4
+ # Never a cache hit.
+ FALSE = 4
- # Cache hit if the image path matches the cached image path.
- IMAGE_PATH_MATCH = 5
+ # Cache hit if the image path matches the cached image path.
+ IMAGE_PATH_MATCH = 5
- # Cache hit if the uuid of hard disk mataches the cached one
+ # Cache hit if the uuid of hard disk mataches the cached one
- SAME_MACHINE_MATCH = 6
+ SAME_MACHINE_MATCH = 6
class ResultsCache(object):
- """Class to handle the cache for storing/retrieving test run results.
+ """Class to handle the cache for storing/retrieving test run results.
- This class manages the key of the cached runs without worrying about what
- is exactly stored (value). The value generation is handled by the Results
- class.
- """
- CACHE_VERSION = 6
+ This class manages the key of the cached runs without worrying about what
+ is exactly stored (value). The value generation is handled by the Results
+ class.
+ """
- def __init__(self):
- # Proper initialization happens in the Init function below.
- self.chromeos_image = None
- self.chromeos_root = None
- self.test_name = None
- self.iteration = None
- self.test_args = None
- self.profiler_args = None
- self.board = None
- self.cache_conditions = None
- self.machine_manager = None
- self.machine = None
- self._logger = None
- self.ce = None
- self.label = None
- self.share_cache = None
- self.suite = None
- self.log_level = None
- self.show_all = None
- self.run_local = None
- self.cwp_dso = None
+ CACHE_VERSION = 6
- def Init(self, chromeos_image, chromeos_root, test_name, iteration,
- test_args, profiler_args, machine_manager, machine, board,
- cache_conditions, logger_to_use, log_level, label, share_cache,
- suite, show_all_results, run_local, cwp_dso):
- self.chromeos_image = chromeos_image
- self.chromeos_root = chromeos_root
- self.test_name = test_name
- self.iteration = iteration
- self.test_args = test_args
- self.profiler_args = profiler_args
- self.board = board
- self.cache_conditions = cache_conditions
- self.machine_manager = machine_manager
- self.machine = machine
- self._logger = logger_to_use
- self.ce = command_executer.GetCommandExecuter(self._logger,
- log_level=log_level)
- self.label = label
- self.share_cache = share_cache
- self.suite = suite
- self.log_level = log_level
- self.show_all = show_all_results
- self.run_local = run_local
- self.cwp_dso = cwp_dso
+ def __init__(self):
+ # Proper initialization happens in the Init function below.
+ self.chromeos_image = None
+ self.chromeos_root = None
+ self.test_name = None
+ self.iteration = None
+ self.test_args = None
+ self.profiler_args = None
+ self.board = None
+ self.cache_conditions = None
+ self.machine_manager = None
+ self.machine = None
+ self._logger = None
+ self.ce = None
+ self.label = None
+ self.share_cache = None
+ self.suite = None
+ self.log_level = None
+ self.show_all = None
+ self.run_local = None
+ self.cwp_dso = None
- def GetCacheDirForRead(self):
- matching_dirs = []
- for glob_path in self.FormCacheDir(self.GetCacheKeyList(True)):
- matching_dirs += glob.glob(glob_path)
+ def Init(
+ self,
+ chromeos_image,
+ chromeos_root,
+ test_name,
+ iteration,
+ test_args,
+ profiler_args,
+ machine_manager,
+ machine,
+ board,
+ cache_conditions,
+ logger_to_use,
+ log_level,
+ label,
+ share_cache,
+ suite,
+ show_all_results,
+ run_local,
+ cwp_dso,
+ ):
+ self.chromeos_image = chromeos_image
+ self.chromeos_root = chromeos_root
+ self.test_name = test_name
+ self.iteration = iteration
+ self.test_args = test_args
+ self.profiler_args = profiler_args
+ self.board = board
+ self.cache_conditions = cache_conditions
+ self.machine_manager = machine_manager
+ self.machine = machine
+ self._logger = logger_to_use
+ self.ce = command_executer.GetCommandExecuter(
+ self._logger, log_level=log_level
+ )
+ self.label = label
+ self.share_cache = share_cache
+ self.suite = suite
+ self.log_level = log_level
+ self.show_all = show_all_results
+ self.run_local = run_local
+ self.cwp_dso = cwp_dso
- if matching_dirs:
- # Cache file found.
- return matching_dirs[0]
- return None
+ def GetCacheDirForRead(self):
+ matching_dirs = []
+ for glob_path in self.FormCacheDir(self.GetCacheKeyList(True)):
+ matching_dirs += glob.glob(glob_path)
- def GetCacheDirForWrite(self, get_keylist=False):
- cache_path = self.FormCacheDir(self.GetCacheKeyList(False))[0]
- if get_keylist:
- args_str = '%s_%s_%s' % (self.test_args, self.profiler_args,
- self.run_local)
- version, image = results_report.ParseChromeosImage(
- self.label.chromeos_image)
- keylist = [
- version, image, self.label.board, self.machine.name, self.test_name,
- str(self.iteration), args_str
- ]
- return cache_path, keylist
- return cache_path
+ if matching_dirs:
+ # Cache file found.
+ return matching_dirs[0]
+ return None
- def FormCacheDir(self, list_of_strings):
- cache_key = ' '.join(list_of_strings)
- cache_dir = misc.GetFilenameFromString(cache_key)
- if self.label.cache_dir:
- cache_home = os.path.abspath(os.path.expanduser(self.label.cache_dir))
- cache_path = [os.path.join(cache_home, cache_dir)]
- else:
- cache_path = [os.path.join(SCRATCH_DIR, cache_dir)]
+ def GetCacheDirForWrite(self, get_keylist=False):
+ cache_path = self.FormCacheDir(self.GetCacheKeyList(False))[0]
+ if get_keylist:
+ args_str = "%s_%s_%s" % (
+ self.test_args,
+ self.profiler_args,
+ self.run_local,
+ )
+ version, image = results_report.ParseChromeosImage(
+ self.label.chromeos_image
+ )
+ keylist = [
+ version,
+ image,
+ self.label.board,
+ self.machine.name,
+ self.test_name,
+ str(self.iteration),
+ args_str,
+ ]
+ return cache_path, keylist
+ return cache_path
- if self.share_cache:
- for path in [x.strip() for x in self.share_cache.split(',')]:
- if os.path.exists(path):
- cache_path.append(os.path.join(path, cache_dir))
+ def FormCacheDir(self, list_of_strings):
+ cache_key = " ".join(list_of_strings)
+ cache_dir = misc.GetFilenameFromString(cache_key)
+ if self.label.cache_dir:
+ cache_home = os.path.abspath(
+ os.path.expanduser(self.label.cache_dir)
+ )
+ cache_path = [os.path.join(cache_home, cache_dir)]
else:
- self._logger.LogFatal('Unable to find shared cache: %s' % path)
+ cache_path = [os.path.join(SCRATCH_DIR, cache_dir)]
- return cache_path
+ if self.share_cache:
+ for path in [x.strip() for x in self.share_cache.split(",")]:
+ if os.path.exists(path):
+ cache_path.append(os.path.join(path, cache_dir))
+ else:
+ self._logger.LogFatal(
+ "Unable to find shared cache: %s" % path
+ )
- def GetCacheKeyList(self, read):
- if read and CacheConditions.MACHINES_MATCH not in self.cache_conditions:
- machine_checksum = '*'
- else:
- machine_checksum = self.machine_manager.machine_checksum[self.label.name]
- if read and CacheConditions.CHECKSUMS_MATCH not in self.cache_conditions:
- checksum = '*'
- elif self.label.image_type == 'trybot':
- checksum = hashlib.md5(
- self.label.chromeos_image.encode('utf-8')).hexdigest()
- elif self.label.image_type == 'official':
- checksum = '*'
- else:
- checksum = ImageChecksummer().Checksum(self.label, self.log_level)
+ return cache_path
- if read and CacheConditions.IMAGE_PATH_MATCH not in self.cache_conditions:
- image_path_checksum = '*'
- else:
- image_path_checksum = hashlib.md5(
- self.chromeos_image.encode('utf-8')).hexdigest()
+ def GetCacheKeyList(self, read):
+ if read and CacheConditions.MACHINES_MATCH not in self.cache_conditions:
+ machine_checksum = "*"
+ else:
+ machine_checksum = self.machine_manager.machine_checksum[
+ self.label.name
+ ]
+ if (
+ read
+ and CacheConditions.CHECKSUMS_MATCH not in self.cache_conditions
+ ):
+ checksum = "*"
+ elif self.label.image_type == "trybot":
+ checksum = hashlib.md5(
+ self.label.chromeos_image.encode("utf-8")
+ ).hexdigest()
+ elif self.label.image_type == "official":
+ checksum = "*"
+ else:
+ checksum = ImageChecksummer().Checksum(self.label, self.log_level)
- machine_id_checksum = ''
- if read and CacheConditions.SAME_MACHINE_MATCH not in self.cache_conditions:
- machine_id_checksum = '*'
- else:
- if self.machine and self.machine.name in self.label.remote:
- machine_id_checksum = self.machine.machine_id_checksum
- else:
- for machine in self.machine_manager.GetMachines(self.label):
- if machine.name == self.label.remote[0]:
- machine_id_checksum = machine.machine_id_checksum
- break
+ if (
+ read
+ and CacheConditions.IMAGE_PATH_MATCH not in self.cache_conditions
+ ):
+ image_path_checksum = "*"
+ else:
+ image_path_checksum = hashlib.md5(
+ self.chromeos_image.encode("utf-8")
+ ).hexdigest()
- temp_test_args = '%s %s %s' % (self.test_args, self.profiler_args,
- self.run_local)
- test_args_checksum = hashlib.md5(
- temp_test_args.encode('utf-8')).hexdigest()
- return (image_path_checksum, self.test_name, str(self.iteration),
- test_args_checksum, checksum, machine_checksum,
- machine_id_checksum, str(self.CACHE_VERSION))
+ machine_id_checksum = ""
+ if (
+ read
+ and CacheConditions.SAME_MACHINE_MATCH not in self.cache_conditions
+ ):
+ machine_id_checksum = "*"
+ else:
+ if self.machine and self.machine.name in self.label.remote:
+ machine_id_checksum = self.machine.machine_id_checksum
+ else:
+ for machine in self.machine_manager.GetMachines(self.label):
+ if machine.name == self.label.remote[0]:
+ machine_id_checksum = machine.machine_id_checksum
+ break
- def ReadResult(self):
- if CacheConditions.FALSE in self.cache_conditions:
- cache_dir = self.GetCacheDirForWrite()
- command = 'rm -rf %s' % (cache_dir, )
- self.ce.RunCommand(command)
- return None
- cache_dir = self.GetCacheDirForRead()
+ temp_test_args = "%s %s %s" % (
+ self.test_args,
+ self.profiler_args,
+ self.run_local,
+ )
+ test_args_checksum = hashlib.md5(
+ temp_test_args.encode("utf-8")
+ ).hexdigest()
+ return (
+ image_path_checksum,
+ self.test_name,
+ str(self.iteration),
+ test_args_checksum,
+ checksum,
+ machine_checksum,
+ machine_id_checksum,
+ str(self.CACHE_VERSION),
+ )
- if not cache_dir:
- return None
+ def ReadResult(self):
+ if CacheConditions.FALSE in self.cache_conditions:
+ cache_dir = self.GetCacheDirForWrite()
+ command = "rm -rf %s" % (cache_dir,)
+ self.ce.RunCommand(command)
+ return None
+ cache_dir = self.GetCacheDirForRead()
- if not os.path.isdir(cache_dir):
- return None
+ if not cache_dir:
+ return None
- if self.log_level == 'verbose':
- self._logger.LogOutput('Trying to read from cache dir: %s' % cache_dir)
- result = Result.CreateFromCacheHit(self._logger, self.log_level,
- self.label, self.machine, cache_dir,
- self.test_name, self.suite,
- self.cwp_dso)
- if not result:
- return None
+ if not os.path.isdir(cache_dir):
+ return None
- if (result.retval == 0
- or CacheConditions.RUN_SUCCEEDED not in self.cache_conditions):
- return result
+ if self.log_level == "verbose":
+ self._logger.LogOutput(
+ "Trying to read from cache dir: %s" % cache_dir
+ )
+ result = Result.CreateFromCacheHit(
+ self._logger,
+ self.log_level,
+ self.label,
+ self.machine,
+ cache_dir,
+ self.test_name,
+ self.suite,
+ self.cwp_dso,
+ )
+ if not result:
+ return None
- return None
+ if (
+ result.retval == 0
+ or CacheConditions.RUN_SUCCEEDED not in self.cache_conditions
+ ):
+ return result
- def StoreResult(self, result):
- cache_dir, keylist = self.GetCacheDirForWrite(get_keylist=True)
- result.StoreToCacheDir(cache_dir, self.machine_manager, keylist)
+ return None
+
+ def StoreResult(self, result):
+ cache_dir, keylist = self.GetCacheDirForWrite(get_keylist=True)
+ result.StoreToCacheDir(cache_dir, self.machine_manager, keylist)
class MockResultsCache(ResultsCache):
- """Class for mock testing, corresponding to ResultsCache class."""
+ """Class for mock testing, corresponding to ResultsCache class."""
- # FIXME: pylint complains about this mock init method, we should probably
- # replace all Mock classes in Crosperf with simple Mock.mock().
- # pylint: disable=arguments-differ
- def Init(self, *args):
- pass
+ # FIXME: pylint complains about this mock init method, we should probably
+ # replace all Mock classes in Crosperf with simple Mock.mock().
+ # pylint: disable=arguments-differ
+ def Init(self, *args):
+ pass
- def ReadResult(self):
- return None
+ def ReadResult(self):
+ return None
- def StoreResult(self, result):
- pass
+ def StoreResult(self, result):
+ pass
class MockResult(Result):
- """Class for mock testing, corresponding to Result class."""
+ """Class for mock testing, corresponding to Result class."""
- def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso):
- self.out = out
- self.err = err
- self.retval = retval
+ def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso):
+ self.out = out
+ self.err = err
+ self.retval = retval
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index d6953ee..cad149e 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -1,13 +1,12 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module of result cache unittest."""
-from __future__ import print_function
import io
import os
@@ -17,20 +16,20 @@
import unittest
import unittest.mock as mock
+from cros_utils import command_executer
+from cros_utils import logger
+from cros_utils import misc
import image_checksummer
-import machine_manager
-import test_flag
-
from label import MockLabel
+import machine_manager
from results_cache import CacheConditions
from results_cache import PerfDataReadError
from results_cache import PidVerificationError
from results_cache import Result
from results_cache import ResultsCache
from results_cache import TelemetryResult
-from cros_utils import command_executer
-from cros_utils import logger
-from cros_utils import misc
+import test_flag
+
# The following hardcoded string has blocked words replaced, and thus
# is not representative of a true crosperf output.
@@ -133,35 +132,35 @@
"""
keyvals = {
- '': 'PASS',
- 'b_stdio_putcgetc__0_': '0.100005711667',
- 'b_string_strstr___azbycxdwevfugthsirjqkplomn__': '0.0133123556667',
- 'b_malloc_thread_local__0_': '0.01138439',
- 'b_string_strlen__0_': '0.044893587',
- 'b_malloc_sparse__0_': '0.015053784',
- 'b_string_memset__0_': '0.00275405066667',
- 'platform_LibCBench': 'PASS',
- 'b_pthread_uselesslock__0_': '0.0294113346667',
- 'b_string_strchr__0_': '0.00456903',
- 'b_pthread_create_serial1__0_': '0.0291785246667',
- 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac__': '0.118360778',
- 'b_string_strstr___aaaaaaaaaaaaaacccccccccccc__': '0.0135694476667',
- 'b_pthread_createjoin_serial1__0_': '0.031907936',
- 'b_malloc_thread_stress__0_': '0.0367894733333',
- 'b_regex_search____a_b_c__d_b__': '0.00165455066667',
- 'b_malloc_bubble__0_': '0.015066374',
- 'b_malloc_big2__0_': '0.002951359',
- 'b_stdio_putcgetc_unlocked__0_': '0.0371443833333',
- 'b_pthread_createjoin_serial2__0_': '0.043485347',
- 'b_regex_search___a_25_b__': '0.0496191923333',
- 'b_utf8_bigbuf__0_': '0.0473772253333',
- 'b_malloc_big1__0_': '0.00375231466667',
- 'b_regex_compile____a_b_c__d_b__': '0.00529833933333',
- 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaac__': '0.068957325',
- 'b_malloc_tiny2__0_': '0.000581407333333',
- 'b_utf8_onebyone__0_': '0.130938538333',
- 'b_malloc_tiny1__0_': '0.000768474333333',
- 'b_string_strstr___abcdefghijklmnopqrstuvwxyz__': '0.0134553343333'
+ "": "PASS",
+ "b_stdio_putcgetc__0_": "0.100005711667",
+ "b_string_strstr___azbycxdwevfugthsirjqkplomn__": "0.0133123556667",
+ "b_malloc_thread_local__0_": "0.01138439",
+ "b_string_strlen__0_": "0.044893587",
+ "b_malloc_sparse__0_": "0.015053784",
+ "b_string_memset__0_": "0.00275405066667",
+ "platform_LibCBench": "PASS",
+ "b_pthread_uselesslock__0_": "0.0294113346667",
+ "b_string_strchr__0_": "0.00456903",
+ "b_pthread_create_serial1__0_": "0.0291785246667",
+ "b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac__": "0.118360778",
+ "b_string_strstr___aaaaaaaaaaaaaacccccccccccc__": "0.0135694476667",
+ "b_pthread_createjoin_serial1__0_": "0.031907936",
+ "b_malloc_thread_stress__0_": "0.0367894733333",
+ "b_regex_search____a_b_c__d_b__": "0.00165455066667",
+ "b_malloc_bubble__0_": "0.015066374",
+ "b_malloc_big2__0_": "0.002951359",
+ "b_stdio_putcgetc_unlocked__0_": "0.0371443833333",
+ "b_pthread_createjoin_serial2__0_": "0.043485347",
+ "b_regex_search___a_25_b__": "0.0496191923333",
+ "b_utf8_bigbuf__0_": "0.0473772253333",
+ "b_malloc_big1__0_": "0.00375231466667",
+ "b_regex_compile____a_b_c__d_b__": "0.00529833933333",
+ "b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaac__": "0.068957325",
+ "b_malloc_tiny2__0_": "0.000581407333333",
+ "b_utf8_onebyone__0_": "0.130938538333",
+ "b_malloc_tiny1__0_": "0.000768474333333",
+ "b_string_strstr___abcdefghijklmnopqrstuvwxyz__": "0.0134553343333",
}
PERF_DATA_HEADER = """
@@ -192,8 +191,7 @@
#
"""
-TURBOSTAT_LOG_OUTPUT = (
- """CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
+TURBOSTAT_LOG_OUTPUT = """CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
- 329 12.13 2723 2393 10975 77
0 336 12.41 2715 2393 6328 77
2 323 11.86 2731 2393 4647 69
@@ -221,17 +219,13 @@
- 843 29.83 2832 2393 28161 47
0 827 29.35 2826 2393 16093 47
2 858 30.31 2838 2393 12068 46
-""")
+"""
TURBOSTAT_DATA = {
- 'cpufreq': {
- 'all': [2723, 2884, 2927, 2937, 2932, 2933, 2832]
- },
- 'cputemp': {
- 'all': [77, 83, 84, 72, 75, 46, 47]
- },
+ "cpufreq": {"all": [2723, 2884, 2927, 2937, 2932, 2933, 2832]},
+ "cputemp": {"all": [77, 83, 84, 72, 75, 46, 47]},
}
-TOP_LOG = ("""
+TOP_LOG = """
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
4102 chronos 12 -8 3454472 238300 118188 R 41.8 6.1 0:08.37 chrome
4204 chronos 12 -8 2492716 205728 179016 S 11.8 5.3 0:03.89 chrome
@@ -253,58 +247,58 @@
5713 chronos 20 0 5178652 103120 50372 S 17.8 2.6 0:01.13 chrome
7 root 20 0 0 0 0 S 1.0 0.0 0:00.73 rcu_preempt
855 root 20 0 0 0 0 S 1.0 0.0 0:00.01 kworker/4:2
-""")
+"""
TOP_DATA = [
{
- 'cmd': 'chrome-5745',
- 'cpu_use_avg': 115.35,
- 'count': 2,
- 'top5_cpu_use': [122.8, 107.9],
+ "cmd": "chrome-5745",
+ "cpu_use_avg": 115.35,
+ "count": 2,
+ "top5_cpu_use": [122.8, 107.9],
},
{
- 'cmd': 'chrome-5713',
- 'cpu_use_avg': 8.9,
- 'count': 1,
- 'top5_cpu_use': [17.8]
+ "cmd": "chrome-5713",
+ "cpu_use_avg": 8.9,
+ "count": 1,
+ "top5_cpu_use": [17.8],
},
{
- 'cmd': 'irq/cros-ec-912',
- 'cpu_use_avg': 1.0,
- 'count': 1,
- 'top5_cpu_use': [2.0],
+ "cmd": "irq/cros-ec-912",
+ "cpu_use_avg": 1.0,
+ "count": 1,
+ "top5_cpu_use": [2.0],
},
{
- 'cmd': 'chrome-5205',
- 'cpu_use_avg': 0.5,
- 'count': 1,
- 'top5_cpu_use': [1.0]
+ "cmd": "chrome-5205",
+ "cpu_use_avg": 0.5,
+ "count": 1,
+ "top5_cpu_use": [1.0],
},
{
- 'cmd': 'spi5-121',
- 'cpu_use_avg': 0.5,
- 'count': 1,
- 'top5_cpu_use': [1.0],
+ "cmd": "spi5-121",
+ "cpu_use_avg": 0.5,
+ "count": 1,
+ "top5_cpu_use": [1.0],
},
{
- 'cmd': 'sshd-4811',
- 'cpu_use_avg': 0.5,
- 'count': 1,
- 'top5_cpu_use': [1.0],
+ "cmd": "sshd-4811",
+ "cpu_use_avg": 0.5,
+ "count": 1,
+ "top5_cpu_use": [1.0],
},
{
- 'cmd': 'rcu_preempt-7',
- 'cpu_use_avg': 0.5,
- 'count': 1,
- 'top5_cpu_use': [1.0],
+ "cmd": "rcu_preempt-7",
+ "cpu_use_avg": 0.5,
+ "count": 1,
+ "top5_cpu_use": [1.0],
},
{
- 'cmd': 'kworker/4:2-855',
- 'cpu_use_avg': 0.5,
- 'count': 1,
- 'top5_cpu_use': [1.0],
+ "cmd": "kworker/4:2-855",
+ "cpu_use_avg": 0.5,
+ "count": 1,
+ "top5_cpu_use": [1.0],
},
]
-TOP_OUTPUT = (""" COMMAND AVG CPU% SEEN HIGHEST 5
+TOP_OUTPUT = """ COMMAND AVG CPU% SEEN HIGHEST 5
chrome 128.250000 6 [122.8, 107.9, 17.8, 5.0, 2.0]
irq/230-cros-ec 1.000000 1 [2.0]
sshd 0.500000 1 [1.0]
@@ -312,9 +306,9 @@
spi5 0.500000 1 [1.0]
rcu_preempt 0.500000 1 [1.0]
kworker/4:2 0.500000 1 [1.0]
-""")
+"""
-CPUSTATS_UNIQ_OUTPUT = ("""
+CPUSTATS_UNIQ_OUTPUT = """
/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq 1512000
/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq 1512000
/sys/devices/system/cpu/cpu3/cpufreq/cpuinfo_cur_freq 2016000
@@ -327,20 +321,20 @@
soc-thermal 45456
little-cpu 42555
big-cpu 61724
-""")
+"""
CPUSTATS_UNIQ_DATA = {
- 'cpufreq': {
- 'cpu0': [1512, 1500],
- 'cpu1': [1512, 1600],
- 'cpu3': [2016, 2012]
+ "cpufreq": {
+ "cpu0": [1512, 1500],
+ "cpu1": [1512, 1600],
+ "cpu3": [2016, 2012],
},
- 'cputemp': {
- 'soc-thermal': [44.4, 45.5],
- 'little-cpu': [41.2, 42.6],
- 'big-cpu': [51.2, 61.7]
- }
+ "cputemp": {
+ "soc-thermal": [44.4, 45.5],
+ "little-cpu": [41.2, 42.6],
+ "big-cpu": [51.2, 61.7],
+ },
}
-CPUSTATS_DUPL_OUTPUT = ("""
+CPUSTATS_DUPL_OUTPUT = """
/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq 1512000
/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq 1512000
/sys/devices/system/cpu/cpu2/cpufreq/cpuinfo_cur_freq 1512000
@@ -353,17 +347,14 @@
/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq 1614000
/sys/devices/system/cpu/cpu2/cpufreq/cpuinfo_cur_freq 1614000
/sys/devices/system/cpu/cpu3/cpufreq/cpuinfo_cur_freq 1982000
-""")
+"""
CPUSTATS_DUPL_DATA = {
- 'cpufreq': {
- 'cpu0': [1512, 1500, 1614],
- 'cpu3': [2016, 2016, 1982]
- },
+ "cpufreq": {"cpu0": [1512, 1500, 1614], "cpu3": [2016, 2016, 1982]},
}
-TMP_DIR1 = '/tmp/tmpAbcXyz'
+TMP_DIR1 = "/tmp/tmpAbcXyz"
-HISTOGRAMSET = ("""
+HISTOGRAMSET = """
[
{
"values": [
@@ -435,1427 +426,1557 @@
}
]
-""")
+"""
# pylint: enable=line-too-long
class MockResult(Result):
- """Mock result class."""
- def __init__(self, mylogger, label, logging_level, machine):
- super(MockResult, self).__init__(mylogger, label, logging_level, machine)
+ """Mock result class."""
- def FindFilesInResultsDir(self, find_args):
- return ''
+ def __init__(self, mylogger, label, logging_level, machine):
+ super(MockResult, self).__init__(
+ mylogger, label, logging_level, machine
+ )
- # pylint: disable=arguments-differ
- def GetKeyvals(self, temp=False):
- if temp:
- pass
- return keyvals
+ def FindFilesInResultsDir(self, find_args):
+ return ""
+
+ # pylint: disable=arguments-differ
+ def GetKeyvals(self, temp=False):
+ if temp:
+ pass
+ return keyvals
class ResultTest(unittest.TestCase):
- """Result test class."""
- def __init__(self, *args, **kwargs):
- super(ResultTest, self).__init__(*args, **kwargs)
- self.callFakeProcessResults = False
- self.fakeCacheReturnResult = None
- self.callGetResultsDir = False
- self.callProcessResults = False
- self.callGetPerfReportFiles = False
- self.kv_dict = None
- self.tmpdir = ''
- self.callGetNewKeyvals = False
- self.callGetResultsFile = False
- self.callGetPerfDataFiles = False
- self.callGetTurbostatFile = False
- self.callGetCpustatsFile = False
- self.callGetTopFile = False
- self.callGetCpuinfoFile = False
- self.callGetWaitTimeFile = False
- self.args = None
- self.callGatherPerfResults = False
- self.mock_logger = mock.Mock(spec=logger.Logger)
- self.mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
- self.mock_label = MockLabel('mock_label', 'build', 'chromeos_image',
- 'autotest_dir', 'debug_dir', '/tmp', 'lumpy',
- 'remote', 'image_args', 'cache_dir', 'average',
- 'gcc', False, None)
+ """Result test class."""
- def testCreateFromRun(self):
- result = MockResult.CreateFromRun(logger.GetLogger(), 'average',
- self.mock_label, 'remote1', OUTPUT,
- error, 0, True)
- self.assertEqual(result.keyvals, keyvals)
- self.assertEqual(result.chroot_results_dir,
- '/tmp/test_that.PO1234567/platform_LibCBench')
- self.assertEqual(result.results_dir,
- '/tmp/chroot/tmp/test_that.PO1234567/platform_LibCBench')
- self.assertEqual(result.retval, 0)
+ def __init__(self, *args, **kwargs):
+ super(ResultTest, self).__init__(*args, **kwargs)
+ self.callFakeProcessResults = False
+ self.fakeCacheReturnResult = None
+ self.callGetResultsDir = False
+ self.callProcessResults = False
+ self.callGetPerfReportFiles = False
+ self.kv_dict = None
+ self.tmpdir = ""
+ self.callGetNewKeyvals = False
+ self.callGetResultsFile = False
+ self.callGetPerfDataFiles = False
+ self.callGetTurbostatFile = False
+ self.callGetCpustatsFile = False
+ self.callGetTopFile = False
+ self.callGetCpuinfoFile = False
+ self.callGetWaitTimeFile = False
+ self.args = None
+ self.callGatherPerfResults = False
+ self.mock_logger = mock.Mock(spec=logger.Logger)
+ self.mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+ self.mock_label = MockLabel(
+ "mock_label",
+ "build",
+ "chromeos_image",
+ "autotest_dir",
+ "debug_dir",
+ "/tmp",
+ "lumpy",
+ "remote",
+ "image_args",
+ "cache_dir",
+ "average",
+ "gcc",
+ False,
+ None,
+ )
- def setUp(self):
- self.result = Result(self.mock_logger, self.mock_label, 'average',
- self.mock_cmd_exec)
- self.result.chromeos_root = '/tmp/chromeos'
+ def testCreateFromRun(self):
+ result = MockResult.CreateFromRun(
+ logger.GetLogger(),
+ "average",
+ self.mock_label,
+ "remote1",
+ OUTPUT,
+ error,
+ 0,
+ True,
+ )
+ self.assertEqual(result.keyvals, keyvals)
+ self.assertEqual(
+ result.chroot_results_dir,
+ "/tmp/test_that.PO1234567/platform_LibCBench",
+ )
+ self.assertEqual(
+ result.results_dir,
+ "/tmp/chroot/tmp/test_that.PO1234567/platform_LibCBench",
+ )
+ self.assertEqual(result.retval, 0)
- @mock.patch.object(os.path, 'isdir')
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
- @mock.patch.object(command_executer.CommandExecuter, 'CopyFiles')
- def test_copy_files_to(self, mock_copyfiles, mock_runcmd, mock_isdir):
+ def setUp(self):
+ self.result = Result(
+ self.mock_logger, self.mock_label, "average", self.mock_cmd_exec
+ )
+ self.result.chromeos_root = "/tmp/chromeos"
- files = ['src_file_1', 'src_file_2', 'src_file_3']
- dest_dir = '/tmp/test'
- self.mock_cmd_exec.RunCommand = mock_runcmd
- self.mock_cmd_exec.CopyFiles = mock_copyfiles
+ @mock.patch.object(os.path, "isdir")
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommand")
+ @mock.patch.object(command_executer.CommandExecuter, "CopyFiles")
+ def test_copy_files_to(self, mock_copyfiles, mock_runcmd, mock_isdir):
- mock_copyfiles.return_value = 0
+ files = ["src_file_1", "src_file_2", "src_file_3"]
+ dest_dir = "/tmp/test"
+ self.mock_cmd_exec.RunCommand = mock_runcmd
+ self.mock_cmd_exec.CopyFiles = mock_copyfiles
- # test 1. dest_dir exists; CopyFiles returns 0.
- mock_isdir.return_value = True
- self.result.CopyFilesTo(dest_dir, files)
- self.assertEqual(mock_runcmd.call_count, 0)
- self.assertEqual(mock_copyfiles.call_count, 3)
- first_args = mock_copyfiles.call_args_list[0][0]
- second_args = mock_copyfiles.call_args_list[1][0]
- third_args = mock_copyfiles.call_args_list[2][0]
- self.assertEqual(first_args, ('src_file_1', '/tmp/test/src_file_1.0'))
- self.assertEqual(second_args, ('src_file_2', '/tmp/test/src_file_2.1'))
- self.assertEqual(third_args, ('src_file_3', '/tmp/test/src_file_3.2'))
+ mock_copyfiles.return_value = 0
- mock_runcmd.reset_mock()
- mock_copyfiles.reset_mock()
- # test 2. dest_dir does not exist; CopyFiles returns 0.
- mock_isdir.return_value = False
- self.result.CopyFilesTo(dest_dir, files)
- self.assertEqual(mock_runcmd.call_count, 3)
- self.assertEqual(mock_copyfiles.call_count, 3)
- self.assertEqual(mock_runcmd.call_args_list[0],
- mock_runcmd.call_args_list[1])
- self.assertEqual(mock_runcmd.call_args_list[0],
- mock_runcmd.call_args_list[2])
- self.assertEqual(mock_runcmd.call_args_list[0][0],
- ('mkdir -p /tmp/test', ))
+ # test 1. dest_dir exists; CopyFiles returns 0.
+ mock_isdir.return_value = True
+ self.result.CopyFilesTo(dest_dir, files)
+ self.assertEqual(mock_runcmd.call_count, 0)
+ self.assertEqual(mock_copyfiles.call_count, 3)
+ first_args = mock_copyfiles.call_args_list[0][0]
+ second_args = mock_copyfiles.call_args_list[1][0]
+ third_args = mock_copyfiles.call_args_list[2][0]
+ self.assertEqual(first_args, ("src_file_1", "/tmp/test/src_file_1.0"))
+ self.assertEqual(second_args, ("src_file_2", "/tmp/test/src_file_2.1"))
+ self.assertEqual(third_args, ("src_file_3", "/tmp/test/src_file_3.2"))
- # test 3. CopyFiles returns 1 (fails).
- mock_copyfiles.return_value = 1
- self.assertRaises(Exception, self.result.CopyFilesTo, dest_dir, files)
+ mock_runcmd.reset_mock()
+ mock_copyfiles.reset_mock()
+ # test 2. dest_dir does not exist; CopyFiles returns 0.
+ mock_isdir.return_value = False
+ self.result.CopyFilesTo(dest_dir, files)
+ self.assertEqual(mock_runcmd.call_count, 3)
+ self.assertEqual(mock_copyfiles.call_count, 3)
+ self.assertEqual(
+ mock_runcmd.call_args_list[0], mock_runcmd.call_args_list[1]
+ )
+ self.assertEqual(
+ mock_runcmd.call_args_list[0], mock_runcmd.call_args_list[2]
+ )
+ self.assertEqual(
+ mock_runcmd.call_args_list[0][0], ("mkdir -p /tmp/test",)
+ )
- @mock.patch.object(Result, 'CopyFilesTo')
- def test_copy_results_to(self, mockCopyFilesTo):
- results_file = [
- '/tmp/result.json.0', '/tmp/result.json.1', '/tmp/result.json.2'
- ]
- perf_data_files = [
- '/tmp/perf.data.0', '/tmp/perf.data.1', '/tmp/perf.data.2'
- ]
- perf_report_files = [
- '/tmp/perf.report.0', '/tmp/perf.report.1', '/tmp/perf.report.2'
- ]
+ # test 3. CopyFiles returns 1 (fails).
+ mock_copyfiles.return_value = 1
+ self.assertRaises(Exception, self.result.CopyFilesTo, dest_dir, files)
- self.result.results_file = results_file
- self.result.perf_data_files = perf_data_files
- self.result.perf_report_files = perf_report_files
+ @mock.patch.object(Result, "CopyFilesTo")
+ def test_copy_results_to(self, mockCopyFilesTo):
+ results_file = [
+ "/tmp/result.json.0",
+ "/tmp/result.json.1",
+ "/tmp/result.json.2",
+ ]
+ perf_data_files = [
+ "/tmp/perf.data.0",
+ "/tmp/perf.data.1",
+ "/tmp/perf.data.2",
+ ]
+ perf_report_files = [
+ "/tmp/perf.report.0",
+ "/tmp/perf.report.1",
+ "/tmp/perf.report.2",
+ ]
- self.result.CopyFilesTo = mockCopyFilesTo
- self.result.CopyResultsTo('/tmp/results/')
- self.assertEqual(mockCopyFilesTo.call_count, 3)
- self.assertEqual(len(mockCopyFilesTo.call_args_list), 3)
- self.assertEqual(mockCopyFilesTo.call_args_list[0][0],
- ('/tmp/results/', results_file))
- self.assertEqual(mockCopyFilesTo.call_args_list[1][0],
- ('/tmp/results/', perf_data_files))
- self.assertEqual(mockCopyFilesTo.call_args_list[2][0],
- ('/tmp/results/', perf_report_files))
+ self.result.results_file = results_file
+ self.result.perf_data_files = perf_data_files
+ self.result.perf_report_files = perf_report_files
- def test_get_new_keyvals(self):
- kv_dict = {}
+ self.result.CopyFilesTo = mockCopyFilesTo
+ self.result.CopyResultsTo("/tmp/results/")
+ self.assertEqual(mockCopyFilesTo.call_count, 3)
+ self.assertEqual(len(mockCopyFilesTo.call_args_list), 3)
+ self.assertEqual(
+ mockCopyFilesTo.call_args_list[0][0],
+ ("/tmp/results/", results_file),
+ )
+ self.assertEqual(
+ mockCopyFilesTo.call_args_list[1][0],
+ ("/tmp/results/", perf_data_files),
+ )
+ self.assertEqual(
+ mockCopyFilesTo.call_args_list[2][0],
+ ("/tmp/results/", perf_report_files),
+ )
- def FakeGetDataMeasurementsFiles():
- filename = os.path.join(os.getcwd(), 'unittest_keyval_file.txt')
- return [filename]
+ def test_get_new_keyvals(self):
+ kv_dict = {}
- self.result.GetDataMeasurementsFiles = FakeGetDataMeasurementsFiles
- kv_dict2, udict = self.result.GetNewKeyvals(kv_dict)
- self.assertEqual(
- kv_dict2, {
- u'Box2D__Box2D': 4775,
- u'Mandreel__Mandreel': 6620,
- u'Gameboy__Gameboy': 9901,
- u'Crypto__Crypto': 8737,
- u'telemetry_page_measurement_results__num_errored': 0,
- u'telemetry_page_measurement_results__num_failed': 0,
- u'PdfJS__PdfJS': 6455,
- u'Total__Score': 7918,
- u'EarleyBoyer__EarleyBoyer': 14340,
- u'MandreelLatency__MandreelLatency': 5188,
- u'CodeLoad__CodeLoad': 6271,
- u'DeltaBlue__DeltaBlue': 14401,
- u'Typescript__Typescript': 9815,
- u'SplayLatency__SplayLatency': 7653,
- u'zlib__zlib': 16094,
- u'Richards__Richards': 10358,
- u'RegExp__RegExp': 1765,
- u'NavierStokes__NavierStokes': 9815,
- u'Splay__Splay': 4425,
- u'RayTrace__RayTrace': 16600
- })
- self.assertEqual(
- udict, {
- u'Box2D__Box2D': u'score',
- u'Mandreel__Mandreel': u'score',
- u'Gameboy__Gameboy': u'score',
- u'Crypto__Crypto': u'score',
- u'telemetry_page_measurement_results__num_errored': u'count',
- u'telemetry_page_measurement_results__num_failed': u'count',
- u'PdfJS__PdfJS': u'score',
- u'Total__Score': u'score',
- u'EarleyBoyer__EarleyBoyer': u'score',
- u'MandreelLatency__MandreelLatency': u'score',
- u'CodeLoad__CodeLoad': u'score',
- u'DeltaBlue__DeltaBlue': u'score',
- u'Typescript__Typescript': u'score',
- u'SplayLatency__SplayLatency': u'score',
- u'zlib__zlib': u'score',
- u'Richards__Richards': u'score',
- u'RegExp__RegExp': u'score',
- u'NavierStokes__NavierStokes': u'score',
- u'Splay__Splay': u'score',
- u'RayTrace__RayTrace': u'score'
- })
+ def FakeGetDataMeasurementsFiles():
+ filename = os.path.join(os.getcwd(), "unittest_keyval_file.txt")
+ return [filename]
- def test_append_telemetry_units(self):
- kv_dict = {
- u'Box2D__Box2D': 4775,
- u'Mandreel__Mandreel': 6620,
- u'Gameboy__Gameboy': 9901,
- u'Crypto__Crypto': 8737,
- u'PdfJS__PdfJS': 6455,
- u'Total__Score': 7918,
- u'EarleyBoyer__EarleyBoyer': 14340,
- u'MandreelLatency__MandreelLatency': 5188,
- u'CodeLoad__CodeLoad': 6271,
- u'DeltaBlue__DeltaBlue': 14401,
- u'Typescript__Typescript': 9815,
- u'SplayLatency__SplayLatency': 7653,
- u'zlib__zlib': 16094,
- u'Richards__Richards': 10358,
- u'RegExp__RegExp': 1765,
- u'NavierStokes__NavierStokes': 9815,
- u'Splay__Splay': 4425,
- u'RayTrace__RayTrace': 16600
- }
- units_dict = {
- u'Box2D__Box2D': u'score',
- u'Mandreel__Mandreel': u'score',
- u'Gameboy__Gameboy': u'score',
- u'Crypto__Crypto': u'score',
- u'PdfJS__PdfJS': u'score',
- u'Total__Score': u'score',
- u'EarleyBoyer__EarleyBoyer': u'score',
- u'MandreelLatency__MandreelLatency': u'score',
- u'CodeLoad__CodeLoad': u'score',
- u'DeltaBlue__DeltaBlue': u'score',
- u'Typescript__Typescript': u'score',
- u'SplayLatency__SplayLatency': u'score',
- u'zlib__zlib': u'score',
- u'Richards__Richards': u'score',
- u'RegExp__RegExp': u'score',
- u'NavierStokes__NavierStokes': u'score',
- u'Splay__Splay': u'score',
- u'RayTrace__RayTrace': u'score'
- }
+ self.result.GetDataMeasurementsFiles = FakeGetDataMeasurementsFiles
+ kv_dict2, udict = self.result.GetNewKeyvals(kv_dict)
+ self.assertEqual(
+ kv_dict2,
+ {
+ u"Box2D__Box2D": 4775,
+ u"Mandreel__Mandreel": 6620,
+ u"Gameboy__Gameboy": 9901,
+ u"Crypto__Crypto": 8737,
+ u"telemetry_page_measurement_results__num_errored": 0,
+ u"telemetry_page_measurement_results__num_failed": 0,
+ u"PdfJS__PdfJS": 6455,
+ u"Total__Score": 7918,
+ u"EarleyBoyer__EarleyBoyer": 14340,
+ u"MandreelLatency__MandreelLatency": 5188,
+ u"CodeLoad__CodeLoad": 6271,
+ u"DeltaBlue__DeltaBlue": 14401,
+ u"Typescript__Typescript": 9815,
+ u"SplayLatency__SplayLatency": 7653,
+ u"zlib__zlib": 16094,
+ u"Richards__Richards": 10358,
+ u"RegExp__RegExp": 1765,
+ u"NavierStokes__NavierStokes": 9815,
+ u"Splay__Splay": 4425,
+ u"RayTrace__RayTrace": 16600,
+ },
+ )
+ self.assertEqual(
+ udict,
+ {
+ u"Box2D__Box2D": u"score",
+ u"Mandreel__Mandreel": u"score",
+ u"Gameboy__Gameboy": u"score",
+ u"Crypto__Crypto": u"score",
+ u"telemetry_page_measurement_results__num_errored": u"count",
+ u"telemetry_page_measurement_results__num_failed": u"count",
+ u"PdfJS__PdfJS": u"score",
+ u"Total__Score": u"score",
+ u"EarleyBoyer__EarleyBoyer": u"score",
+ u"MandreelLatency__MandreelLatency": u"score",
+ u"CodeLoad__CodeLoad": u"score",
+ u"DeltaBlue__DeltaBlue": u"score",
+ u"Typescript__Typescript": u"score",
+ u"SplayLatency__SplayLatency": u"score",
+ u"zlib__zlib": u"score",
+ u"Richards__Richards": u"score",
+ u"RegExp__RegExp": u"score",
+ u"NavierStokes__NavierStokes": u"score",
+ u"Splay__Splay": u"score",
+ u"RayTrace__RayTrace": u"score",
+ },
+ )
- results_dict = self.result.AppendTelemetryUnits(kv_dict, units_dict)
- self.assertEqual(
- results_dict, {
- u'Box2D__Box2D': [4775, u'score'],
- u'Splay__Splay': [4425, u'score'],
- u'Gameboy__Gameboy': [9901, u'score'],
- u'Crypto__Crypto': [8737, u'score'],
- u'PdfJS__PdfJS': [6455, u'score'],
- u'Total__Score': [7918, u'score'],
- u'EarleyBoyer__EarleyBoyer': [14340, u'score'],
- u'MandreelLatency__MandreelLatency': [5188, u'score'],
- u'DeltaBlue__DeltaBlue': [14401, u'score'],
- u'SplayLatency__SplayLatency': [7653, u'score'],
- u'Mandreel__Mandreel': [6620, u'score'],
- u'Richards__Richards': [10358, u'score'],
- u'zlib__zlib': [16094, u'score'],
- u'CodeLoad__CodeLoad': [6271, u'score'],
- u'Typescript__Typescript': [9815, u'score'],
- u'RegExp__RegExp': [1765, u'score'],
- u'RayTrace__RayTrace': [16600, u'score'],
- u'NavierStokes__NavierStokes': [9815, u'score']
- })
+ def test_append_telemetry_units(self):
+ kv_dict = {
+ u"Box2D__Box2D": 4775,
+ u"Mandreel__Mandreel": 6620,
+ u"Gameboy__Gameboy": 9901,
+ u"Crypto__Crypto": 8737,
+ u"PdfJS__PdfJS": 6455,
+ u"Total__Score": 7918,
+ u"EarleyBoyer__EarleyBoyer": 14340,
+ u"MandreelLatency__MandreelLatency": 5188,
+ u"CodeLoad__CodeLoad": 6271,
+ u"DeltaBlue__DeltaBlue": 14401,
+ u"Typescript__Typescript": 9815,
+ u"SplayLatency__SplayLatency": 7653,
+ u"zlib__zlib": 16094,
+ u"Richards__Richards": 10358,
+ u"RegExp__RegExp": 1765,
+ u"NavierStokes__NavierStokes": 9815,
+ u"Splay__Splay": 4425,
+ u"RayTrace__RayTrace": 16600,
+ }
+ units_dict = {
+ u"Box2D__Box2D": u"score",
+ u"Mandreel__Mandreel": u"score",
+ u"Gameboy__Gameboy": u"score",
+ u"Crypto__Crypto": u"score",
+ u"PdfJS__PdfJS": u"score",
+ u"Total__Score": u"score",
+ u"EarleyBoyer__EarleyBoyer": u"score",
+ u"MandreelLatency__MandreelLatency": u"score",
+ u"CodeLoad__CodeLoad": u"score",
+ u"DeltaBlue__DeltaBlue": u"score",
+ u"Typescript__Typescript": u"score",
+ u"SplayLatency__SplayLatency": u"score",
+ u"zlib__zlib": u"score",
+ u"Richards__Richards": u"score",
+ u"RegExp__RegExp": u"score",
+ u"NavierStokes__NavierStokes": u"score",
+ u"Splay__Splay": u"score",
+ u"RayTrace__RayTrace": u"score",
+ }
- @mock.patch.object(misc, 'GetInsideChrootPath')
- @mock.patch.object(tempfile, 'mkdtemp')
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
- @mock.patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
- def test_get_keyvals(self, mock_chrootruncmd, mock_runcmd, mock_mkdtemp,
- mock_getpath):
+ results_dict = self.result.AppendTelemetryUnits(kv_dict, units_dict)
+ self.assertEqual(
+ results_dict,
+ {
+ u"Box2D__Box2D": [4775, u"score"],
+ u"Splay__Splay": [4425, u"score"],
+ u"Gameboy__Gameboy": [9901, u"score"],
+ u"Crypto__Crypto": [8737, u"score"],
+ u"PdfJS__PdfJS": [6455, u"score"],
+ u"Total__Score": [7918, u"score"],
+ u"EarleyBoyer__EarleyBoyer": [14340, u"score"],
+ u"MandreelLatency__MandreelLatency": [5188, u"score"],
+ u"DeltaBlue__DeltaBlue": [14401, u"score"],
+ u"SplayLatency__SplayLatency": [7653, u"score"],
+ u"Mandreel__Mandreel": [6620, u"score"],
+ u"Richards__Richards": [10358, u"score"],
+ u"zlib__zlib": [16094, u"score"],
+ u"CodeLoad__CodeLoad": [6271, u"score"],
+ u"Typescript__Typescript": [9815, u"score"],
+ u"RegExp__RegExp": [1765, u"score"],
+ u"RayTrace__RayTrace": [16600, u"score"],
+ u"NavierStokes__NavierStokes": [9815, u"score"],
+ },
+ )
- self.kv_dict = {}
- self.callGetNewKeyvals = False
+ @mock.patch.object(misc, "GetInsideChrootPath")
+ @mock.patch.object(tempfile, "mkdtemp")
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommand")
+ @mock.patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ )
+ def test_get_keyvals(
+ self, mock_chrootruncmd, mock_runcmd, mock_mkdtemp, mock_getpath
+ ):
- def reset():
- self.kv_dict = {}
- self.callGetNewKeyvals = False
- mock_chrootruncmd.reset_mock()
- mock_runcmd.reset_mock()
- mock_mkdtemp.reset_mock()
- mock_getpath.reset_mock()
+ self.kv_dict = {}
+ self.callGetNewKeyvals = False
- def FakeGetNewKeyvals(kv_dict):
- self.kv_dict = kv_dict
- self.callGetNewKeyvals = True
- return_kvdict = {'first_time': 680, 'Total': 10}
- return_udict = {'first_time': 'ms', 'Total': 'score'}
- return return_kvdict, return_udict
+ def reset():
+ self.kv_dict = {}
+ self.callGetNewKeyvals = False
+ mock_chrootruncmd.reset_mock()
+ mock_runcmd.reset_mock()
+ mock_mkdtemp.reset_mock()
+ mock_getpath.reset_mock()
- mock_mkdtemp.return_value = TMP_DIR1
- mock_chrootruncmd.return_value = [
- '', ('%s,PASS\n%s/telemetry_Crosperf,PASS\n') % (TMP_DIR1, TMP_DIR1),
- ''
- ]
- mock_getpath.return_value = TMP_DIR1
- self.result.ce.ChrootRunCommandWOutput = mock_chrootruncmd
- self.result.ce.RunCommand = mock_runcmd
- self.result.GetNewKeyvals = FakeGetNewKeyvals
- self.result.suite = 'telemetry_Crosperf'
- self.result.results_dir = '/tmp/test_that_resultsNmq'
+ def FakeGetNewKeyvals(kv_dict):
+ self.kv_dict = kv_dict
+ self.callGetNewKeyvals = True
+ return_kvdict = {"first_time": 680, "Total": 10}
+ return_udict = {"first_time": "ms", "Total": "score"}
+ return return_kvdict, return_udict
- # Test 1. no self.temp_dir.
- res = self.result.GetKeyvals()
- self.assertTrue(self.callGetNewKeyvals)
- self.assertEqual(self.kv_dict, {'': 'PASS', 'telemetry_Crosperf': 'PASS'})
- self.assertEqual(mock_runcmd.call_count, 1)
- self.assertEqual(mock_runcmd.call_args_list[0][0],
- ('cp -r /tmp/test_that_resultsNmq/* %s' % TMP_DIR1, ))
- self.assertEqual(mock_chrootruncmd.call_count, 1)
- self.assertEqual(
- mock_chrootruncmd.call_args_list[0][0],
- (self.result.chromeos_root,
- ('./generate_test_report --no-color --csv %s') % TMP_DIR1))
- self.assertEqual(mock_getpath.call_count, 1)
- self.assertEqual(mock_mkdtemp.call_count, 1)
- self.assertEqual(res, {'Total': [10, 'score'], 'first_time': [680, 'ms']})
+ mock_mkdtemp.return_value = TMP_DIR1
+ mock_chrootruncmd.return_value = [
+ "",
+ ("%s,PASS\n%s/telemetry_Crosperf,PASS\n") % (TMP_DIR1, TMP_DIR1),
+ "",
+ ]
+ mock_getpath.return_value = TMP_DIR1
+ self.result.ce.ChrootRunCommandWOutput = mock_chrootruncmd
+ self.result.ce.RunCommand = mock_runcmd
+ self.result.GetNewKeyvals = FakeGetNewKeyvals
+ self.result.suite = "telemetry_Crosperf"
+ self.result.results_dir = "/tmp/test_that_resultsNmq"
- # Test 2. self.temp_dir
- reset()
- mock_chrootruncmd.return_value = [
- '', ('/tmp/tmpJCajRG,PASS\n/tmp/tmpJCajRG/'
- 'telemetry_Crosperf,PASS\n'), ''
- ]
- mock_getpath.return_value = '/tmp/tmpJCajRG'
- self.result.temp_dir = '/tmp/tmpJCajRG'
- res = self.result.GetKeyvals()
- self.assertEqual(mock_runcmd.call_count, 0)
- self.assertEqual(mock_mkdtemp.call_count, 0)
- self.assertEqual(mock_chrootruncmd.call_count, 1)
- self.assertTrue(self.callGetNewKeyvals)
- self.assertEqual(self.kv_dict, {'': 'PASS', 'telemetry_Crosperf': 'PASS'})
- self.assertEqual(res, {'Total': [10, 'score'], 'first_time': [680, 'ms']})
+ # Test 1. no self.temp_dir.
+ res = self.result.GetKeyvals()
+ self.assertTrue(self.callGetNewKeyvals)
+ self.assertEqual(
+ self.kv_dict, {"": "PASS", "telemetry_Crosperf": "PASS"}
+ )
+ self.assertEqual(mock_runcmd.call_count, 1)
+ self.assertEqual(
+ mock_runcmd.call_args_list[0][0],
+ ("cp -r /tmp/test_that_resultsNmq/* %s" % TMP_DIR1,),
+ )
+ self.assertEqual(mock_chrootruncmd.call_count, 1)
+ self.assertEqual(
+ mock_chrootruncmd.call_args_list[0][0],
+ (
+ self.result.chromeos_root,
+ ("./generate_test_report --no-color --csv %s") % TMP_DIR1,
+ ),
+ )
+ self.assertEqual(mock_getpath.call_count, 1)
+ self.assertEqual(mock_mkdtemp.call_count, 1)
+ self.assertEqual(
+ res, {"Total": [10, "score"], "first_time": [680, "ms"]}
+ )
- # Test 3. suite != telemetry_Crosperf. Normally this would be for
- # running non-Telemetry autotests, such as BootPerfServer. In this test
- # case, the keyvals we have set up were returned from a Telemetry test run;
- # so this pass is basically testing that we don't append the units to the
- # test results (which we do for Telemetry autotest runs).
- reset()
- self.result.suite = ''
- res = self.result.GetKeyvals()
- self.assertEqual(res, {'Total': 10, 'first_time': 680})
+ # Test 2. self.temp_dir
+ reset()
+ mock_chrootruncmd.return_value = [
+ "",
+ (
+ "/tmp/tmpJCajRG,PASS\n/tmp/tmpJCajRG/"
+ "telemetry_Crosperf,PASS\n"
+ ),
+ "",
+ ]
+ mock_getpath.return_value = "/tmp/tmpJCajRG"
+ self.result.temp_dir = "/tmp/tmpJCajRG"
+ res = self.result.GetKeyvals()
+ self.assertEqual(mock_runcmd.call_count, 0)
+ self.assertEqual(mock_mkdtemp.call_count, 0)
+ self.assertEqual(mock_chrootruncmd.call_count, 1)
+ self.assertTrue(self.callGetNewKeyvals)
+ self.assertEqual(
+ self.kv_dict, {"": "PASS", "telemetry_Crosperf": "PASS"}
+ )
+ self.assertEqual(
+ res, {"Total": [10, "score"], "first_time": [680, "ms"]}
+ )
- @mock.patch.object(misc, 'GetInsideChrootPath')
- @mock.patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
- @mock.patch.object(os.path, 'exists')
- def test_get_samples(self, mock_exists, mock_get_total_samples,
- mock_getpath):
- self.result.perf_data_files = ['/tmp/results/perf.data']
- self.result.board = 'samus'
- mock_getpath.return_value = '/usr/chromeos/chroot/tmp/results/perf.data'
- mock_get_total_samples.return_value = [
- '', '45.42% 237210 chrome ', ''
- ]
- mock_exists.return_value = True
+ # Test 3. suite != telemetry_Crosperf. Normally this would be for
+ # running non-Telemetry autotests, such as BootPerfServer. In this test
+ # case, the keyvals we have set up were returned from a Telemetry test run;
+ # so this pass is basically testing that we don't append the units to the
+ # test results (which we do for Telemetry autotest runs).
+ reset()
+ self.result.suite = ""
+ res = self.result.GetKeyvals()
+ self.assertEqual(res, {"Total": 10, "first_time": 680})
- # mock_open does not seem to support iteration.
- # pylint: disable=line-too-long
- content = """1.63% 66 dav1d-tile chrome [.] decode_coefs
+ @mock.patch.object(misc, "GetInsideChrootPath")
+ @mock.patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ )
+ @mock.patch.object(os.path, "exists")
+ def test_get_samples(
+ self, mock_exists, mock_get_total_samples, mock_getpath
+ ):
+ self.result.perf_data_files = ["/tmp/results/perf.data"]
+ self.result.board = "samus"
+ mock_getpath.return_value = "/usr/chromeos/chroot/tmp/results/perf.data"
+ mock_get_total_samples.return_value = [
+ "",
+ "45.42% 237210 chrome ",
+ "",
+ ]
+ mock_exists.return_value = True
+
+ # mock_open does not seem to support iteration.
+ # pylint: disable=line-too-long
+ content = """1.63% 66 dav1d-tile chrome [.] decode_coefs
1.48% 60 swapper [kernel.kallsyms] [k] intel_idle
1.16% 47 dav1d-tile chrome [.] decode_sb"""
- with mock.patch('builtins.open', return_value=io.StringIO(content)):
- samples = self.result.GetSamples()
- self.assertEqual(samples, [237210 - 60, u'samples'])
+ with mock.patch("builtins.open", return_value=io.StringIO(content)):
+ samples = self.result.GetSamples()
+ self.assertEqual(samples, [237210 - 60, u"samples"])
- def test_get_results_dir(self):
+ def test_get_results_dir(self):
- self.result.out = ''
- self.assertRaises(Exception, self.result.GetResultsDir)
+ self.result.out = ""
+ self.assertRaises(Exception, self.result.GetResultsDir)
- self.result.out = OUTPUT
- resdir = self.result.GetResultsDir()
- self.assertEqual(resdir, '/tmp/test_that.PO1234567/platform_LibCBench')
+ self.result.out = OUTPUT
+ resdir = self.result.GetResultsDir()
+ self.assertEqual(resdir, "/tmp/test_that.PO1234567/platform_LibCBench")
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandGeneric')
- def test_find_files_in_results_dir(self, mock_runcmd):
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandGeneric")
+ def test_find_files_in_results_dir(self, mock_runcmd):
- self.result.results_dir = None
- res = self.result.FindFilesInResultsDir('-name perf.data')
- self.assertEqual(res, '')
+ self.result.results_dir = None
+ res = self.result.FindFilesInResultsDir("-name perf.data")
+ self.assertEqual(res, "")
- self.result.ce.RunCommand = mock_runcmd
- self.result.results_dir = '/tmp/test_results'
- mock_runcmd.return_value = [0, '/tmp/test_results/perf.data', '']
- res = self.result.FindFilesInResultsDir('-name perf.data')
- self.assertEqual(mock_runcmd.call_count, 1)
- self.assertEqual(mock_runcmd.call_args_list[0][0],
- ('find /tmp/test_results -name perf.data', ))
- self.assertEqual(res, '/tmp/test_results/perf.data')
+ self.result.ce.RunCommand = mock_runcmd
+ self.result.results_dir = "/tmp/test_results"
+ mock_runcmd.return_value = [0, "/tmp/test_results/perf.data", ""]
+ res = self.result.FindFilesInResultsDir("-name perf.data")
+ self.assertEqual(mock_runcmd.call_count, 1)
+ self.assertEqual(
+ mock_runcmd.call_args_list[0][0],
+ ("find /tmp/test_results -name perf.data",),
+ )
+ self.assertEqual(res, "/tmp/test_results/perf.data")
- mock_runcmd.reset_mock()
- mock_runcmd.return_value = [1, '', '']
- self.assertRaises(Exception, self.result.FindFilesInResultsDir,
- '-name perf.data')
+ mock_runcmd.reset_mock()
+ mock_runcmd.return_value = [1, "", ""]
+ self.assertRaises(
+ Exception, self.result.FindFilesInResultsDir, "-name perf.data"
+ )
- @mock.patch.object(Result, 'FindFilesInResultsDir')
- def test_get_perf_data_files(self, mock_findfiles):
- self.args = None
+ @mock.patch.object(Result, "FindFilesInResultsDir")
+ def test_get_perf_data_files(self, mock_findfiles):
+ self.args = None
- mock_findfiles.return_value = 'line1\nline1\n'
- self.result.FindFilesInResultsDir = mock_findfiles
- res = self.result.GetPerfDataFiles()
- self.assertEqual(res, ['line1', 'line1'])
- self.assertEqual(mock_findfiles.call_args_list[0][0],
- ('-name perf.data', ))
+ mock_findfiles.return_value = "line1\nline1\n"
+ self.result.FindFilesInResultsDir = mock_findfiles
+ res = self.result.GetPerfDataFiles()
+ self.assertEqual(res, ["line1", "line1"])
+ self.assertEqual(
+ mock_findfiles.call_args_list[0][0], ("-name perf.data",)
+ )
- def test_get_perf_report_files(self):
- self.args = None
+ def test_get_perf_report_files(self):
+ self.args = None
- def FakeFindFiles(find_args):
- self.args = find_args
- return 'line1\nline1\n'
+ def FakeFindFiles(find_args):
+ self.args = find_args
+ return "line1\nline1\n"
- self.result.FindFilesInResultsDir = FakeFindFiles
- res = self.result.GetPerfReportFiles()
- self.assertEqual(res, ['line1', 'line1'])
- self.assertEqual(self.args, '-name perf.data.report')
+ self.result.FindFilesInResultsDir = FakeFindFiles
+ res = self.result.GetPerfReportFiles()
+ self.assertEqual(res, ["line1", "line1"])
+ self.assertEqual(self.args, "-name perf.data.report")
- def test_get_data_measurement_files(self):
- self.args = None
+ def test_get_data_measurement_files(self):
+ self.args = None
- def FakeFindFiles(find_args):
- self.args = find_args
- return 'line1\nline1\n'
+ def FakeFindFiles(find_args):
+ self.args = find_args
+ return "line1\nline1\n"
- self.result.FindFilesInResultsDir = FakeFindFiles
- res = self.result.GetDataMeasurementsFiles()
- self.assertEqual(res, ['line1', 'line1'])
- self.assertEqual(self.args, '-name perf_measurements')
+ self.result.FindFilesInResultsDir = FakeFindFiles
+ res = self.result.GetDataMeasurementsFiles()
+ self.assertEqual(res, ["line1", "line1"])
+ self.assertEqual(self.args, "-name perf_measurements")
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_turbostat_file_finds_single_log(self, mock_runcmd):
- """Expected behavior when a single log file found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0, 'some/long/path/turbostat.log', '')
- found_single_log = self.result.GetTurbostatFile()
- self.assertEqual(found_single_log, 'some/long/path/turbostat.log')
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_turbostat_file_finds_single_log(self, mock_runcmd):
+ """Expected behavior when a single log file found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, "some/long/path/turbostat.log", "")
+ found_single_log = self.result.GetTurbostatFile()
+ self.assertEqual(found_single_log, "some/long/path/turbostat.log")
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_turbostat_file_finds_multiple_logs(self, mock_runcmd):
- """Error case when multiple files found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0,
- 'some/long/path/turbostat.log\nturbostat.log',
- '')
- found_first_logs = self.result.GetTurbostatFile()
- self.assertEqual(found_first_logs, 'some/long/path/turbostat.log')
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_turbostat_file_finds_multiple_logs(self, mock_runcmd):
+ """Error case when multiple files found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (
+ 0,
+ "some/long/path/turbostat.log\nturbostat.log",
+ "",
+ )
+ found_first_logs = self.result.GetTurbostatFile()
+ self.assertEqual(found_first_logs, "some/long/path/turbostat.log")
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_turbostat_file_finds_no_logs(self, mock_runcmd):
- """Error case when no log file found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0, '', '')
- found_no_logs = self.result.GetTurbostatFile()
- self.assertEqual(found_no_logs, '')
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_turbostat_file_finds_no_logs(self, mock_runcmd):
+ """Error case when no log file found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, "", "")
+ found_no_logs = self.result.GetTurbostatFile()
+ self.assertEqual(found_no_logs, "")
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_turbostat_file_with_failing_find(self, mock_runcmd):
- """Error case when file search returns an error."""
- self.result.results_dir = '/tmp/test_results'
- mock_runcmd.return_value = (-1, '', 'error')
- with self.assertRaises(RuntimeError):
- self.result.GetTurbostatFile()
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_turbostat_file_with_failing_find(self, mock_runcmd):
+ """Error case when file search returns an error."""
+ self.result.results_dir = "/tmp/test_results"
+ mock_runcmd.return_value = (-1, "", "error")
+ with self.assertRaises(RuntimeError):
+ self.result.GetTurbostatFile()
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_top_file_finds_single_log(self, mock_runcmd):
- """Expected behavior when a single top log file found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0, 'some/long/path/top.log', '')
- found_single_log = self.result.GetTopFile()
- self.assertEqual(found_single_log, 'some/long/path/top.log')
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_top_file_finds_single_log(self, mock_runcmd):
+ """Expected behavior when a single top log file found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, "some/long/path/top.log", "")
+ found_single_log = self.result.GetTopFile()
+ self.assertEqual(found_single_log, "some/long/path/top.log")
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_top_file_finds_multiple_logs(self, mock_runcmd):
- """The case when multiple top files found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0, 'some/long/path/top.log\ntop.log', '')
- found_first_logs = self.result.GetTopFile()
- self.assertEqual(found_first_logs, 'some/long/path/top.log')
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_top_file_finds_multiple_logs(self, mock_runcmd):
+ """The case when multiple top files found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, "some/long/path/top.log\ntop.log", "")
+ found_first_logs = self.result.GetTopFile()
+ self.assertEqual(found_first_logs, "some/long/path/top.log")
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_top_file_finds_no_logs(self, mock_runcmd):
- """Error case when no log file found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0, '', '')
- found_no_logs = self.result.GetTopFile()
- self.assertEqual(found_no_logs, '')
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_top_file_finds_no_logs(self, mock_runcmd):
+ """Error case when no log file found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, "", "")
+ found_no_logs = self.result.GetTopFile()
+ self.assertEqual(found_no_logs, "")
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_cpuinfo_file_finds_single_log(self, mock_runcmd):
- """Expected behavior when a single cpuinfo file found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0, 'some/long/path/cpuinfo.log', '')
- found_single_log = self.result.GetCpuinfoFile()
- self.assertEqual(found_single_log, 'some/long/path/cpuinfo.log')
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_cpuinfo_file_finds_single_log(self, mock_runcmd):
+ """Expected behavior when a single cpuinfo file found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, "some/long/path/cpuinfo.log", "")
+ found_single_log = self.result.GetCpuinfoFile()
+ self.assertEqual(found_single_log, "some/long/path/cpuinfo.log")
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_cpustats_file_finds_single_log(self, mock_runcmd):
- """Expected behavior when a single log file found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0, 'some/long/path/cpustats.log', '')
- found_single_log = self.result.GetCpustatsFile()
- self.assertEqual(found_single_log, 'some/long/path/cpustats.log')
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_cpustats_file_finds_single_log(self, mock_runcmd):
+ """Expected behavior when a single log file found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, "some/long/path/cpustats.log", "")
+ found_single_log = self.result.GetCpustatsFile()
+ self.assertEqual(found_single_log, "some/long/path/cpustats.log")
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_cpustats_file_finds_multiple_logs(self, mock_runcmd):
- """The case when multiple files found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0, 'some/long/path/cpustats.log\ncpustats.log',
- '')
- found_first_logs = self.result.GetCpustatsFile()
- self.assertEqual(found_first_logs, 'some/long/path/cpustats.log')
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_cpustats_file_finds_multiple_logs(self, mock_runcmd):
+ """The case when multiple files found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (
+ 0,
+ "some/long/path/cpustats.log\ncpustats.log",
+ "",
+ )
+ found_first_logs = self.result.GetCpustatsFile()
+ self.assertEqual(found_first_logs, "some/long/path/cpustats.log")
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_cpustats_file_finds_no_logs(self, mock_runcmd):
- """Error case when no log file found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0, '', '')
- found_no_logs = self.result.GetCpustatsFile()
- self.assertEqual(found_no_logs, '')
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_cpustats_file_finds_no_logs(self, mock_runcmd):
+ """Error case when no log file found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, "", "")
+ found_no_logs = self.result.GetCpustatsFile()
+ self.assertEqual(found_no_logs, "")
- def test_verify_perf_data_pid_ok(self):
- """Verify perf PID which is present in TOP_DATA."""
- self.result.top_cmds = TOP_DATA
- # pid is present in TOP_DATA.
- with mock.patch.object(Result,
- 'ReadPidFromPerfData',
- return_value=['5713']):
- self.result.VerifyPerfDataPID()
+ def test_verify_perf_data_pid_ok(self):
+ """Verify perf PID which is present in TOP_DATA."""
+ self.result.top_cmds = TOP_DATA
+ # pid is present in TOP_DATA.
+ with mock.patch.object(
+ Result, "ReadPidFromPerfData", return_value=["5713"]
+ ):
+ self.result.VerifyPerfDataPID()
- def test_verify_perf_data_pid_fail(self):
- """Test perf PID missing in top raises the error."""
- self.result.top_cmds = TOP_DATA
- # pid is not in the list of top processes.
- with mock.patch.object(Result,
- 'ReadPidFromPerfData',
- return_value=['9999']):
- with self.assertRaises(PidVerificationError):
- self.result.VerifyPerfDataPID()
+ def test_verify_perf_data_pid_fail(self):
+ """Test perf PID missing in top raises the error."""
+ self.result.top_cmds = TOP_DATA
+ # pid is not in the list of top processes.
+ with mock.patch.object(
+ Result, "ReadPidFromPerfData", return_value=["9999"]
+ ):
+ with self.assertRaises(PidVerificationError):
+ self.result.VerifyPerfDataPID()
- @mock.patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
- def test_read_pid_from_perf_data_ok(self, mock_runcmd):
- """Test perf header parser, normal flow."""
- self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = [
- '/tmp/chromeos/chroot/tmp/results/perf.data'
- ]
- exp_pid = '12345'
- mock_runcmd.return_value = (0, PERF_DATA_HEADER.format(pid=exp_pid), '')
- pids = self.result.ReadPidFromPerfData()
- self.assertEqual(pids, [exp_pid])
+ @mock.patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ )
+ def test_read_pid_from_perf_data_ok(self, mock_runcmd):
+ """Test perf header parser, normal flow."""
+ self.result.ce.ChrootRunCommandWOutput = mock_runcmd
+ self.result.perf_data_files = [
+ "/tmp/chromeos/chroot/tmp/results/perf.data"
+ ]
+ exp_pid = "12345"
+ mock_runcmd.return_value = (0, PERF_DATA_HEADER.format(pid=exp_pid), "")
+ pids = self.result.ReadPidFromPerfData()
+ self.assertEqual(pids, [exp_pid])
- @mock.patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
- def test_read_pid_from_perf_data_mult_profiles(self, mock_runcmd):
- """Test multiple perf.data files with PID."""
- self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- # self.result.chromeos_root = '/tmp/chromeos'
- self.result.perf_data_files = [
- '/tmp/chromeos/chroot/tmp/results/perf.data.0',
- '/tmp/chromeos/chroot/tmp/results/perf.data.1',
- ]
- # There is '-p <pid>' in command line but it's still system-wide: '-a'.
- cmd_line = '# cmdline : /usr/bin/perf record -e instructions -p {pid}'
- exp_perf_pids = ['1111', '2222']
- mock_runcmd.side_effect = [
- (0, cmd_line.format(pid=exp_perf_pids[0]), ''),
- (0, cmd_line.format(pid=exp_perf_pids[1]), ''),
- ]
- pids = self.result.ReadPidFromPerfData()
- self.assertEqual(pids, exp_perf_pids)
+ @mock.patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ )
+ def test_read_pid_from_perf_data_mult_profiles(self, mock_runcmd):
+ """Test multiple perf.data files with PID."""
+ self.result.ce.ChrootRunCommandWOutput = mock_runcmd
+ # self.result.chromeos_root = '/tmp/chromeos'
+ self.result.perf_data_files = [
+ "/tmp/chromeos/chroot/tmp/results/perf.data.0",
+ "/tmp/chromeos/chroot/tmp/results/perf.data.1",
+ ]
+ # There is '-p <pid>' in command line but it's still system-wide: '-a'.
+ cmd_line = "# cmdline : /usr/bin/perf record -e instructions -p {pid}"
+ exp_perf_pids = ["1111", "2222"]
+ mock_runcmd.side_effect = [
+ (0, cmd_line.format(pid=exp_perf_pids[0]), ""),
+ (0, cmd_line.format(pid=exp_perf_pids[1]), ""),
+ ]
+ pids = self.result.ReadPidFromPerfData()
+ self.assertEqual(pids, exp_perf_pids)
- @mock.patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
- def test_read_pid_from_perf_data_no_pid(self, mock_runcmd):
- """Test perf.data without PID."""
- self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = [
- '/tmp/chromeos/chroot/tmp/results/perf.data'
- ]
- cmd_line = '# cmdline : /usr/bin/perf record -e instructions'
- mock_runcmd.return_value = (0, cmd_line, '')
- pids = self.result.ReadPidFromPerfData()
- # pids is empty.
- self.assertEqual(pids, [])
+ @mock.patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ )
+ def test_read_pid_from_perf_data_no_pid(self, mock_runcmd):
+ """Test perf.data without PID."""
+ self.result.ce.ChrootRunCommandWOutput = mock_runcmd
+ self.result.perf_data_files = [
+ "/tmp/chromeos/chroot/tmp/results/perf.data"
+ ]
+ cmd_line = "# cmdline : /usr/bin/perf record -e instructions"
+ mock_runcmd.return_value = (0, cmd_line, "")
+ pids = self.result.ReadPidFromPerfData()
+ # pids is empty.
+ self.assertEqual(pids, [])
- @mock.patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
- def test_read_pid_from_perf_data_system_wide(self, mock_runcmd):
- """Test reading from system-wide profile with PID."""
- self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = [
- '/tmp/chromeos/chroot/tmp/results/perf.data'
- ]
- # There is '-p <pid>' in command line but it's still system-wide: '-a'.
- cmd_line = '# cmdline : /usr/bin/perf record -e instructions -a -p 1234'
- mock_runcmd.return_value = (0, cmd_line, '')
- pids = self.result.ReadPidFromPerfData()
- # pids should be empty since it's not a per-process profiling.
- self.assertEqual(pids, [])
+ @mock.patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ )
+ def test_read_pid_from_perf_data_system_wide(self, mock_runcmd):
+ """Test reading from system-wide profile with PID."""
+ self.result.ce.ChrootRunCommandWOutput = mock_runcmd
+ self.result.perf_data_files = [
+ "/tmp/chromeos/chroot/tmp/results/perf.data"
+ ]
+ # There is '-p <pid>' in command line but it's still system-wide: '-a'.
+ cmd_line = "# cmdline : /usr/bin/perf record -e instructions -a -p 1234"
+ mock_runcmd.return_value = (0, cmd_line, "")
+ pids = self.result.ReadPidFromPerfData()
+ # pids should be empty since it's not a per-process profiling.
+ self.assertEqual(pids, [])
- @mock.patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
- def test_read_pid_from_perf_data_read_fail(self, mock_runcmd):
- """Failure to read perf.data raises the error."""
- self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = [
- '/tmp/chromeos/chroot/tmp/results/perf.data'
- ]
- # Error status of the profile read.
- mock_runcmd.return_value = (1, '', '')
- with self.assertRaises(PerfDataReadError):
- self.result.ReadPidFromPerfData()
+ @mock.patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ )
+ def test_read_pid_from_perf_data_read_fail(self, mock_runcmd):
+ """Failure to read perf.data raises the error."""
+ self.result.ce.ChrootRunCommandWOutput = mock_runcmd
+ self.result.perf_data_files = [
+ "/tmp/chromeos/chroot/tmp/results/perf.data"
+ ]
+ # Error status of the profile read.
+ mock_runcmd.return_value = (1, "", "")
+ with self.assertRaises(PerfDataReadError):
+ self.result.ReadPidFromPerfData()
- @mock.patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
- def test_read_pid_from_perf_data_fail(self, mock_runcmd):
- """Failure to find cmdline in perf.data header raises the error."""
- self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = [
- '/tmp/chromeos/chroot/tmp/results/perf.data'
- ]
- # Empty output.
- mock_runcmd.return_value = (0, '', '')
- with self.assertRaises(PerfDataReadError):
- self.result.ReadPidFromPerfData()
+ @mock.patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ )
+ def test_read_pid_from_perf_data_fail(self, mock_runcmd):
+ """Failure to find cmdline in perf.data header raises the error."""
+ self.result.ce.ChrootRunCommandWOutput = mock_runcmd
+ self.result.perf_data_files = [
+ "/tmp/chromeos/chroot/tmp/results/perf.data"
+ ]
+ # Empty output.
+ mock_runcmd.return_value = (0, "", "")
+ with self.assertRaises(PerfDataReadError):
+ self.result.ReadPidFromPerfData()
- def test_process_turbostat_results_with_valid_data(self):
- """Normal case when log exists and contains valid data."""
- self.result.turbostat_log_file = '/tmp/somelogfile.log'
- with mock.patch('builtins.open',
- mock.mock_open(read_data=TURBOSTAT_LOG_OUTPUT)) as mo:
- cpustats = self.result.ProcessTurbostatResults()
- # Check that the log got opened and data were read/parsed.
- calls = [mock.call('/tmp/somelogfile.log')]
- mo.assert_has_calls(calls)
- self.assertEqual(cpustats, TURBOSTAT_DATA)
+ def test_process_turbostat_results_with_valid_data(self):
+ """Normal case when log exists and contains valid data."""
+ self.result.turbostat_log_file = "/tmp/somelogfile.log"
+ with mock.patch(
+ "builtins.open", mock.mock_open(read_data=TURBOSTAT_LOG_OUTPUT)
+ ) as mo:
+ cpustats = self.result.ProcessTurbostatResults()
+ # Check that the log got opened and data were read/parsed.
+ calls = [mock.call("/tmp/somelogfile.log")]
+ mo.assert_has_calls(calls)
+ self.assertEqual(cpustats, TURBOSTAT_DATA)
- def test_process_turbostat_results_from_empty_file(self):
- """Error case when log exists but file is empty."""
- self.result.turbostat_log_file = '/tmp/emptylogfile.log'
- with mock.patch('builtins.open', mock.mock_open(read_data='')) as mo:
- cpustats = self.result.ProcessTurbostatResults()
- # Check that the log got opened and parsed successfully and empty data
- # returned.
- calls = [mock.call('/tmp/emptylogfile.log')]
- mo.assert_has_calls(calls)
- self.assertEqual(cpustats, {})
+ def test_process_turbostat_results_from_empty_file(self):
+ """Error case when log exists but file is empty."""
+ self.result.turbostat_log_file = "/tmp/emptylogfile.log"
+ with mock.patch("builtins.open", mock.mock_open(read_data="")) as mo:
+ cpustats = self.result.ProcessTurbostatResults()
+ # Check that the log got opened and parsed successfully and empty data
+ # returned.
+ calls = [mock.call("/tmp/emptylogfile.log")]
+ mo.assert_has_calls(calls)
+ self.assertEqual(cpustats, {})
- def test_process_turbostat_results_when_file_doesnt_exist(self):
- """Error case when file does not exist."""
- nonexistinglog = '/tmp/1'
- while os.path.exists(nonexistinglog):
- # Extend file path if it happens to exist.
- nonexistinglog = os.path.join(nonexistinglog, '1')
- self.result.turbostat_log_file = nonexistinglog
- # Allow the tested function to call a 'real' open and hopefully crash.
- with self.assertRaises(IOError):
- self.result.ProcessTurbostatResults()
+ def test_process_turbostat_results_when_file_doesnt_exist(self):
+ """Error case when file does not exist."""
+ nonexistinglog = "/tmp/1"
+ while os.path.exists(nonexistinglog):
+ # Extend file path if it happens to exist.
+ nonexistinglog = os.path.join(nonexistinglog, "1")
+ self.result.turbostat_log_file = nonexistinglog
+ # Allow the tested function to call a 'real' open and hopefully crash.
+ with self.assertRaises(IOError):
+ self.result.ProcessTurbostatResults()
- def test_process_cpustats_results_with_uniq_data(self):
- """Process cpustats log which has freq unique to each core.
+ def test_process_cpustats_results_with_uniq_data(self):
+ """Process cpustats log which has freq unique to each core.
- Testing normal case when frequency data vary between
- different cores.
- Expecting that data for all cores will be present in
- returned cpustats.
- """
- self.result.cpustats_log_file = '/tmp/somelogfile.log'
- with mock.patch('builtins.open',
- mock.mock_open(read_data=CPUSTATS_UNIQ_OUTPUT)) as mo:
- cpustats = self.result.ProcessCpustatsResults()
- # Check that the log got opened and data were read/parsed.
- calls = [mock.call('/tmp/somelogfile.log')]
- mo.assert_has_calls(calls)
- self.assertEqual(cpustats, CPUSTATS_UNIQ_DATA)
+ Testing normal case when frequency data vary between
+ different cores.
+ Expecting that data for all cores will be present in
+ returned cpustats.
+ """
+ self.result.cpustats_log_file = "/tmp/somelogfile.log"
+ with mock.patch(
+ "builtins.open", mock.mock_open(read_data=CPUSTATS_UNIQ_OUTPUT)
+ ) as mo:
+ cpustats = self.result.ProcessCpustatsResults()
+ # Check that the log got opened and data were read/parsed.
+ calls = [mock.call("/tmp/somelogfile.log")]
+ mo.assert_has_calls(calls)
+ self.assertEqual(cpustats, CPUSTATS_UNIQ_DATA)
- def test_process_cpustats_results_with_dupl_data(self):
- """Process cpustats log where cores have duplicate freq.
+ def test_process_cpustats_results_with_dupl_data(self):
+ """Process cpustats log where cores have duplicate freq.
- Testing normal case when frequency data on some cores
- are duplicated.
- Expecting that duplicated data is discarded in
- returned cpustats.
- """
- self.result.cpustats_log_file = '/tmp/somelogfile.log'
- with mock.patch('builtins.open',
- mock.mock_open(read_data=CPUSTATS_DUPL_OUTPUT)) as mo:
- cpustats = self.result.ProcessCpustatsResults()
- # Check that the log got opened and data were read/parsed.
- calls = [mock.call('/tmp/somelogfile.log')]
- mo.assert_has_calls(calls)
- self.assertEqual(cpustats, CPUSTATS_DUPL_DATA)
+ Testing normal case when frequency data on some cores
+ are duplicated.
+ Expecting that duplicated data is discarded in
+ returned cpustats.
+ """
+ self.result.cpustats_log_file = "/tmp/somelogfile.log"
+ with mock.patch(
+ "builtins.open", mock.mock_open(read_data=CPUSTATS_DUPL_OUTPUT)
+ ) as mo:
+ cpustats = self.result.ProcessCpustatsResults()
+ # Check that the log got opened and data were read/parsed.
+ calls = [mock.call("/tmp/somelogfile.log")]
+ mo.assert_has_calls(calls)
+ self.assertEqual(cpustats, CPUSTATS_DUPL_DATA)
- def test_process_cpustats_results_from_empty_file(self):
- """Error case when log exists but file is empty."""
- self.result.cpustats_log_file = '/tmp/emptylogfile.log'
- with mock.patch('builtins.open', mock.mock_open(read_data='')) as mo:
- cpustats = self.result.ProcessCpustatsResults()
- # Check that the log got opened and parsed successfully and empty data
- # returned.
- calls = [mock.call('/tmp/emptylogfile.log')]
- mo.assert_has_calls(calls)
- self.assertEqual(cpustats, {})
+ def test_process_cpustats_results_from_empty_file(self):
+ """Error case when log exists but file is empty."""
+ self.result.cpustats_log_file = "/tmp/emptylogfile.log"
+ with mock.patch("builtins.open", mock.mock_open(read_data="")) as mo:
+ cpustats = self.result.ProcessCpustatsResults()
+ # Check that the log got opened and parsed successfully and empty data
+ # returned.
+ calls = [mock.call("/tmp/emptylogfile.log")]
+ mo.assert_has_calls(calls)
+ self.assertEqual(cpustats, {})
- def test_process_top_results_with_valid_data(self):
- """Process top log with valid data."""
+ def test_process_top_results_with_valid_data(self):
+ """Process top log with valid data."""
- self.result.top_log_file = '/tmp/fakelogfile.log'
- with mock.patch('builtins.open', mock.mock_open(read_data=TOP_LOG)) as mo:
- topproc = self.result.ProcessTopResults()
- # Check that the log got opened and data were read/parsed.
- calls = [mock.call('/tmp/fakelogfile.log')]
- mo.assert_has_calls(calls)
- self.assertEqual(topproc, TOP_DATA)
+ self.result.top_log_file = "/tmp/fakelogfile.log"
+ with mock.patch(
+ "builtins.open", mock.mock_open(read_data=TOP_LOG)
+ ) as mo:
+ topproc = self.result.ProcessTopResults()
+ # Check that the log got opened and data were read/parsed.
+ calls = [mock.call("/tmp/fakelogfile.log")]
+ mo.assert_has_calls(calls)
+ self.assertEqual(topproc, TOP_DATA)
- def test_process_top_results_from_empty_file(self):
- """Error case when log exists but file is empty."""
- self.result.top_log_file = '/tmp/emptylogfile.log'
- with mock.patch('builtins.open', mock.mock_open(read_data='')) as mo:
- topcalls = self.result.ProcessTopResults()
- # Check that the log got opened and parsed successfully and empty data
- # returned.
- calls = [mock.call('/tmp/emptylogfile.log')]
- mo.assert_has_calls(calls)
- self.assertEqual(topcalls, [])
+ def test_process_top_results_from_empty_file(self):
+ """Error case when log exists but file is empty."""
+ self.result.top_log_file = "/tmp/emptylogfile.log"
+ with mock.patch("builtins.open", mock.mock_open(read_data="")) as mo:
+ topcalls = self.result.ProcessTopResults()
+ # Check that the log got opened and parsed successfully and empty data
+ # returned.
+ calls = [mock.call("/tmp/emptylogfile.log")]
+ mo.assert_has_calls(calls)
+ self.assertEqual(topcalls, [])
- def test_format_string_top_cmds(self):
- """Test formatted string with top commands."""
- self.result.top_cmds = [
- {
- 'cmd': 'chrome-111',
- 'cpu_use_avg': 119.753453465,
- 'count': 44444,
- 'top5_cpu_use': [222.8, 217.9, 217.8, 191.0, 189.9],
- },
- {
- 'cmd': 'chrome-222',
- 'cpu_use_avg': 100,
- 'count': 33333,
- 'top5_cpu_use': [200.0, 195.0, 190.0, 185.0, 180.0],
- },
- {
- 'cmd': 'irq/230-cros-ec',
- 'cpu_use_avg': 10.000000000000001,
- 'count': 1000,
- 'top5_cpu_use': [11.5, 11.4, 11.3, 11.2, 11.1],
- },
- {
- 'cmd': 'powerd',
- 'cpu_use_avg': 2.0,
- 'count': 2,
- 'top5_cpu_use': [3.0, 1.0]
- },
- {
- 'cmd': 'cmd3',
- 'cpu_use_avg': 1.0,
- 'count': 1,
- 'top5_cpu_use': [1.0],
- },
- {
- 'cmd': 'cmd4',
- 'cpu_use_avg': 1.0,
- 'count': 1,
- 'top5_cpu_use': [1.0],
- },
- {
- 'cmd': 'cmd5',
- 'cpu_use_avg': 1.0,
- 'count': 1,
- 'top5_cpu_use': [1.0],
- },
- {
- 'cmd': 'cmd6_not_for_print',
- 'cpu_avg': 1.0,
- 'count': 1,
- 'top5': [1.0],
- },
- ]
- form_str = self.result.FormatStringTopCommands()
- self.assertEqual(
- form_str, '\n'.join([
- 'Top commands with highest CPU usage:',
- ' COMMAND AVG CPU% COUNT HIGHEST 5',
- '-' * 50,
- ' chrome-111 119.75 44444 '
- '[222.8, 217.9, 217.8, 191.0, 189.9]',
- ' chrome-222 100.00 33333 '
- '[200.0, 195.0, 190.0, 185.0, 180.0]',
- ' irq/230-cros-ec 10.00 1000 '
- '[11.5, 11.4, 11.3, 11.2, 11.1]',
- ' powerd 2.00 2 [3.0, 1.0]',
- ' cmd3 1.00 1 [1.0]',
- ' cmd4 1.00 1 [1.0]',
- ' cmd5 1.00 1 [1.0]',
- '-' * 50,
- ]))
+ def test_format_string_top_cmds(self):
+ """Test formatted string with top commands."""
+ self.result.top_cmds = [
+ {
+ "cmd": "chrome-111",
+ "cpu_use_avg": 119.753453465,
+ "count": 44444,
+ "top5_cpu_use": [222.8, 217.9, 217.8, 191.0, 189.9],
+ },
+ {
+ "cmd": "chrome-222",
+ "cpu_use_avg": 100,
+ "count": 33333,
+ "top5_cpu_use": [200.0, 195.0, 190.0, 185.0, 180.0],
+ },
+ {
+ "cmd": "irq/230-cros-ec",
+ "cpu_use_avg": 10.000000000000001,
+ "count": 1000,
+ "top5_cpu_use": [11.5, 11.4, 11.3, 11.2, 11.1],
+ },
+ {
+ "cmd": "powerd",
+ "cpu_use_avg": 2.0,
+ "count": 2,
+ "top5_cpu_use": [3.0, 1.0],
+ },
+ {
+ "cmd": "cmd3",
+ "cpu_use_avg": 1.0,
+ "count": 1,
+ "top5_cpu_use": [1.0],
+ },
+ {
+ "cmd": "cmd4",
+ "cpu_use_avg": 1.0,
+ "count": 1,
+ "top5_cpu_use": [1.0],
+ },
+ {
+ "cmd": "cmd5",
+ "cpu_use_avg": 1.0,
+ "count": 1,
+ "top5_cpu_use": [1.0],
+ },
+ {
+ "cmd": "cmd6_not_for_print",
+ "cpu_avg": 1.0,
+ "count": 1,
+ "top5": [1.0],
+ },
+ ]
+ form_str = self.result.FormatStringTopCommands()
+ self.assertEqual(
+ form_str,
+ "\n".join(
+ [
+ "Top commands with highest CPU usage:",
+ " COMMAND AVG CPU% COUNT HIGHEST 5",
+ "-" * 50,
+ " chrome-111 119.75 44444 "
+ "[222.8, 217.9, 217.8, 191.0, 189.9]",
+ " chrome-222 100.00 33333 "
+ "[200.0, 195.0, 190.0, 185.0, 180.0]",
+ " irq/230-cros-ec 10.00 1000 "
+ "[11.5, 11.4, 11.3, 11.2, 11.1]",
+ " powerd 2.00 2 [3.0, 1.0]",
+ " cmd3 1.00 1 [1.0]",
+ " cmd4 1.00 1 [1.0]",
+ " cmd5 1.00 1 [1.0]",
+ "-" * 50,
+ ]
+ ),
+ )
- def test_format_string_top_calls_no_data(self):
- """Test formatted string of top with no data."""
- self.result.top_cmds = []
- form_str = self.result.FormatStringTopCommands()
- self.assertEqual(
- form_str, '\n'.join([
- 'Top commands with highest CPU usage:',
- ' COMMAND AVG CPU% COUNT HIGHEST 5',
- '-' * 50,
- '[NO DATA FROM THE TOP LOG]',
- '-' * 50,
- ]))
+ def test_format_string_top_calls_no_data(self):
+ """Test formatted string of top with no data."""
+ self.result.top_cmds = []
+ form_str = self.result.FormatStringTopCommands()
+ self.assertEqual(
+ form_str,
+ "\n".join(
+ [
+ "Top commands with highest CPU usage:",
+ " COMMAND AVG CPU% COUNT HIGHEST 5",
+ "-" * 50,
+ "[NO DATA FROM THE TOP LOG]",
+ "-" * 50,
+ ]
+ ),
+ )
- @mock.patch.object(misc, 'GetInsideChrootPath')
- @mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
- def test_generate_perf_report_files(self, mock_chrootruncmd, mock_getpath):
- fake_file = '/usr/chromeos/chroot/tmp/results/fake_file'
- self.result.perf_data_files = ['/tmp/results/perf.data']
- self.result.board = 'lumpy'
- mock_getpath.return_value = fake_file
- self.result.ce.ChrootRunCommand = mock_chrootruncmd
- mock_chrootruncmd.return_value = 0
- # Debug path not found
- self.result.label.debug_path = ''
- tmp = self.result.GeneratePerfReportFiles()
- self.assertEqual(tmp, ['/tmp/chromeos/chroot%s' % fake_file])
- self.assertEqual(mock_chrootruncmd.call_args_list[0][0],
- (self.result.chromeos_root,
- ('/usr/sbin/perf report -n '
- '-i %s --stdio > %s') % (fake_file, fake_file)))
+ @mock.patch.object(misc, "GetInsideChrootPath")
+ @mock.patch.object(command_executer.CommandExecuter, "ChrootRunCommand")
+ def test_generate_perf_report_files(self, mock_chrootruncmd, mock_getpath):
+ fake_file = "/usr/chromeos/chroot/tmp/results/fake_file"
+ self.result.perf_data_files = ["/tmp/results/perf.data"]
+ self.result.board = "lumpy"
+ mock_getpath.return_value = fake_file
+ self.result.ce.ChrootRunCommand = mock_chrootruncmd
+ mock_chrootruncmd.return_value = 0
+ # Debug path not found
+ self.result.label.debug_path = ""
+ tmp = self.result.GeneratePerfReportFiles()
+ self.assertEqual(tmp, ["/tmp/chromeos/chroot%s" % fake_file])
+ self.assertEqual(
+ mock_chrootruncmd.call_args_list[0][0],
+ (
+ self.result.chromeos_root,
+ ("/usr/sbin/perf report -n " "-i %s --stdio > %s")
+ % (fake_file, fake_file),
+ ),
+ )
- @mock.patch.object(misc, 'GetInsideChrootPath')
- @mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
- def test_generate_perf_report_files_debug(self, mock_chrootruncmd,
- mock_getpath):
- fake_file = '/usr/chromeos/chroot/tmp/results/fake_file'
- self.result.perf_data_files = ['/tmp/results/perf.data']
- self.result.board = 'lumpy'
- mock_getpath.return_value = fake_file
- self.result.ce.ChrootRunCommand = mock_chrootruncmd
- mock_chrootruncmd.return_value = 0
- # Debug path found
- self.result.label.debug_path = '/tmp/debug'
- tmp = self.result.GeneratePerfReportFiles()
- self.assertEqual(tmp, ['/tmp/chromeos/chroot%s' % fake_file])
- self.assertEqual(mock_chrootruncmd.call_args_list[0][0],
- (self.result.chromeos_root,
- ('/usr/sbin/perf report -n --symfs /tmp/debug '
- '--vmlinux /tmp/debug/usr/lib/debug/boot/vmlinux '
- '-i %s --stdio > %s') % (fake_file, fake_file)))
+ @mock.patch.object(misc, "GetInsideChrootPath")
+ @mock.patch.object(command_executer.CommandExecuter, "ChrootRunCommand")
+ def test_generate_perf_report_files_debug(
+ self, mock_chrootruncmd, mock_getpath
+ ):
+ fake_file = "/usr/chromeos/chroot/tmp/results/fake_file"
+ self.result.perf_data_files = ["/tmp/results/perf.data"]
+ self.result.board = "lumpy"
+ mock_getpath.return_value = fake_file
+ self.result.ce.ChrootRunCommand = mock_chrootruncmd
+ mock_chrootruncmd.return_value = 0
+ # Debug path found
+ self.result.label.debug_path = "/tmp/debug"
+ tmp = self.result.GeneratePerfReportFiles()
+ self.assertEqual(tmp, ["/tmp/chromeos/chroot%s" % fake_file])
+ self.assertEqual(
+ mock_chrootruncmd.call_args_list[0][0],
+ (
+ self.result.chromeos_root,
+ (
+ "/usr/sbin/perf report -n --symfs /tmp/debug "
+ "--vmlinux /tmp/debug/usr/lib/debug/boot/vmlinux "
+ "-i %s --stdio > %s"
+ )
+ % (fake_file, fake_file),
+ ),
+ )
- @mock.patch.object(misc, 'GetOutsideChrootPath')
- def test_populate_from_run(self, mock_getpath):
- def FakeGetResultsDir():
- self.callGetResultsDir = True
- return '/tmp/results_dir'
+ @mock.patch.object(misc, "GetOutsideChrootPath")
+ def test_populate_from_run(self, mock_getpath):
+ def FakeGetResultsDir():
+ self.callGetResultsDir = True
+ return "/tmp/results_dir"
- def FakeGetResultsFile():
- self.callGetResultsFile = True
- return []
+ def FakeGetResultsFile():
+ self.callGetResultsFile = True
+ return []
- def FakeGetPerfDataFiles():
- self.callGetPerfDataFiles = True
- return []
+ def FakeGetPerfDataFiles():
+ self.callGetPerfDataFiles = True
+ return []
- def FakeGetPerfReportFiles():
- self.callGetPerfReportFiles = True
- return []
+ def FakeGetPerfReportFiles():
+ self.callGetPerfReportFiles = True
+ return []
- def FakeGetTurbostatFile():
- self.callGetTurbostatFile = True
- return []
+ def FakeGetTurbostatFile():
+ self.callGetTurbostatFile = True
+ return []
- def FakeGetCpustatsFile():
- self.callGetCpustatsFile = True
- return []
+ def FakeGetCpustatsFile():
+ self.callGetCpustatsFile = True
+ return []
- def FakeGetTopFile():
- self.callGetTopFile = True
- return []
+ def FakeGetTopFile():
+ self.callGetTopFile = True
+ return []
- def FakeGetCpuinfoFile():
- self.callGetCpuinfoFile = True
- return []
+ def FakeGetCpuinfoFile():
+ self.callGetCpuinfoFile = True
+ return []
- def FakeGetWaitTimeFile():
- self.callGetWaitTimeFile = True
- return []
+ def FakeGetWaitTimeFile():
+ self.callGetWaitTimeFile = True
+ return []
- def FakeProcessResults(show_results=False):
- if show_results:
- pass
- self.callProcessResults = True
+ def FakeProcessResults(show_results=False):
+ if show_results:
+ pass
+ self.callProcessResults = True
- if mock_getpath:
- pass
- mock.get_path = '/tmp/chromeos/tmp/results_dir'
+ if mock_getpath:
+ pass
+ mock.get_path = "/tmp/chromeos/tmp/results_dir"
- self.callGetResultsDir = False
- self.callGetResultsFile = False
- self.callGetPerfDataFiles = False
- self.callGetPerfReportFiles = False
- self.callGetTurbostatFile = False
- self.callGetCpustatsFile = False
- self.callGetTopFile = False
- self.callGetCpuinfoFile = False
- self.callGetWaitTimeFile = False
- self.callProcessResults = False
+ self.callGetResultsDir = False
+ self.callGetResultsFile = False
+ self.callGetPerfDataFiles = False
+ self.callGetPerfReportFiles = False
+ self.callGetTurbostatFile = False
+ self.callGetCpustatsFile = False
+ self.callGetTopFile = False
+ self.callGetCpuinfoFile = False
+ self.callGetWaitTimeFile = False
+ self.callProcessResults = False
- self.result.GetResultsDir = FakeGetResultsDir
- self.result.GetResultsFile = FakeGetResultsFile
- self.result.GetPerfDataFiles = FakeGetPerfDataFiles
- self.result.GeneratePerfReportFiles = FakeGetPerfReportFi