Snap for 10453563 from c79ff23576a6ce2754df2710ad82725bf4576585 to mainline-adservices-release
Change-Id: I83646d83b1c2deb34c098757bba86f851c9adf89
diff --git a/.gitignore b/.gitignore
index 37446ff..04a50ae 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,6 +7,10 @@
**/.coverage
**/htmlcov
+atest/proto/*.py
+atest/tf_proto/*.py
+atest/atest_flag_list_for_completion.txt
+
# Intellij generated files
**/.idea
**/*.iml
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 0000000..d5d02a2
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,59 @@
+// Copyright 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+python_library_host {
+ name: "asuite_proto",
+ srcs: [
+ "atest/proto/*.proto",
+ ],
+ proto: {
+ canonical_path_from_root: false,
+ },
+}
+
+
+java_library_host {
+ name: "asuite_proto_java",
+ srcs: [
+ "atest/proto/*.proto",
+ ],
+ proto: {
+ type: "full",
+ canonical_path_from_root: false,
+ include_dirs: ["external/protobuf/src"],
+ },
+ // b/267831518: Pin tradefed and dependencies to Java 11.
+ java_version: "11",
+}
+
+python_library_host {
+ name: "tradefed-protos-py",
+ srcs: [
+ "atest/tf_proto/*.proto",
+ ],
+ visibility: [
+ "//tools/asuite/atest",
+ ],
+ libs: [
+ "libprotobuf-python",
+ ],
+ proto: {
+ include_dirs: ["external/protobuf/src"],
+ canonical_path_from_root: false,
+ },
+}
diff --git a/OWNERS b/OWNERS
index 925d49c..0636b42 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,6 +1,9 @@
albaltai@google.com
dshi@google.com
+hzalek@google.com
+kellyhung@google.com
kevcheng@google.com
morrislin@google.com
patricktu@google.com
+weisu@google.com
yangbill@google.com
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index 6b52c02..b4302a0 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -1,6 +1,6 @@
[Builtin Hooks]
-google_java_format = true
pylint3 = true
+google_java_format = true
[Tool Paths]
google-java-format = ${REPO_ROOT}/prebuilts/tools/common/google-java-format/google-java-format
diff --git a/aidegen/Android.bp b/aidegen/Android.bp
index 84e46df..3b77731 100644
--- a/aidegen/Android.bp
+++ b/aidegen/Android.bp
@@ -16,27 +16,12 @@
default_applicable_licenses: ["Android-Apache-2.0"],
}
-python_defaults {
- name: "aidegen_default",
- pkg_path: "aidegen",
- version: {
- py2: {
- enabled: false,
- embedded_launcher: false,
- },
- py3: {
- enabled: true,
- embedded_launcher: false,
- },
- },
-}
-
python_binary_host {
name: "aidegen",
// Make aidegen's built name to aidegen-dev
suffix: "-dev",
- defaults: ["aidegen_default"],
main: "aidegen_main.py",
+ pkg_path: "aidegen",
srcs: [
"**/*.py",
],
@@ -52,7 +37,7 @@
python_library_host {
name: "aidegen_lib",
- defaults: ["aidegen_default"],
+ pkg_path: "aidegen",
srcs: [
"**/*.py",
],
@@ -64,7 +49,7 @@
python_library_host {
name: "aidegen_lib_common_util",
- defaults: ["aidegen_default"],
+ pkg_path: "aidegen",
srcs: [
"lib/common_util.py",
"lib/config.py",
@@ -94,7 +79,6 @@
],
test_config: "aidegen_unittests.xml",
test_suites: ["null-suite"],
- defaults: ["aidegen_default"],
test_options:{
unit_test: false,
},
diff --git a/aidegen/README.md b/aidegen/README.md
index defa6d4..ffa7f3b 100755
--- a/aidegen/README.md
+++ b/aidegen/README.md
@@ -68,19 +68,19 @@
Developers can also use the following optional arguments with AIDEGen commands.
-| Option | Long option | Description |
-| :----: | :---------------- | ----------------------------------------------- |
-| `-d` | `--depth` | The depth of module referenced by source. |
-| `-i` | `--ide` | Launch IDE type, j=IntelliJ s=Android Studio e=Eclipse c=CLion v=VS Code|
-| `-p` | `--ide-path` | Specify user's IDE installed path. |
-| `-n` | `--no_launch` | Do not launch IDE. |
-| `-r` | `--config-reset` | Reset all AIDEGen's saved configurations. |
-| `-s` | `--skip-build` | Skip building jars or modules. |
-| `-v` | `--verbose` | Displays DEBUG level logging. |
-| `-a` | `--android-tree` | Generate whole Android source tree project file for IDE.|
-| `-e` | `--exclude-paths` | Exclude the directories in IDE. |
-| `-l` | `--language` | Launch IDE with a specific language,j=java c=C/C++ r=Rust|
-| `-h` | `--help` | Shows help message and exits. |
+| Option | Long option | Description |
+|:------:|:------------------|--------------------------------------------------------------------------|
+| `-d` | `--depth` | The depth of module referenced by source. |
+| `-i` | `--ide` | Launch IDE type, j=IntelliJ s=Android Studio e=Eclipse c=CLion v=VS Code |
+| `-p` | `--ide-path` | Specify user's IDE installed path. |
+| `-n` | `--no_launch` | Do not launch IDE. |
+| `-r` | `--config-reset` | Reset all AIDEGen's saved configurations. |
+| `-s` | `--skip-build` | Skip building jars or modules. |
+| `-v` | `--verbose` | Displays DEBUG level logging. |
+| `-a` | `--android-tree` | Generate whole Android source tree project file for IDE. |
+| `-e` | `--exclude-paths` | Exclude the directories in IDE. |
+| `-l` | `--language` | Launch IDE with a specific language,j=java c=C/C++ r=Rust |
+| `-h` | `--help` | Shows help message and exits. |
## 4. Troubleshooting tips:
@@ -90,7 +90,7 @@
## 5. FAQ:
Q1. If I already have an IDE project file, and I run command AIDEGen to generate
-the same project file again, what'll happen?
+the same project file again, what will happen?
A1: The former IDEA project file will be overwritten by the newly generated one
from the aidegen command.
diff --git a/aidegen/__init__.py b/aidegen/__init__.py
deleted file mode 100755
index e69de29..0000000
--- a/aidegen/__init__.py
+++ /dev/null
diff --git a/aidegen/aidegen_main.py b/aidegen/aidegen_main.py
index 8dd6d73..5713a1d 100644
--- a/aidegen/aidegen_main.py
+++ b/aidegen/aidegen_main.py
@@ -24,7 +24,7 @@
- .idea/vcs.xml
- .idea/.name
- .idea/copyright/Apache_2.xml
- - .idea/copyright/progiles_settings.xml
+ - .idea/copyright/profiles_settings.xml
- Sample usage:
- Change directory to AOSP root first.
@@ -75,8 +75,9 @@
'Choose the root directory -> click \'Finish\'.')
_IDE_CACHE_REMINDER_MSG = (
'To prevent the existed IDE cache from impacting your IDE dependency '
- 'analysis, please consider to clear IDE caches if necessary. To do that, in'
- ' IntelliJ IDEA, go to [File > Invalidate Caches / Restart...].')
+ 'analysis, please consider to clear IDE caches if necessary. To do that, '
+ 'in IntelliJ IDEA, go to [File > Invalidate Caches -> '
+ 'Invalidate and Restart].')
_MAX_TIME = 1
_SKIP_BUILD_INFO_FUTURE = ''.join([
@@ -97,6 +98,7 @@
_NO_LANGUAGE_PROJECT_EXIST = 'There is no {} target.'
_NO_IDE_LAUNCH_PATH = 'Can not find the IDE path : {}'
+
def _parse_args(args):
"""Parse command line arguments.
@@ -229,7 +231,7 @@
def _launch_native_projects(ide_util_obj, args, cmakelists):
"""Launches C/C++ projects with IDE.
- AIDEGen provides the IDE argument for CLion, but there's still a implicit
+ AIDEGen provides the IDE argument for CLion, but there's still an implicit
way to launch it. The rules to launch it are:
1. If no target IDE, we don't have to launch any IDE for C/C++ project.
2. If the target IDE is IntelliJ or Eclipse, we should launch C/C++
@@ -292,7 +294,7 @@
launch C/C++ projects of frameworks/base in CLion.
c) aidegen external/rust/crates/protobuf -i v
launch Rust project of external/rust/crates/protobuf in VS Code.
- 3. If the launguage is specific, launch relative language projects in the
+ 3. If the language is specific, launch relative language projects in the
relative IDE.
a) aidegen frameworks/base -l j
launch Java projects of frameworks/base in IntelliJ.
@@ -308,7 +310,7 @@
b) aidegen frameworks/base -i s -l c
launch C/C++ projects of frameworks/base in Android Studio.
c) aidegen frameworks/base -i c -l j
- launch C/C++ projects of frameworks/base in CLion.
+ launch Java projects of frameworks/base in CLion.
Args:
args: A list of system arguments.
@@ -455,6 +457,29 @@
return abs_paths
+def _get_targets_from_args(targets, android_tree):
+ """Gets targets for specific argument.
+
+ For example:
+ $aidegen : targets = ['.']
+ $aidegen -a : targets = []
+ $aidegen . : targets = ['.']
+ $aidegen . -a: targets = []
+
+ Args:
+ targets: A list of strings of targets.
+ android_tree: A boolean, True with '-a' argument else False.
+
+ Returns:
+ A list of strings of the resolved targets.
+ """
+ if targets == [''] and not android_tree:
+ return ['.']
+ if android_tree:
+ return []
+ return targets
+
+
@common_util.time_logged(message=_TIME_EXCEED_MSG, maximum=_MAX_TIME)
def main_with_message(args):
"""Main entry with skip build message.
@@ -490,10 +515,7 @@
ask_version = False
try:
args = _parse_args(argv)
- # If the targets is the default value, sets it to the absolute path to
- # avoid the issues caused by the empty path.
- if args.targets == ['']:
- args.targets = [os.path.abspath(os.getcwd())]
+ args.targets = _get_targets_from_args(args.targets, args.android_tree)
if args.version:
ask_version = True
version_file = os.path.join(os.path.dirname(__file__),
@@ -530,7 +552,7 @@
traceback_str = ''.join(traceback_list)
aidegen_metrics.ends_asuite_metrics(exit_code, traceback_str,
error_message)
- # print out the trackback message for developers to debug
+ # print out the traceback message for developers to debug
print(traceback_str)
raise err
finally:
diff --git a/aidegen/aidegen_main_unittest.py b/aidegen/aidegen_main_unittest.py
index 7fca970..5e392ad 100644
--- a/aidegen/aidegen_main_unittest.py
+++ b/aidegen/aidegen_main_unittest.py
@@ -43,9 +43,7 @@
# pylint: disable=protected-access
-# pylint: disable=invalid-name
# pylint: disable=too-many-arguments
-# pylint: disable=too-many-function-args
# pylint: disable=too-many-statements
# pylint: disable=too-many-locals
class AidegenMainUnittests(unittest.TestCase):
@@ -170,6 +168,19 @@
aidegen_main.main(args)
self.assertFalse(mock_launch_ide.called)
+ def test_get_targets_from_args(self):
+ """Test get targets from different arguments."""
+ self.assertEqual(
+ aidegen_main._get_targets_from_args([''], True), [])
+ self.assertEqual(
+ aidegen_main._get_targets_from_args(['.'], False), ['.'])
+ self.assertEqual(
+ aidegen_main._get_targets_from_args([''], False), ['.'])
+ self.assertEqual(
+ aidegen_main._get_targets_from_args(['.'], True), [])
+ self.assertEqual(
+ aidegen_main._get_targets_from_args(['test'], False), ['test'])
+
@mock.patch.object(aidegen_main, '_launch_ide')
@mock.patch.object(ide_util, 'get_ide_util_instance')
def test_launch_native_projects_without_ide_object(
diff --git a/aidegen/aidegen_run_unittests.py b/aidegen/aidegen_run_unittests.py
index 0027813..1f6430f 100644
--- a/aidegen/aidegen_run_unittests.py
+++ b/aidegen/aidegen_run_unittests.py
@@ -24,6 +24,7 @@
# Setup logging to be silent so unittests can pass through TF.
logging.disable(logging.ERROR)
+
def get_test_modules():
"""Returns a list of testable modules.
@@ -35,9 +36,10 @@
List of strings (the testable module import path).
"""
testable_modules = []
- base_path = os.path.dirname(os.path.realpath(__file__))
+ package = os.path.dirname(os.path.realpath(__file__))
+ base_path = os.path.dirname(package)
- for dirpath, _, files in os.walk(base_path):
+ for dirpath, _, files in os.walk(package):
for _file in files:
if _file.endswith("_unittest.py"):
# Now transform it into a relative import path.
diff --git a/aidegen/data/AndroidStyle_aidegen.xml b/aidegen/data/AndroidStyle_aidegen.xml
index 51bf5d1..0fcc8d9 100644
--- a/aidegen/data/AndroidStyle_aidegen.xml
+++ b/aidegen/data/AndroidStyle_aidegen.xml
@@ -18,6 +18,8 @@
<emptyLine />
<package name="com" withSubpackages="true" static="true" />
<emptyLine />
+ <package name="dagger" withSubpackages="true" static="true" />
+ <emptyLine />
<package name="gov" withSubpackages="true" static="true" />
<emptyLine />
<package name="junit" withSubpackages="true" static="true" />
@@ -26,6 +28,8 @@
<emptyLine />
<package name="org" withSubpackages="true" static="true" />
<emptyLine />
+ <package name="robolectric" withSubpackages="true" static="true" />
+ <emptyLine />
<package name="java" withSubpackages="true" static="true" />
<emptyLine />
<package name="javax" withSubpackages="true" static="true" />
@@ -44,6 +48,8 @@
<emptyLine />
<package name="com" withSubpackages="true" static="false" />
<emptyLine />
+ <package name="dagger" withSubpackages="true" static="false" />
+ <emptyLine />
<package name="gov" withSubpackages="true" static="false" />
<emptyLine />
<package name="junit" withSubpackages="true" static="false" />
@@ -52,6 +58,8 @@
<emptyLine />
<package name="org" withSubpackages="true" static="false" />
<emptyLine />
+ <package name="robolectric" withSubpackages="true" static="false" />
+ <emptyLine />
<package name="java" withSubpackages="true" static="false" />
<emptyLine />
<package name="javax" withSubpackages="true" static="false" />
@@ -105,6 +113,8 @@
<emptyLine />
<package name="com" withSubpackages="true" static="true" />
<emptyLine />
+ <package name="dagger" withSubpackages="true" static="true" />
+ <emptyLine />
<package name="gov" withSubpackages="true" static="true" />
<emptyLine />
<package name="junit" withSubpackages="true" static="true" />
@@ -113,6 +123,8 @@
<emptyLine />
<package name="org" withSubpackages="true" static="true" />
<emptyLine />
+ <package name="robolectric" withSubpackages="true" static="true" />
+ <emptyLine />
<package name="java" withSubpackages="true" static="true" />
<emptyLine />
<package name="javax" withSubpackages="true" static="true" />
@@ -131,6 +143,8 @@
<emptyLine />
<package name="com" withSubpackages="true" static="false" />
<emptyLine />
+ <package name="dagger" withSubpackages="true" static="false" />
+ <emptyLine />
<package name="gov" withSubpackages="true" static="false" />
<emptyLine />
<package name="junit" withSubpackages="true" static="false" />
@@ -139,6 +153,8 @@
<emptyLine />
<package name="org" withSubpackages="true" static="false" />
<emptyLine />
+ <package name="robolectric" withSubpackages="true" static="false" />
+ <emptyLine />
<package name="java" withSubpackages="true" static="false" />
<emptyLine />
<package name="javax" withSubpackages="true" static="false" />
@@ -373,4 +389,4 @@
</arrangement>
</codeStyleSettings>
</code_scheme>
-</component>
\ No newline at end of file
+</component>
diff --git a/aidegen/idea/__init__.py b/aidegen/idea/__init__.py
deleted file mode 100755
index e69de29..0000000
--- a/aidegen/idea/__init__.py
+++ /dev/null
diff --git a/aidegen/idea/iml.py b/aidegen/idea/iml.py
index 27f798c..ee01b64 100644
--- a/aidegen/idea/iml.py
+++ b/aidegen/idea/iml.py
@@ -57,7 +57,7 @@
"""
# b/121256503: Prevent duplicated iml names from breaking IDEA.
# Use a map to cache in-using(already used) iml project file names.
- USED_NAME_CACHE = dict()
+ USED_NAME_CACHE = {}
def __init__(self, mod_info):
"""Initializes IMLGenerator.
@@ -202,8 +202,8 @@
IS_TEST='true'))
self._excludes = self._mod_info.get(constant.KEY_EXCLUDES, '')
- #For sovling duplicate package name, frameworks/base will be higher
- #priority.
+ # For solving duplicate package name, frameworks/base will be higher
+ # priority.
srcs = sorted(framework_srcs) + sorted(srcs)
self._srcs = templates.CONTENT.format(MODULE_PATH=self._mod_path,
EXCLUDES=self._excludes,
diff --git a/aidegen/idea/iml_unittest.py b/aidegen/idea/iml_unittest.py
index 147c47b..3ee8e29 100644
--- a/aidegen/idea/iml_unittest.py
+++ b/aidegen/idea/iml_unittest.py
@@ -216,5 +216,6 @@
self.assertEqual(os.path.basename(root_path),
iml.IMLGenerator.get_unique_iml_name(path))
+
if __name__ == '__main__':
unittest.main()
diff --git a/aidegen/idea/xml_gen_unittest.py b/aidegen/idea/xml_gen_unittest.py
index b6a2356..3475a14 100644
--- a/aidegen/idea/xml_gen_unittest.py
+++ b/aidegen/idea/xml_gen_unittest.py
@@ -28,16 +28,17 @@
from aidegen.idea import xml_gen
-# pylint: disable=protected-access
class XMLGenUnittests(unittest.TestCase):
"""Unit tests for XMLGenerator class."""
_TEST_DIR = None
_XML_NAME = 'test.xml'
- _DEFAULT_XML = """<?xml version="1.0" encoding="UTF-8"?>
+ _DEFAULT_XML = """\
+<?xml version="1.0" encoding="UTF-8"?>
<project version="4"></project>
"""
- _IGNORE_GIT_XML = """<?xml version="1.0" encoding="UTF-8"?>
+ _IGNORE_GIT_XML = """\
+<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsManagerConfiguration">
<ignored-roots><path value="/b" /></ignored-roots>
diff --git a/aidegen/lib/__init__.py b/aidegen/lib/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/aidegen/lib/__init__.py
+++ /dev/null
diff --git a/aidegen/lib/aidegen_metrics.py b/aidegen/lib/aidegen_metrics.py
index 1511f74..3cbf09b 100644
--- a/aidegen/lib/aidegen_metrics.py
+++ b/aidegen/lib/aidegen_metrics.py
@@ -22,36 +22,35 @@
from aidegen import constant
from aidegen.lib import common_util
-from atest import atest_utils
-# When combine 3 paths in a single try block, it's hard for the coverage
-# counting algorithm to judge the each real path clearly. So, separating them
+# When combining 3 paths in a single try block, it's hard for the coverage
+# counting algorithm to judge each real path clearly. So, separating them
# into its own try block will increase the coverage.
# Original code as follows,
# try:
-# from asuite.metrics import metrics
-# from asuite.metrics import metrics_base
-# from asuite.metrics import metrics_utils
+# from atest.metrics import metrics
+# from atest.metrics import metrics_base
+# from atest.metrics import metrics_utils
# except ImportError:
# logging.debug('Import metrics fail, can\'t send metrics.')
# metrics = None
# metrics_base = None
# metrics_utils = None
try:
- from asuite.metrics import metrics
+ from atest.metrics import metrics
except ImportError:
logging.debug('Import metrics fail, can\'t send metrics.')
metrics = None
try:
- from asuite.metrics import metrics_base
+ from atest.metrics import metrics_base
except ImportError:
logging.debug('Import metrics fail, can\'t send metrics.')
metrics_base = None
try:
- from asuite.metrics import metrics_utils
+ from atest.metrics import metrics_utils
except ImportError:
logging.debug('Import metrics fail, can\'t send metrics.')
metrics_utils = None
@@ -68,7 +67,7 @@
"""
if not metrics:
return
- atest_utils.print_data_collection_notice()
+ metrics_utils.print_data_collection_notice()
metrics_base.MetricsBase.tool_name = constant.AIDEGEN_TOOL_NAME
metrics_utils.get_start_time()
command = ' '.join(sys.argv)
@@ -138,7 +137,7 @@
return False
metrics.LocalDetectEvent(
- detect_type = process_type,
- result = int(duration)
+ detect_type=process_type,
+ result=int(duration)
)
return True
diff --git a/aidegen/lib/aidegen_metrics_unittest.py b/aidegen/lib/aidegen_metrics_unittest.py
index 89edf57..92def3f 100644
--- a/aidegen/lib/aidegen_metrics_unittest.py
+++ b/aidegen/lib/aidegen_metrics_unittest.py
@@ -21,12 +21,11 @@
from aidegen import constant
from aidegen.lib import aidegen_metrics
-from atest import atest_utils
try:
- from asuite.metrics import metrics
- from asuite.metrics import metrics_utils
+ from atest.metrics import metrics
+ from atest.metrics import metrics_utils
except ImportError:
metrics = None
metrics_utils = None
@@ -35,7 +34,7 @@
class AidegenMetricsUnittests(unittest.TestCase):
"""Unit tests for aidegen_metrics.py."""
- @mock.patch.object(atest_utils, 'print_data_collection_notice')
+ @mock.patch.object(metrics_utils, 'print_data_collection_notice')
def test_starts_asuite_metrics(self, mock_print_data):
"""Test starts_asuite_metrics."""
references = ['nothing']
@@ -65,5 +64,6 @@
aidegen_metrics.send_exception_metrics(1, '', '', 'err_test')
self.assertTrue(mock_ends_metrics.called)
+
if __name__ == '__main__':
unittest.main()
diff --git a/aidegen/lib/clion_project_file_gen.py b/aidegen/lib/clion_project_file_gen.py
index 3900c8b..5be0a88 100644
--- a/aidegen/lib/clion_project_file_gen.py
+++ b/aidegen/lib/clion_project_file_gen.py
@@ -83,7 +83,7 @@
# Constants for CMakeLists.txt.
_MIN_VERSION_TOKEN = '@MINVERSION@'
_PROJECT_NAME_TOKEN = '@PROJNAME@'
-_ANDOIR_ROOT_TOKEN = '@ANDROIDROOT@'
+_ANDROID_ROOT_TOKEN = '@ANDROIDROOT@'
_MINI_VERSION_SUPPORT = 'cmake_minimum_required(VERSION {})\n'
_MINI_VERSION = '3.5'
_KEY_CLANG = 'clang'
@@ -228,13 +228,13 @@
def generate_cmakelists_file(self):
"""Generates CLion project file from the target module's info."""
- with open(self.cc_path, 'w') as hfile:
+ with open(self.cc_path, 'w', encoding='utf-8') as hfile:
self._write_cmakelists_file(hfile)
@common_util.check_args(hfile=(TextIOWrapper, StringIO))
@common_util.io_error_handle
def _write_cmakelists_file(self, hfile):
- """Writes CLion project file content with neccessary info.
+ """Writes CLion project file content with necessary info.
Args:
hfile: A file handler instance.
@@ -257,7 +257,7 @@
_MIN_VERSION_TOKEN, _MINI_VERSION)
content = content.replace(_PROJECT_NAME_TOKEN, self.mod_name)
content = content.replace(
- _ANDOIR_ROOT_TOKEN, common_util.get_android_root_dir())
+ _ANDROID_ROOT_TOKEN, common_util.get_android_root_dir())
hfile.write(content)
@common_util.check_args(hfile=(TextIOWrapper, StringIO))
@@ -403,7 +403,7 @@
src_path = os.path.join(cc_dir, constant.CLION_PROJECT_FILE_NAME)
if os.path.isfile(src_path):
os.remove(src_path)
- with open(src_path, 'w') as hfile:
+ with open(src_path, 'w', encoding='utf-8') as hfile:
_write_base_cmakelists_file(hfile, cc_module_info, src_path, mod_names)
os.symlink(src_path, dst_path)
return dst_path
@@ -522,7 +522,7 @@
@common_util.check_args(hfile=(TextIOWrapper, StringIO), flags=list, tag=str)
@common_util.io_error_handle
def _write_all_flags(hfile, flags, tag):
- """Wrties all flags to the project file.
+ """Writes all flags to the project file.
Args:
hfile: A file handler instance.
diff --git a/aidegen/lib/clion_project_file_gen_unittest.py b/aidegen/lib/clion_project_file_gen_unittest.py
index a3573cd..69b8437 100644
--- a/aidegen/lib/clion_project_file_gen_unittest.py
+++ b/aidegen/lib/clion_project_file_gen_unittest.py
@@ -78,7 +78,7 @@
'get_module_path')
def test_init_with_mod_info_makedir(
self, mock_get_path, mock_get_cmake, mock_exists, mock_mkdirs):
- """Test __init__ with mod_info and check if need to make dir."""
+ """Test __init__ with mod_info and check if we need to make dir."""
mod_info = dict(self._MOD_INFO)
mod_info.update(self._MOD_NAME_DICT)
mock_get_path.return_value = self._MOD_PATH
@@ -109,7 +109,7 @@
expected = expected.replace(
clion_project_file_gen._PROJECT_NAME_TOKEN, clion_gen.mod_name)
expected = expected.replace(
- clion_project_file_gen._ANDOIR_ROOT_TOKEN,
+ clion_project_file_gen._ANDROID_ROOT_TOKEN,
common_util.get_android_root_dir())
self.assertEqual(content, expected)
@@ -592,5 +592,6 @@
mod_info, 'a/d')
self.assertEqual(res, path_d)
+
if __name__ == '__main__':
unittest.main()
diff --git a/aidegen/lib/common_util.py b/aidegen/lib/common_util.py
index 1d2643a..66cb186 100644
--- a/aidegen/lib/common_util.py
+++ b/aidegen/lib/common_util.py
@@ -42,12 +42,9 @@
from atest import module_info
-COLORED_INFO = partial(
- atest_utils.colorize, color=constants.MAGENTA, highlight=False)
-COLORED_PASS = partial(
- atest_utils.colorize, color=constants.GREEN, highlight=False)
-COLORED_FAIL = partial(
- atest_utils.colorize, color=constants.RED, highlight=False)
+COLORED_INFO = partial(atest_utils.colorize, color=constants.MAGENTA)
+COLORED_PASS = partial(atest_utils.colorize, color=constants.GREEN)
+COLORED_FAIL = partial(atest_utils.colorize, color=constants.RED)
FAKE_MODULE_ERROR = '{} is a fake module.'
OUTSIDE_ROOT_ERROR = '{} is outside android root.'
PATH_NOT_EXISTS_ERROR = 'The path {} doesn\'t exist.'
@@ -71,7 +68,7 @@
Args:
func: a function is to be calculated its spending time.
message: the message the decorated function wants to show.
- maximum: a interger, minutes. If time exceeds the maximum time show
+ maximum: an integer, minutes. If time exceeds the maximum time show
message, otherwise doesn't.
Returns:
@@ -219,7 +216,7 @@
1. Module name, e.g. Settings
2. Module path, e.g. packages/apps/Settings
3. Relative path, e.g. ../../packages/apps/Settings
- 4. Current directory, e.g. . or no argument
+ 4. Current directory, e.g. '.' or no argument
raise_on_lost_module: A boolean, pass to _check_module to determine if
ProjectPathNotExistError or NoModuleDefinedInModuleInfoError
should be raised.
@@ -253,7 +250,7 @@
Returns:
1. If there is no error _check_module always return True.
- 2. If there is a error,
+ 2. If there is an error,
a. When raise_on_lost_module is False, _check_module will raise the
error.
b. When raise_on_lost_module is True, _check_module will return
@@ -336,7 +333,7 @@
We just want to get an atest ModuleInfo instance.
2. If targets isn't None:
Check if the targets don't exist in ModuleInfo, we'll regain a new
- atest ModleInfo instance by setting force_build=True and call
+ atest ModuleInfo instance by setting force_build=True and call
_check_modules again. If targets still don't exist, raise exceptions.
Args:
@@ -584,7 +581,7 @@
json_path: An absolute json file path string.
data: A dictionary of data to be written into a json file.
"""
- with open(json_path, 'w') as json_file:
+ with open(json_path, 'w', encoding='utf-8') as json_file:
json.dump(data, json_file, indent=4)
@@ -598,7 +595,7 @@
Returns:
A dictionary loaded from the json_path.
"""
- with open(json_path) as jfile:
+ with open(json_path, 'r', encoding='utf-8') as jfile:
return json.load(jfile)
@@ -613,7 +610,7 @@
A list of the file's content by line.
"""
files = []
- with open(file_path, encoding='utf8') as infile:
+ with open(file_path, 'r', encoding='utf8') as infile:
for line in infile:
files.append(line.strip())
return files
@@ -630,7 +627,7 @@
Returns:
String: Content of the file.
"""
- with open(path, encoding=encode_type) as template:
+ with open(path, 'r', encoding=encode_type) as template:
return template.read()
@@ -644,7 +641,7 @@
"""
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
- with open(path, 'w') as target:
+ with open(path, 'w', encoding='utf-8') as target:
target.write(content)
@@ -667,7 +664,7 @@
Returns:
A dictionary with key: environment variable and value: absolute path of
- the file generated by the environment varialbe.
+ the file generated by the environment variable.
"""
data = {}
root_dir = get_android_root_dir()
@@ -749,7 +746,7 @@
ide: A character represents the input IDE.
jlist: A list of Android Java projects, the default value is None.
clist: A list of Android C/C++ projects, the default value is None.
- clist: A list of Android Rust projects, the default value is None.
+ rlist: A list of Android Rust projects, the default value is None.
Returns:
A tuple of the determined language and IDE name strings.
@@ -781,7 +778,7 @@
"""Checks if any Java or Kotlin files exist in an abs_path directory.
Args:
- abs_path: A string of absolute path of a directory to be check.
+ abs_path: A string of absolute path of a directory to be checked.
Returns:
True if any Java or Kotlin files exist otherwise False.
diff --git a/aidegen/lib/common_util_unittest.py b/aidegen/lib/common_util_unittest.py
index 2c40b28..a2ca609 100644
--- a/aidegen/lib/common_util_unittest.py
+++ b/aidegen/lib/common_util_unittest.py
@@ -30,18 +30,20 @@
from atest import module_info
-# pylint: disable=too-many-arguments
+# pylint: disable=unused-argument
# pylint: disable=protected-access
class AidegenCommonUtilUnittests(unittest.TestCase):
"""Unit tests for common_util.py"""
- _TEST_XML_CONTENT = """<application><component name="ProjectJdkTable">
+ _TEST_XML_CONTENT = """\
+<application><component name="ProjectJdkTable">
<jdk version="2"> <name value="JDK_OTHER" />
<type value="JavaSDK" /> </jdk> </component>
</application>
"""
- _SAMPLE_XML_CONTENT = """<application>
+ _SAMPLE_XML_CONTENT = """\
+<application>
<component name="ProjectJdkTable">
<jdk version="2">
<name value="JDK_OTHER"/>
@@ -90,7 +92,6 @@
self.assertEqual(('b/c', '/a/b/c'),
common_util.get_related_paths(mod_info, 'b/c'))
-
@mock.patch('os.getcwd')
@mock.patch.object(common_util, 'is_android_root')
@mock.patch.object(common_util, 'get_android_root_dir')
@@ -251,13 +252,13 @@
@mock.patch('builtins.open', create=True)
def test_read_file_content(self, mock_open):
"""Test read_file_content handling."""
- expacted_data1 = 'Data1'
+ expected_data1 = 'Data1'
file_a = 'fileA'
mock_open.side_effect = [
- mock.mock_open(read_data=expacted_data1).return_value
+ mock.mock_open(read_data=expected_data1).return_value
]
- self.assertEqual(expacted_data1, common_util.read_file_content(file_a))
- mock_open.assert_called_once_with(file_a, encoding='utf8')
+ self.assertEqual(expected_data1, common_util.read_file_content(file_a))
+ mock_open.assert_called_once_with(file_a, 'r', encoding='utf8')
@mock.patch('os.getenv')
@mock.patch.object(common_util, 'get_android_root_dir')
@@ -291,13 +292,14 @@
""" Test replace the user home path to a constant string."""
mock_expanduser.return_value = '/usr/home/a'
test_string = '/usr/home/a/test/dir'
- expect_string = '$USER_HOME$/test/dir'
+ expected_string = '$USER_HOME$/test/dir'
result_path = common_util.remove_user_home_path(test_string)
- self.assertEqual(result_path, expect_string)
+ self.assertEqual(result_path, expected_string)
def test_io_error_handle(self):
"""Test io_error_handle handling."""
err = "It's an IO error."
+
def some_io_error_func():
raise IOError(err)
with self.assertRaises(IOError) as context:
@@ -474,7 +476,6 @@
common_util.determine_language_ide(lang, ide))
-# pylint: disable=unused-argument
def parse_rule(self, name, text):
"""A test function for test_check_args."""
diff --git a/aidegen/lib/config.py b/aidegen/lib/config.py
index eb0a187..2456f98 100644
--- a/aidegen/lib/config.py
+++ b/aidegen/lib/config.py
@@ -148,7 +148,8 @@
"""Load data from configuration file."""
if os.path.exists(self._CONFIG_FILE_PATH):
try:
- with open(self._CONFIG_FILE_PATH) as cfg_file:
+ with open(self._CONFIG_FILE_PATH, 'r',
+ encoding='utf-8') as cfg_file:
self._config = json.load(cfg_file)
except ValueError as err:
info = '{} format is incorrect, error: {}'.format(
@@ -161,12 +162,13 @@
def _save_aidegen_config(self):
"""Save data to configuration file."""
if self._is_config_modified():
- with open(self._CONFIG_FILE_PATH, 'w') as cfg_file:
+ with open(self._CONFIG_FILE_PATH, 'w',
+ encoding='utf-8') as cfg_file:
json.dump(self._config, cfg_file, indent=4)
def _is_config_modified(self):
"""Check if configuration data is modified."""
- return any(key for key in self._config if not key in self._config_backup
+ return any(key for key in self._config if key not in self._config_backup
or self._config[key] != self._config_backup[key])
def _create_config_folder(self):
@@ -214,7 +216,7 @@
1. Create two empty folders named src and gen.
2. Create an empty file named AndroidManifest.xml
- 3. Create the enable_denugger.iml.
+ 3. Create the enable_debugger.iml.
Args:
android_sdk_version: The version name of the Android Sdk in the
@@ -295,7 +297,7 @@
Class Attributes:
_PROPERTIES_FILE: The property file name of IntelliJ.
- _KEY_FILESIZE: The key name of the maximun file size.
+ _KEY_FILESIZE: The key name of the maximum file size.
_FILESIZE_LIMIT: The value to be set as the max file size.
_RE_SEARCH_FILESIZE: A regular expression to find the current max file
size.
diff --git a/aidegen/lib/config_unittest.py b/aidegen/lib/config_unittest.py
index 30caa9e..4fa15c0 100644
--- a/aidegen/lib/config_unittest.py
+++ b/aidegen/lib/config_unittest.py
@@ -206,21 +206,21 @@
"""Test deprecated_intellij_version."""
# Test the idea.sh file contains the deprecated string.
cfg = config.AidegenConfig()
- expacted_data = ('#!/bin/sh\n\n'
+ expected_data = ('#!/bin/sh\n\n'
'SUMMARY="This version of IntelliJ Community Edition '
'is no longer supported."\n')
mock_open.side_effect = [
- mock.mock_open(read_data=expacted_data).return_value
+ mock.mock_open(read_data=expected_data).return_value
]
mock_isfile.return_value = True
self.assertTrue(cfg.deprecated_intellij_version(0))
- # Test the idea.sh file doesn't contains the deprecated string.
- expacted_data = ('#!/bin/sh\n\n'
+ # Test the idea.sh file doesn't contain the deprecated string.
+ expected_data = ('#!/bin/sh\n\n'
'JAVA_BIN="$JDK/bin/java"\n'
'"$JAVA_BIN" \\n')
mock_open.side_effect = [
- mock.mock_open(read_data=expacted_data).return_value
+ mock.mock_open(read_data=expected_data).return_value
]
self.assertFalse(cfg.deprecated_intellij_version(0))
@@ -368,7 +368,7 @@
cfg._reset_max_file_size()
generated_file = os.path.join(IdeaPropertiesUnittests._CONFIG_DIR,
cfg._PROPERTIES_FILE)
- with open(generated_file) as properties_file:
+ with open(generated_file, 'r', encoding='utf-8') as properties_file:
generated_content = properties_file.read()
self.assertEqual(expected_data, generated_content)
diff --git a/aidegen/lib/dom_util.py b/aidegen/lib/dom_util.py
index b2b038c..f17f036 100644
--- a/aidegen/lib/dom_util.py
+++ b/aidegen/lib/dom_util.py
@@ -24,10 +24,10 @@
def find_special_node(parent, element_name, attributes=None):
- """Finds the node contains the specific element.
+ """Finds the node that contains the specific element.
- Find the node of the element contains the tag of element_name and the
- attribute values listed in the attributes dict. There're two cases as
+ Find the node of the element that contains the tag of element_name and the
+ attribute values listed in the attributes dict. There are two cases as
follows,
1. The case with attributes is as below <component name="...">.
2. The case without attributes is as below <findStrings>.
diff --git a/aidegen/lib/eclipse_project_file_gen.py b/aidegen/lib/eclipse_project_file_gen.py
index 3340c45..1d2ef28 100644
--- a/aidegen/lib/eclipse_project_file_gen.py
+++ b/aidegen/lib/eclipse_project_file_gen.py
@@ -34,10 +34,10 @@
jar_module_paths: A dict records a mapping of jar file and module path.
r_java_paths: A list contains the relative folder paths of the R.java
files.
- project_file: The absolutely path of .project file.
+ project_file: The absolute path of .project file.
project_content: A string ready to be written into project_file.
src_paths: A list contains the project's source paths.
- classpath_file: The absolutely path of .classpath file.
+ classpath_file: The absolute path of .classpath file.
classpath_content: A string ready to be written into classpath_file.
"""
# Constants of .project file
@@ -46,7 +46,7 @@
_PROJECT_FILENAME = '.project'
_OUTPUT_BIN_SYMBOLIC_NAME = 'bin'
- # constans of .classpath file
+ # constants of .classpath file
_CLASSPATH_SRC_ENTRY = ' <classpathentry kind="src" path="{}"/>\n'
_EXCLUDE_ANDROID_BP_ENTRY = (' <classpathentry excluding="Android.bp" '
'kind="src" path="{}"/>\n')
@@ -54,7 +54,6 @@
'path="{}" sourcepath="{}"/>\n')
_CLASSPATH_FILENAME = '.classpath'
-
def __init__(self, project):
"""Initialize class.
@@ -274,7 +273,7 @@
Returns: A list has a class path entry of the bin folder.
"""
return [self._CLASSPATH_SRC_ENTRY.format(self._OUTPUT_BIN_SYMBOLIC_NAME)
- ]
+ ]
def _create_classpath_content(self):
"""Create the project file .classpath under the module."""
diff --git a/aidegen/lib/eclipse_project_file_gen_unittest.py b/aidegen/lib/eclipse_project_file_gen_unittest.py
index de629b8..878b0ea 100644
--- a/aidegen/lib/eclipse_project_file_gen_unittest.py
+++ b/aidegen/lib/eclipse_project_file_gen_unittest.py
@@ -77,11 +77,8 @@
def test_gen_src_path_entries(self, mock_get_root, mock_exist_android_bp):
"""Test generate source folders' class path entries."""
mock_get_root.return_value = self._ROOT_PATH
- self.eclipse.src_paths = set([
- 'module/path/src',
- 'module/path/test',
- 'out/src',
- ])
+ self.eclipse.src_paths = {'module/path/src', 'module/path/test',
+ 'out/src'}
expected_result = [
' <classpathentry kind="src" path="dependencies/out/src"/>\n',
' <classpathentry kind="src" path="src"/>\n',
@@ -103,7 +100,6 @@
generated_result = sorted(self.eclipse._gen_src_path_entries())
self.assertEqual(generated_result, expected_result)
-
@mock.patch.object(common_util, 'get_android_root_dir')
def test_gen_jar_path_entries(self, mock_get_root):
"""Test generate jar files' class path entries."""
@@ -120,11 +116,8 @@
def test_get_other_src_folders(self):
"""Test _get_other_src_folders."""
- self.eclipse.src_paths = set([
- 'module/path/src',
- 'module/path/test',
- 'out/module/path/src',
- ])
+ self.eclipse.src_paths = {'module/path/src', 'module/path/test',
+ 'out/module/path/src'}
expected_result = ['out/module/path/src']
self.assertEqual(self.eclipse._get_other_src_folders(), expected_result)
diff --git a/aidegen/lib/errors.py b/aidegen/lib/errors.py
index d283582..98604d4 100644
--- a/aidegen/lib/errors.py
+++ b/aidegen/lib/errors.py
@@ -38,6 +38,7 @@
dependency.
"""
+
class ProjectOutsideAndroidRootError(AIDEgenError):
"""Raised when a project to be generated IDE project file is not under
source tree's root directory."""
diff --git a/aidegen/lib/ide_util.py b/aidegen/lib/ide_util.py
index 989483f..0bc3dd4 100644
--- a/aidegen/lib/ide_util.py
+++ b/aidegen/lib/ide_util.py
@@ -77,14 +77,14 @@
'\n\n')
CONFIG_DIR = 'config'
LINUX_JDK_PATH = os.path.join(common_util.get_android_root_dir(),
- 'prebuilts/jdk/jdk8/linux-x86')
+ 'prebuilts/jdk/jdk17/linux-x86')
LINUX_JDK_TABLE_PATH = 'config/options/jdk.table.xml'
LINUX_FILE_TYPE_PATH = 'config/options/filetypes.xml'
LINUX_ANDROID_SDK_PATH = os.path.join(os.getenv('HOME'), 'Android/Sdk')
MAC_JDK_PATH = os.path.join(common_util.get_android_root_dir(),
- 'prebuilts/jdk/jdk8/darwin-x86')
-ALTERNAIVE_JDK_TABLE_PATH = 'options/jdk.table.xml'
-ALTERNAIVE_FILE_TYPE_XML_PATH = 'options/filetypes.xml'
+ 'prebuilts/jdk/jdk17/darwin-x86')
+ALTERNATIVE_JDK_TABLE_PATH = 'options/jdk.table.xml'
+ALTERNATIVE_FILE_TYPE_XML_PATH = 'options/filetypes.xml'
MAC_ANDROID_SDK_PATH = os.path.join(os.getenv('HOME'), 'Library/Android/sdk')
PATTERN_KEY = 'pattern'
TYPE_KEY = 'type'
@@ -280,7 +280,7 @@
break
if add_pattern:
ext_attrib = root.find(_XPATH_EXTENSION_MAP)
- if not ext_attrib:
+ if ext_attrib is None:
print(_TEST_MAPPING_FILE_TYPE_ADDING_WARN)
return
ext_attrib.append(ElementTree.fromstring(_TEST_MAPPING_TYPE))
@@ -377,7 +377,7 @@
search it from environment paths.
Returns:
- The list of script full path, or None if no found.
+ The list of script full path, or None if not found.
"""
return (ide_common_util.get_script_from_internal_path(self._bin_paths,
self._ide_name) or
@@ -502,9 +502,11 @@
ce_paths = ide_common_util.get_intellij_version_path(self._ls_ce_path)
ue_paths = ide_common_util.get_intellij_version_path(self._ls_ue_path)
all_versions = self._get_all_versions(ce_paths, ue_paths)
- tmp_versions = all_versions.copy()
- for version in tmp_versions:
+ for version in list(all_versions):
real_version = os.path.realpath(version)
+ if (os.path.islink(version.split('/bin')[0]) and
+ (real_version in all_versions)):
+ all_versions.remove(real_version)
if config.AidegenConfig.deprecated_intellij_version(real_version):
all_versions.remove(version)
return self._get_user_preference(all_versions)
@@ -741,8 +743,8 @@
return None
folder_path = self._get_config_dir(ide_version, _config_folder)
if version >= _SPECIFIC_INTELLIJ_VERSION:
- self._IDE_JDK_TABLE_PATH = ALTERNAIVE_JDK_TABLE_PATH
- self._IDE_FILE_TYPE_PATH = ALTERNAIVE_FILE_TYPE_XML_PATH
+ self._IDE_JDK_TABLE_PATH = ALTERNATIVE_JDK_TABLE_PATH
+ self._IDE_FILE_TYPE_PATH = ALTERNATIVE_FILE_TYPE_XML_PATH
if not os.path.isdir(folder_path):
logging.debug("\nThe config folder: %s doesn't exist",
@@ -775,8 +777,8 @@
"""
_JDK_PATH = MAC_JDK_PATH
- _IDE_JDK_TABLE_PATH = ALTERNAIVE_JDK_TABLE_PATH
- _IDE_FILE_TYPE_PATH = ALTERNAIVE_FILE_TYPE_XML_PATH
+ _IDE_JDK_TABLE_PATH = ALTERNATIVE_JDK_TABLE_PATH
+ _IDE_FILE_TYPE_PATH = ALTERNATIVE_FILE_TYPE_XML_PATH
_JDK_CONTENT = templates.MAC_JDK_XML
_DEFAULT_ANDROID_SDK_PATH = MAC_ANDROID_SDK_PATH
@@ -861,14 +863,17 @@
Returns:
The sh full path, or None if no Studio version is installed.
"""
- versions = self._get_existent_scripts_in_system()
- if not versions:
+ all_versions = self._get_existent_scripts_in_system()
+ if not all_versions:
return None
- for version in versions:
+ for version in list(all_versions):
real_version = os.path.realpath(version)
+ if (os.path.islink(version.split('/bin')[0]) and
+ (real_version in all_versions)):
+ all_versions.remove(real_version)
if config.AidegenConfig.deprecated_studio_version(real_version):
- versions.remove(version)
- return self._get_user_preference(versions)
+ all_versions.remove(version)
+ return self._get_user_preference(all_versions)
def apply_optional_config(self):
"""Do the configuration of Android Studio.
@@ -934,7 +939,7 @@
"""
_JDK_PATH = MAC_JDK_PATH
- _IDE_JDK_TABLE_PATH = ALTERNAIVE_JDK_TABLE_PATH
+ _IDE_JDK_TABLE_PATH = ALTERNATIVE_JDK_TABLE_PATH
_JDK_CONTENT = templates.MAC_JDK_XML
_DEFAULT_ANDROID_SDK_PATH = MAC_ANDROID_SDK_PATH
diff --git a/aidegen/lib/ide_util_unittest.py b/aidegen/lib/ide_util_unittest.py
index d2f5da3..a1c8ed8 100644
--- a/aidegen/lib/ide_util_unittest.py
+++ b/aidegen/lib/ide_util_unittest.py
@@ -39,7 +39,6 @@
from aidegen.sdk import jdk_table
-# pylint: disable=too-many-public-methods
# pylint: disable=protected-access
# pylint: disable-msg=too-many-arguments
# pylint: disable-msg=unused-argument
@@ -52,14 +51,16 @@
_TEST_PRJ_PATH4 = ''
_MODULE_XML_SAMPLE = ''
_TEST_DIR = None
- _TEST_XML_CONTENT = """<application>
+ _TEST_XML_CONTENT = """\
+<application>
<component name="FileTypeManager" version="17">
<extensionMap>
<mapping ext="pi" type="Python"/>
</extensionMap>
</component>
</application>"""
- _TEST_XML_CONTENT_2 = """<application>
+ _TEST_XML_CONTENT_2 = """\
+<application>
<component name="FileTypeManager" version="17">
<extensionMap>
<mapping ext="pi" type="Python"/>
@@ -159,7 +160,7 @@
@mock.patch.object(ide_util.IdeBase, 'apply_optional_config')
def test_config_ide(self, mock_config, mock_paths, mock_preference):
"""Test IDEA, IdeUtil.config_ide won't call base none implement api."""
- # Mock SDkConfig flow to not to generate real jdk config file.
+ # Mock SDkConfig flow to not generate real jdk config file.
mock_preference.return_value = None
module_path = os.path.join(self._TEST_DIR, 'test')
idea_path = os.path.join(module_path, '.idea')
@@ -581,32 +582,32 @@
# Test no binary path in _get_script_from_system.
eclipse._bin_paths = []
- expacted_result = None
+ expected_result = None
test_result = eclipse._get_script_from_system()
- self.assertEqual(test_result, expacted_result)
+ self.assertEqual(test_result, expected_result)
# Test get the matched binary from _get_script_from_system.
mock_glob.return_value = ['/a/b/eclipse']
mock_file_access.return_value = True
eclipse._bin_paths = ['/a/b/eclipse']
- expacted_result = '/a/b/eclipse'
+ expected_result = '/a/b/eclipse'
test_result = eclipse._get_script_from_system()
- self.assertEqual(test_result, expacted_result)
+ self.assertEqual(test_result, expected_result)
# Test no matched binary from _get_script_from_system.
mock_glob.return_value = []
eclipse._bin_paths = ['/a/b/eclipse']
- expacted_result = None
+ expected_result = None
test_result = eclipse._get_script_from_system()
- self.assertEqual(test_result, expacted_result)
+ self.assertEqual(test_result, expected_result)
# Test the matched binary cannot be executed.
mock_glob.return_value = ['/a/b/eclipse']
mock_file_access.return_value = False
eclipse._bin_paths = ['/a/b/eclipse']
- expacted_result = None
+ expected_result = None
test_result = eclipse._get_script_from_system()
- self.assertEqual(test_result, expacted_result)
+ self.assertEqual(test_result, expected_result)
@mock.patch('builtins.input')
@mock.patch('os.path.exists')
@@ -616,19 +617,19 @@
eclipse = ide_util.IdeEclipse()
eclipse.cmd = ['eclipse']
mock_exists.return_value = True
- expacted_result = ('eclipse -data '
+ expected_result = ('eclipse -data '
'~/Documents/AIDEGen_Eclipse_workspace '
'2>/dev/null >&2 &')
test_result = eclipse._get_ide_cmd()
- self.assertEqual(test_result, expacted_result)
+ self.assertEqual(test_result, expected_result)
# Test running command without the default workspace.
eclipse.cmd = ['eclipse']
mock_exists.return_value = False
mock_input.return_value = 'n'
- expacted_result = 'eclipse 2>/dev/null >&2 &'
+ expected_result = 'eclipse 2>/dev/null >&2 &'
test_result = eclipse._get_ide_cmd()
- self.assertEqual(test_result, expacted_result)
+ self.assertEqual(test_result, expected_result)
@mock.patch.object(ide_util.IdeUtil, 'is_ide_installed')
@mock.patch.object(project_config.ProjectConfig, 'get_instance')
diff --git a/aidegen/lib/module_info.py b/aidegen/lib/module_info.py
index 0a08080..ab6a346 100644
--- a/aidegen/lib/module_info.py
+++ b/aidegen/lib/module_info.py
@@ -34,7 +34,6 @@
class AidegenModuleInfo(module_info.ModuleInfo, metaclass=Singleton):
"""Class that offers fast/easy lookup for Module related details."""
-
def _load_module_info_file(self, module_file):
"""Loads the module file.
@@ -55,7 +54,7 @@
self.mod_info_file_path = Path(file_path)
logging.debug('Loading %s as module-info.', file_path)
- with open(file_path, encoding='utf8') as json_file:
+ with open(file_path, 'r', encoding='utf8') as json_file:
mod_info = json.load(json_file)
return module_info_target, mod_info
diff --git a/aidegen/lib/module_info_unittest.py b/aidegen/lib/module_info_unittest.py
index bd37636..2ba92e0 100644
--- a/aidegen/lib/module_info_unittest.py
+++ b/aidegen/lib/module_info_unittest.py
@@ -25,8 +25,8 @@
from aidegen.lib import module_info
from aidegen.lib import module_info_util
+
# pylint: disable=protected-access
-#pylint: disable=invalid-name
class AidegenModuleInfoUnittests(unittest.TestCase):
"""Unit tests for module_info.py"""
@@ -48,29 +48,29 @@
def test_is_project_path_relative_module(self):
"""Test is_project_path_relative_module handling."""
- mod_info = {'class':['APPS']}
+ mod_info = {'class': ['APPS']}
self.assertFalse(
module_info.AidegenModuleInfo.is_project_path_relative_module(
mod_info, ''))
- mod_info = {'class':['APPS'], 'path':[]}
+ mod_info = {'class': ['APPS'], 'path': []}
self.assertFalse(
module_info.AidegenModuleInfo.is_project_path_relative_module(
mod_info, ''))
- mod_info = {'class':['APPS'], 'path':['path_to_a']}
+ mod_info = {'class': ['APPS'], 'path': ['path_to_a']}
self.assertTrue(
module_info.AidegenModuleInfo.is_project_path_relative_module(
mod_info, ''))
self.assertFalse(
module_info.AidegenModuleInfo.is_project_path_relative_module(
mod_info, 'test'))
- mod_info = {'path':['path_to_a']}
+ mod_info = {'path': ['path_to_a']}
self.assertFalse(
module_info.AidegenModuleInfo.is_project_path_relative_module(
mod_info, 'test'))
self.assertFalse(
module_info.AidegenModuleInfo.is_project_path_relative_module(
mod_info, 'path_to_a'))
- mod_info = {'class':['APPS'], 'path':['test/path_to_a']}
+ mod_info = {'class': ['APPS'], 'path': ['test/path_to_a']}
self.assertTrue(
module_info.AidegenModuleInfo.is_project_path_relative_module(
mod_info, 'test'))
@@ -114,11 +114,13 @@
'_discover_mod_file_and_target')
def test_load_module_info_file(self, mock_discover):
"""Test _load_module_info_file with conditions."""
- json_path = 'test_data/out/soong/merged_module_info.json'
+ json_path = os.path.join(os.path.dirname(os.path.dirname(__file__)),
+ 'test_data/out/soong/merged_module_info.json')
# Test file exist case.
module_file = json_path
module_info.AidegenModuleInfo._load_module_info_file(self, module_file)
self.assertFalse(mock_discover.called)
+
if __name__ == '__main__':
unittest.main()
diff --git a/aidegen/lib/module_info_util.py b/aidegen/lib/module_info_util.py
index f75f376..0e4fbee 100644
--- a/aidegen/lib/module_info_util.py
+++ b/aidegen/lib/module_info_util.py
@@ -69,6 +69,9 @@
_RUST_PROJECT_JSON = 'out/soong/rust-project.json'
+# Generators are slightly more inefficient when all the values have to be
+# traversed most of the time.
+# pylint: disable=use-a-generator
# pylint: disable=dangerous-default-value
@common_util.back_to_cwd
@common_util.time_logged
@@ -76,7 +79,7 @@
"""Generate a merged dictionary.
Linked functions:
- _build_bp_info(module_info, project, verbose, skip_build)
+ _build_bp_info(module_info, project, skip_build)
_get_soong_build_json_dict()
_merge_dict(mk_dict, bp_dict)
@@ -94,14 +97,14 @@
skip_build = config.is_skip_build
main_project = projects[0] if projects else None
_build_bp_info(
- module_info, main_project, verbose, skip_build, env_on)
+ module_info, main_project, skip_build, env_on)
json_path = common_util.get_blueprint_json_path(
constant.BLUEPRINT_JAVA_JSONFILE_NAME)
bp_dict = common_util.get_json_dict(json_path)
return _merge_dict(module_info.name_to_module_info, bp_dict)
-def _build_bp_info(module_info, main_project=None, verbose=False,
+def _build_bp_info(module_info, main_project=None,
skip_build=False, env_on=_BUILD_BP_JSON_ENV_ON):
"""Make nothing to create module_bp_java_deps.json, module_bp_cc_deps.json.
@@ -112,7 +115,6 @@
Args:
module_info: A ModuleInfo instance contains data of module-info.json.
main_project: A string of the main project name.
- verbose: A boolean, if true displays full build output.
skip_build: A boolean, if true, skip building if
get_blueprint_json_path(file_name) file exists, otherwise
build it.
@@ -140,7 +142,8 @@
logging.warning(
'\nGenerate files:\n %s by atest build method.', files)
- build_with_on_cmd = atest_utils.build([_TARGET], verbose, env_on)
+ atest_utils.update_build_env(env_on)
+ build_with_on_cmd = atest_utils.build([_TARGET])
# For Android Rust projects, we need to create a symbolic link to the file
# out/soong/rust-project.json to launch the rust projects in IDEs.
@@ -281,9 +284,9 @@
Returns:
A merged dictionary.
"""
- merged_dict = dict()
+ merged_dict = {}
for module in mk_dict.keys():
- merged_dict[module] = dict()
+ merged_dict[module] = {}
for key in mk_dict[module].keys():
if key in _MERGE_NEEDED_ITEMS and mk_dict[module][key] != []:
merged_dict[module][key] = mk_dict[module][key]
@@ -305,8 +308,8 @@
"""
merged_dict = _copy_needed_items_from(mk_dict)
for module in bp_dict.keys():
- if module not in merged_dict.keys():
- merged_dict[module] = dict()
+ if module not in merged_dict:
+ merged_dict[module] = {}
_merge_module_keys(merged_dict[module], bp_dict[module])
return merged_dict
diff --git a/aidegen/lib/module_info_util_unittest.py b/aidegen/lib/module_info_util_unittest.py
index 73b9e07..a3fdd2d 100644
--- a/aidegen/lib/module_info_util_unittest.py
+++ b/aidegen/lib/module_info_util_unittest.py
@@ -72,7 +72,6 @@
# pylint: disable=invalid-name
# pylint: disable=protected-access
-# ptlint: disable=too-many-format-args
class AidegenModuleInfoUtilUnittests(unittest.TestCase):
"""Unit tests for module_info_utils.py"""
@@ -228,7 +227,7 @@
module_info_util._merge_module_keys(test_m_dict, test_b_dict)
self.assertEqual(_TEST_DEP_SRC_DICT, test_m_dict)
- def test_merge_module_keys_with_key_not_in_orginial_dict(self):
+ def test_merge_module_keys_with_key_not_in_original_dict(self):
"""Test _merge_module_keys with the key does not exist in the dictionary
to be merged into.
"""
@@ -237,8 +236,8 @@
module_info_util._merge_module_keys(test_m_dict, test_b_dict)
self.assertEqual(_TEST_DEP_SRC_DICT, test_m_dict)
- def test_merge_module_keys_with_key_in_orginial_dict(self):
- """Test _merge_module_keys with with the key exists in the dictionary
+ def test_merge_module_keys_with_key_in_original_dict(self):
+ """Test _merge_module_keys with the key exists in the dictionary
to be merged into.
"""
test_b_dict = _TEST_SRCS_BAZ_DICT
@@ -251,7 +250,7 @@
set(test_m_dict['dependencies']))
def test_merge_module_keys_with_duplicated_item_dict(self):
- """Test _merge_module_keys with with the key exists in the dictionary
+ """Test _merge_module_keys with the key exists in the dictionary
to be merged into.
"""
test_b_dict = _TEST_CLASS_DICT
@@ -356,7 +355,7 @@
@mock.patch.object(common_util, 'get_blueprint_json_files_relative_dict')
def test_get_generated_json_files(self, mock_get_bp_dict):
- """Test _get_generated_json_files function with condictions,"""
+ """Test _get_generated_json_files function with conditions."""
a_env = 'GEN_A'
b_env = 'GEN_B'
a_file_path = 'a/b/path/to/a_file'
@@ -585,5 +584,6 @@
module_info_util._generate_rust_project_link()
self.assertTrue(mock_print.called)
+
if __name__ == '__main__':
unittest.main()
diff --git a/aidegen/lib/native_module_info.py b/aidegen/lib/native_module_info.py
index 0b9d340..06e57da 100644
--- a/aidegen/lib/native_module_info.py
+++ b/aidegen/lib/native_module_info.py
@@ -149,7 +149,8 @@
return True
return False
- def _is_source_need_build(self, mod_info):
+ @staticmethod
+ def _is_source_need_build(mod_info):
"""Checks if a module's source files need to be built.
If a module's source files contain a path looks like,
@@ -170,7 +171,8 @@
return True
return False
- def _is_include_need_build(self, mod_info):
+ @staticmethod
+ def _is_include_need_build(mod_info):
"""Checks if a module needs to be built by its module name.
If a module's include files contain a path looks like,
diff --git a/aidegen/lib/native_project_info.py b/aidegen/lib/native_project_info.py
index 138cfbb..0319d93 100644
--- a/aidegen/lib/native_project_info.py
+++ b/aidegen/lib/native_project_info.py
@@ -26,8 +26,7 @@
from aidegen.lib import project_info
-# pylint: disable=too-few-public-methods
-class NativeProjectInfo():
+class NativeProjectInfo:
"""Native project information.
Class attributes:
@@ -52,7 +51,7 @@
we should call 'm android.frameworks.bufferhub@1.0' to
generate the include header files in,
'android.frameworks.bufferhub@1.0_genc++_headers/gen'
- direcotry.
+ directory.
"""
self.module_names = [target] if self.modules_info.is_module(
target) else self.modules_info.get_module_names_in_targets_paths(
diff --git a/aidegen/lib/native_project_info_unittest.py b/aidegen/lib/native_project_info_unittest.py
index 1a5662b..3ffdacf 100644
--- a/aidegen/lib/native_project_info_unittest.py
+++ b/aidegen/lib/native_project_info_unittest.py
@@ -50,15 +50,16 @@
def test_generate_projects(self, mock_get_inst, mock_mod_info,
mock_get_need, mock_batch, mock_print,
mock_info):
- """Test initializing NativeProjectInfo woth different conditions."""
+ """Test initializing NativeProjectInfo with different conditions."""
target = 'libui'
config = mock.Mock()
mock_get_inst.return_value = config
config.is_skip_build = True
- nativeInfo = native_project_info.NativeProjectInfo
- nativeInfo.modules_info = mock.Mock()
- nativeInfo.modules_info.is_module.return_value = [True, True]
- nativeInfo.modules_info.is_module_need_build.return_value = [True, True]
+ native_info = native_project_info.NativeProjectInfo
+ native_info.modules_info = mock.Mock()
+ native_info.modules_info.is_module.return_value = [True, True]
+ native_info.modules_info.is_module_need_build.return_value = [True,
+ True]
native_project_info.NativeProjectInfo.generate_projects([target])
self.assertTrue(mock_mod_info.called)
self.assertTrue(mock_print.called)
@@ -68,7 +69,7 @@
mock_print.reset_mock()
mock_info.reset_mock()
config.is_skip_build = False
- nativeInfo.modules_info.is_module_need_build.return_value = [
+ native_info.modules_info.is_module_need_build.return_value = [
False, False]
mock_get_need.return_value = ['mod1', 'mod2']
native_project_info.NativeProjectInfo.generate_projects([target])
@@ -81,10 +82,11 @@
def test_get_need_builds_without_needed_build(self):
"""Test _get_need_builds method without needed build."""
targets = ['mod1', 'mod2']
- nativeInfo = native_project_info.NativeProjectInfo
- nativeInfo.modules_info = mock.Mock()
- nativeInfo.modules_info.is_module.return_value = [True, True]
- nativeInfo.modules_info.is_module_need_build.return_value = [True, True]
+ native_info = native_project_info.NativeProjectInfo
+ native_info.modules_info = mock.Mock()
+ native_info.modules_info.is_module.return_value = [True, True]
+ native_info.modules_info.is_module_need_build.return_value = [True,
+ True]
self.assertEqual(
set(targets),
native_project_info.NativeProjectInfo._get_need_builds(targets))
diff --git a/aidegen/lib/native_util.py b/aidegen/lib/native_util.py
index 2461b85..7723341 100644
--- a/aidegen/lib/native_util.py
+++ b/aidegen/lib/native_util.py
@@ -118,7 +118,7 @@
def _get_merged_native_target(cc_module_info, targets):
"""Gets merged native parent target from original native targets.
- If a target is a module, we put it directly into the new list. If a traget
+ If a target is a module, we put it directly into the new list. If a target
is a path we put all the native modules under the path into the new list.
Args:
@@ -205,7 +205,7 @@
Args:
path_to_module_info: A dictionary contains data of relative path as key
and module info dictionary as value.
- rel_path: A string of relative path of a directory to be check.
+ rel_path: A string of relative path of a directory to be checked.
Returns:
True if any C/C++ project exists otherwise False.
@@ -296,7 +296,7 @@
Args:
path: A string of a module's path to be checked.
- targets: A string of a target's path without os.sep in the end.
+ target: A string of a target's path without os.sep in the end.
Returns:
A boolean of True if path contains the path of target otherwise False.
diff --git a/aidegen/lib/native_util_unittest.py b/aidegen/lib/native_util_unittest.py
index 8175bcb..bf47e6a 100644
--- a/aidegen/lib/native_util_unittest.py
+++ b/aidegen/lib/native_util_unittest.py
@@ -28,7 +28,7 @@
# pylint: disable=protected-access
-# pylint: disable=invalid-name
+# pylint: disable=too-many-arguments
class AidegenNativeUtilUnittests(unittest.TestCase):
"""Unit tests for native_util.py"""
@@ -128,7 +128,6 @@
result = (new_parent, new_targets)
self.assertEqual(result, expected)
-
def test_filter_out_modules(self):
"""Test _filter_out_modules with conditions."""
targets = ['shared/path/to/be/used2']
diff --git a/aidegen/lib/project_config.py b/aidegen/lib/project_config.py
index eaa75d5..10b6b21 100644
--- a/aidegen/lib/project_config.py
+++ b/aidegen/lib/project_config.py
@@ -27,14 +27,14 @@
'the building process.')
_SKIP_BUILD_CMD = 'aidegen {} -s'
_SKIP_BUILD_WARN = (
- 'You choose "--skip-build". Skip building jar and module might increase '
+ 'You chose "--skip-build". Skip building jar and module might increase '
'the risk of the absence of some jar or R/AIDL/logtags java files and '
'cause the red lines to appear in IDE tool.')
_INSTANCE_NOT_EXIST_ERROR = ('The instance of {} does not exist. Please '
'initialize it before using.')
-class ProjectConfig():
+class ProjectConfig:
"""A singleton class manages AIDEGen's configurations.
ProjectConfig is a singleton class that can be accessed in other modules.
diff --git a/aidegen/lib/project_config_unittest.py b/aidegen/lib/project_config_unittest.py
index d1533c7..ce87b50 100644
--- a/aidegen/lib/project_config_unittest.py
+++ b/aidegen/lib/project_config_unittest.py
@@ -46,17 +46,17 @@
def test_check_whole_android_tree(self, mock_is_android_root):
"""Test _check_whole_android_tree with different conditions."""
mock_is_android_root.return_value = True
- exspected = [constant.WHOLE_ANDROID_TREE_TARGET]
+ expected = [constant.WHOLE_ANDROID_TREE_TARGET]
self.assertEqual(
- exspected, project_config._check_whole_android_tree([''], True))
+ expected, project_config._check_whole_android_tree([''], True))
self.assertEqual(
- exspected, project_config._check_whole_android_tree([''], False))
- exspected = constant.WHOLE_ANDROID_TREE_TARGET
+ expected, project_config._check_whole_android_tree([''], False))
+ expected = constant.WHOLE_ANDROID_TREE_TARGET
self.assertEqual(
- exspected, project_config._check_whole_android_tree(['a'], True)[0])
- exspected = ['a']
+ expected, project_config._check_whole_android_tree(['a'], True)[0])
+ expected = ['a']
self.assertEqual(
- exspected, project_config._check_whole_android_tree(['a'], False))
+ expected, project_config._check_whole_android_tree(['a'], False))
def test_init_with_no_launch_ide(self):
"""Test __init__ method without launching IDE."""
diff --git a/aidegen/lib/project_file_gen.py b/aidegen/lib/project_file_gen.py
index 4fd7fce..eae5167 100644
--- a/aidegen/lib/project_file_gen.py
+++ b/aidegen/lib/project_file_gen.py
@@ -207,7 +207,7 @@
"""Remove the token _ENABLE_DEBUGGER_MODULE_TOKEN.
Remove the token _ENABLE_DEBUGGER_MODULE_TOKEN in 2 cases:
- 1. Sub projects don't need to be filled in the enable debugger module
+ 1. Sub-projects don't need to be filled in the enable debugger module
so we remove the token here. For the main project, the enable
debugger module will be appended if it exists at the time launching
IDE.
@@ -226,10 +226,10 @@
def _merge_project_vcs_xmls(projects):
- """Merge sub projects' git paths into main project's vcs.xml.
+ """Merge sub-projects' git paths into main project's vcs.xml.
After all projects' vcs.xml are generated, collect the git path of each
- projects and write them into main project's vcs.xml.
+ project and write them into main project's vcs.xml.
Args:
projects: A list of ProjectInfo instances.
@@ -244,6 +244,7 @@
xml_gen.write_ignore_git_dirs_file(main_project_absolute_path,
ignore_gits)
+
def _get_all_git_path(root_path):
"""Traverse all subdirectories to get all git folder's path.
@@ -333,7 +334,7 @@
def gen_enable_debugger_module(module_abspath, android_sdk_version):
"""Generate the enable_debugger module under AIDEGen config folder.
- Skip generating the enable_debugger module in IntelliJ once the attemption
+ Skip generating the enable_debugger module in IntelliJ once the attempt
of getting the Android SDK version is failed.
Args:
diff --git a/aidegen/lib/project_file_gen_unittest.py b/aidegen/lib/project_file_gen_unittest.py
index b09f046..aee01e8 100644
--- a/aidegen/lib/project_file_gen_unittest.py
+++ b/aidegen/lib/project_file_gen_unittest.py
@@ -90,7 +90,7 @@
sample_module = common_util.read_file_content(self._MODULE_XML_SAMPLE)
self.assertEqual(test_module, sample_module)
- # Test for sub projects which only has self module.
+ # Test for sub-projects which only has self module.
try:
pfile_gen._generate_modules_xml()
project_file_gen.update_enable_debugger(self._ANDROID_PROJECT_PATH)
@@ -165,7 +165,7 @@
@mock.patch.object(shutil, 'copy')
@mock.patch.object(os.path, 'exists')
def test_generate_git_ignore(self, mock_path_exist, mock_copy,
- mock_loggin_error):
+ mock_logging_error):
"""Test _generate_git_ignore."""
mock_path_exist.return_value = True
project_file_gen._generate_git_ignore(
@@ -177,7 +177,7 @@
mock_copy.side_effect = OSError()
project_file_gen._generate_git_ignore(
common_util.get_aidegen_root_dir())
- self.assertTrue(mock_loggin_error.called)
+ self.assertTrue(mock_logging_error.called)
def test_filter_out_source_paths(self):
"""Test _filter_out_source_paths."""
diff --git a/aidegen/lib/project_info.py b/aidegen/lib/project_info.py
index 8001500..436baf2 100644
--- a/aidegen/lib/project_info.py
+++ b/aidegen/lib/project_info.py
@@ -46,7 +46,7 @@
# soong_ui.bash. We reserve 5000 characters for rewriting the command line
# in soong_ui.bash.
_CMD_LENGTH_BUFFER = 5000
-# For each argument, it need a space to separate following argument.
+# For each argument, it needs a space to separate following argument.
_BLANK_SIZE = 1
_CORE_MODULES = [constant.FRAMEWORK_ALL, constant.CORE_ALL,
'org.apache.http.legacy.stubs.system']
@@ -106,6 +106,7 @@
"""
modules_info = None
+ projects = []
def __init__(self, target=None, is_main_project=False):
"""ProjectInfo initialize.
@@ -126,7 +127,7 @@
self.project_relative_path = rel_path
self.project_absolute_path = abs_path
self.iml_path = ''
- self._set_default_modues()
+ self._set_default_modules()
self._init_source_path()
if target == constant.FRAMEWORK_ALL:
self.dep_modules = self.get_dep_modules([target])
@@ -137,10 +138,10 @@
self.iml_name = iml.IMLGenerator.get_unique_iml_name(abs_path)
self.rel_out_soong_jar_path = self._get_rel_project_out_soong_jar_path()
- def _set_default_modues(self):
+ def _set_default_modules(self):
"""Append default hard-code modules, source paths and jar files.
- 1. framework: Framework module is always needed for dependencies but it
+ 1. framework: Framework module is always needed for dependencies, but it
might not always be located by module dependency.
2. org.apache.http.legacy.stubs.system: The module can't be located
through module dependency. Without it, a lot of java files will have
@@ -157,7 +158,7 @@
'source_folder_path': set(),
'test_folder_path': set(),
'jar_path': set(),
- 'jar_module_path': dict(),
+ 'jar_module_path': {},
'r_java_path': set(),
'srcjar_path': set()
}
@@ -496,7 +497,8 @@
self._targets = targets
self.path_to_sources = {}
- def _clear_srcjar_paths(self, module):
+ @staticmethod
+ def _clear_srcjar_paths(module):
"""Clears the srcjar_paths.
Args:
@@ -526,7 +528,7 @@
# files under the frameworks/base except the framework-all. Because
# there are too many duplicate srcjars of modules under the
# frameworks/base. So that AIDEGen keeps the srcjar files only from
- # the framework-all module. Other modeuls' srcjar files will be
+ # the framework-all module. Other modules' srcjar files will be
# removed. However, when users choose the module base case, srcjar
# files will be collected by the ProjectInfo class, so that the
# removing srcjar_paths in this class does not impact the
@@ -605,8 +607,8 @@
"""
build_cmd = ['-k', '-j']
build_cmd.extend(list(targets))
- verbose = True
- if not atest_utils.build(build_cmd, verbose, _BUILD_BP_JSON_ENV_ON):
+ atest_utils.update_build_env(_BUILD_BP_JSON_ENV_ON)
+ if not atest_utils.build(build_cmd):
message = ('Build failed!\n{}\nAIDEGen will proceed but dependency '
'correctness is not guaranteed if not all targets being '
'built successfully.'.format('\n'.join(targets)))
diff --git a/aidegen/lib/project_info_unittest.py b/aidegen/lib/project_info_unittest.py
index 884cf8c..2b1fbc2 100644
--- a/aidegen/lib/project_info_unittest.py
+++ b/aidegen/lib/project_info_unittest.py
@@ -88,7 +88,6 @@
# pylint: disable=protected-access
-# pylint: disable=invalid-name
class ProjectInfoUnittests(unittest.TestCase):
"""Unit tests for project_info.py"""
@@ -142,7 +141,6 @@
unittest_constants.TEST_MODULE, unittest_constants.TEST_PATH),
unittest_constants.TEST_MODULE)
- # pylint: disable=too-many-locals
@mock.patch('logging.info')
@mock.patch.object(common_util, 'get_android_root_dir')
@mock.patch('atest.module_info.ModuleInfo')
@@ -179,8 +177,8 @@
self.assertTrue(mock_info.called)
# Test collects source and test folders.
- result_source = set(['packages/apps/test/src/main/java'])
- result_test = set(['packages/apps/test/tests'])
+ result_source = {'packages/apps/test/src/main/java'}
+ result_test = {'packages/apps/test/tests'}
self.assertEqual(project_info_obj.source_path['source_folder_path'],
result_source)
self.assertEqual(project_info_obj.source_path['test_folder_path'],
diff --git a/aidegen/lib/singleton_unittest.py b/aidegen/lib/singleton_unittest.py
index 7f62199..bdb3a1e 100644
--- a/aidegen/lib/singleton_unittest.py
+++ b/aidegen/lib/singleton_unittest.py
@@ -33,5 +33,6 @@
singleton_a2 = SingletonClass()
self.assertTrue(singleton_a1 is singleton_a2)
+
if __name__ == '__main__':
unittest.main()
diff --git a/aidegen/lib/source_locator.py b/aidegen/lib/source_locator.py
index 9c13ae3..c8c067c 100644
--- a/aidegen/lib/source_locator.py
+++ b/aidegen/lib/source_locator.py
@@ -71,7 +71,7 @@
referenced_by_jar: A boolean to check if the module is referenced by a
jar file.
build_targets: A set to keep the unique build target jar or srcjar file
- relative paths which are ready to be rebuld.
+ relative paths which are ready to be rebuilt.
missing_jars: A set to keep the jar file relative paths if it doesn't
exist.
specific_soong_path: A string of the relative path to the module's
@@ -225,7 +225,7 @@
return None
def _init_module_path(self):
- """Inintialize self.module_path."""
+ """Initialize self.module_path."""
self.module_path = (self.module_data[constant.KEY_PATH][0]
if self._check_key(constant.KEY_PATH) else '')
@@ -407,7 +407,7 @@
package_name: A string of package name.
"""
package_name = None
- with open(abs_java_path, encoding='utf8') as data:
+ with open(abs_java_path, 'r', encoding='utf8') as data:
for line in data.read().splitlines():
match = _PACKAGE_RE.match(line)
if match:
diff --git a/aidegen/lib/source_locator_unittest.py b/aidegen/lib/source_locator_unittest.py
index b8636a4..5fe46e7 100644
--- a/aidegen/lib/source_locator_unittest.py
+++ b/aidegen/lib/source_locator_unittest.py
@@ -31,7 +31,6 @@
# pylint: disable=too-many-arguments
# pylint: disable=protected-access
-# pylint: disable=invalid-name
class ModuleDataUnittests(unittest.TestCase):
"""Unit tests for module_data.py"""
@@ -106,27 +105,27 @@
unittest_constants.TEST_MODULE, unittest_constants.MODULE_INFO, 0)
# Test for aapt2.srcjar
test_aapt2_srcjar = 'a/aapt2.srcjar'
- expect_result = 'a/aapt2'
+ expected_result = 'a/aapt2'
r_dir = module_data._get_r_dir(test_aapt2_srcjar)
- self.assertEqual(r_dir, expect_result)
+ self.assertEqual(r_dir, expected_result)
# Test for R.srcjar
test_r_jar = 'b/android/R.srcjar'
- expect_result = 'b/aapt2/R'
+ expected_result = 'b/aapt2/R'
r_dir = module_data._get_r_dir(test_r_jar)
- self.assertEqual(r_dir, expect_result)
+ self.assertEqual(r_dir, expected_result)
# Test the R.srcjar is not under the android folder.
test_wrong_r_jar = 'b/test/R.srcjar'
- expect_result = None
+ expected_result = None
r_dir = module_data._get_r_dir(test_wrong_r_jar)
- self.assertEqual(r_dir, expect_result)
+ self.assertEqual(r_dir, expected_result)
# Test for the target file is not aapt2.srcjar or R.srcjar
test_unknown_target = 'c/proto.srcjar'
- expect_result = None
+ expected_result = None
r_dir = module_data._get_r_dir(test_unknown_target)
- self.assertEqual(r_dir, expect_result)
+ self.assertEqual(r_dir, expected_result)
@mock.patch('os.path.exists')
@mock.patch('aidegen.lib.common_util.get_android_root_dir')
@@ -141,16 +140,16 @@
test_module, 0)
# Test the module is not APPS.
module_data._collect_r_srcs_paths()
- expect_result = []
- self.assertEqual(module_data.r_java_paths, expect_result)
+ expected_result = []
+ self.assertEqual(module_data.r_java_paths, expected_result)
# Test the module is not a target module.
test_module['depth'] = 1
module_data = source_locator.ModuleData(unittest_constants.TEST_MODULE,
test_module, 1)
module_data._collect_r_srcs_paths()
- expect_result = []
- self.assertEqual(module_data.r_java_paths, expect_result)
+ expected_result = []
+ self.assertEqual(module_data.r_java_paths, expected_result)
# Test the srcjar target doesn't exist.
test_module['class'] = ['APPS']
@@ -158,8 +157,8 @@
module_data = source_locator.ModuleData(unittest_constants.TEST_MODULE,
test_module, 0)
module_data._collect_r_srcs_paths()
- expect_result = []
- self.assertEqual(module_data.r_java_paths, expect_result)
+ expected_result = []
+ self.assertEqual(module_data.r_java_paths, expected_result)
# Test the srcjar target exists.
test_module['srcjars'] = [('out/soong/.intermediates/packages/apps/'
@@ -167,58 +166,57 @@
module_data = source_locator.ModuleData(unittest_constants.TEST_MODULE,
test_module, 0)
module_data._collect_r_srcs_paths()
- expect_result = [
+ expected_result = [
'out/soong/.intermediates/packages/apps/test_aapt2/aapt2'
]
- self.assertEqual(module_data.r_java_paths, expect_result)
+ self.assertEqual(module_data.r_java_paths, expected_result)
mock_exists.return_value = False
module_data._collect_r_srcs_paths()
- expect_result = set([('out/soong/.intermediates/packages/apps/'
- 'test_aapt2/aapt2.srcjar')])
- self.assertEqual(module_data.build_targets, expect_result)
-
+ expected_result = {('out/soong/.intermediates/packages/apps/'
+ 'test_aapt2/aapt2.srcjar')}
+ self.assertEqual(module_data.build_targets, expected_result)
def test_parse_source_path(self):
"""Test _parse_source_path."""
# The package name of e.java is c.d.
test_java = 'a/b/c/d/e.java'
package_name = 'c.d'
- expect_result = 'a/b'
+ expected_result = 'a/b'
src_path = source_locator.ModuleData._parse_source_path(
test_java, package_name)
- self.assertEqual(src_path, expect_result)
+ self.assertEqual(src_path, expected_result)
# The package name of e.java is c.d.
test_java = 'a/b/c.d/e.java'
package_name = 'c.d'
- expect_result = 'a/b'
+ expected_result = 'a/b'
src_path = source_locator.ModuleData._parse_source_path(
test_java, package_name)
- self.assertEqual(src_path, expect_result)
+ self.assertEqual(src_path, expected_result)
# The package name of e.java is x.y.
test_java = 'a/b/c/d/e.java'
package_name = 'x.y'
- expect_result = 'a/b/c/d'
+ expected_result = 'a/b/c/d'
src_path = source_locator.ModuleData._parse_source_path(
test_java, package_name)
- self.assertEqual(src_path, expect_result)
+ self.assertEqual(src_path, expected_result)
# The package name of f.java is c.d.
test_java = 'a/b/c.d/e/c/d/f.java'
package_name = 'c.d'
- expect_result = 'a/b/c.d/e'
+ expected_result = 'a/b/c.d/e'
src_path = source_locator.ModuleData._parse_source_path(
test_java, package_name)
- self.assertEqual(src_path, expect_result)
+ self.assertEqual(src_path, expected_result)
# The package name of f.java is c.d.e.
test_java = 'a/b/c.d/e/c.d/e/f.java'
package_name = 'c.d.e'
- expect_result = 'a/b/c.d/e'
+ expected_result = 'a/b/c.d/e'
src_path = source_locator.ModuleData._parse_source_path(
test_java, package_name)
- self.assertEqual(src_path, expect_result)
+ self.assertEqual(src_path, expected_result)
@mock.patch('aidegen.lib.common_util.get_android_root_dir')
def test_append_jar_file(self, mock_android_root_dir):
@@ -285,7 +283,6 @@
os.path.join(unittest_constants.MODULE_PATH, 'tests/'))
self.assertEqual(module_data.jar_files, [])
-
@mock.patch('aidegen.lib.common_util.get_android_root_dir')
def test_set_jars_jarfile(self, mock_android_root_dir):
"""Test _set_jars_jarfile handling."""
@@ -402,13 +399,13 @@
srcjar_path = 'a/b/aapt2.srcjar'
test_module = dict(unittest_constants.MODULE_INFO)
test_module['srcjars'] = [srcjar_path]
- expacted_result = [srcjar_path]
+ expected_result = [srcjar_path]
module_data = source_locator.ModuleData(unittest_constants.TEST_MODULE,
test_module, 0)
module_data._collect_srcjar_path('R.java')
self.assertEqual(module_data.srcjar_paths, [])
module_data._collect_srcjar_path(srcjar_path)
- self.assertEqual(module_data.srcjar_paths, expacted_result)
+ self.assertEqual(module_data.srcjar_paths, expected_result)
@mock.patch('os.path.exists')
def test_collect_all_srcjar_path(self, mock_exists):
@@ -421,7 +418,7 @@
'a/b/aidl1.srcjar',
'a/b/aidl2.srcjar'
]
- expacted_result = [
+ expected_result = [
'a/b/aidl0.srcjar',
'a/b/aidl2.srcjar',
'a/b/aidl1.srcjar'
@@ -429,17 +426,15 @@
module_data = source_locator.ModuleData(unittest_constants.TEST_MODULE,
test_module, 0)
module_data._collect_all_srcjar_paths()
- self.assertEqual(module_data.srcjar_paths, expacted_result)
-
+ self.assertEqual(module_data.srcjar_paths, expected_result)
mock_exists.return_value = False
test_module['srcjars'] = ['a/b/aidl0.srcjar']
- expacted_result = set(['a/b/aidl0.srcjar'])
+ expected_result = {'a/b/aidl0.srcjar'}
module_data = source_locator.ModuleData(unittest_constants.TEST_MODULE,
test_module, 0)
module_data._collect_all_srcjar_paths()
- self.assertEqual(module_data.build_targets, expacted_result)
-
+ self.assertEqual(module_data.build_targets, expected_result)
def test_collect_missing_jars(self):
"""Test _collect_missing_jars."""
diff --git a/aidegen/lib/xml_util_unittest.py b/aidegen/lib/xml_util_unittest.py
index 33da643..6dde405 100644
--- a/aidegen/lib/xml_util_unittest.py
+++ b/aidegen/lib/xml_util_unittest.py
@@ -25,7 +25,6 @@
from aidegen.lib import xml_util
-# pylint: disable=protected-access
class XMLUtilUnittests(unittest.TestCase):
"""Unit tests for xml_util.py"""
diff --git a/aidegen/project/__init__.py b/aidegen/project/__init__.py
deleted file mode 100755
index e69de29..0000000
--- a/aidegen/project/__init__.py
+++ /dev/null
diff --git a/aidegen/project/project_splitter.py b/aidegen/project/project_splitter.py
index a4daf9f..fdd0a1c 100644
--- a/aidegen/project/project_splitter.py
+++ b/aidegen/project/project_splitter.py
@@ -42,7 +42,9 @@
'libnativehelper', 'pdk', 'prebuilts', 'sdk', 'system',
'toolchain', 'tools', 'vendor', 'out', 'external',
'art/tools/ahat/src/test-dump',
- 'cts/common/device-side/device-info/src_stub']
+ 'cts/common/device-side/device-info/src_stub',
+ 'external/gson/gson/src/main/java'
+ ]
_PERMISSION_DEFINED_PATH = ('frameworks/base/core/res/framework-res/'
'android_common/gen/')
_ANDROID = 'android'
@@ -155,7 +157,7 @@
self._all_srcs[key] -= srcs[key]
def _remove_duplicate_sources(self):
- """Removes the duplicate source folders from each sub project.
+ """Removes the duplicate source folders from each sub-project.
Priority processing with the longest path length, e.g.
frameworks/base/packages/SettingsLib must have priority over
@@ -184,7 +186,7 @@
"""Gets the dependencies between the projects.
Check if the current project's source folder exists in other projects.
- If do, the current project is a dependency module to the other.
+ If so, the current project is a dependency module to the other.
"""
projects = sorted(self._projects, key=lambda k: len(
k.project_relative_path))
@@ -233,7 +235,6 @@
if self._full_repo:
mod[constant.KEY_DEPENDENCIES].append(self._full_repo_iml)
mod[constant.KEY_DEPENDENCIES].append(constant.KEY_DEPENDENCIES)
- srcjar_dict = dict()
permission_src = self._get_permission_defined_source_path()
if permission_src:
mod[constant.KEY_SRCS] = [permission_src]
@@ -316,7 +317,7 @@
1) If it's a aapt2/R type jar or other directory type sources, add them
into self._all_srcs[_KEY_SOURCE_PATH].
2) If it's an R.srcjar file, check if the same path of aapt2/R directory
- exists if so add aapt2/R path into into the
+ exists if so add aapt2/R path into the
self._all_srcs[_KEY_SOURCE_PATH], otherwise unzip R.srcjar into
the 'aidegen_r.srcjar' directory and add the unzipped path into
self._all_srcs[_KEY_SOURCE_PATH].
diff --git a/aidegen/project/project_splitter_unittest.py b/aidegen/project/project_splitter_unittest.py
index 1e9665c..3149823 100644
--- a/aidegen/project/project_splitter_unittest.py
+++ b/aidegen/project/project_splitter_unittest.py
@@ -55,7 +55,7 @@
'source_folder_path': {'src1', 'src2', 'other1'},
'test_folder_path': {'src1/tests'},
'jar_path': {'jar1.jar'},
- 'jar_module_path': dict(),
+ 'jar_module_path': {},
'r_java_path': set(),
'srcjar_path': {'srcjar1.srcjar'}
}
@@ -64,7 +64,7 @@
'source_folder_path': {'src2', 'src2/src3', 'src2/lib', 'other2'},
'test_folder_path': {'src2/tests'},
'jar_path': set(),
- 'jar_module_path': dict(),
+ 'jar_module_path': {},
'r_java_path': set(),
'srcjar_path': {'srcjar2.srcjar'}
}
@@ -73,7 +73,7 @@
'source_folder_path': {'src2/src3', 'src2/lib'},
'test_folder_path': {'src2/src3/tests'},
'jar_path': {'jar3.jar'},
- 'jar_module_path': dict(),
+ 'jar_module_path': {},
'r_java_path': set(),
'srcjar_path': {'srcjar3.srcjar'}
}
@@ -82,7 +82,7 @@
'source_folder_path': set(),
'test_folder_path': set(),
'jar_path': set(),
- 'jar_module_path': dict(),
+ 'jar_module_path': {},
'r_java_path': set(),
'srcjar_path': {'framework.srcjar', 'other.srcjar'}
}
@@ -196,7 +196,7 @@
'_remove_permission_definition_srcjar_path')
@mock.patch.object(common_util, 'get_android_root_dir')
def test_gen_framework_srcjars_iml(
- self, mock_root, mock_remove, mock_get, mock_create_iml):
+ self, mock_root, mock_remove, mock_get, mock_create_iml):
"""Test gen_framework_srcjars_iml."""
mock_root.return_value = self._TEST_DIR
mock_get.return_value = 'aapt2/R'
@@ -289,7 +289,7 @@
parent_sources = ['a/b/c/d/e', 'a/b/e/f']
result = project_splitter._remove_child_duplicate_sources_from_parent(
child, parent_sources, root)
- self.assertEqual(set(['a/b/c/d/e']), result)
+ self.assertEqual({'a/b/c/d/e'}, result)
@mock.patch('os.path.relpath')
def test_get_rel_project_soong_paths(self, mock_rel):
@@ -347,7 +347,7 @@
@mock.patch.object(project_splitter, '_get_permission_r_srcjar_rel_path')
@mock.patch.object(project_splitter, '_get_permission_aapt2_rel_path')
def test_remove_permission_definition_srcjar_path(
- self, mock_get_aapt2, mock_get_r_srcjar):
+ self, mock_get_aapt2, mock_get_r_srcjar):
"""Test _remove_permission_definition_srcjar_path with conditions."""
expected_srcjars = [
'other.srcjar',
@@ -379,7 +379,8 @@
@mock.patch('os.path.isfile')
@mock.patch('os.path.isdir')
def test_get_permission_defined_source_path(
- self, mock_is_dir, mock_is_file, mock_rmtree, mock_unzip, mock_join):
+ self, mock_is_dir, mock_is_file, mock_rmtree, mock_unzip,
+ mock_join):
"""Test _get_permission_defined_source_path function."""
mock_is_dir.return_value = True
self.split_projs._get_permission_defined_source_path()
@@ -400,7 +401,8 @@
@mock.patch('os.path.dirname')
@mock.patch('os.path.isdir')
def test_unzip_all_scrjars(
- self, mock_is_dir, mock_dirname, mock_join, mock_rmtree, mock_unzip):
+ self, mock_is_dir, mock_dirname, mock_join, mock_rmtree,
+ mock_unzip):
"""Test _unzip_all_scrjars function."""
mock_is_dir.return_value = True
self.split_projs._unzip_all_scrjars()
diff --git a/aidegen/sdk/__init__.py b/aidegen/sdk/__init__.py
deleted file mode 100755
index e69de29..0000000
--- a/aidegen/sdk/__init__.py
+++ /dev/null
diff --git a/aidegen/sdk/android_sdk.py b/aidegen/sdk/android_sdk.py
index 5b7d3b6..d827246 100644
--- a/aidegen/sdk/android_sdk.py
+++ b/aidegen/sdk/android_sdk.py
@@ -40,23 +40,33 @@
"""Configures API level from the Android SDK path.
Attributes:
- _android_sdk_path: The path to the Android SDK, None if the Android SDK
- doesn't exist.
+ _android_sdk_path: The path to the Android SDK,
+ None if the Android SDK doesn't exist.
_max_api_level: An integer, the max API level in the platforms folder.
_max_code_name: A string, the code name of the max API level.
+ _max_folder_name: A string, the folder name corresponding
+ to the max API level.
_platform_mapping: A dictionary of Android platform versions mapping to
- the API level and the Android version code name.
- e.g.
+ the API level and the Android version code name. e.g.
{
- 'android-29': {'api_level': 29, 'code_name': '29'},
- 'android-Q': {'api_level': 29, 'code_name': 'Q'}
+ 'android-29': {
+ 'api_level': 29,
+ 'code_name': '29',
+ },
+ 'android-Q': {
+ 'api_level': 29,
+ 'code_name': 'Q',
+ }
}
"""
_API_LEVEL = 'api_level'
_CODE_NAME = 'code_name'
- _RE_API_LEVEL = re.compile(r'AndroidVersion.ApiLevel=(?P<api_level>[\d]+)')
- _RE_CODE_NAME = re.compile(r'AndroidVersion.CodeName=(?P<code_name>[A-Z])')
+ _FOLDER_NAME = 'folder_name'
+ _RE_API_LEVEL = re.compile(
+ r'AndroidVersion\.ApiLevel=(?P<api_level>[\d]+)')
+ _RE_CODE_NAME = re.compile(
+ r'AndroidVersion\.CodeName=(?P<code_name>[a-zA-Z]+)')
_GLOB_PROPERTIES_FILE = os.path.join('platforms', 'android-*',
'source.properties')
_INPUT_QUERY_TIMES = 3
@@ -75,6 +85,7 @@
"""Initializes AndroidSDK."""
self._max_api_level = 0
self._max_code_name = None
+ self._max_folder_name = None
self._platform_mapping = {}
self._android_sdk_path = None
@@ -89,6 +100,11 @@
return self._max_code_name
@property
+ def max_folder_name(self):
+ """Gets the max folder name."""
+ return self._max_folder_name
+
+ @property
def platform_mapping(self):
"""Gets the Android platform mapping."""
return self._platform_mapping
@@ -121,6 +137,20 @@
code_name = data[self._CODE_NAME]
return code_name
+ def _get_max_folder_name(self):
+ """Gets the max folder name from self._platform_mapping.
+
+ Returns:
+ A string of the folder name corresponding to the max API level.
+ """
+ folder_name = ''
+ for platform, data in self._platform_mapping.items():
+ if (data[self._API_LEVEL] == self.max_api_level
+ and data[self._CODE_NAME] == self._max_code_name):
+ folder_name = platform
+ break
+ return folder_name
+
def _parse_api_info(self, properties_file):
"""Parses the API information from the source.properties file.
@@ -189,13 +219,14 @@
self._android_sdk_path = path
self._max_api_level = self._parse_max_api_level()
self._max_code_name = self._parse_max_code_name()
+ self._max_folder_name = self._get_max_folder_name()
return True
return False
def path_analysis(self, sdk_path):
"""Analyses the Android SDK path.
- Confirm the path is a Android SDK folder. If it's not correct, ask user
+ Confirm the path is an Android SDK folder. If it's not correct, ask user
to enter a new one. Skip asking when enter nothing.
Args:
diff --git a/aidegen/sdk/android_sdk_unittest.py b/aidegen/sdk/android_sdk_unittest.py
index b0c1ae4..78bc055 100644
--- a/aidegen/sdk/android_sdk_unittest.py
+++ b/aidegen/sdk/android_sdk_unittest.py
@@ -39,6 +39,7 @@
"""Test initialize the attributes."""
self.assertEqual(self.sdk.max_api_level, 0)
self.assertEqual(self.sdk.max_code_name, None)
+ self.assertEqual(self.sdk.max_folder_name, None)
self.assertEqual(self.sdk.platform_mapping, {})
self.assertEqual(self.sdk.android_sdk_path, None)
@@ -99,6 +100,37 @@
code_name = self.sdk._parse_max_code_name()
self.assertEqual(code_name, 'Q')
+ def test_get_max_folder_name(self):
+ """Test _get_max_folder_name."""
+ self.sdk._max_api_level = 29
+ self.sdk._max_code_name = '29'
+ self.sdk._platform_mapping = {
+ 'android-29': {
+ 'api_level': 29,
+ 'code_name': '29',
+ },
+ 'android-28': {
+ 'api_level': 28,
+ 'code_name': '28',
+ },
+ }
+ max_folder_name = self.sdk._get_max_folder_name()
+ self.assertEqual(max_folder_name, 'android-29')
+
+ self.sdk._max_code_name = 'Q'
+ self.sdk._platform_mapping = {
+ 'android-29': {
+ 'api_level': 29,
+ 'code_name': '29',
+ },
+ 'android-Q': {
+ 'api_level': 29,
+ 'code_name': 'Q',
+ },
+ }
+ max_folder_name = self.sdk._get_max_folder_name()
+ self.assertEqual(max_folder_name, 'android-Q')
+
@mock.patch.object(common_util, 'read_file_content')
def test_parse_api_info(self, mock_read_file):
"""Test _parse_api_info."""
diff --git a/aidegen/sdk/jdk_table.py b/aidegen/sdk/jdk_table.py
index 21de99e..d332594 100644
--- a/aidegen/sdk/jdk_table.py
+++ b/aidegen/sdk/jdk_table.py
@@ -17,7 +17,7 @@
"""Configs the jdk.table.xml.
In order to enable the feature "Attach debugger to Android process" in Android
-Studio or IntelliJ, AIDEGen needs the JDK and Android SDK been set-up. The class
+Studio or IntelliJ, AIDEGen needs the JDK and Android SDK been set up. The class
JDKTableXML parses the jdk.table.xml to find the existing JDK and Android SDK
information. If they do not exist, AIDEGen will create them.
@@ -44,7 +44,7 @@
from aidegen.sdk import android_sdk
-class JDKTableXML():
+class JDKTableXML:
"""Configs jdk.table.xml for IntelliJ and Android Studio.
Attributes:
@@ -70,7 +70,7 @@
_ADDITIONAL = 'additional'
_ANDROID_SDK = 'Android SDK'
_JAVA_SDK = 'JavaSDK'
- _JDK_VERSION = 'JDK18'
+ _JDK_VERSION = 'JDK17'
_APPLICATION = 'application'
_COMPONENT = 'component'
_PROJECTJDKTABLE = 'ProjectJdkTable'
@@ -162,11 +162,11 @@
return True
return False
- def _check_jdk18_in_xml(self):
- """Checks if the JDK18 is already set in jdk.table.xml.
+ def _check_jdk17_in_xml(self):
+ """Checks if the JDK17 is already set in jdk.table.xml.
Returns:
- Boolean: True if the JDK18 exists else False.
+ Boolean: True if the JDK17 exists else False.
"""
for jdk in self._xml.iter(self._JDK):
_name = jdk.find(self._NAME)
@@ -185,7 +185,7 @@
and platform version.
1. Check if the Android SDK path is valid.
2. Check if the platform version exists in the Android SDK.
- The Android SDK version can be used to generate enble_debugger module
+ The Android SDK version can be used to generate enable_debugger module
when condition 1 and 2 are true.
Returns:
@@ -230,7 +230,7 @@
def _generate_jdk_config_string(self):
"""Generates the default JDK configuration."""
- if self._check_jdk18_in_xml():
+ if self._check_jdk17_in_xml():
return
self._append_config(self._jdk_content.format(JDKpath=self._jdk_path))
self._modify_config = True
@@ -244,6 +244,7 @@
# abandoning the sdk_config.py.
self._append_config(templates.ANDROID_SDK_XML.format(
ANDROID_SDK_PATH=self._sdk.android_sdk_path,
+ FOLDER_NAME=self._sdk.max_folder_name,
CODE_NAME=self._sdk.max_code_name))
self._android_sdk_version = self._ANDROID_SDK_VERSION.format(
CODE_NAME=self._sdk.max_code_name)
@@ -257,10 +258,10 @@
def config_jdk_table_xml(self):
"""Configures the jdk.table.xml.
- 1. Generate the JDK18 configuration if it does not exist.
+ 1. Generate the JDK17 configuration if it does not exist.
2. Generate the Android SDK configuration if it does not exist and
save the Android SDK path.
- 3. Update the jdk.table.xml if AIDEGen needs to append JDK18 or
+ 3. Update the jdk.table.xml if AIDEGen needs to append JDK17 or
Android SDK configuration.
Returns:
diff --git a/aidegen/sdk/jdk_table_unittest.py b/aidegen/sdk/jdk_table_unittest.py
index b38af61..7a55edd 100644
--- a/aidegen/sdk/jdk_table_unittest.py
+++ b/aidegen/sdk/jdk_table_unittest.py
@@ -40,14 +40,15 @@
_CONFIG_FILE = '/path/to/jdk.table.xml'
_JDK_CONTENT = '<jdk />'
_JDK_PATH = '/path/to/JDK'
- _DEFULAT_ANDROID_SDK_PATH = '/path/to/Android/SDK'
+ _DEFAULT_ANDROID_SDK_PATH = '/path/to/Android/SDK'
+ _TEST_DIR = None
def setUp(self):
"""Prepare the JDKTableXML class."""
JDKTableXMLUnittests._TEST_DIR = tempfile.mkdtemp()
self.jdk_table_xml = jdk_table.JDKTableXML(
self._CONFIG_FILE, self._JDK_CONTENT, self._JDK_PATH,
- self._DEFULAT_ANDROID_SDK_PATH)
+ self._DEFAULT_ANDROID_SDK_PATH)
def tearDown(self):
"""Clear the JDKTableXML class."""
@@ -79,7 +80,7 @@
"""Test _check_structure."""
tmp_file = os.path.join(self._TEST_DIR, self._JDK_TABLE_XML)
xml_str = ('<application>\n</application>')
- with open(tmp_file, 'w') as tmp_jdk_xml:
+ with open(tmp_file, 'w', encoding='utf-8') as tmp_jdk_xml:
tmp_jdk_xml.write(xml_str)
self.jdk_table_xml._xml = ElementTree.parse(tmp_file)
self.assertFalse(self.jdk_table_xml._check_structure())
@@ -87,7 +88,7 @@
' <component>\n'
' </component>\n'
'</application>')
- with open(tmp_file, 'w') as tmp_jdk_xml:
+ with open(tmp_file, 'w', encoding='utf-8') as tmp_jdk_xml:
tmp_jdk_xml.write(xml_str)
self.jdk_table_xml._xml = ElementTree.parse(tmp_file)
self.assertFalse(self.jdk_table_xml._check_structure())
@@ -95,12 +96,12 @@
' <component name="ProjectJdkTable">\n'
' </component>\n'
'</application>')
- with open(tmp_file, 'w') as tmp_jdk_xml:
+ with open(tmp_file, 'w', encoding='utf-8') as tmp_jdk_xml:
tmp_jdk_xml.write(xml_str)
self.jdk_table_xml._xml = ElementTree.parse(tmp_file)
self.assertTrue(self.jdk_table_xml._check_structure())
- @mock.patch.object(jdk_table.JDKTableXML, '_check_jdk18_in_xml')
+ @mock.patch.object(jdk_table.JDKTableXML, '_check_jdk17_in_xml')
def test_generate_jdk_config_string(self, mock_jdk_exists):
"""Test _generate_jdk_config_string."""
mock_jdk_exists.return_value = True
@@ -134,7 +135,7 @@
b' </component>\n'
b'</application>')
tmp_file = os.path.join(self._TEST_DIR, self._JDK_TABLE_XML)
- with open(tmp_file, 'w') as tmp_jdk_xml:
+ with open(tmp_file, 'w', encoding='utf-8') as tmp_jdk_xml:
tmp_jdk_xml.write(xml_str)
self.jdk_table_xml._xml = ElementTree.parse(tmp_file)
self.jdk_table_xml._generate_jdk_config_string()
@@ -170,19 +171,19 @@
mock_override.return_value = True
self.assertTrue(mock_gen_jdk.called)
- def test_check_jdk18_in_xml(self):
- """Test _check_jdk18_in_xml."""
- xml_str = ('<test><jdk><name value="JDK18" /><type value="JavaSDK" />'
+ def test_check_jdk17_in_xml(self):
+ """Test _check_jdk17_in_xml."""
+ xml_str = ('<test><jdk><name value="JDK17" /><type value="JavaSDK" />'
'</jdk></test>')
self.jdk_table_xml._xml = ElementTree.fromstring(xml_str)
- self.assertTrue(self.jdk_table_xml._check_jdk18_in_xml())
+ self.assertTrue(self.jdk_table_xml._check_jdk17_in_xml())
xml_str = ('<test><jdk><name value="test" /><type value="JavaSDK" />'
'</jdk></test>')
self.jdk_table_xml._xml = ElementTree.fromstring(xml_str)
- self.assertFalse(self.jdk_table_xml._check_jdk18_in_xml())
+ self.assertFalse(self.jdk_table_xml._check_jdk17_in_xml())
xml_str = ('<test><jdk><name value="test" /></jdk></test>')
self.jdk_table_xml._xml = ElementTree.fromstring(xml_str)
- self.assertFalse(self.jdk_table_xml._check_jdk18_in_xml())
+ self.assertFalse(self.jdk_table_xml._check_jdk17_in_xml())
@mock.patch.object(android_sdk.AndroidSDK, 'is_android_sdk_path')
def test_check_android_sdk_in_xml(self, mock_is_android_sdk):
@@ -194,27 +195,27 @@
},
}
mock_is_android_sdk.return_value = True
- xml_str = ('<test><jdk><name value="JDK18" /><type value="JavaSDK" />'
+ xml_str = ('<test><jdk><name value="JDK17" /><type value="JavaSDK" />'
'</jdk></test>')
self.jdk_table_xml._xml = ElementTree.fromstring(xml_str)
self.assertFalse(self.jdk_table_xml._check_android_sdk_in_xml())
xml_str = ('<test><jdk><name value="Android SDK 29 platform" />'
'<type value="Android SDK" />'
- '<additional jdk="JDK18" sdk="android-29" />'
+ '<additional jdk="JDK17" sdk="android-29" />'
'</jdk></test>')
self.jdk_table_xml._xml = ElementTree.fromstring(xml_str)
self.assertFalse(self.jdk_table_xml._check_android_sdk_in_xml())
xml_str = ('<test><jdk><name value="Android SDK 28 platform" />'
'<type value="Android SDK" />'
'<homePath value="/path/to/Android/SDK" />'
- '<additional jdk="JDK18" sdk="android-28" />'
+ '<additional jdk="JDK17" sdk="android-28" />'
'</jdk></test>')
self.jdk_table_xml._xml = ElementTree.fromstring(xml_str)
self.assertFalse(self.jdk_table_xml._check_android_sdk_in_xml())
xml_str = ('<test><jdk><name value="Android SDK 29 platform" />'
'<type value="Android SDK" />'
'<homePath value="/path/to/Android/SDK" />'
- '<additional jdk="JDK18" sdk="android-29" />'
+ '<additional jdk="JDK17" sdk="android-29" />'
'</jdk></test>')
self.jdk_table_xml._xml = ElementTree.fromstring(xml_str)
self.assertTrue(self.jdk_table_xml._check_android_sdk_in_xml())
diff --git a/aidegen/templates.py b/aidegen/templates.py
index a45557d..a1a901a 100644
--- a/aidegen/templates.py
+++ b/aidegen/templates.py
@@ -15,8 +15,11 @@
# limitations under the License.
"""The iml/xml templates of AIDEgen."""
+# pylint: disable=line-too-long
+
# Content of iml file.
-FILE_IML = """<?xml version="1.0" encoding="UTF-8"?>
+FILE_IML = """\
+<?xml version="1.0" encoding="UTF-8"?>
<module type="JAVA_MODULE" version="4">
@FACETS@
<component name="NewModuleRootManager" inherit-compiler-output="true">
@@ -30,7 +33,8 @@
</module>
"""
# TODO(b/153704028): Refactor to create iml file.
-IML = """<?xml version="1.0" encoding="UTF-8"?>
+IML = """\
+<?xml version="1.0" encoding="UTF-8"?>
<module type="JAVA_MODULE" version="4">{FACET}
<component name="NewModuleRootManager" inherit-compiler-output="true">
<exclude-output />{SOURCES}
@@ -70,7 +74,8 @@
<orderEntry type="module" module-name="{MODULE}" />"""
# The template content of modules.xml.
-XML_MODULES = """<?xml version="1.0" encoding="UTF-8"?>
+XML_MODULES = """\
+<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
@@ -82,7 +87,8 @@
"""
# The template content of vcs.xml.
-XML_VCS = """<?xml version="1.0" encoding="UTF-8"?>
+XML_VCS = """\
+<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
{GIT_MAPPINGS}
@@ -91,7 +97,8 @@
"""
# The template content of misc.xml
-XML_MISC = """<?xml version="1.0" encoding="UTF-8"?>
+XML_MISC = """\
+<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ConfigCheckProjectState">
<option name="disabledCheckers">
@@ -138,16 +145,16 @@
<default-html-doctype>http://www.w3.org/1999/xhtml
</default-html-doctype>
</component>
- <component name="ProjectRootManager" version="2" languageLevel="JDK_1_8"
- assert-keyword="true" project-jdk-name="JDK18"
+ <component name="ProjectRootManager" version="2" languageLevel="JDK_17"
+ assert-keyword="true" project-jdk-name="JDK17"
project-jdk-type="JavaSDK"/>
<component name="WebServicesPlugin" addRequiredLibraries="true"/>
</project>
-
"""
# The template content of compiler.xml
-XML_COMPILER = """<?xml version="1.0" encoding="UTF-8"?>
+XML_COMPILER = """\
+<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="CompilerConfiguration">
<option name="DEFAULT_COMPILER" value="Javac"/>
@@ -173,7 +180,8 @@
"""
# The template content of codeStyleConfig.xml
-XML_CODE_STYLE_CONFIG = """<component name="ProjectCodeStyleConfiguration">
+XML_CODE_STYLE_CONFIG = """\
+<component name="ProjectCodeStyleConfiguration">
<state>
<option name="USE_PER_PROJECT_SETTINGS" value="true" />
</state>
@@ -181,7 +189,8 @@
"""
# The template content of Apache_2.xml
-XML_APACHE_2 = """<component name="CopyrightManager">
+XML_APACHE_2 = """\
+<component name="CopyrightManager">
<copyright>
<option name="notice"
value="Copyright (C) &#36;today.year The Android Open Source Project Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License."/>
@@ -194,7 +203,8 @@
"""
# The template content of copyright/profiles_settings.xml
-XML_COPYRIGHT_PROFILES_SETTINGS = """<component name="CopyrightManager">
+XML_COPYRIGHT_PROFILES_SETTINGS = """\
+<component name="CopyrightManager">
<settings default="">
<module2copyright>
<element module="Project Files" copyright="Apache 2"/>
@@ -204,7 +214,8 @@
"""
# The template content of inspectionProfiles/profiles_settings.xml
-XML_INSPECTION_PROFILES_SETTINGS = """<component name="InspectionProjectProfileManager">
+XML_INSPECTION_PROFILES_SETTINGS = """\
+<component name="InspectionProjectProfileManager">
<settings>
<option name="PROJECT_PROFILE" value="Aidegen_Inspections" />
<version value="1.0" />
@@ -215,7 +226,8 @@
# The template content of inspectionProfiles/Aidegen_Inspections.xml
# N.b. this minimal configuration leaves most of the options unspecified,
# which means that they will be filled with default values set by Jetbrains.
-XML_INSPECTIONS = """<component name="InspectionProjectProfileManager">
+XML_INSPECTIONS = """\
+<component name="InspectionProjectProfileManager">
<profile version="1.0">
<option name="myName" value="Aidegen_Inspections" />
<inspection_tool class="JavaDoc" enabled="true" level="WARNING" enabled_by_default="true">
@@ -229,34 +241,90 @@
"""
# The configuration of JDK on Linux.
-LINUX_JDK_XML = """ <jdk version="2">
- <name value="JDK18" />
+LINUX_JDK_XML = """\
+ <jdk version="2">
+ <name value="JDK17" />
<type value="JavaSDK" />
- <version value="java version "1.8.0_152"" />
+ <version value="java version "17.0.4"" />
<homePath value="{JDKpath}" />
<roots>
<annotationsPath>
<root type="composite">
- <root url="jar://$APPLICATION_HOME_DIR$/lib/jdkAnnotations.jar!/" type="simple" />
+ <root url="jar://$APPLICATION_HOME_DIR$/plugins/java/lib/jdkAnnotations.jar!/" type="simple" />
</root>
</annotationsPath>
<classPath>
<root type="composite">
- <root url="jar://{JDKpath}/jre/lib/charsets.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/ext/cldrdata.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/ext/dnsns.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/ext/jaccess.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/ext/localedata.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/ext/nashorn.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/ext/sunec.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/ext/sunjce_provider.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/ext/sunpkcs11.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/ext/zipfs.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/jce.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/jsse.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/management-agent.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/resources.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/rt.jar!/" type="simple" />
+ <root url="jrt://{JDKpath}!/java.base" type="simple" />
+ <root url="jrt://{JDKpath}!/java.compiler" type="simple" />
+ <root url="jrt://{JDKpath}!/java.datatransfer" type="simple" />
+ <root url="jrt://{JDKpath}!/java.desktop" type="simple" />
+ <root url="jrt://{JDKpath}!/java.instrument" type="simple" />
+ <root url="jrt://{JDKpath}!/java.logging" type="simple" />
+ <root url="jrt://{JDKpath}!/java.management" type="simple" />
+ <root url="jrt://{JDKpath}!/java.management.rmi" type="simple" />
+ <root url="jrt://{JDKpath}!/java.naming" type="simple" />
+ <root url="jrt://{JDKpath}!/java.net.http" type="simple" />
+ <root url="jrt://{JDKpath}!/java.prefs" type="simple" />
+ <root url="jrt://{JDKpath}!/java.rmi" type="simple" />
+ <root url="jrt://{JDKpath}!/java.scripting" type="simple" />
+ <root url="jrt://{JDKpath}!/java.se" type="simple" />
+ <root url="jrt://{JDKpath}!/java.security.jgss" type="simple" />
+ <root url="jrt://{JDKpath}!/java.security.sasl" type="simple" />
+ <root url="jrt://{JDKpath}!/java.smartcardio" type="simple" />
+ <root url="jrt://{JDKpath}!/java.sql" type="simple" />
+ <root url="jrt://{JDKpath}!/java.sql.rowset" type="simple" />
+ <root url="jrt://{JDKpath}!/java.transaction.xa" type="simple" />
+ <root url="jrt://{JDKpath}!/java.xml" type="simple" />
+ <root url="jrt://{JDKpath}!/java.xml.crypto" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.accessibility" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.attach" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.charsets" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.compiler" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.crypto.cryptoki" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.crypto.ec" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.dynalink" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.editpad" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.hotspot.agent" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.httpserver" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.incubator.foreign" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.incubator.vector" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.internal.ed" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.internal.jvmstat" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.internal.le" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.internal.opt" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.internal.vm.ci" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.internal.vm.compiler" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.internal.vm.compiler.management" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jartool" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.javadoc" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jcmd" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jconsole" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jdeps" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jdi" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jdwp.agent" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jfr" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jlink" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jpackage" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jshell" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jsobject" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jstatd" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.localedata" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.management" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.management.agent" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.management.jfr" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.naming.dns" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.naming.rmi" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.net" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.nio.mapmode" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.random" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.sctp" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.security.auth" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.security.jgss" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.unsupported" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.unsupported.desktop" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.xml.dom" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.zipfs" type="simple" />
</root>
</classPath>
<javadocPath>
@@ -264,7 +332,76 @@
</javadocPath>
<sourcePath>
<root type="composite">
- <root url="jar://{JDKpath}/src.zip!/" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.se" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jdi" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jfr" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.net" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.rmi" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.sql" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.xml" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jcmd" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.sctp" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.base" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jdeps" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jlink" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.zipfs" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.prefs" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.attach" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jshell" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jstatd" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.random" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.naming" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.editpad" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jartool" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.javadoc" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.xml.dom" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.desktop" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.logging" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.charsets" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.compiler" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.dynalink" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jconsole" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jpackage" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jsobject" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.compiler" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.net.http" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.crypto.ec" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.scripting" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.httpserver" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jdwp.agent" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.localedata" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.management" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.naming.dns" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.naming.rmi" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.instrument" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.management" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.sql.rowset" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.xml.crypto" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.internal.ed" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.internal.le" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.nio.mapmode" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.unsupported" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.smartcardio" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.internal.opt" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.datatransfer" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.accessibility" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.hotspot.agent" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.security.auth" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.security.jgss" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.security.jgss" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.security.sasl" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.internal.vm.ci" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.management.jfr" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.management.rmi" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.transaction.xa" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.crypto.cryptoki" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.incubator.vector" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.internal.jvmstat" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.management.agent" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.incubator.foreign" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.unsupported.desktop" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.internal.vm.compiler" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.internal.vm.compiler.management" type="simple" />
</root>
</sourcePath>
</roots>
@@ -273,41 +410,90 @@
"""
# The configuration of JDK on Mac.
-MAC_JDK_XML = """ <jdk version="2">
- <name value="JDK18" />
+MAC_JDK_XML = """\
+ <jdk version="2">
+ <name value="JDK17" />
<type value="JavaSDK" />
- <version value="java version "1.8.0_152"" />
+ <version value="java version "17.0.4"" />
<homePath value="{JDKpath}" />
<roots>
<annotationsPath>
<root type="composite">
- <root url="jar://$APPLICATION_HOME_DIR$/lib/jdkAnnotations.jar!/" type="simple" />
+ <root url="jar://$APPLICATION_HOME_DIR$/plugins/java/lib/jdkAnnotations.jar!/" type="simple" />
</root>
</annotationsPath>
<classPath>
<root type="composite">
- <root url="jar://{JDKpath}/jre/lib/charsets.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/ext/cldrdata.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/ext/dnsns.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/ext/jaccess.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/ext/localedata.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/ext/nashorn.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/ext/sunec.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/ext/sunjce_provider.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/ext/sunpkcs11.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/ext/zipfs.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/jce.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/jsse.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/management-agent.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/resources.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/rt.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/management-agent.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/resources.jar!/" type="simple" />
- <root url="jar://{JDKpath}/jre/lib/rt.jar!/" type="simple" />
- <root url="jar://{JDKpath}/lib/dt.jar!/" type="simple" />
- <root url="jar://{JDKpath}/lib/jconsole.jar!/" type="simple" />
- <root url="jar://{JDKpath}/lib/sa-jdi.jar!/" type="simple" />
- <root url="jar://{JDKpath}/lib/tools.jar!/" type="simple" />
+ <root url="jrt://{JDKpath}!/java.base" type="simple" />
+ <root url="jrt://{JDKpath}!/java.compiler" type="simple" />
+ <root url="jrt://{JDKpath}!/java.datatransfer" type="simple" />
+ <root url="jrt://{JDKpath}!/java.desktop" type="simple" />
+ <root url="jrt://{JDKpath}!/java.instrument" type="simple" />
+ <root url="jrt://{JDKpath}!/java.logging" type="simple" />
+ <root url="jrt://{JDKpath}!/java.management" type="simple" />
+ <root url="jrt://{JDKpath}!/java.management.rmi" type="simple" />
+ <root url="jrt://{JDKpath}!/java.naming" type="simple" />
+ <root url="jrt://{JDKpath}!/java.net.http" type="simple" />
+ <root url="jrt://{JDKpath}!/java.prefs" type="simple" />
+ <root url="jrt://{JDKpath}!/java.rmi" type="simple" />
+ <root url="jrt://{JDKpath}!/java.scripting" type="simple" />
+ <root url="jrt://{JDKpath}!/java.se" type="simple" />
+ <root url="jrt://{JDKpath}!/java.security.jgss" type="simple" />
+ <root url="jrt://{JDKpath}!/java.security.sasl" type="simple" />
+ <root url="jrt://{JDKpath}!/java.smartcardio" type="simple" />
+ <root url="jrt://{JDKpath}!/java.sql" type="simple" />
+ <root url="jrt://{JDKpath}!/java.sql.rowset" type="simple" />
+ <root url="jrt://{JDKpath}!/java.transaction.xa" type="simple" />
+ <root url="jrt://{JDKpath}!/java.xml" type="simple" />
+ <root url="jrt://{JDKpath}!/java.xml.crypto" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.accessibility" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.attach" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.charsets" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.compiler" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.crypto.cryptoki" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.crypto.ec" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.dynalink" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.editpad" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.hotspot.agent" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.httpserver" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.incubator.foreign" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.incubator.vector" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.internal.ed" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.internal.jvmstat" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.internal.le" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.internal.opt" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.internal.vm.ci" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.internal.vm.compiler" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.internal.vm.compiler.management" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jartool" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.javadoc" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jcmd" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jconsole" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jdeps" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jdi" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jdwp.agent" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jfr" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jlink" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jpackage" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jshell" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jsobject" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.jstatd" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.localedata" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.management" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.management.agent" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.management.jfr" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.naming.dns" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.naming.rmi" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.net" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.nio.mapmode" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.random" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.sctp" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.security.auth" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.security.jgss" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.unsupported" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.unsupported.desktop" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.xml.dom" type="simple" />
+ <root url="jrt://{JDKpath}!/jdk.zipfs" type="simple" />
</root>
</classPath>
<javadocPath>
@@ -315,7 +501,76 @@
</javadocPath>
<sourcePath>
<root type="composite">
- <root url="jar://{JDKpath}/src.zip!/" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.se" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jdi" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jfr" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.net" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.rmi" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.sql" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.xml" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jcmd" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.sctp" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.base" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jdeps" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jlink" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.zipfs" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.prefs" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.attach" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jshell" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jstatd" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.random" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.naming" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.editpad" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jartool" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.javadoc" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.xml.dom" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.desktop" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.logging" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.charsets" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.compiler" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.dynalink" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jconsole" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jpackage" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jsobject" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.compiler" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.net.http" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.crypto.ec" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.scripting" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.httpserver" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.jdwp.agent" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.localedata" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.management" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.naming.dns" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.naming.rmi" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.instrument" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.management" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.sql.rowset" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.xml.crypto" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.internal.ed" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.internal.le" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.nio.mapmode" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.unsupported" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.smartcardio" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.internal.opt" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.datatransfer" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.accessibility" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.hotspot.agent" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.security.auth" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.security.jgss" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.security.jgss" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.security.sasl" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.internal.vm.ci" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.management.jfr" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.management.rmi" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/java.transaction.xa" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.crypto.cryptoki" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.incubator.vector" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.internal.jvmstat" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.management.agent" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.incubator.foreign" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.unsupported.desktop" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.internal.vm.compiler" type="simple" />
+ <root url="jar://{JDKpath}/lib/src.zip!/jdk.internal.vm.compiler.management" type="simple" />
</root>
</sourcePath>
</roots>
@@ -324,7 +579,8 @@
"""
# The file's header of CLion project file.
-CMAKELISTS_HEADER = """# THIS FILE WAS AUTOMATICALLY GENERATED!
+CMAKELISTS_HEADER = """\
+# THIS FILE WAS AUTOMATICALLY GENERATED!
# ANY MODIFICATION WILL BE OVERWRITTEN!
# To improve project view in Clion :
@@ -336,33 +592,40 @@
"""
# The configuration of Android SDK.
-ANDROID_SDK_XML = """ <jdk version="2">
+ANDROID_SDK_XML = """\
+ <jdk version="2">
<name value="Android API {CODE_NAME} Platform" />
<type value="Android SDK" />
- <version value="java version "1.8.0_152"" />
+ <version value="java version "17.0.4"" />
<homePath value="{ANDROID_SDK_PATH}" />
<roots>
<annotationsPath>
- <root type="composite" />
+ <root type="composite" >
+ <root url="jar://{ANDROID_SDK_PATH}/platforms/{FOLDER_NAME}/data/annotations.zip!/" type="simple" />
+ </root>
</annotationsPath>
<classPath>
<root type="composite">
- <root url="file://{ANDROID_SDK_PATH}/platforms/android-{CODE_NAME}/data/res" type="simple" />
+ <root url="jar://{ANDROID_SDK_PATH}/platforms/{FOLDER_NAME}/android.jar!/" type="simple" />
+ <root url="file://{ANDROID_SDK_PATH}/platforms/{FOLDER_NAME}/data/res" type="simple" />
</root>
</classPath>
<javadocPath>
- <root type="composite" />
+ <root type="composite" >
+ <root url="http://developer.android.com/reference/" type="simple" />
+ </root>
</javadocPath>
<sourcePath>
<root type="composite" />
</sourcePath>
</roots>
- <additional jdk="JDK18" sdk="android-{CODE_NAME}" />
+ <additional jdk="JDK17" sdk="android-{CODE_NAME}" />
</jdk>
"""
# The configuration of TEST_MAPPING in jsonSchemas.xml.
-TEST_MAPPING_SCHEMAS_XML = """<?xml version="1.0" encoding="UTF-8"?>
+TEST_MAPPING_SCHEMAS_XML = """\
+<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="JsonSchemaMappingsProjectConfiguration">
<state>
@@ -391,16 +654,17 @@
# The xml templates for Eclipse.
# .classpath template
-ECLIPSE_CLASSPATH_XML = """<?xml version="1.0" encoding="UTF-8"?>
+ECLIPSE_CLASSPATH_XML = """<\
+?xml version="1.0" encoding="UTF-8"?>
<classpath>
{SRC}
{LIB}
- <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
</classpath>
"""
# .project template
-ECLIPSE_PROJECT_XML = """<?xml version="1.0" encoding="UTF-8"?>
+ECLIPSE_PROJECT_XML = """\
+<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>{PROJECTNAME}</name>
<comment></comment>
@@ -423,7 +687,8 @@
"""
# The template of default AndroidManifest.xml.
-ANDROID_MANIFEST_CONTENT = """<?xml version="1.0" encoding="utf-8"?>
+ANDROID_MANIFEST_CONTENT = """\
+<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
android:versionCode="1"
android:versionName="1.0" >
@@ -431,7 +696,8 @@
"""
# The xml template for enabling debugger.
-XML_ENABLE_DEBUGGER = """<?xml version="1.0" encoding="UTF-8"?>
+XML_ENABLE_DEBUGGER = """\
+<?xml version="1.0" encoding="UTF-8"?>
<module type="JAVA_MODULE" version="4">
<component name="FacetManager">
<facet type="android" name="Android">
@@ -451,13 +717,15 @@
"""
# The default empty template of the jdk.table.xml.
-JDK_TABLE_XML = """<application>
+JDK_TABLE_XML = """\
+<application>
<component name="ProjectJdkTable">
</component>
</application>
"""
-XML_WORKSPACE = """<?xml version="1.0" encoding="UTF-8"?>
+XML_WORKSPACE = """\
+<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsManagerConfiguration">
<ignored-roots>
@@ -467,7 +735,8 @@
</project>
"""
-IGNORED_GITS = """<component name="VcsManagerConfiguration">
+IGNORED_GITS = """\
+<component name="VcsManagerConfiguration">
<ignored-roots>{GITS}</ignored-roots>
</component>
-"""
\ No newline at end of file
+"""
diff --git a/aidegen/test_data/jdk_table_xml/android_sdk.xml b/aidegen/test_data/jdk_table_xml/android_sdk.xml
index b1a8ba6..54f85e3 100644
--- a/aidegen/test_data/jdk_table_xml/android_sdk.xml
+++ b/aidegen/test_data/jdk_table_xml/android_sdk.xml
@@ -3,25 +3,30 @@
<jdk version="2">
<name value="Android API 28 Platform" />
<type value="Android SDK" />
- <version value="java version "1.8.0_152"" />
+ <version value="java version "17.0.4"" />
<homePath value="/path/to/Android/Sdk" />
<roots>
<annotationsPath>
- <root type="composite" />
+ <root type="composite" >
+ <root url="jar:///path/to/Android/Sdk/platforms/android-28/data/annotations.zip!/" type="simple" />
+ </root>
</annotationsPath>
<classPath>
<root type="composite">
+ <root url="jar:///path/to/Android/Sdk/platforms/android-28/android.jar!/" type="simple" />
<root url="file:///path/to/Android/Sdk/platforms/android-28/data/res" type="simple" />
</root>
</classPath>
<javadocPath>
- <root type="composite" />
+ <root type="composite" >
+ <root url="http://developer.android.com/reference/" type="simple" />
+ </root>
</javadocPath>
<sourcePath>
<root type="composite" />
</sourcePath>
</roots>
- <additional jdk="JDK18" sdk="android-28" />
+ <additional jdk="JDK17" sdk="android-28" />
</jdk>
</component>
</application>
diff --git a/aidegen/test_data/jdk_table_xml/android_sdk_nonexistent.xml b/aidegen/test_data/jdk_table_xml/android_sdk_nonexistent.xml
index 1619b6c..f7f564c 100644
--- a/aidegen/test_data/jdk_table_xml/android_sdk_nonexistent.xml
+++ b/aidegen/test_data/jdk_table_xml/android_sdk_nonexistent.xml
@@ -3,25 +3,30 @@
<jdk version="2">
<name value="Android API 28 Platform" />
<type value="Fake Android SDK" />
- <version value="java version "1.8.0_152"" />
+ <version value="java version "17.0.4"" />
<homePath value="/path/to/Android/Sdk" />
<roots>
<annotationsPath>
- <root type="composite" />
+ <root type="composite" >
+ <root url="jar:///path/to/Android/Sdk/platforms/android-28/data/annotations.zip!/" type="simple" />
+ </root>
</annotationsPath>
<classPath>
<root type="composite">
+ <root url="jar:///path/to/Android/Sdk/platforms/android-28/android.jar!/" type="simple" />
<root url="file:///path/to/Android/Sdk/platforms/android-28/data/res" type="simple" />
</root>
</classPath>
<javadocPath>
- <root type="composite" />
+ <root type="composite" >
+ <root url="http://developer.android.com/reference/" type="simple" />
+ </root>
</javadocPath>
<sourcePath>
<root type="composite" />
</sourcePath>
</roots>
- <additional jdk="JDK18" sdk="android-28" />
+ <additional jdk="JDK17" sdk="android-28" />
</jdk>
</component>
</application>
diff --git a/aidegen/test_data/jdk_table_xml/jdk17.xml b/aidegen/test_data/jdk_table_xml/jdk17.xml
new file mode 100644
index 0000000..72cddfc
--- /dev/null
+++ b/aidegen/test_data/jdk_table_xml/jdk17.xml
@@ -0,0 +1,169 @@
+<application>
+ <component name="ProjectJdkTable">
+ <jdk version="2">
+ <name value="JDK17" />
+ <type value="JavaSDK" />
+ <version value="java version "17.0.4"" />
+ <homePath value="/path/to/android/root/prebuilts/jdk/jdk17/linux-x86" />
+ <roots>
+ <annotationsPath>
+ <root type="composite">
+ <root url="jar://$APPLICATION_HOME_DIR$/plugins/java/lib/jdkAnnotations.jar!/" type="simple" />
+ </root>
+ </annotationsPath>
+ <classPath>
+ <root type="composite">
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.base" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.compiler" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.datatransfer" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.desktop" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.instrument" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.logging" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.management" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.management.rmi" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.naming" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.net.http" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.prefs" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.rmi" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.scripting" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.se" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.security.jgss" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.security.sasl" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.smartcardio" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.sql" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.sql.rowset" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.transaction.xa" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.xml" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.xml.crypto" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.accessibility" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.attach" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.charsets" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.compiler" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.crypto.cryptoki" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.crypto.ec" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.dynalink" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.editpad" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.hotspot.agent" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.httpserver" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.incubator.foreign" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.incubator.vector" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.internal.ed" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.internal.jvmstat" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.internal.le" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.internal.opt" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.internal.vm.ci" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.internal.vm.compiler" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.internal.vm.compiler.management" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jartool" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.javadoc" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jcmd" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jconsole" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jdeps" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jdi" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jdwp.agent" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jfr" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jlink" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jpackage" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jshell" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jsobject" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jstatd" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.localedata" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.management" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.management.agent" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.management.jfr" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.naming.dns" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.naming.rmi" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.net" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.nio.mapmode" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.random" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.sctp" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.security.auth" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.security.jgss" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.unsupported" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.unsupported.desktop" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.xml.dom" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.zipfs" type="simple" />
+ </root>
+ </classPath>
+ <javadocPath>
+ <root type="composite" />
+ </javadocPath>
+ <sourcePath>
+ <root type="composite">
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.se" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jdi" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jfr" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.net" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.rmi" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.sql" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.xml" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jcmd" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.sctp" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.base" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jdeps" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jlink" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.zipfs" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.prefs" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.attach" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jshell" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jstatd" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.random" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.naming" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.editpad" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jartool" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.javadoc" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.xml.dom" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.desktop" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.logging" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.charsets" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.compiler" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.dynalink" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jconsole" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jpackage" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jsobject" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.compiler" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.net.http" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.crypto.ec" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.scripting" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.httpserver" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jdwp.agent" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.localedata" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.management" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.naming.dns" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.naming.rmi" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.instrument" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.management" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.sql.rowset" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.xml.crypto" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.internal.ed" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.internal.le" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.nio.mapmode" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.unsupported" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.smartcardio" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.internal.opt" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.datatransfer" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.accessibility" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.hotspot.agent" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.security.auth" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.security.jgss" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.security.jgss" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.security.sasl" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.internal.vm.ci" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.management.jfr" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.management.rmi" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.transaction.xa" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.crypto.cryptoki" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.incubator.vector" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.internal.jvmstat" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.management.agent" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.incubator.foreign" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.unsupported.desktop" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.internal.vm.compiler" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.internal.vm.compiler.management" type="simple" />
+ </root>
+ </sourcePath>
+ </roots>
+ <additional />
+ </jdk>
+ </component>
+</application>
diff --git a/aidegen/test_data/jdk_table_xml/jdk18.xml b/aidegen/test_data/jdk_table_xml/jdk18.xml
deleted file mode 100644
index ea9a52e..0000000
--- a/aidegen/test_data/jdk_table_xml/jdk18.xml
+++ /dev/null
@@ -1,45 +0,0 @@
-<application>
- <component name="ProjectJdkTable">
- <jdk version="2">
- <name value="JDK18" />
- <type value="JavaSDK" />
- <version value="java version "1.8.0_152"" />
- <homePath value="/path/to/android/root/prebuilts/jdk/jdk8/linux-x86" />
- <roots>
- <annotationsPath>
- <root type="composite">
- <root url="jar://$APPLICATION_HOME_DIR$/lib/jdkAnnotations.jar!/" type="simple" />
- </root>
- </annotationsPath>
- <classPath>
- <root type="composite">
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/charsets.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/ext/cldrdata.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/ext/dnsns.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/ext/jaccess.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/ext/localedata.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/ext/nashorn.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/ext/sunec.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/ext/sunjce_provider.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/ext/sunpkcs11.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/ext/zipfs.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/jce.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/jsse.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/management-agent.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/resources.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/rt.jar!/" type="simple" />
- </root>
- </classPath>
- <javadocPath>
- <root type="composite" />
- </javadocPath>
- <sourcePath>
- <root type="composite">
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/src.zip!/" type="simple" />
- </root>
- </sourcePath>
- </roots>
- <additional />
- </jdk>
- </component>
-</application>
diff --git a/aidegen/test_data/jdk_table_xml/jdk_nonexistent.xml b/aidegen/test_data/jdk_table_xml/jdk_nonexistent.xml
index 2cc65c6..a85094a 100644
--- a/aidegen/test_data/jdk_table_xml/jdk_nonexistent.xml
+++ b/aidegen/test_data/jdk_table_xml/jdk_nonexistent.xml
@@ -5,33 +5,88 @@
<type value="JavaSDK" />
</jdk>
<jdk version="2">
- <name value="JDK18" />
+ <name value="JDK17" />
<type value="JavaSDK" />
- <version value="java version "1.8.0_152"" />
- <homePath value="/path/to/android/root/prebuilts/jdk/jdk8/linux-x86" />
+ <version value="java version "17.0.4"" />
+ <homePath value="/path/to/android/root/prebuilts/jdk/jdk17/linux-x86" />
<roots>
<annotationsPath>
<root type="composite">
- <root url="jar://$APPLICATION_HOME_DIR$/lib/jdkAnnotations.jar!/" type="simple" />
+ <root url="jar://$APPLICATION_HOME_DIR$/plugins/java/lib/jdkAnnotations.jar!/" type="simple" />
</root>
</annotationsPath>
<classPath>
<root type="composite">
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/charsets.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/ext/cldrdata.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/ext/dnsns.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/ext/jaccess.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/ext/localedata.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/ext/nashorn.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/ext/sunec.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/ext/sunjce_provider.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/ext/sunpkcs11.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/ext/zipfs.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/jce.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/jsse.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/management-agent.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/resources.jar!/" type="simple" />
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/jre/lib/rt.jar!/" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.base" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.compiler" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.datatransfer" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.desktop" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.instrument" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.logging" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.management" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.management.rmi" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.naming" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.net.http" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.prefs" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.rmi" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.scripting" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.se" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.security.jgss" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.security.sasl" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.smartcardio" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.sql" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.sql.rowset" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.transaction.xa" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.xml" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/java.xml.crypto" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.accessibility" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.attach" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.charsets" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.compiler" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.crypto.cryptoki" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.crypto.ec" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.dynalink" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.editpad" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.hotspot.agent" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.httpserver" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.incubator.foreign" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.incubator.vector" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.internal.ed" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.internal.jvmstat" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.internal.le" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.internal.opt" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.internal.vm.ci" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.internal.vm.compiler" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.internal.vm.compiler.management" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jartool" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.javadoc" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jcmd" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jconsole" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jdeps" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jdi" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jdwp.agent" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jfr" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jlink" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jpackage" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jshell" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jsobject" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.jstatd" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.localedata" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.management" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.management.agent" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.management.jfr" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.naming.dns" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.naming.rmi" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.net" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.nio.mapmode" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.random" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.sctp" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.security.auth" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.security.jgss" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.unsupported" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.unsupported.desktop" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.xml.dom" type="simple" />
+ <root url="jrt:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86!/jdk.zipfs" type="simple" />
</root>
</classPath>
<javadocPath>
@@ -39,7 +94,76 @@
</javadocPath>
<sourcePath>
<root type="composite">
- <root url="jar:///path/to/android/root/prebuilts/jdk/jdk8/linux-x86/src.zip!/" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.se" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jdi" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jfr" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.net" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.rmi" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.sql" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.xml" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jcmd" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.sctp" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.base" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jdeps" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jlink" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.zipfs" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.prefs" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.attach" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jshell" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jstatd" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.random" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.naming" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.editpad" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jartool" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.javadoc" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.xml.dom" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.desktop" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.logging" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.charsets" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.compiler" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.dynalink" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jconsole" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jpackage" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jsobject" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.compiler" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.net.http" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.crypto.ec" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.scripting" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.httpserver" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.jdwp.agent" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.localedata" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.management" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.naming.dns" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.naming.rmi" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.instrument" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.management" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.sql.rowset" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.xml.crypto" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.internal.ed" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.internal.le" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.nio.mapmode" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.unsupported" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.smartcardio" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.internal.opt" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.datatransfer" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.accessibility" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.hotspot.agent" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.security.auth" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.security.jgss" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.security.jgss" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.security.sasl" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.internal.vm.ci" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.management.jfr" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.management.rmi" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/java.transaction.xa" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.crypto.cryptoki" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.incubator.vector" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.internal.jvmstat" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.management.agent" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.incubator.foreign" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.unsupported.desktop" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.internal.vm.compiler" type="simple" />
+ <root url="jar:///path/to/android/root/prebuilts/jdk/jdk17/linux-x86/lib/src.zip!/jdk.internal.vm.compiler.management" type="simple" />
</root>
</sourcePath>
</roots>
diff --git a/aidegen/unittest_constants.py b/aidegen/unittest_constants.py
index f0edaed..5b5fd71 100644
--- a/aidegen/unittest_constants.py
+++ b/aidegen/unittest_constants.py
@@ -32,7 +32,7 @@
'/opt/intellij-ce-2018.1/bin/idea.sh', '/opt/intellij-ce-2017.2/bin/idea.sh'
] # script path data
-SH_GODEN_SAMPLE = '/opt/intellij-ce-2018.1/bin/idea.sh'
+SH_GOLDEN_SAMPLE = '/opt/intellij-ce-2018.1/bin/idea.sh'
IDEA_SH_FIND_NONE = '' # Neither IntelliJ CE nor UE script exists.
TEST_PATH = 'path'
diff --git a/aidegen/vscode/__init__.py b/aidegen/vscode/__init__.py
deleted file mode 100755
index e69de29..0000000
--- a/aidegen/vscode/__init__.py
+++ /dev/null
diff --git a/aidegen/vscode/vscode_native_project_file_gen.py b/aidegen/vscode/vscode_native_project_file_gen.py
index 4cfec1c..48e6077 100644
--- a/aidegen/vscode/vscode_native_project_file_gen.py
+++ b/aidegen/vscode/vscode_native_project_file_gen.py
@@ -30,7 +30,7 @@
_CONFIG = 'configurations'
_NAME = 'name'
_LINUX = 'Linux'
-_INTELL_SENSE = 'intelliSenseMode'
+_INTELLI_SENSE = 'intelliSenseMode'
_GCC_X64 = 'clang-x64'
_INC_PATH = 'includePath'
_SYS_INC_PATH = 'systemIncludePath'
@@ -50,7 +50,7 @@
_COMPILER_PATH = '/usr/bin/gcc'
_COMPILER_EMPTY = '"compilerPath" is empty and will skip querying a compiler.'
_FALSE = 'false'
-_INTELI_SENSE_ENGINE = 'C_Cpp.intelliSenseEngine'
+_INTELLI_SENSE_ENGINE = 'C_Cpp.intelliSenseEngine'
_DEFAULT = 'Default'
@@ -85,7 +85,8 @@
os.path.join(self.config_dir, _C_CPP_PROPERTIES_CONFIG_FILE_NAME),
data)
- def _create_c_cpp_properties_dict(self, native_mod_info, mod_names):
+ @staticmethod
+ def _create_c_cpp_properties_dict(native_mod_info, mod_names):
"""Creates the dictionary of 'c_cpp_properties.json' file.
Args:
@@ -96,12 +97,11 @@
Returns:
A dictionary contains the formats of c_cpp_properties.json file.
"""
- configs = {}
- configs[_NAME] = _LINUX
+ configs = {_NAME: _LINUX}
includes = set()
for mod_name in mod_names:
includes.update(native_mod_info.get_module_includes(mod_name))
- browse = {_LIMIT_SYM: _FALSE, _INTELI_SENSE_ENGINE: _DEFAULT}
+ browse = {_LIMIT_SYM: _FALSE, _INTELLI_SENSE_ENGINE: _DEFAULT}
if includes:
paths = _make_header_file_paths(includes)
configs[_INC_PATH] = paths
@@ -117,9 +117,8 @@
configs[_COMPILE_CMD] = os.path.join(
root_dir, _COMPILE_COMMANDS_FILE_DIR, _COMPILE_COMMANDS_FILE_NAME)
configs[_BROWSE] = browse
- configs[_INTELL_SENSE] = _GCC_X64
- data = {}
- data[_CONFIG] = [configs]
+ configs[_INTELLI_SENSE] = _GCC_X64
+ data = {_CONFIG: [configs]}
return data
diff --git a/aidegen/vscode/vscode_native_project_file_gen_unittest.py b/aidegen/vscode/vscode_native_project_file_gen_unittest.py
index d9a6c39..56f1a14 100644
--- a/aidegen/vscode/vscode_native_project_file_gen_unittest.py
+++ b/aidegen/vscode/vscode_native_project_file_gen_unittest.py
@@ -24,6 +24,7 @@
from aidegen.vscode import vscode_native_project_file_gen
+# pylint: disable=protected-access
class VSCodeNativeProjectFileGenUnittests(unittest.TestCase):
"""Unit tests for vscode_native_project_file_gen.py"""
diff --git a/aidegen/vscode/vscode_workspace_file_gen.py b/aidegen/vscode/vscode_workspace_file_gen.py
index 9399709..c1034cf 100644
--- a/aidegen/vscode/vscode_workspace_file_gen.py
+++ b/aidegen/vscode/vscode_workspace_file_gen.py
@@ -83,7 +83,7 @@
def _get_unique_project_name(abs_path, root_dir):
- """Gets an unique project name from the project absolute path.
+ """Gets a unique project name from the project absolute path.
If it's the whole Android source case, replace the relative path '.' with
the root folder name.
@@ -93,7 +93,7 @@
root_dir: A string of the absolute Android root path.
Returns:
- A string of an unique project name.
+ A string of a unique project name.
"""
unique_name = os.path.relpath(abs_path, root_dir).replace(os.sep, '.')
if unique_name == '.':
diff --git a/aidegen/vscode/vscode_workspace_file_gen_unittest.py b/aidegen/vscode/vscode_workspace_file_gen_unittest.py
index d1de483..2823dd0 100644
--- a/aidegen/vscode/vscode_workspace_file_gen_unittest.py
+++ b/aidegen/vscode/vscode_workspace_file_gen_unittest.py
@@ -33,7 +33,7 @@
@mock.patch.object(
vscode_workspace_file_gen, '_create_code_workspace_file_content')
@mock.patch.object(common_util, 'get_android_root_dir')
- def test_vdcode_apply_optional_config(
+ def test_vscode_apply_optional_config(
self, mock_get_root, mock_ws_content, mock_get_unique):
"""Test IdeVSCode's apply_optional_config method."""
mock_get_root.return_value = 'a/b'
diff --git a/aidegen_functional_test/Android.bp b/aidegen_functional_test/Android.bp
index 4b7c05c..356037c 100644
--- a/aidegen_functional_test/Android.bp
+++ b/aidegen_functional_test/Android.bp
@@ -16,25 +16,10 @@
default_applicable_licenses: ["Android-Apache-2.0"],
}
-python_defaults {
- name: "aidegen_functional_test_default",
- pkg_path: "aidegen_functional_test",
- version: {
- py2: {
- enabled: false,
- embedded_launcher: false,
- },
- py3: {
- enabled: true,
- embedded_launcher: false,
- },
- },
-}
-
python_binary_host {
name: "aidegen_functional_test",
suffix: "-dev",
- defaults: ["aidegen_functional_test_default"],
+ pkg_path: "aidegen_functional_test",
main: "aidegen_functional_test_main.py",
srcs: [
"**/*.py",
diff --git a/aidegen_functional_test/__init__.py b/aidegen_functional_test/__init__.py
deleted file mode 100755
index e69de29..0000000
--- a/aidegen_functional_test/__init__.py
+++ /dev/null
diff --git a/aidegen_functional_test/test_data/golden_samples.json b/aidegen_functional_test/test_data/golden_samples.json
index 3b67a2a..418b401 100644
--- a/aidegen_functional_test/test_data/golden_samples.json
+++ b/aidegen_functional_test/test_data/golden_samples.json
@@ -574,9 +574,7 @@
"jar://$PROJECT_DIR$/out/target/common/obj/JAVA_LIBRARIES/sdk_public_18_android_intermediates/classes-header.jar!/",
"jar://$PROJECT_DIR$/out/target/common/obj/JAVA_LIBRARIES/sdk_public_8_android_intermediates/classes-header.jar!/",
"jar://$PROJECT_DIR$/out/target/common/obj/JAVA_LIBRARIES/sdk_public_9_android_intermediates/classes-header.jar!/",
- "jar://$PROJECT_DIR$/out/target/common/obj/JAVA_LIBRARIES/updatable_media_stubs_intermediates/classes-header.jar!/",
- "jar://$PROJECT_DIR$/prebuilts/jdk/jdk8/linux-x86/jre/lib/jce.jar!/",
- "jar://$PROJECT_DIR$/prebuilts/jdk/jdk8/linux-x86/jre/lib/rt.jar!/"
+ "jar://$PROJECT_DIR$/out/target/common/obj/JAVA_LIBRARIES/updatable_media_stubs_intermediates/classes-header.jar!/"
]
},
"tools-tradefederation-core.iml": {
@@ -664,9 +662,7 @@
"jar://$PROJECT_DIR$/out/target/common/obj/JAVA_LIBRARIES/core-lambda-stubs_intermediates/classes-header.jar!/",
"jar://$PROJECT_DIR$/out/target/common/obj/JAVA_LIBRARIES/sdk_public_4_android_intermediates/classes-header.jar!/",
"jar://$PROJECT_DIR$/out/target/common/obj/JAVA_LIBRARIES/sdk_public_7_android_intermediates/classes-header.jar!/",
- "jar://$PROJECT_DIR$/out/target/common/obj/JAVA_LIBRARIES/sdk_public_8_android_intermediates/classes-header.jar!/",
- "jar://$PROJECT_DIR$/prebuilts/jdk/jdk8/linux-x86/jre/lib/jce.jar!/",
- "jar://$PROJECT_DIR$/prebuilts/jdk/jdk8/linux-x86/jre/lib/rt.jar!/"
+ "jar://$PROJECT_DIR$/out/target/common/obj/JAVA_LIBRARIES/sdk_public_8_android_intermediates/classes-header.jar!/"
]
}
}
diff --git a/asuite_run_unittests.py b/asuite_run_unittests.py
index 4f66b4d..65497c2 100755
--- a/asuite_run_unittests.py
+++ b/asuite_run_unittests.py
@@ -29,8 +29,8 @@
ASUITE_HOME = os.path.dirname(os.path.realpath(__file__))
ASUITE_PLUGIN_PATH = os.path.join(ASUITE_HOME, "asuite_plugin")
-ATEST_CMD = os.path.join(ASUITE_HOME, "atest", "atest_run_unittests.py")
-ATEST2_CMD = os.path.join(ASUITE_HOME, "atest-py2", "atest_run_unittests.py")
+# Temporarily running full module testing for atest due to b/248507158.
+ATEST_CMD = "atest atest_unittests --host"
AIDEGEN_CMD = "atest aidegen_unittests --host"
PLUGIN_LIB_CMD = "atest plugin_lib_unittests --host"
GRADLE_TEST = "/gradlew test"
@@ -51,8 +51,6 @@
for f in files:
if 'atest' in f:
cmd_dict.update({ATEST_CMD: None})
- if 'atest-py2' in f:
- cmd_dict.update({ATEST2_CMD: None})
if 'aidegen' in f:
cmd_dict.update({AIDEGEN_CMD: None})
if 'plugin_lib' in f:
diff --git a/atest-py2/Android.bp b/atest-py2/Android.bp
deleted file mode 100644
index 08b86ad..0000000
--- a/atest-py2/Android.bp
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright (C) 2018 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package {
- default_applicable_licenses: ["Android-Apache-2.0"],
-}
-
-python_binary_host {
- name: "atest-py2",
- main: "atest.py",
- srcs: [
- "**/*.py",
- ],
- exclude_srcs: [
- "*_unittest.py",
- "*/*_unittest.py",
- "asuite_lib_test/*.py",
- "proto/*_pb2.py",
- "proto/__init__.py",
- ],
- libs: [
- "atest_py2_proto",
- ],
- data: [
- "tools/updatedb_darwin.sh",
- ],
- // Make atest's built name to atest-py2-dev
- stem: "atest-py2-dev",
- defaults: ["atest_py2_default"],
- dist: {
- targets: ["droidcore"],
- },
-}
-
-//python_test_host {
-// name: "atest-py2_unittests",
-// main: "atest_run_unittests.py",
-// pkg_path: "atest",
-// srcs: [
-// "**/*.py",
-// ],
-// data: [
-// "tools/updatedb_darwin.sh",
-// "unittest_data/**/*",
-// "unittest_data/**/.*",
-// ],
-// exclude_srcs: [
-// "asuite_lib_test/*.py",
-// "proto/*_pb2.py",
-// "proto/__init__.py",
-// ],
-// libs: [
-// "py-mock",
-// "atest_py2_proto",
-// ],
-// test_config: "atest_unittests.xml",
-// defaults: ["atest_py2_default"],
-//}
-
-python_library_host {
- name: "atest_py2_proto",
- defaults: ["atest_py2_default"],
- srcs: [
- "proto/*.proto",
- ],
- proto: {
- canonical_path_from_root: false,
- },
-}
-
-python_defaults {
- name: "atest_py2_default",
- version: {
- py2: {
- enabled: true,
- embedded_launcher: false,
- },
- py3: {
- enabled: false,
- embedded_launcher: false,
- },
- },
-}
diff --git a/atest-py2/INTEGRATION_TESTS b/atest-py2/INTEGRATION_TESTS
deleted file mode 100644
index 2bf986e..0000000
--- a/atest-py2/INTEGRATION_TESTS
+++ /dev/null
@@ -1,86 +0,0 @@
-# TODO (b/121362882): Add deviceless tests when dry-run is ready.
-###[Test Finder: MODULE, Test Runner:AtestTradefedTestRunner]###
-###Purpose: Test with finder: MODULE and runner: AtestTradefedTestRunner###
-HelloWorldTests
-hello_world_test
-
-
-###[Test Finder: MODULE_FILE_PATH, Test Runner:AtestTradefedTestRunner]###
-###Purpose: Test with finder: MODULE_FILE_PATH and runner: AtestTradefedTestRunner###
-# frameworks/base/services/tests/servicestests/src/com/android/server/wm/ScreenDecorWindowTests.java#testFlagChange
-# packages/apps/Bluetooth/tests/unit/Android.mk
-platform_testing/tests/example/native
-# platform_testing/tests/example/native/
-platform_testing/tests/example/native/Android.bp
-
-
-###[Test Finder: INTEGRATION_FILE_PATH, Test Runner:AtestTradefedTestRunner]###
-###Purpose: Test with finder: INTEGRATION_FILE_PATH and runner: AtestTradefedTestRunner###
-tools/tradefederation/core/res/config/native-benchmark.xml
-
-
-###[Test Finder: MODULE_CLASS, Test Runner:AtestTradefedTestRunner]###
-###Purpose: Test with finder: MODULE_CLASS and runner: AtestTradefedTestRunner###
-CtsAnimationTestCases:AnimatorTest
-CtsSampleDeviceTestCases:SampleDeviceTest#testSharedPreferences
-CtsSampleDeviceTestCases:android.sample.cts.SampleDeviceReportLogTest
-
-
-###[Test Finder: QUALIFIED_CLASS, Test Runner:AtestTradefedTestRunner]###
-###Purpose: Test with finder: QUALIFIED_CLASS and runner: AtestTradefedTestRunner###
-# com.android.server.display.DisplayManagerServiceTest
-# com.android.server.wm.ScreenDecorWindowTests#testMultipleDecors
-
-
-###[Test Finder: MODULE_PACKAGE, Test Runner:AtestTradefedTestRunner]###
-###Purpose: Test with finder: MODULE_PACKAGE and runner: AtestTradefedTestRunner###
-CtsSampleDeviceTestCases:android.sample.cts
-
-
-###[Test Finder: PACKAGE, Test Runner:AtestTradefedTestRunner]###
-###Purpose: Test with finder: PACKAGE and runner: AtestTradefedTestRunner###
-android.animation.cts
-
-
-###[Test Finder: CLASS, Test Runner:AtestTradefedTestRunner]###
-###Purpose: Test with finder: CLASS and runner: AtestTradefedTestRunner###
-AnimatorTest
-
-
-###[Test Finder: CC_CLASS, Test Runner:AtestTradefedTestRunner]###
-###Purpose: Test with finder: CC_CLASS and runner: AtestTradefedTestRunner###
-PacketFragmenterTest
-# PacketFragmenterTest#test_no_fragment_necessary
-PacketFragmenterTest#test_no_fragment_necessary,test_ble_fragment_necessary
-
-
-###[Test Finder: INTEGRATION, Test Runner:AtestTradefedTestRunner]###
-###Purpose: Test with finder: INTEGRATION and runner: AtestTradefedTestRunner###
-native-benchmark
-
-
-###[Test Finder: MODULE, Test Runner: VtsTradefedTestRunner]####
-###Purpose: Test with finder: MODULE and runner: VtsTradefedTestRunner###
-VtsCodelabHelloWorldTest
-
-
-###[Test Finder: MODULE, Test Runner: RobolectricTestRunner]#####
-###Purpose: Test with finder: MODULE and runner: RobolectricTestRunner###
-CarMessengerRoboTests
-###Purpose: Test with input path for RobolectricTest###
-packages/apps/Car/Messenger/tests/robotests/src/com/android/car/messenger/MessengerDelegateTest.java
-
-
-###[Test Finder: SUITE_PLAN, Test Runner: SuitePlanTestRunner]###
-###Purpose: Test with finder: SUITE_PLAN and runner: SuitePlanTestRunner###
-# cts-common
-
-
-###[Test Finder: SUITE_PLAN_FILE_PATH, Test Runner: SuitePlanTestRunner]###
-###Purpose: Test with finder: SUITE_PLAN_FILE_PATH and runner: SuitePlanTestRunner###
-# test/suite_harness/tools/cts-tradefed/res/config/cts.xml
-
-
-###[MULTIPLE-TESTS + AtestTradefedTestRunner]###
-###Purpose: Test with mixed testcases###
-CtsSampleDeviceTestCases CtsAnimationTestCases
diff --git a/atest-py2/OWNERS b/atest-py2/OWNERS
deleted file mode 100644
index 4d3541a..0000000
--- a/atest-py2/OWNERS
+++ /dev/null
@@ -1,4 +0,0 @@
-dshi@google.com
-easoncylee@google.com
-kevcheng@google.com
-yangbill@google.com
diff --git a/atest-py2/README.md b/atest-py2/README.md
deleted file mode 100644
index 3824245..0000000
--- a/atest-py2/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Atest
-
-The contents of this page have been moved to source.android.com.
-
-See:
-[Atest](https://source.android.com/compatibility/tests/development/atest)
diff --git a/atest-py2/__init__.py b/atest-py2/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/atest-py2/__init__.py
+++ /dev/null
diff --git a/atest-py2/asuite_metrics.py b/atest-py2/asuite_metrics.py
deleted file mode 100644
index 88fca0a..0000000
--- a/atest-py2/asuite_metrics.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Asuite simple Metrics Functions"""
-
-import json
-import logging
-import os
-import uuid
-
-try:
- # PYTHON2
- from urllib2 import Request
- from urllib2 import urlopen
-except ImportError:
- # PYTHON3
- from urllib.request import Request
- from urllib.request import urlopen
-
-
-_JSON_HEADERS = {'Content-Type': 'application/json'}
-_METRICS_RESPONSE = 'done'
-_METRICS_TIMEOUT = 2 #seconds
-_META_FILE = os.path.join(os.path.expanduser('~'),
- '.config', 'asuite', '.metadata')
-_ANDROID_BUILD_TOP = 'ANDROID_BUILD_TOP'
-
-UNUSED_UUID = '00000000-0000-4000-8000-000000000000'
-
-
-#pylint: disable=broad-except
-def log_event(metrics_url, unused_key_fallback=True, **kwargs):
- """Base log event function for asuite backend.
-
- Args:
- metrics_url: String, URL to report metrics to.
- unused_key_fallback: Boolean, If True and unable to get grouping key,
- use a unused key otherwise return out. Sometimes we
- don't want to return metrics for users we are
- unable to identify. Default True.
- kwargs: Dict, additional fields we want to return metrics for.
- """
- try:
- try:
- key = str(_get_grouping_key())
- except Exception:
- if not unused_key_fallback:
- return
- key = UNUSED_UUID
- data = {'grouping_key': key,
- 'run_id': str(uuid.uuid4())}
- if kwargs:
- data.update(kwargs)
- data = json.dumps(data)
- request = Request(metrics_url, data=data,
- headers=_JSON_HEADERS)
- response = urlopen(request, timeout=_METRICS_TIMEOUT)
- content = response.read()
- if content != _METRICS_RESPONSE:
- raise Exception('Unexpected metrics response: %s' % content)
- except Exception as e:
- logging.debug('Exception sending metrics: %s', e)
-
-
-def _get_grouping_key():
- """Get grouping key. Returns UUID.uuid4."""
- if os.path.isfile(_META_FILE):
- with open(_META_FILE) as f:
- try:
- return uuid.UUID(f.read(), version=4)
- except ValueError:
- logging.debug('malformed group_key in file, rewriting')
- # TODO: Delete get_old_key() on 11/17/2018
- key = _get_old_key() or uuid.uuid4()
- dir_path = os.path.dirname(_META_FILE)
- if os.path.isfile(dir_path):
- os.remove(dir_path)
- try:
- os.makedirs(dir_path)
- except OSError as e:
- if not os.path.isdir(dir_path):
- raise e
- with open(_META_FILE, 'w+') as f:
- f.write(str(key))
- return key
-
-
-def _get_old_key():
- """Get key from old meta data file if exists, else return None."""
- old_file = os.path.join(os.environ[_ANDROID_BUILD_TOP],
- 'tools/tradefederation/core/atest', '.metadata')
- key = None
- if os.path.isfile(old_file):
- with open(old_file) as f:
- try:
- key = uuid.UUID(f.read(), version=4)
- except ValueError:
- logging.debug('error reading old key')
- os.remove(old_file)
- return key
diff --git a/atest-py2/atest.py b/atest-py2/atest.py
deleted file mode 100755
index fe9b240..0000000
--- a/atest-py2/atest.py
+++ /dev/null
@@ -1,721 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2017, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Command line utility for running Android tests through TradeFederation.
-
-atest helps automate the flow of building test modules across the Android
-code base and executing the tests via the TradeFederation test harness.
-
-atest is designed to support any test types that can be ran by TradeFederation.
-"""
-
-from __future__ import print_function
-
-import logging
-import os
-import sys
-import tempfile
-import time
-import platform
-
-from multiprocessing import Process
-
-import atest_arg_parser
-import atest_error
-import atest_execution_info
-import atest_utils
-import bug_detector
-import cli_translator
-# pylint: disable=import-error
-import constants
-import module_info
-import result_reporter
-import test_runner_handler
-
-from metrics import metrics
-from metrics import metrics_base
-from metrics import metrics_utils
-from test_runners import regression_test_runner
-from tools import atest_tools
-
-EXPECTED_VARS = frozenset([
- constants.ANDROID_BUILD_TOP,
- 'ANDROID_TARGET_OUT_TESTCASES',
- constants.ANDROID_OUT])
-TEST_RUN_DIR_PREFIX = "%Y%m%d_%H%M%S"
-CUSTOM_ARG_FLAG = '--'
-OPTION_NOT_FOR_TEST_MAPPING = (
- 'Option `%s` does not work for running tests in TEST_MAPPING files')
-
-DEVICE_TESTS = 'tests that require device'
-HOST_TESTS = 'tests that do NOT require device'
-RESULT_HEADER_FMT = '\nResults from %(test_type)s:'
-RUN_HEADER_FMT = '\nRunning %(test_count)d %(test_type)s.'
-TEST_COUNT = 'test_count'
-TEST_TYPE = 'test_type'
-# Tasks that must run in the build time but unable to build by soong.
-# (e.g subprocesses that invoke host commands.)
-EXTRA_TASKS = {
- 'index-targets': atest_tools.index_targets
-}
-
-
-def _run_extra_tasks(join=False):
- """Execute EXTRA_TASKS with multiprocessing.
-
- Args:
- join: A boolean that indicates the process should terminate when
- the main process ends or keep itself alive. True indicates the
- main process will wait for all subprocesses finish while False represents
- killing all subprocesses when the main process exits.
- """
- _running_procs = []
- for task in EXTRA_TASKS.values():
- proc = Process(target=task)
- proc.daemon = not join
- proc.start()
- _running_procs.append(proc)
- if join:
- for proc in _running_procs:
- proc.join()
-
-
-def _parse_args(argv):
- """Parse command line arguments.
-
- Args:
- argv: A list of arguments.
-
- Returns:
- An argspace.Namespace class instance holding parsed args.
- """
- # Store everything after '--' in custom_args.
- pruned_argv = argv
- custom_args_index = None
- if CUSTOM_ARG_FLAG in argv:
- custom_args_index = argv.index(CUSTOM_ARG_FLAG)
- pruned_argv = argv[:custom_args_index]
- parser = atest_arg_parser.AtestArgParser()
- parser.add_atest_args()
- args = parser.parse_args(pruned_argv)
- args.custom_args = []
- if custom_args_index is not None:
- args.custom_args = argv[custom_args_index+1:]
- return args
-
-
-def _configure_logging(verbose):
- """Configure the logger.
-
- Args:
- verbose: A boolean. If true display DEBUG level logs.
- """
- log_format = '%(asctime)s %(filename)s:%(lineno)s:%(levelname)s: %(message)s'
- datefmt = '%Y-%m-%d %H:%M:%S'
- if verbose:
- logging.basicConfig(level=logging.DEBUG, format=log_format, datefmt=datefmt)
- else:
- logging.basicConfig(level=logging.INFO, format=log_format, datefmt=datefmt)
-
-
-def _missing_environment_variables():
- """Verify the local environment has been set up to run atest.
-
- Returns:
- List of strings of any missing environment variables.
- """
- missing = filter(None, [x for x in EXPECTED_VARS if not os.environ.get(x)])
- if missing:
- logging.error('Local environment doesn\'t appear to have been '
- 'initialized. Did you remember to run lunch? Expected '
- 'Environment Variables: %s.', missing)
- return missing
-
-
-def make_test_run_dir():
- """Make the test run dir in ATEST_RESULT_ROOT.
-
- Returns:
- A string of the dir path.
- """
- if not os.path.exists(constants.ATEST_RESULT_ROOT):
- os.makedirs(constants.ATEST_RESULT_ROOT)
- ctime = time.strftime(TEST_RUN_DIR_PREFIX, time.localtime())
- test_result_dir = tempfile.mkdtemp(prefix='%s_' % ctime,
- dir=constants.ATEST_RESULT_ROOT)
- return test_result_dir
-
-
-def get_extra_args(args):
- """Get extra args for test runners.
-
- Args:
- args: arg parsed object.
-
- Returns:
- Dict of extra args for test runners to utilize.
- """
- extra_args = {}
- if args.wait_for_debugger:
- extra_args[constants.WAIT_FOR_DEBUGGER] = None
- steps = args.steps or constants.ALL_STEPS
- if constants.INSTALL_STEP not in steps:
- extra_args[constants.DISABLE_INSTALL] = None
- # The key and its value of the dict can be called via:
- # if args.aaaa:
- # extra_args[constants.AAAA] = args.aaaa
- arg_maps = {'all_abi': constants.ALL_ABI,
- 'collect_tests_only': constants.COLLECT_TESTS_ONLY,
- 'custom_args': constants.CUSTOM_ARGS,
- 'disable_teardown': constants.DISABLE_TEARDOWN,
- 'dry_run': constants.DRY_RUN,
- 'generate_baseline': constants.PRE_PATCH_ITERATIONS,
- 'generate_new_metrics': constants.POST_PATCH_ITERATIONS,
- 'host': constants.HOST,
- 'instant': constants.INSTANT,
- 'iterations': constants.ITERATIONS,
- 'rerun_until_failure': constants.RERUN_UNTIL_FAILURE,
- 'retry_any_failure': constants.RETRY_ANY_FAILURE,
- 'serial': constants.SERIAL,
- 'sharding': constants.SHARDING,
- 'tf_debug': constants.TF_DEBUG,
- 'tf_template': constants.TF_TEMPLATE,
- 'user_type': constants.USER_TYPE}
- not_match = [k for k in arg_maps if k not in vars(args)]
- if not_match:
- raise AttributeError('%s object has no attribute %s'
- %(type(args).__name__, not_match))
- extra_args.update({arg_maps.get(k): v for k, v in vars(args).items()
- if arg_maps.get(k) and v})
- return extra_args
-
-
-def _get_regression_detection_args(args, results_dir):
- """Get args for regression detection test runners.
-
- Args:
- args: parsed args object.
- results_dir: string directory to store atest results.
-
- Returns:
- Dict of args for regression detection test runner to utilize.
- """
- regression_args = {}
- pre_patch_folder = (os.path.join(results_dir, 'baseline-metrics') if args.generate_baseline
- else args.detect_regression.pop(0))
- post_patch_folder = (os.path.join(results_dir, 'new-metrics') if args.generate_new_metrics
- else args.detect_regression.pop(0))
- regression_args[constants.PRE_PATCH_FOLDER] = pre_patch_folder
- regression_args[constants.POST_PATCH_FOLDER] = post_patch_folder
- return regression_args
-
-
-def _validate_exec_mode(args, test_infos, host_tests=None):
- """Validate all test execution modes are not in conflict.
-
- Exit the program with error code if have device-only and host-only.
- If no conflict and host side, add args.host=True.
-
- Args:
- args: parsed args object.
- test_info: TestInfo object.
- host_tests: True if all tests should be deviceless, False if all tests
- should be device tests. Default is set to None, which means
- tests can be either deviceless or device tests.
- """
- all_device_modes = [x.get_supported_exec_mode() for x in test_infos]
- err_msg = None
- # In the case of '$atest <device-only> --host', exit.
- if (host_tests or args.host) and constants.DEVICE_TEST in all_device_modes:
- err_msg = ('Test side and option(--host) conflict. Please remove '
- '--host if the test run on device side.')
- # In the case of '$atest <host-only> <device-only> --host' or
- # '$atest <host-only> <device-only>', exit.
- if (constants.DEVICELESS_TEST in all_device_modes and
- constants.DEVICE_TEST in all_device_modes):
- err_msg = 'There are host-only and device-only tests in command.'
- if host_tests is False and constants.DEVICELESS_TEST in all_device_modes:
- err_msg = 'There are host-only tests in command.'
- if err_msg:
- logging.error(err_msg)
- metrics_utils.send_exit_event(constants.EXIT_CODE_ERROR, logs=err_msg)
- sys.exit(constants.EXIT_CODE_ERROR)
- # In the case of '$atest <host-only>', we add --host to run on host-side.
- # The option should only be overridden if `host_tests` is not set.
- if not args.host and host_tests is None:
- args.host = bool(constants.DEVICELESS_TEST in all_device_modes)
-
-
-def _validate_tm_tests_exec_mode(args, test_infos):
- """Validate all test execution modes are not in conflict.
-
- Split the tests in Test Mapping files into two groups, device tests and
- deviceless tests running on host. Validate the tests' host setting.
- For device tests, exit the program if any test is found for host-only.
- For deviceless tests, exit the program if any test is found for device-only.
-
- Args:
- args: parsed args object.
- test_info: TestInfo object.
- """
- device_test_infos, host_test_infos = _split_test_mapping_tests(
- test_infos)
- # No need to verify device tests if atest command is set to only run host
- # tests.
- if device_test_infos and not args.host:
- _validate_exec_mode(args, device_test_infos, host_tests=False)
- if host_test_infos:
- _validate_exec_mode(args, host_test_infos, host_tests=True)
-
-
-def _will_run_tests(args):
- """Determine if there are tests to run.
-
- Currently only used by detect_regression to skip the test if just running regression detection.
-
- Args:
- args: parsed args object.
-
- Returns:
- True if there are tests to run, false otherwise.
- """
- return not (args.detect_regression and len(args.detect_regression) == 2)
-
-
-def _has_valid_regression_detection_args(args):
- """Validate regression detection args.
-
- Args:
- args: parsed args object.
-
- Returns:
- True if args are valid
- """
- if args.generate_baseline and args.generate_new_metrics:
- logging.error('Cannot collect both baseline and new metrics at the same time.')
- return False
- if args.detect_regression is not None:
- if not args.detect_regression:
- logging.error('Need to specify at least 1 arg for regression detection.')
- return False
- elif len(args.detect_regression) == 1:
- if args.generate_baseline or args.generate_new_metrics:
- return True
- logging.error('Need to specify --generate-baseline or --generate-new-metrics.')
- return False
- elif len(args.detect_regression) == 2:
- if args.generate_baseline:
- logging.error('Specified 2 metric paths and --generate-baseline, '
- 'either drop --generate-baseline or drop a path')
- return False
- if args.generate_new_metrics:
- logging.error('Specified 2 metric paths and --generate-new-metrics, '
- 'either drop --generate-new-metrics or drop a path')
- return False
- return True
- else:
- logging.error('Specified more than 2 metric paths.')
- return False
- return True
-
-
-def _has_valid_test_mapping_args(args):
- """Validate test mapping args.
-
- Not all args work when running tests in TEST_MAPPING files. Validate the
- args before running the tests.
-
- Args:
- args: parsed args object.
-
- Returns:
- True if args are valid
- """
- is_test_mapping = atest_utils.is_test_mapping(args)
- if not is_test_mapping:
- return True
- options_to_validate = [
- (args.generate_baseline, '--generate-baseline'),
- (args.detect_regression, '--detect-regression'),
- (args.generate_new_metrics, '--generate-new-metrics'),
- ]
- for arg_value, arg in options_to_validate:
- if arg_value:
- logging.error(OPTION_NOT_FOR_TEST_MAPPING, arg)
- return False
- return True
-
-
-def _validate_args(args):
- """Validate setups and args.
-
- Exit the program with error code if any setup or arg is invalid.
-
- Args:
- args: parsed args object.
- """
- if _missing_environment_variables():
- sys.exit(constants.EXIT_CODE_ENV_NOT_SETUP)
- if args.generate_baseline and args.generate_new_metrics:
- logging.error(
- 'Cannot collect both baseline and new metrics at the same time.')
- sys.exit(constants.EXIT_CODE_ERROR)
- if not _has_valid_regression_detection_args(args):
- sys.exit(constants.EXIT_CODE_ERROR)
- if not _has_valid_test_mapping_args(args):
- sys.exit(constants.EXIT_CODE_ERROR)
-
-
-def _print_module_info_from_module_name(mod_info, module_name):
- """print out the related module_info for a module_name.
-
- Args:
- mod_info: ModuleInfo object.
- module_name: A string of module.
-
- Returns:
- True if the module_info is found.
- """
- title_mapping = {
- constants.MODULE_PATH: "Source code path",
- constants.MODULE_INSTALLED: "Installed path",
- constants.MODULE_COMPATIBILITY_SUITES: "Compatibility suite"}
- target_module_info = mod_info.get_module_info(module_name)
- is_module_found = False
- if target_module_info:
- atest_utils.colorful_print(module_name, constants.GREEN)
- for title_key in title_mapping.iterkeys():
- atest_utils.colorful_print("\t%s" % title_mapping[title_key],
- constants.CYAN)
- for info_value in target_module_info[title_key]:
- print("\t\t{}".format(info_value))
- is_module_found = True
- return is_module_found
-
-
-def _print_test_info(mod_info, test_infos):
- """Print the module information from TestInfos.
-
- Args:
- mod_info: ModuleInfo object.
- test_infos: A list of TestInfos.
-
- Returns:
- Always return EXIT_CODE_SUCCESS
- """
- for test_info in test_infos:
- _print_module_info_from_module_name(mod_info, test_info.test_name)
- atest_utils.colorful_print("\tRelated build targets", constants.MAGENTA)
- print("\t\t{}".format(", ".join(test_info.build_targets)))
- for build_target in test_info.build_targets:
- if build_target != test_info.test_name:
- _print_module_info_from_module_name(mod_info, build_target)
- atest_utils.colorful_print("", constants.WHITE)
- return constants.EXIT_CODE_SUCCESS
-
-
-def is_from_test_mapping(test_infos):
- """Check that the test_infos came from TEST_MAPPING files.
-
- Args:
- test_infos: A set of TestInfos.
-
- Returns:
- True if the test infos are from TEST_MAPPING files.
- """
- return list(test_infos)[0].from_test_mapping
-
-
-def _split_test_mapping_tests(test_infos):
- """Split Test Mapping tests into 2 groups: device tests and host tests.
-
- Args:
- test_infos: A set of TestInfos.
-
- Returns:
- A tuple of (device_test_infos, host_test_infos), where
- device_test_infos: A set of TestInfos for tests that require device.
- host_test_infos: A set of TestInfos for tests that do NOT require
- device.
- """
- assert is_from_test_mapping(test_infos)
- host_test_infos = set([info for info in test_infos if info.host])
- device_test_infos = set([info for info in test_infos if not info.host])
- return device_test_infos, host_test_infos
-
-
-# pylint: disable=too-many-locals
-def _run_test_mapping_tests(results_dir, test_infos, extra_args):
- """Run all tests in TEST_MAPPING files.
-
- Args:
- results_dir: String directory to store atest results.
- test_infos: A set of TestInfos.
- extra_args: Dict of extra args to add to test run.
-
- Returns:
- Exit code.
- """
- device_test_infos, host_test_infos = _split_test_mapping_tests(test_infos)
- # `host` option needs to be set to True to run host side tests.
- host_extra_args = extra_args.copy()
- host_extra_args[constants.HOST] = True
- test_runs = [(host_test_infos, host_extra_args, HOST_TESTS)]
- if extra_args.get(constants.HOST):
- atest_utils.colorful_print(
- 'Option `--host` specified. Skip running device tests.',
- constants.MAGENTA)
- else:
- test_runs.append((device_test_infos, extra_args, DEVICE_TESTS))
-
- test_results = []
- for tests, args, test_type in test_runs:
- if not tests:
- continue
- header = RUN_HEADER_FMT % {TEST_COUNT: len(tests), TEST_TYPE: test_type}
- atest_utils.colorful_print(header, constants.MAGENTA)
- logging.debug('\n'.join([str(info) for info in tests]))
- tests_exit_code, reporter = test_runner_handler.run_all_tests(
- results_dir, tests, args, delay_print_summary=True)
- atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
- test_results.append((tests_exit_code, reporter, test_type))
-
- all_tests_exit_code = constants.EXIT_CODE_SUCCESS
- failed_tests = []
- for tests_exit_code, reporter, test_type in test_results:
- atest_utils.colorful_print(
- RESULT_HEADER_FMT % {TEST_TYPE: test_type}, constants.MAGENTA)
- result = tests_exit_code | reporter.print_summary()
- if result:
- failed_tests.append(test_type)
- all_tests_exit_code |= result
-
- # List failed tests at the end as a reminder.
- if failed_tests:
- atest_utils.colorful_print(
- '\n==============================', constants.YELLOW)
- atest_utils.colorful_print(
- '\nFollowing tests failed:', constants.MAGENTA)
- for failure in failed_tests:
- atest_utils.colorful_print(failure, constants.RED)
-
- return all_tests_exit_code
-
-
-def _dry_run(results_dir, extra_args, test_infos):
- """Only print the commands of the target tests rather than running them in actual.
-
- Args:
- results_dir: Path for saving atest logs.
- extra_args: Dict of extra args for test runners to utilize.
- test_infos: A list of TestInfos.
-
- Returns:
- A list of test commands.
- """
- all_run_cmds = []
- for test_runner, tests in test_runner_handler.group_tests_by_test_runners(test_infos):
- runner = test_runner(results_dir)
- run_cmds = runner.generate_run_commands(tests, extra_args)
- for run_cmd in run_cmds:
- all_run_cmds.append(run_cmd)
- print('Would run test via command: %s'
- % (atest_utils.colorize(run_cmd, constants.GREEN)))
- return all_run_cmds
-
-def _print_testable_modules(mod_info, suite):
- """Print the testable modules for a given suite.
-
- Args:
- mod_info: ModuleInfo object.
- suite: A string of suite name.
- """
- testable_modules = mod_info.get_testable_modules(suite)
- print('\n%s' % atest_utils.colorize('%s Testable %s modules' % (
- len(testable_modules), suite), constants.CYAN))
- print('-------')
- for module in sorted(testable_modules):
- print('\t%s' % module)
-
-def _is_inside_android_root():
- """Identify whether the cwd is inside of Android source tree.
-
- Returns:
- False if the cwd is outside of the source tree, True otherwise.
- """
- build_top = os.getenv(constants.ANDROID_BUILD_TOP, ' ')
- return build_top in os.getcwd()
-
-# pylint: disable=too-many-statements
-# pylint: disable=too-many-branches
-# pylint: disable=too-many-return-statements
-def main(argv, results_dir, args):
- """Entry point of atest script.
-
- Args:
- argv: A list of arguments.
- results_dir: A directory which stores the ATest execution information.
- args: An argspace.Namespace class instance holding parsed args.
-
- Returns:
- Exit code.
- """
- _configure_logging(args.verbose)
- _validate_args(args)
- metrics_utils.get_start_time()
- os_pyver = '{}:{}'.format(platform.platform(), platform.python_version())
- metrics.AtestStartEvent(
- command_line=' '.join(argv),
- test_references=args.tests,
- cwd=os.getcwd(),
- os=os_pyver)
- if args.version:
- if os.path.isfile(constants.VERSION_FILE):
- with open(constants.VERSION_FILE) as version_file:
- print(version_file.read())
- return constants.EXIT_CODE_SUCCESS
- if not _is_inside_android_root():
- atest_utils.colorful_print(
- "\nAtest must always work under ${}!".format(
- constants.ANDROID_BUILD_TOP), constants.RED)
- return constants.EXIT_CODE_OUTSIDE_ROOT
- if args.help:
- atest_arg_parser.print_epilog_text()
- return constants.EXIT_CODE_SUCCESS
- if args.history:
- atest_execution_info.print_test_result(constants.ATEST_RESULT_ROOT,
- args.history)
- return constants.EXIT_CODE_SUCCESS
- if args.latest_result:
- atest_execution_info.print_test_result_by_path(
- constants.LATEST_RESULT_FILE)
- return constants.EXIT_CODE_SUCCESS
- mod_info = module_info.ModuleInfo(force_build=args.rebuild_module_info)
- if args.rebuild_module_info:
- _run_extra_tasks(join=True)
- translator = cli_translator.CLITranslator(module_info=mod_info,
- print_cache_msg=not args.clear_cache)
- if args.list_modules:
- _print_testable_modules(mod_info, args.list_modules)
- return constants.EXIT_CODE_SUCCESS
- build_targets = set()
- test_infos = set()
- # Clear cache if user pass -c option
- if args.clear_cache:
- atest_utils.clean_test_info_caches(args.tests)
- if _will_run_tests(args):
- build_targets, test_infos = translator.translate(args)
- if not test_infos:
- return constants.EXIT_CODE_TEST_NOT_FOUND
- if not is_from_test_mapping(test_infos):
- _validate_exec_mode(args, test_infos)
- else:
- _validate_tm_tests_exec_mode(args, test_infos)
- if args.info:
- return _print_test_info(mod_info, test_infos)
- build_targets |= test_runner_handler.get_test_runner_reqs(mod_info,
- test_infos)
- extra_args = get_extra_args(args)
- if args.update_cmd_mapping or args.verify_cmd_mapping:
- args.dry_run = True
- if args.dry_run:
- args.tests.sort()
- dry_run_cmds = _dry_run(results_dir, extra_args, test_infos)
- if args.verify_cmd_mapping:
- try:
- atest_utils.handle_test_runner_cmd(' '.join(args.tests),
- dry_run_cmds,
- do_verification=True)
- except atest_error.DryRunVerificationError as e:
- atest_utils.colorful_print(str(e), constants.RED)
- return constants.EXIT_CODE_VERIFY_FAILURE
- if args.update_cmd_mapping:
- atest_utils.handle_test_runner_cmd(' '.join(args.tests),
- dry_run_cmds)
- return constants.EXIT_CODE_SUCCESS
- if args.detect_regression:
- build_targets |= (regression_test_runner.RegressionTestRunner('')
- .get_test_runner_build_reqs())
- # args.steps will be None if none of -bit set, else list of params set.
- steps = args.steps if args.steps else constants.ALL_STEPS
- if build_targets and constants.BUILD_STEP in steps:
- if constants.TEST_STEP in steps and not args.rebuild_module_info:
- # Run extra tasks along with build step concurrently. Note that
- # Atest won't index targets when only "-b" is given(without -t).
- _run_extra_tasks(join=False)
- # Add module-info.json target to the list of build targets to keep the
- # file up to date.
- build_targets.add(mod_info.module_info_target)
- build_start = time.time()
- success = atest_utils.build(build_targets, verbose=args.verbose)
- metrics.BuildFinishEvent(
- duration=metrics_utils.convert_duration(time.time() - build_start),
- success=success,
- targets=build_targets)
- if not success:
- return constants.EXIT_CODE_BUILD_FAILURE
- elif constants.TEST_STEP not in steps:
- logging.warn('Install step without test step currently not '
- 'supported, installing AND testing instead.')
- steps.append(constants.TEST_STEP)
- tests_exit_code = constants.EXIT_CODE_SUCCESS
- test_start = time.time()
- if constants.TEST_STEP in steps:
- if not is_from_test_mapping(test_infos):
- tests_exit_code, reporter = test_runner_handler.run_all_tests(
- results_dir, test_infos, extra_args)
- atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
- else:
- tests_exit_code = _run_test_mapping_tests(
- results_dir, test_infos, extra_args)
- if args.detect_regression:
- regression_args = _get_regression_detection_args(args, results_dir)
- # TODO(b/110485713): Should not call run_tests here.
- reporter = result_reporter.ResultReporter()
- atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
- tests_exit_code |= regression_test_runner.RegressionTestRunner(
- '').run_tests(
- None, regression_args, reporter)
- metrics.RunTestsFinishEvent(
- duration=metrics_utils.convert_duration(time.time() - test_start))
- preparation_time = atest_execution_info.preparation_time(test_start)
- if preparation_time:
- # Send the preparation time only if it's set.
- metrics.RunnerFinishEvent(
- duration=metrics_utils.convert_duration(preparation_time),
- success=True,
- runner_name=constants.TF_PREPARATION,
- test=[])
- if tests_exit_code != constants.EXIT_CODE_SUCCESS:
- tests_exit_code = constants.EXIT_CODE_TEST_FAILURE
- return tests_exit_code
-
-if __name__ == '__main__':
- RESULTS_DIR = make_test_run_dir()
- ARGS = _parse_args(sys.argv[1:])
- with atest_execution_info.AtestExecutionInfo(sys.argv[1:],
- RESULTS_DIR,
- ARGS) as result_file:
- metrics_base.MetricsBase.tool_name = constants.TOOL_NAME
- EXIT_CODE = main(sys.argv[1:], RESULTS_DIR, ARGS)
- DETECTOR = bug_detector.BugDetector(sys.argv[1:], EXIT_CODE)
- metrics.LocalDetectEvent(
- detect_type=constants.DETECT_TYPE_BUG_DETECTED,
- result=DETECTOR.caught_result)
- if result_file:
- print("Run 'atest --history' to review test result history.")
- sys.exit(EXIT_CODE)
diff --git a/atest-py2/atest_arg_parser.py b/atest-py2/atest_arg_parser.py
deleted file mode 100644
index 6fb2205..0000000
--- a/atest-py2/atest_arg_parser.py
+++ /dev/null
@@ -1,682 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Atest Argument Parser class for atest.
-"""
-
-# pylint: disable=line-too-long
-
-import argparse
-import pydoc
-
-import atest_utils
-import constants
-
-# Constants used for AtestArgParser and EPILOG_TEMPLATE
-HELP_DESC = ('A command line tool that allows users to build, install, and run '
- 'Android tests locally, greatly speeding test re-runs without '
- 'requiring knowledge of Trade Federation test harness command line'
- ' options.')
-
-# Constants used for arg help message(sorted in alphabetic)
-ALL_ABI = 'Set to run tests for all abis.'
-BUILD = 'Run a build.'
-CLEAR_CACHE = 'Wipe out the test_infos cache of the test.'
-COLLECT_TESTS_ONLY = ('Collect a list test cases of the instrumentation tests '
- 'without testing them in real.')
-DISABLE_TEARDOWN = 'Disable test teardown and cleanup.'
-DRY_RUN = 'Dry run atest without building, installing and running tests in real.'
-ENABLE_FILE_PATTERNS = 'Enable FILE_PATTERNS in TEST_MAPPING.'
-HISTORY = ('Show test results in chronological order(with specified number or '
- 'all by default).')
-HOST = ('Run the test completely on the host without a device. '
- '(Note: running a host test that requires a device without '
- '--host will fail.)')
-INCLUDE_SUBDIRS = 'Search TEST_MAPPING files in subdirs as well.'
-INFO = 'Show module information.'
-INSTALL = 'Install an APK.'
-INSTANT = ('Run the instant_app version of the module if the module supports it. '
- 'Note: Nothing\'s going to run if it\'s not an Instant App test and '
- '"--instant" is passed.')
-ITERATION = 'Loop-run tests until the max iteration is reached. (10 by default)'
-LATEST_RESULT = 'Print latest test result.'
-LIST_MODULES = 'List testable modules for the given suite.'
-REBUILD_MODULE_INFO = ('Forces a rebuild of the module-info.json file. '
- 'This may be necessary following a repo sync or '
- 'when writing a new test.')
-RERUN_UNTIL_FAILURE = ('Rerun all tests until a failure occurs or the max '
- 'iteration is reached. (10 by default)')
-RETRY_ANY_FAILURE = ('Rerun failed tests until passed or the max iteration '
- 'is reached. (10 by default)')
-SERIAL = 'The device to run the test on.'
-SHARDING = 'Option to specify sharding count. The default value is 2'
-TEST = ('Run the tests. WARNING: Many test configs force cleanup of device '
- 'after test run. In this case, "-d" must be used in previous test run to '
- 'disable cleanup for "-t" to work. Otherwise, device will need to be '
- 'setup again with "-i".')
-TEST_MAPPING = 'Run tests defined in TEST_MAPPING files.'
-TF_TEMPLATE = ('Add extra tradefed template for ATest suite, '
- 'e.g. atest <test> --tf-template <template_key>=<template_path>')
-TF_DEBUG = 'Enable tradefed debug mode with a specify port. Default value is 10888.'
-UPDATE_CMD_MAPPING = ('Update the test command of input tests. Warning: result '
- 'will be saved under tools/tradefederation/core/atest/test_data.')
-USER_TYPE = 'Run test with specific user type, e.g. atest <test> --user-type secondary_user'
-VERBOSE = 'Display DEBUG level logging.'
-VERIFY_CMD_MAPPING = 'Verify the test command of input tests.'
-VERSION = 'Display version string.'
-WAIT_FOR_DEBUGGER = 'Wait for debugger prior to execution (Instrumentation tests only).'
-
-def _positive_int(value):
- """Verify value by whether or not a positive integer.
-
- Args:
- value: A string of a command-line argument.
-
- Returns:
- int of value, if it is an positive integer.
- Otherwise, raise argparse.ArgumentTypeError.
- """
- err_msg = "invalid positive int value: '%s'" % value
- try:
- converted_value = int(value)
- if converted_value < 1:
- raise argparse.ArgumentTypeError(err_msg)
- return converted_value
- except ValueError:
- raise argparse.ArgumentTypeError(err_msg)
-
-class AtestArgParser(argparse.ArgumentParser):
- """Atest wrapper of ArgumentParser."""
-
- def __init__(self):
- """Initialise an ArgumentParser instance."""
- atest_utils.print_data_collection_notice()
- super(AtestArgParser, self).__init__(
- description=HELP_DESC, add_help=False)
-
- def add_atest_args(self):
- """A function that does ArgumentParser.add_argument()"""
- self.add_argument('tests', nargs='*', help='Tests to build and/or run.')
- # Options that to do with testing.
- self.add_argument('-a', '--all-abi', action='store_true', help=ALL_ABI)
- self.add_argument('-b', '--build', action='append_const', dest='steps',
- const=constants.BUILD_STEP, help=BUILD)
- self.add_argument('-d', '--disable-teardown', action='store_true',
- help=DISABLE_TEARDOWN)
- self.add_argument('--host', action='store_true', help=HOST)
- self.add_argument('-i', '--install', action='append_const',
- dest='steps', const=constants.INSTALL_STEP,
- help=INSTALL)
- self.add_argument('-m', constants.REBUILD_MODULE_INFO_FLAG,
- action='store_true', help=REBUILD_MODULE_INFO)
- self.add_argument('-s', '--serial', help=SERIAL)
- self.add_argument('--sharding', nargs='?', const=2,
- type=_positive_int, default=0,
- help=SHARDING)
- self.add_argument('-t', '--test', action='append_const', dest='steps',
- const=constants.TEST_STEP, help=TEST)
- self.add_argument('-w', '--wait-for-debugger', action='store_true',
- help=WAIT_FOR_DEBUGGER)
-
- # Options related to Test Mapping
- self.add_argument('-p', '--test-mapping', action='store_true',
- help=TEST_MAPPING)
- self.add_argument('--include-subdirs', action='store_true',
- help=INCLUDE_SUBDIRS)
- # TODO(146980564): Remove enable-file-patterns when support
- # file-patterns in TEST_MAPPING by default.
- self.add_argument('--enable-file-patterns', action='store_true',
- help=ENABLE_FILE_PATTERNS)
-
- # Options for information queries and dry-runs:
- # A group of options for dry-runs. They are mutually exclusive
- # in a command line.
- group = self.add_mutually_exclusive_group()
- group.add_argument('--collect-tests-only', action='store_true',
- help=COLLECT_TESTS_ONLY)
- group.add_argument('--dry-run', action='store_true', help=DRY_RUN)
- self.add_argument('-h', '--help', action='store_true',
- help='Print this help message.')
- self.add_argument('--info', action='store_true', help=INFO)
- self.add_argument('-L', '--list-modules', help=LIST_MODULES)
- self.add_argument('-v', '--verbose', action='store_true', help=VERBOSE)
- self.add_argument('-V', '--version', action='store_true', help=VERSION)
-
- # Obsolete options that will be removed soon.
- self.add_argument('--generate-baseline', nargs='?',
- type=int, const=5, default=0,
- help='Generate baseline metrics, run 5 iterations by'
- 'default. Provide an int argument to specify '
- '# iterations.')
- self.add_argument('--generate-new-metrics', nargs='?',
- type=int, const=5, default=0,
- help='Generate new metrics, run 5 iterations by '
- 'default. Provide an int argument to specify '
- '# iterations.')
- self.add_argument('--detect-regression', nargs='*',
- help='Run regression detection algorithm. Supply '
- 'path to baseline and/or new metrics folders.')
-
- # Options related to module parameterization
- self.add_argument('--instant', action='store_true', help=INSTANT)
- self.add_argument('--user-type', help=USER_TYPE)
-
- # Option for dry-run command mapping result and cleaning cache.
- self.add_argument('-c', '--clear-cache', action='store_true',
- help=CLEAR_CACHE)
- self.add_argument('-u', '--update-cmd-mapping', action='store_true',
- help=UPDATE_CMD_MAPPING)
- self.add_argument('-y', '--verify-cmd-mapping', action='store_true',
- help=VERIFY_CMD_MAPPING)
-
- # Options for Tradefed debug mode.
- self.add_argument('-D', '--tf-debug', nargs='?', const=10888,
- type=_positive_int, default=0,
- help=TF_DEBUG)
-
- # Options for Tradefed customization related.
- self.add_argument('--tf-template', action='append',
- help=TF_TEMPLATE)
-
- # A group of options for rerun strategy. They are mutually exclusive
- # in a command line.
- group = self.add_mutually_exclusive_group()
- # Option for rerun tests for the specified number iterations.
- group.add_argument('--iterations', nargs='?',
- type=_positive_int, const=10, default=0,
- metavar='MAX_ITERATIONS', help=ITERATION)
- group.add_argument('--rerun-until-failure', nargs='?',
- type=_positive_int, const=10, default=0,
- metavar='MAX_ITERATIONS', help=RERUN_UNTIL_FAILURE)
- group.add_argument('--retry-any-failure', nargs='?',
- type=_positive_int, const=10, default=0,
- metavar='MAX_ITERATIONS', help=RETRY_ANY_FAILURE)
-
- # A group of options for history. They are mutually exclusive
- # in a command line.
- history_group = self.add_mutually_exclusive_group()
- # History related options.
- history_group.add_argument('--latest-result', action='store_true',
- help=LATEST_RESULT)
- history_group.add_argument('--history', nargs='?', const='99999',
- help=HISTORY)
-
- # This arg actually doesn't consume anything, it's primarily used for
- # the help description and creating custom_args in the NameSpace object.
- self.add_argument('--', dest='custom_args', nargs='*',
- help='Specify custom args for the test runners. '
- 'Everything after -- will be consumed as '
- 'custom args.')
-
- def get_args(self):
- """This method is to get args from actions and return optional args.
-
- Returns:
- A list of optional arguments.
- """
- argument_list = []
- # The output of _get_optional_actions(): [['-t', '--test'], [--info]]
- # return an argument list: ['-t', '--test', '--info']
- for arg in self._get_optional_actions():
- argument_list.extend(arg.option_strings)
- return argument_list
-
-
-def print_epilog_text():
- """Pagination print EPILOG_TEXT.
-
- Returns:
- STDOUT from pydoc.pager().
- """
- epilog_text = EPILOG_TEMPLATE.format(ALL_ABI=ALL_ABI,
- BUILD=BUILD,
- CLEAR_CACHE=CLEAR_CACHE,
- COLLECT_TESTS_ONLY=COLLECT_TESTS_ONLY,
- DISABLE_TEARDOWN=DISABLE_TEARDOWN,
- DRY_RUN=DRY_RUN,
- ENABLE_FILE_PATTERNS=ENABLE_FILE_PATTERNS,
- HELP_DESC=HELP_DESC,
- HISTORY=HISTORY,
- HOST=HOST,
- INCLUDE_SUBDIRS=INCLUDE_SUBDIRS,
- INFO=INFO,
- INSTALL=INSTALL,
- INSTANT=INSTANT,
- ITERATION=ITERATION,
- LATEST_RESULT=LATEST_RESULT,
- LIST_MODULES=LIST_MODULES,
- REBUILD_MODULE_INFO=REBUILD_MODULE_INFO,
- RERUN_UNTIL_FAILURE=RERUN_UNTIL_FAILURE,
- RETRY_ANY_FAILURE=RETRY_ANY_FAILURE,
- SERIAL=SERIAL,
- SHARDING=SHARDING,
- TEST=TEST,
- TEST_MAPPING=TEST_MAPPING,
- TF_DEBUG=TF_DEBUG,
- TF_TEMPLATE=TF_TEMPLATE,
- USER_TYPE=USER_TYPE,
- UPDATE_CMD_MAPPING=UPDATE_CMD_MAPPING,
- VERBOSE=VERBOSE,
- VERSION=VERSION,
- VERIFY_CMD_MAPPING=VERIFY_CMD_MAPPING,
- WAIT_FOR_DEBUGGER=WAIT_FOR_DEBUGGER)
- return pydoc.pager(epilog_text)
-
-
-EPILOG_TEMPLATE = r'''ATEST(1) ASuite/ATest
-
-NAME
- atest - {HELP_DESC}
-
-
-SYNOPSIS
- atest [OPTION]... [TEST_TARGET]... -- [CUSTOM_ARGS]...
-
-
-OPTIONS
- Below arguments are catagorised by features and purposes. Arguments marked with default will apply even the user does not pass it explicitly.
-
- [ Testing ]
- -a, --all-abi
- {ALL_ABI}
-
- -b, --build:
- {BUILD} (default)
-
- -d, --disable-teardown
- {DISABLE_TEARDOWN}
-
- -D --tf-debug
- {TF_DEBUG}
-
- --history
- {HISTORY}
-
- --host
- {HOST}
-
- -i, --install
- {INSTALL} (default)
-
- -m, --rebuild-module-info
- {REBUILD_MODULE_INFO} (default)
-
- -s, --serial
- {SERIAL}
-
- --sharding
- {SHARDING}
-
- -t, --test
- {TEST} (default)
-
- --tf-template
- {TF_TEMPLATE}
-
- -w, --wait-for-debugger
- {WAIT_FOR_DEBUGGER}
-
-
- [ Test Mapping ]
- -p, --test-mapping
- {TEST_MAPPING}
-
- --include-subdirs
- {INCLUDE_SUBDIRS}
-
- --enable-file-patterns
- {ENABLE_FILE_PATTERNS}
-
-
- [ Information/Queries ]
- --collect-tests-only
- {COLLECT_TESTS_ONLY}
-
- --info
- {INFO}
-
- -L, --list-modules
- {LIST_MODULES}
-
- --latest-result
- {LATEST_RESULT}
-
- -v, --verbose
- {VERBOSE}
-
- -V, --version
- {VERSION}
-
-
- [ Dry-Run and Caching ]
- --dry-run
- {DRY_RUN}
-
- -c, --clear-cache
- {CLEAR_CACHE}
-
- -u, --update-cmd-mapping
- {UPDATE_CMD_MAPPING}
-
- -y, --verify-cmd-mapping
- {VERIFY_CMD_MAPPING}
-
-
- [ Module Parameterization ]
- --instant
- {INSTANT}
-
- --user-type
- {USER_TYPE}
-
-
- [ Iteration Testing ]
- --iterations
- {ITERATION}
-
- --rerun-until-failure
- {RERUN_UNTIL_FAILURE}
-
- --retry-any-failure
- {RETRY_ANY_FAILURE}
-
-
-EXAMPLES
- - - - - - - - - -
- IDENTIFYING TESTS
- - - - - - - - - -
-
- The positional argument <tests> should be a reference to one or more of the tests you'd like to run. Multiple tests can be run in one command by separating test references with spaces.
-
- Usage template: atest <reference_to_test_1> <reference_to_test_2>
-
- A <reference_to_test> can be satisfied by the test's MODULE NAME, MODULE:CLASS, CLASS NAME, TF INTEGRATION TEST, FILE PATH or PACKAGE NAME. Explanations and examples of each follow.
-
-
- < MODULE NAME >
-
- Identifying a test by its module name will run the entire module. Input the name as it appears in the LOCAL_MODULE or LOCAL_PACKAGE_NAME variables in that test's Android.mk or Android.bp file.
-
- Note: Use < TF INTEGRATION TEST > to run non-module tests integrated directly into TradeFed.
-
- Examples:
- atest FrameworksServicesTests
- atest CtsJankDeviceTestCases
-
-
- < MODULE:CLASS >
-
- Identifying a test by its class name will run just the tests in that class and not the whole module. MODULE:CLASS is the preferred way to run a single class. MODULE is the same as described above. CLASS is the name of the test class in the .java file. It can either be the fully qualified class name or just the basic name.
-
- Examples:
- atest FrameworksServicesTests:ScreenDecorWindowTests
- atest FrameworksServicesTests:com.android.server.wm.ScreenDecorWindowTests
- atest CtsJankDeviceTestCases:CtsDeviceJankUi
-
-
- < CLASS NAME >
-
- A single class can also be run by referencing the class name without the module name.
-
- Examples:
- atest ScreenDecorWindowTests
- atest CtsDeviceJankUi
-
- However, this will take more time than the equivalent MODULE:CLASS reference, so we suggest using a MODULE:CLASS reference whenever possible. Examples below are ordered by performance from the fastest to the slowest:
-
- Examples:
- atest FrameworksServicesTests:com.android.server.wm.ScreenDecorWindowTests
- atest FrameworksServicesTests:ScreenDecorWindowTests
- atest ScreenDecorWindowTests
-
- < TF INTEGRATION TEST >
-
- To run tests that are integrated directly into TradeFed (non-modules), input the name as it appears in the output of the "tradefed.sh list configs" cmd.
-
- Examples:
- atest example/reboot
- atest native-benchmark
-
-
- < FILE PATH >
-
- Both module-based tests and integration-based tests can be run by inputting the path to their test file or dir as appropriate. A single class can also be run by inputting the path to the class's java file.
-
- Both relative and absolute paths are supported.
-
- Example - 2 ways to run the `CtsJankDeviceTestCases` module via path:
- 1. run module from android <repo root>:
- atest cts/tests/jank/jank
-
- 2. from <android root>/cts/tests/jank:
- atest .
-
- Example - run a specific class within CtsJankDeviceTestCases module from <android repo> root via path:
- atest cts/tests/jank/src/android/jank/cts/ui/CtsDeviceJankUi.java
-
- Example - run an integration test from <android repo> root via path:
- atest tools/tradefederation/contrib/res/config/example/reboot.xml
-
-
- < PACKAGE NAME >
-
- Atest supports searching tests from package name as well.
-
- Examples:
- atest com.android.server.wm
- atest android.jank.cts
-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - -
- SPECIFYING INDIVIDUAL STEPS: BUILD, INSTALL OR RUN
- - - - - - - - - - - - - - - - - - - - - - - - - - -
-
- The -b, -i and -t options allow you to specify which steps you want to run. If none of those options are given, then all steps are run. If any of these options are provided then only the listed steps are run.
-
- Note: -i alone is not currently support and can only be included with -t.
- Both -b and -t can be run alone.
-
- Examples:
- atest -b <test> (just build targets)
- atest -t <test> (run tests only)
- atest -it <test> (install apk and run tests)
- atest -bt <test> (build targets, run tests, but skip installing apk)
-
-
- Atest now has the ability to force a test to skip its cleanup/teardown step. Many tests, e.g. CTS, cleanup the device after the test is run, so trying to rerun your test with -t will fail without having the --disable-teardown parameter. Use -d before -t to skip the test clean up step and test iteratively.
-
- atest -d <test> (disable installing apk and cleanning up device)
- atest -t <test>
-
- Note that -t disables both setup/install and teardown/cleanup of the device. So you can continue to rerun your test with just
-
- atest -t <test>
-
- as many times as you want.
-
-
- - - - - - - - - - - - - -
- RUNNING SPECIFIC METHODS
- - - - - - - - - - - - - -
-
- It is possible to run only specific methods within a test class. To run only specific methods, identify the class in any of the ways supported for identifying a class (MODULE:CLASS, FILE PATH, etc) and then append the name of the method or method using the following template:
-
- <reference_to_class>#<method1>
-
- Multiple methods can be specified with commas:
-
- <reference_to_class>#<method1>,<method2>,<method3>...
-
- Examples:
- atest com.android.server.wm.ScreenDecorWindowTests#testMultipleDecors
-
- atest FrameworksServicesTests:ScreenDecorWindowTests#testFlagChange,testRemoval
-
-
- - - - - - - - - - - - - -
- RUNNING MULTIPLE CLASSES
- - - - - - - - - - - - - -
-
- To run multiple classes, deliminate them with spaces just like you would when running multiple tests. Atest will handle building and running classes in the most efficient way possible, so specifying a subset of classes in a module will improve performance over running the whole module.
-
-
- Examples:
- - two classes in same module:
- atest FrameworksServicesTests:ScreenDecorWindowTests FrameworksServicesTests:DimmerTests
-
- - two classes, different modules:
- atest FrameworksServicesTests:ScreenDecorWindowTests CtsJankDeviceTestCases:CtsDeviceJankUi
-
-
- - - - - - - - - - - -
- RUNNING NATIVE TESTS
- - - - - - - - - - - -
-
- Atest can run native test.
-
- Example:
- - Input tests:
- atest -a libinput_tests inputflinger_tests
-
- Use -a|--all-abi to run the tests for all available device architectures, which in this example is armeabi-v7a (ARM 32-bit) and arm64-v8a (ARM 64-bit).
-
- To select a specific native test to run, use colon (:) to specify the test name and hashtag (#) to further specify an individual method. For example, for the following test definition:
-
- TEST_F(InputDispatcherTest, InjectInputEvent_ValidatesKeyEvents)
-
- You can run the entire test using:
-
- atest inputflinger_tests:InputDispatcherTest
-
- or an individual test method using:
-
- atest inputflinger_tests:InputDispatcherTest#InjectInputEvent_ValidatesKeyEvents
-
-
- - - - - - - - - - - - - - -
- RUNNING TESTS IN ITERATION
- - - - - - - - - - - - - - -
-
- To run tests in iterations, simply pass --iterations argument. No matter pass or fail, atest won't stop testing until the max iteration is reached.
-
- Example:
- atest <test> --iterations # 10 iterations(by default).
- atest <test> --iterations 5 # run <test> 5 times.
-
- Two approaches that assist users to detect flaky tests:
-
- 1) Run all tests until a failure occurs or the max iteration is reached.
-
- Example:
- - 10 iterations(by default).
- atest <test> --rerun-until-failure
- - stop when failed or reached the 20th run.
- atest <test> --rerun-until-failure 20
-
- 2) Run failed tests until passed or the max iteration is reached.
-
- Example:
- - 10 iterations(by default).
- atest <test> --retry-any-failure
- - stop when passed or reached the 20th run.
- atest <test> --retry-any-failure 20
-
-
- - - - - - - - - - - - - - - - -
- REGRESSION DETECTION (obsolute)
- - - - - - - - - - - - - - - - -
-
- ********************** Warning **********************
- Please STOP using arguments below -- they are obsolete and will be removed in a near future:
- --detect-regression
- --generate-baseline
- --generate-new-metrics
-
- Please check RUNNING TESTS IN ITERATION out for alternatives.
- ******************************************************
-
- Generate pre-patch or post-patch metrics without running regression detection:
-
- Example:
- atest <test> --generate-baseline <optional iter>
- atest <test> --generate-new-metrics <optional iter>
-
- Local regression detection can be run in three options:
-
- 1) Provide a folder containing baseline (pre-patch) metrics (generated previously). Atest will run the tests n (default 5) iterations, generate a new set of post-patch metrics, and compare those against existing metrics.
-
- Example:
- atest <test> --detect-regression </path/to/baseline> --generate-new-metrics <optional iter>
-
- 2) Provide a folder containing post-patch metrics (generated previously). Atest will run the tests n (default 5) iterations, generate a new set of pre-patch metrics, and compare those against those provided. Note: the developer needs to revert the device/tests to pre-patch state to generate baseline metrics.
-
- Example:
- atest <test> --detect-regression </path/to/new> --generate-baseline <optional iter>
-
- 3) Provide 2 folders containing both pre-patch and post-patch metrics. Atest will run no tests but the regression detection algorithm.
-
- Example:
- atest --detect-regression </path/to/baseline> </path/to/new>
-
-
- - - - - - - - - - - - -
- TESTS IN TEST MAPPING
- - - - - - - - - - - - -
-
- Atest can run tests in TEST_MAPPING files:
-
- 1) Run presubmit tests in TEST_MAPPING files in current and parent
- directories. You can also specify a target directory.
-
- Example:
- atest (run presubmit tests in TEST_MAPPING files in current and parent directories)
- atest --test-mapping </path/to/project>
- (run presubmit tests in TEST_MAPPING files in </path/to/project> and its parent directories)
-
- 2) Run a specified test group in TEST_MAPPING files.
-
- Example:
- atest :postsubmit
- (run postsubmit tests in TEST_MAPPING files in current and parent directories)
- atest :all
- (Run tests from all groups in TEST_MAPPING files)
- atest --test-mapping </path/to/project>:postsubmit
- (run postsubmit tests in TEST_MAPPING files in </path/to/project> and its parent directories)
-
- 3) Run tests in TEST_MAPPING files including sub directories
-
- By default, atest will only search for tests in TEST_MAPPING files in current (or given directory) and its parent directories. If you want to run tests in TEST_MAPPING files in the sub-directories, you can use option --include-subdirs to force atest to include those tests too.
-
- Example:
- atest --include-subdirs [optional </path/to/project>:<test_group_name>]
- (run presubmit tests in TEST_MAPPING files in current, sub and parent directories)
- A path can be provided optionally if you want to search for tests in a given directory, with optional test group name. By default, the test group is presubmit.
-
-
- - - - - - - - - - - - - - -
- ADDITIONAL ARGS TO TRADEFED
- - - - - - - - - - - - - - -
-
- When trying to pass custom arguments for the test runners, everything after '--'
- will be consumed as custom args.
-
- Example:
- atest -v <test> -- <custom_args1> <custom_args2>
-
-
- 2019-12-19
-'''
diff --git a/atest-py2/atest_arg_parser_unittest.py b/atest-py2/atest_arg_parser_unittest.py
deleted file mode 100755
index fd4c321..0000000
--- a/atest-py2/atest_arg_parser_unittest.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittests for atest_arg_parser."""
-
-import unittest
-
-import atest_arg_parser
-
-
-class AtestArgParserUnittests(unittest.TestCase):
- """Unit tests for atest_arg_parser.py"""
-
- def test_get_args(self):
- """Test get_args(): flatten a nested list. """
- parser = atest_arg_parser.AtestArgParser()
- parser.add_argument('-t', '--test', help='Run the tests.')
- parser.add_argument('-b', '--build', help='Run a build.')
- parser.add_argument('--generate-baseline', help='Generate a baseline.')
- test_args = ['-t', '--test',
- '-b', '--build',
- '--generate-baseline',
- '-h', '--help'].sort()
- self.assertEqual(test_args, parser.get_args().sort())
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest-py2/atest_completion.sh b/atest-py2/atest_completion.sh
deleted file mode 100644
index 3ac8e0d..0000000
--- a/atest-py2/atest_completion.sh
+++ /dev/null
@@ -1,159 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-ATEST_REL_DIR="tools/tradefederation/core/atest"
-
-_fetch_testable_modules() {
- [[ -z $ANDROID_BUILD_TOP ]] && return 0
- export ATEST_DIR="$ANDROID_BUILD_TOP/$ATEST_REL_DIR"
- $PYTHON - << END
-import os
-import pickle
-import sys
-
-sys.path.append(os.getenv('ATEST_DIR'))
-import constants
-
-if os.path.isfile(constants.MODULE_INDEX):
- with open(constants.MODULE_INDEX, 'rb') as cache:
- try:
- print("\n".join(pickle.load(cache, encoding="utf-8")))
- except:
- print("\n".join(pickle.load(cache)))
-else:
- print("")
-END
- unset ATEST_DIR
-}
-
-# This function invoke get_args() and return each item
-# of the list for tab completion candidates.
-_fetch_atest_args() {
- [[ -z $ANDROID_BUILD_TOP ]] && return 0
- export ATEST_DIR="$ANDROID_BUILD_TOP/$ATEST_REL_DIR"
- $PYTHON - << END
-import os
-import sys
-
-atest_dir = os.path.join(os.getenv('ATEST_DIR'))
-sys.path.append(atest_dir)
-
-import atest_arg_parser
-
-parser = atest_arg_parser.AtestArgParser()
-parser.add_atest_args()
-print("\n".join(parser.get_args()))
-END
- unset ATEST_DIR
-}
-
-# This function returns devices recognised by adb.
-_fetch_adb_devices() {
- while read dev; do echo $dev | awk '{print $1}'; done < <(adb devices | egrep -v "^List|^$"||true)
-}
-
-# This function returns all paths contain TEST_MAPPING.
-_fetch_test_mapping_files() {
- [[ -z $ANDROID_BUILD_TOP ]] && return 0
- find -maxdepth 5 -type f -name TEST_MAPPING |sed 's/^.\///g'| xargs dirname 2>/dev/null
-}
-
-# The main tab completion function.
-_atest() {
- # Not support completion on Darwin since the bash version of it
- # is too old to fully support useful built-in commands/functions
- # such as compopt, _get_comp_words_by_ref and __ltrim_colon_completions.
- [[ "$(uname -s)" == "Darwin" ]] && return 0
-
- local cur prev
- COMPREPLY=()
- cur="${COMP_WORDS[COMP_CWORD]}"
- prev="${COMP_WORDS[COMP_CWORD-1]}"
- _get_comp_words_by_ref -n : cur prev || true
-
- case "$cur" in
- -*)
- COMPREPLY=($(compgen -W "$(_fetch_atest_args)" -- $cur))
- ;;
- */*)
- ;;
- *)
- local candidate_args=$(ls; _fetch_testable_modules)
- COMPREPLY=($(compgen -W "$candidate_args" -- $cur))
- ;;
- esac
-
- case "$prev" in
- --iterations|--rerun-until-failure|--retry-any-failure)
- COMPREPLY=(10) ;;
- --list-modules|-L)
- # TODO: genetate the list automately when the API is available.
- COMPREPLY=($(compgen -W "cts vts" -- $cur)) ;;
- --serial|-s)
- local adb_devices="$(_fetch_adb_devices)"
- if [ -n "$adb_devices" ]; then
- COMPREPLY=($(compgen -W "$(_fetch_adb_devices)" -- $cur))
- else
- # Don't complete files/dirs when there'is no devices.
- compopt -o nospace
- COMPREPLY=("")
- fi ;;
- --test-mapping|-p)
- local mapping_files="$(_fetch_test_mapping_files)"
- if [ -n "$mapping_files" ]; then
- COMPREPLY=($(compgen -W "$mapping_files" -- $cur))
- else
- # Don't complete files/dirs when TEST_MAPPING wasn't found.
- compopt -o nospace
- COMPREPLY=("")
- fi ;;
- esac
- __ltrim_colon_completions "$cur" "$prev" || true
- return 0
-}
-
-function _atest_main() {
- # Only use this in interactive mode.
- # Warning: below check must be "return", not "exit". "exit" won't break the
- # build in interactive shell(e.g VM), but will result in build breakage in
- # non-interactive shell(e.g docker container); therefore, using "return"
- # adapts both conditions.
- [[ ! $- =~ 'i' ]] && return 0
-
- # Use Py2 as the default interpreter. This script is aiming for being
- # compatible with both Py2 and Py3.
- if [ -x "$(which python)" ]; then
- PYTHON=$(which python)
- elif [ -x "$(which python3)" ]; then
- PYTHON=$(which python3)
- else
- PYTHON="/usr/bin/env python"
- fi
-
- # Complete file/dir name first by using option "nosort".
- # BASH version <= 4.3 doesn't have nosort option.
- # Note that nosort has no effect for zsh.
- local _atest_comp_options="-o default -o nosort"
- local _atest_executables=(atest atest-dev atest-src)
- for exec in "${_atest_executables[*]}"; do
- complete -F _atest $_atest_comp_options $exec 2>/dev/null || \
- complete -F _atest -o default $exec
- done
-
- # Install atest-src for the convenience of debugging.
- local atest_src="$(gettop)/$ATEST_REL_DIR/atest.py"
- [[ -f "$atest_src" ]] && alias atest-src="$atest_src"
-}
-
-_atest_main
diff --git a/atest-py2/atest_decorator.py b/atest-py2/atest_decorator.py
deleted file mode 100644
index 6f171df..0000000
--- a/atest-py2/atest_decorator.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2019, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-ATest decorator.
-"""
-
-def static_var(varname, value):
- """Decorator to cache static variable.
-
- Args:
- varname: Variable name you want to use.
- value: Variable value.
-
- Returns: decorator function.
- """
-
- def fun_var_decorate(func):
- """Set the static variable in a function."""
- setattr(func, varname, value)
- return func
- return fun_var_decorate
diff --git a/atest-py2/atest_enum.py b/atest-py2/atest_enum.py
deleted file mode 100644
index f4fb656..0000000
--- a/atest-py2/atest_enum.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Atest custom enum class.
-"""
-
-class AtestEnum(tuple):
- """enum library isn't a Python 2.7 built-in, so roll our own."""
- __getattr__ = tuple.index
diff --git a/atest-py2/atest_error.py b/atest-py2/atest_error.py
deleted file mode 100644
index 7ab8b5f..0000000
--- a/atest-py2/atest_error.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-atest exceptions.
-"""
-
-
-class UnsupportedModuleTestError(Exception):
- """Error raised when we find a module that we don't support."""
-
-class TestDiscoveryException(Exception):
- """Base Exception for issues with test discovery."""
-
-class NoTestFoundError(TestDiscoveryException):
- """Raised when no tests are found."""
-
-class TestWithNoModuleError(TestDiscoveryException):
- """Raised when test files have no parent module directory."""
-
-class MissingPackageNameError(TestDiscoveryException):
- """Raised when the test class java file does not contain a package name."""
-
-class TooManyMethodsError(TestDiscoveryException):
- """Raised when input string contains more than one # character."""
-
-class MethodWithoutClassError(TestDiscoveryException):
- """Raised when method is appended via # but no class file specified."""
-
-class UnknownTestRunnerError(Exception):
- """Raised when an unknown test runner is specified."""
-
-class NoTestRunnerName(Exception):
- """Raised when Test Runner class var NAME isn't defined."""
-
-class NoTestRunnerExecutable(Exception):
- """Raised when Test Runner class var EXECUTABLE isn't defined."""
-
-class HostEnvCheckFailed(Exception):
- """Raised when Test Runner's host env check fails."""
-
-class ShouldNeverBeCalledError(Exception):
- """Raised when something is called when it shouldn't, used for testing."""
-
-class FatalIncludeError(TestDiscoveryException):
- """Raised if expanding include tag fails."""
-
-class MissingCCTestCaseError(TestDiscoveryException):
- """Raised when the cc file does not contain a test case class."""
-
-class XmlNotExistError(TestDiscoveryException):
- """Raised when the xml file does not exist."""
-
-class DryRunVerificationError(Exception):
- """Base Exception if verification fail."""
diff --git a/atest-py2/atest_execution_info.py b/atest-py2/atest_execution_info.py
deleted file mode 100644
index 0c67e19..0000000
--- a/atest-py2/atest_execution_info.py
+++ /dev/null
@@ -1,329 +0,0 @@
-# Copyright 2019, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-ATest execution info generator.
-"""
-
-from __future__ import print_function
-
-import glob
-import logging
-import json
-import os
-import sys
-
-import atest_utils as au
-import constants
-
-from metrics import metrics_utils
-
-_ARGS_KEY = 'args'
-_STATUS_PASSED_KEY = 'PASSED'
-_STATUS_FAILED_KEY = 'FAILED'
-_STATUS_IGNORED_KEY = 'IGNORED'
-_SUMMARY_KEY = 'summary'
-_TOTAL_SUMMARY_KEY = 'total_summary'
-_TEST_RUNNER_KEY = 'test_runner'
-_TEST_NAME_KEY = 'test_name'
-_TEST_TIME_KEY = 'test_time'
-_TEST_DETAILS_KEY = 'details'
-_TEST_RESULT_NAME = 'test_result'
-_EXIT_CODE_ATTR = 'EXIT_CODE'
-_MAIN_MODULE_KEY = '__main__'
-_UUID_LEN = 30
-_RESULT_LEN = 35
-_COMMAND_LEN = 50
-_LOGCAT_FMT = '{}/log/invocation_*/{}*logcat-on-failure*'
-
-_SUMMARY_MAP_TEMPLATE = {_STATUS_PASSED_KEY : 0,
- _STATUS_FAILED_KEY : 0,
- _STATUS_IGNORED_KEY : 0,}
-
-PREPARE_END_TIME = None
-
-
-def preparation_time(start_time):
- """Return the preparation time.
-
- Args:
- start_time: The time.
-
- Returns:
- The preparation time if PREPARE_END_TIME is set, None otherwise.
- """
- return PREPARE_END_TIME - start_time if PREPARE_END_TIME else None
-
-
-def symlink_latest_result(test_result_dir):
- """Make the symbolic link to latest result.
-
- Args:
- test_result_dir: A string of the dir path.
- """
- symlink = os.path.join(constants.ATEST_RESULT_ROOT, 'LATEST')
- if os.path.exists(symlink) or os.path.islink(symlink):
- os.remove(symlink)
- os.symlink(test_result_dir, symlink)
-
-
-def print_test_result(root, history_arg):
- """Make a list of latest n test result.
-
- Args:
- root: A string of the test result root path.
- history_arg: A string of an integer or uuid. If it's an integer string,
- the number of lines of test result will be given; else it
- will be treated a uuid and print test result accordingly
- in detail.
- """
- if not history_arg.isdigit():
- path = os.path.join(constants.ATEST_RESULT_ROOT, history_arg,
- 'test_result')
- print_test_result_by_path(path)
- return
- target = '%s/20*_*_*' % root
- paths = glob.glob(target)
- paths.sort(reverse=True)
- print('{:-^{uuid_len}} {:-^{result_len}} {:-^{command_len}}'
- .format('uuid', 'result', 'command',
- uuid_len=_UUID_LEN,
- result_len=_RESULT_LEN,
- command_len=_COMMAND_LEN))
- for path in paths[0: int(history_arg)+1]:
- result_path = os.path.join(path, 'test_result')
- if os.path.isfile(result_path):
- try:
- with open(result_path) as json_file:
- result = json.load(json_file)
- total_summary = result.get(_TOTAL_SUMMARY_KEY, {})
- summary_str = ', '.join([k+':'+str(v)
- for k, v in total_summary.items()])
- print('{:<{uuid_len}} {:<{result_len}} {:<{command_len}}'
- .format(os.path.basename(path),
- summary_str,
- 'atest '+result.get(_ARGS_KEY, ''),
- uuid_len=_UUID_LEN,
- result_len=_RESULT_LEN,
- command_len=_COMMAND_LEN))
- except ValueError:
- pass
-
-
-def print_test_result_by_path(path):
- """Print latest test result.
-
- Args:
- path: A string of test result path.
- """
- if os.path.isfile(path):
- with open(path) as json_file:
- result = json.load(json_file)
- print("\natest {}".format(result.get(_ARGS_KEY, '')))
- print('\nTotal Summary:\n--------------')
- total_summary = result.get(_TOTAL_SUMMARY_KEY, {})
- print(', '.join([(k+':'+str(v))
- for k, v in total_summary.items()]))
- fail_num = total_summary.get(_STATUS_FAILED_KEY)
- if fail_num > 0:
- message = '%d test failed' % fail_num
- print('\n')
- print(au.colorize(message, constants.RED))
- print('-' * len(message))
- test_runner = result.get(_TEST_RUNNER_KEY, {})
- for runner_name in test_runner.keys():
- test_dict = test_runner.get(runner_name, {})
- for test_name in test_dict:
- test_details = test_dict.get(test_name, {})
- for fail in test_details.get(_STATUS_FAILED_KEY):
- print(au.colorize('{}'.format(
- fail.get(_TEST_NAME_KEY)), constants.RED))
- failure_files = glob.glob(_LOGCAT_FMT.format(
- os.path.dirname(path), fail.get(_TEST_NAME_KEY)
- ))
- if failure_files:
- print('{} {}'.format(
- au.colorize('LOGCAT-ON-FAILURES:',
- constants.CYAN),
- failure_files[0]))
- print('{} {}'.format(
- au.colorize('STACKTRACE:\n', constants.CYAN),
- fail.get(_TEST_DETAILS_KEY)))
-
-
-def has_non_test_options(args):
- """
- check whether non-test option in the args.
-
- Args:
- args: An argspace.Namespace class instance holding parsed args.
-
- Returns:
- True, if args has at least one non-test option.
- False, otherwise.
- """
- return (args.collect_tests_only
- or args.dry_run
- or args.help
- or args.history
- or args.info
- or args.version
- or args.latest_result)
-
-
-class AtestExecutionInfo(object):
- """Class that stores the whole test progress information in JSON format.
-
- ----
- For example, running command
- atest hello_world_test HelloWorldTest
-
- will result in storing the execution detail in JSON:
- {
- "args": "hello_world_test HelloWorldTest",
- "test_runner": {
- "AtestTradefedTestRunner": {
- "hello_world_test": {
- "FAILED": [
- {"test_time": "(5ms)",
- "details": "Hello, Wor...",
- "test_name": "HelloWorldTest#PrintHelloWorld"}
- ],
- "summary": {"FAILED": 1, "PASSED": 0, "IGNORED": 0}
- },
- "HelloWorldTests": {
- "PASSED": [
- {"test_time": "(27ms)",
- "details": null,
- "test_name": "...HelloWorldTest#testHalloWelt"},
- {"test_time": "(1ms)",
- "details": null,
- "test_name": "....HelloWorldTest#testHelloWorld"}
- ],
- "summary": {"FAILED": 0, "PASSED": 2, "IGNORED": 0}
- }
- }
- },
- "total_summary": {"FAILED": 1, "PASSED": 2, "IGNORED": 0}
- }
- """
-
- result_reporters = []
-
- def __init__(self, args, work_dir, args_ns):
- """Initialise an AtestExecutionInfo instance.
-
- Args:
- args: Command line parameters.
- work_dir: The directory for saving information.
- args_ns: An argspace.Namespace class instance holding parsed args.
-
- Returns:
- A json format string.
- """
- self.args = args
- self.work_dir = work_dir
- self.result_file = None
- self.args_ns = args_ns
-
- def __enter__(self):
- """Create and return information file object."""
- full_file_name = os.path.join(self.work_dir, _TEST_RESULT_NAME)
- try:
- self.result_file = open(full_file_name, 'w')
- except IOError:
- logging.error('Cannot open file %s', full_file_name)
- return self.result_file
-
- def __exit__(self, exit_type, value, traceback):
- """Write execution information and close information file."""
- if self.result_file:
- self.result_file.write(AtestExecutionInfo.
- _generate_execution_detail(self.args))
- self.result_file.close()
- if not has_non_test_options(self.args_ns):
- symlink_latest_result(self.work_dir)
- main_module = sys.modules.get(_MAIN_MODULE_KEY)
- main_exit_code = getattr(main_module, _EXIT_CODE_ATTR,
- constants.EXIT_CODE_ERROR)
- if main_exit_code == constants.EXIT_CODE_SUCCESS:
- metrics_utils.send_exit_event(main_exit_code)
- else:
- metrics_utils.handle_exc_and_send_exit_event(main_exit_code)
-
- @staticmethod
- def _generate_execution_detail(args):
- """Generate execution detail.
-
- Args:
- args: Command line parameters that you want to save.
-
- Returns:
- A json format string.
- """
- info_dict = {_ARGS_KEY: ' '.join(args)}
- try:
- AtestExecutionInfo._arrange_test_result(
- info_dict,
- AtestExecutionInfo.result_reporters)
- return json.dumps(info_dict)
- except ValueError as err:
- logging.warn('Parsing test result failed due to : %s', err)
-
- @staticmethod
- def _arrange_test_result(info_dict, reporters):
- """Append test result information in given dict.
-
- Arrange test information to below
- "test_runner": {
- "test runner name": {
- "test name": {
- "FAILED": [
- {"test time": "",
- "details": "",
- "test name": ""}
- ],
- "summary": {"FAILED": 0, "PASSED": 0, "IGNORED": 0}
- },
- },
- "total_summary": {"FAILED": 0, "PASSED": 0, "IGNORED": 0}
-
- Args:
- info_dict: A dict you want to add result information in.
- reporters: A list of result_reporter.
-
- Returns:
- A dict contains test result information data.
- """
- info_dict[_TEST_RUNNER_KEY] = {}
- for reporter in reporters:
- for test in reporter.all_test_results:
- runner = info_dict[_TEST_RUNNER_KEY].setdefault(test.runner_name, {})
- group = runner.setdefault(test.group_name, {})
- result_dict = {_TEST_NAME_KEY : test.test_name,
- _TEST_TIME_KEY : test.test_time,
- _TEST_DETAILS_KEY : test.details}
- group.setdefault(test.status, []).append(result_dict)
-
- total_test_group_summary = _SUMMARY_MAP_TEMPLATE.copy()
- for runner in info_dict[_TEST_RUNNER_KEY]:
- for group in info_dict[_TEST_RUNNER_KEY][runner]:
- group_summary = _SUMMARY_MAP_TEMPLATE.copy()
- for status in info_dict[_TEST_RUNNER_KEY][runner][group]:
- count = len(info_dict[_TEST_RUNNER_KEY][runner][group][status])
- if _SUMMARY_MAP_TEMPLATE.has_key(status):
- group_summary[status] = count
- total_test_group_summary[status] += count
- info_dict[_TEST_RUNNER_KEY][runner][group][_SUMMARY_KEY] = group_summary
- info_dict[_TOTAL_SUMMARY_KEY] = total_test_group_summary
- return info_dict
diff --git a/atest-py2/atest_execution_info_unittest.py b/atest-py2/atest_execution_info_unittest.py
deleted file mode 100755
index f638f82..0000000
--- a/atest-py2/atest_execution_info_unittest.py
+++ /dev/null
@@ -1,164 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2019, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittest for atest_execution_info."""
-
-import time
-import unittest
-
-from test_runners import test_runner_base
-import atest_execution_info as aei
-import result_reporter
-
-RESULT_TEST_TEMPLATE = test_runner_base.TestResult(
- runner_name='someRunner',
- group_name='someModule',
- test_name='someClassName#sostName',
- status=test_runner_base.PASSED_STATUS,
- details=None,
- test_count=1,
- test_time='(10ms)',
- runner_total=None,
- group_total=2,
- additional_info={},
- test_run_name='com.android.UnitTests'
-)
-
-# pylint: disable=protected-access
-class AtestRunInfoUnittests(unittest.TestCase):
- """Unit tests for atest_execution_info.py"""
-
- def test_arrange_test_result_one_module(self):
- """Test _arrange_test_result method with only one module."""
- pass_1 = self._create_test_result(status=test_runner_base.PASSED_STATUS)
- pass_2 = self._create_test_result(status=test_runner_base.PASSED_STATUS)
- pass_3 = self._create_test_result(status=test_runner_base.PASSED_STATUS)
- fail_1 = self._create_test_result(status=test_runner_base.FAILED_STATUS)
- fail_2 = self._create_test_result(status=test_runner_base.FAILED_STATUS)
- ignore_1 = self._create_test_result(status=test_runner_base.IGNORED_STATUS)
- reporter_1 = result_reporter.ResultReporter()
- reporter_1.all_test_results.extend([pass_1, pass_2, pass_3])
- reporter_2 = result_reporter.ResultReporter()
- reporter_2.all_test_results.extend([fail_1, fail_2, ignore_1])
- info_dict = {}
- aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])
- expect_summary = {aei._STATUS_IGNORED_KEY : 1,
- aei._STATUS_FAILED_KEY : 2,
- aei._STATUS_PASSED_KEY : 3}
- self.assertEqual(expect_summary, info_dict[aei._TOTAL_SUMMARY_KEY])
-
- def test_arrange_test_result_multi_module(self):
- """Test _arrange_test_result method with multi module."""
- group_a_pass_1 = self._create_test_result(group_name='grpup_a',
- status=test_runner_base.PASSED_STATUS)
- group_b_pass_1 = self._create_test_result(group_name='grpup_b',
- status=test_runner_base.PASSED_STATUS)
- group_c_pass_1 = self._create_test_result(group_name='grpup_c',
- status=test_runner_base.PASSED_STATUS)
- group_b_fail_1 = self._create_test_result(group_name='grpup_b',
- status=test_runner_base.FAILED_STATUS)
- group_c_fail_1 = self._create_test_result(group_name='grpup_c',
- status=test_runner_base.FAILED_STATUS)
- group_c_ignore_1 = self._create_test_result(group_name='grpup_c',
- status=test_runner_base.IGNORED_STATUS)
- reporter_1 = result_reporter.ResultReporter()
- reporter_1.all_test_results.extend([group_a_pass_1, group_b_pass_1, group_c_pass_1])
- reporter_2 = result_reporter.ResultReporter()
- reporter_2.all_test_results.extend([group_b_fail_1, group_c_fail_1, group_c_ignore_1])
-
- info_dict = {}
- aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])
- expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0,
- aei._STATUS_FAILED_KEY : 0,
- aei._STATUS_PASSED_KEY : 1}
- self.assertEqual(
- expect_group_a_summary,
- info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_a'][aei._SUMMARY_KEY])
-
- expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 0,
- aei._STATUS_FAILED_KEY : 1,
- aei._STATUS_PASSED_KEY : 1}
- self.assertEqual(
- expect_group_b_summary,
- info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_b'][aei._SUMMARY_KEY])
-
- expect_group_c_summary = {aei._STATUS_IGNORED_KEY : 1,
- aei._STATUS_FAILED_KEY : 1,
- aei._STATUS_PASSED_KEY : 1}
- self.assertEqual(
- expect_group_c_summary,
- info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_c'][aei._SUMMARY_KEY])
-
- expect_total_summary = {aei._STATUS_IGNORED_KEY : 1,
- aei._STATUS_FAILED_KEY : 2,
- aei._STATUS_PASSED_KEY : 3}
- self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY])
-
- def test_preparation_time(self):
- """Test preparation_time method."""
- start_time = time.time()
- aei.PREPARE_END_TIME = None
- self.assertTrue(aei.preparation_time(start_time) is None)
- aei.PREPARE_END_TIME = time.time()
- self.assertFalse(aei.preparation_time(start_time) is None)
-
- def test_arrange_test_result_multi_runner(self):
- """Test _arrange_test_result method with multi runner."""
- runner_a_pass_1 = self._create_test_result(runner_name='runner_a',
- status=test_runner_base.PASSED_STATUS)
- runner_a_pass_2 = self._create_test_result(runner_name='runner_a',
- status=test_runner_base.PASSED_STATUS)
- runner_a_pass_3 = self._create_test_result(runner_name='runner_a',
- status=test_runner_base.PASSED_STATUS)
- runner_b_fail_1 = self._create_test_result(runner_name='runner_b',
- status=test_runner_base.FAILED_STATUS)
- runner_b_fail_2 = self._create_test_result(runner_name='runner_b',
- status=test_runner_base.FAILED_STATUS)
- runner_b_ignore_1 = self._create_test_result(runner_name='runner_b',
- status=test_runner_base.IGNORED_STATUS)
-
- reporter_1 = result_reporter.ResultReporter()
- reporter_1.all_test_results.extend([runner_a_pass_1, runner_a_pass_2, runner_a_pass_3])
- reporter_2 = result_reporter.ResultReporter()
- reporter_2.all_test_results.extend([runner_b_fail_1, runner_b_fail_2, runner_b_ignore_1])
- info_dict = {}
- aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])
- expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0,
- aei._STATUS_FAILED_KEY : 0,
- aei._STATUS_PASSED_KEY : 3}
- self.assertEqual(
- expect_group_a_summary,
- info_dict[aei._TEST_RUNNER_KEY]['runner_a']['someModule'][aei._SUMMARY_KEY])
-
- expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 1,
- aei._STATUS_FAILED_KEY : 2,
- aei._STATUS_PASSED_KEY : 0}
- self.assertEqual(
- expect_group_b_summary,
- info_dict[aei._TEST_RUNNER_KEY]['runner_b']['someModule'][aei._SUMMARY_KEY])
-
- expect_total_summary = {aei._STATUS_IGNORED_KEY : 1,
- aei._STATUS_FAILED_KEY : 2,
- aei._STATUS_PASSED_KEY : 3}
- self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY])
-
- def _create_test_result(self, **kwargs):
- """A Helper to create TestResult"""
- test_info = test_runner_base.TestResult(**RESULT_TEST_TEMPLATE._asdict())
- return test_info._replace(**kwargs)
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/atest-py2/atest_integration_tests.py b/atest-py2/atest_integration_tests.py
deleted file mode 100755
index 3287c1b..0000000
--- a/atest-py2/atest_integration_tests.py
+++ /dev/null
@@ -1,153 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-ATest Integration Test Class.
-
-The purpose is to prevent potential side-effects from breaking ATest at the
-early stage while landing CLs with potential side-effects.
-
-It forks a subprocess with ATest commands to validate if it can pass all the
-finding, running logic of the python code, and waiting for TF to exit properly.
- - When running with ROBOLECTRIC tests, it runs without TF, and will exit
- the subprocess with the message "All tests passed"
- - If FAIL, it means something breaks ATest unexpectedly!
-"""
-
-from __future__ import print_function
-
-import os
-import subprocess
-import sys
-import tempfile
-import time
-import unittest
-
-_TEST_RUN_DIR_PREFIX = 'atest_integration_tests_%s_'
-_LOG_FILE = 'integration_tests.log'
-_FAILED_LINE_LIMIT = 50
-_INTEGRATION_TESTS = 'INTEGRATION_TESTS'
-_EXIT_TEST_FAILED = 1
-
-
-class ATestIntegrationTest(unittest.TestCase):
- """ATest Integration Test Class."""
- NAME = 'ATestIntegrationTest'
- EXECUTABLE = 'atest'
- OPTIONS = ''
- _RUN_CMD = '{exe} {options} {test}'
- _PASSED_CRITERIA = ['will be rescheduled', 'All tests passed']
-
- def setUp(self):
- """Set up stuff for testing."""
- self.full_env_vars = os.environ.copy()
- self.test_passed = False
- self.log = []
-
- def run_test(self, testcase):
- """Create a subprocess to execute the test command.
-
- Strategy:
- Fork a subprocess to wait for TF exit properly, and log the error
- if the exit code isn't 0.
-
- Args:
- testcase: A string of testcase name.
- """
- run_cmd_dict = {'exe': self.EXECUTABLE, 'options': self.OPTIONS,
- 'test': testcase}
- run_command = self._RUN_CMD.format(**run_cmd_dict)
- try:
- subprocess.check_output(run_command,
- stderr=subprocess.PIPE,
- env=self.full_env_vars,
- shell=True)
- except subprocess.CalledProcessError as e:
- self.log.append(e.output)
- return False
- return True
-
- def get_failed_log(self):
- """Get a trimmed failed log.
-
- Strategy:
- In order not to show the unnecessary log such as build log,
- it's better to get a trimmed failed log that contains the
- most important information.
-
- Returns:
- A trimmed failed log.
- """
- failed_log = '\n'.join(filter(None, self.log[-_FAILED_LINE_LIMIT:]))
- return failed_log
-
-
-def create_test_method(testcase, log_path):
- """Create a test method according to the testcase.
-
- Args:
- testcase: A testcase name.
- log_path: A file path for storing the test result.
-
- Returns:
- A created test method, and a test function name.
- """
- test_function_name = 'test_%s' % testcase.replace(' ', '_')
- # pylint: disable=missing-docstring
- def template_test_method(self):
- self.test_passed = self.run_test(testcase)
- open(log_path, 'a').write('\n'.join(self.log))
- failed_message = 'Running command: %s failed.\n' % testcase
- failed_message += '' if self.test_passed else self.get_failed_log()
- self.assertTrue(self.test_passed, failed_message)
- return test_function_name, template_test_method
-
-
-def create_test_run_dir():
- """Create the test run directory in tmp.
-
- Returns:
- A string of the directory path.
- """
- utc_epoch_time = int(time.time())
- prefix = _TEST_RUN_DIR_PREFIX % utc_epoch_time
- return tempfile.mkdtemp(prefix=prefix)
-
-
-if __name__ == '__main__':
- # TODO(b/129029189) Implement detail comparison check for dry-run mode.
- ARGS = ' '.join(sys.argv[1:])
- if ARGS:
- ATestIntegrationTest.OPTIONS = ARGS
- TEST_PLANS = os.path.join(os.path.dirname(__file__), _INTEGRATION_TESTS)
- try:
- LOG_PATH = os.path.join(create_test_run_dir(), _LOG_FILE)
- with open(TEST_PLANS) as test_plans:
- for test in test_plans:
- # Skip test when the line startswith #.
- if not test.strip() or test.strip().startswith('#'):
- continue
- test_func_name, test_func = create_test_method(
- test.strip(), LOG_PATH)
- setattr(ATestIntegrationTest, test_func_name, test_func)
- SUITE = unittest.TestLoader().loadTestsFromTestCase(ATestIntegrationTest)
- RESULTS = unittest.TextTestRunner(verbosity=2).run(SUITE)
- finally:
- if RESULTS.failures:
- print('Full test log is saved to %s' % LOG_PATH)
- sys.exit(_EXIT_TEST_FAILED)
- else:
- os.remove(LOG_PATH)
diff --git a/atest-py2/atest_integration_tests.xml b/atest-py2/atest_integration_tests.xml
deleted file mode 100644
index dd8ee82..0000000
--- a/atest-py2/atest_integration_tests.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!-- Copyright (C) 2018 The Android Open Source Project
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration description="Config to run atest integration tests">
- <option name="test-suite-tag" value="atest_integration_tests" />
-
- <test class="com.android.tradefed.testtype.python.PythonBinaryHostTest" >
- <option name="par-file-name" value="atest_integration_tests" />
- <option name="test-timeout" value="120m" />
- </test>
-</configuration>
diff --git a/atest-py2/atest_run_unittests.py b/atest-py2/atest_run_unittests.py
deleted file mode 100755
index f23c59d..0000000
--- a/atest-py2/atest_run_unittests.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Main entrypoint for all of atest's unittest."""
-
-import logging
-import os
-import sys
-import unittest
-from importlib import import_module
-
-# Setup logging to be silent so unittests can pass through TF.
-logging.disable(logging.ERROR)
-
-def get_test_modules():
- """Returns a list of testable modules.
-
- Finds all the test files (*_unittest.py) and get their relative
- path (internal/lib/utils_test.py) and translate it to an import path and
- strip the py ext (internal.lib.utils_test).
-
- Returns:
- List of strings (the testable module import path).
- """
- testable_modules = []
- base_path = os.path.dirname(os.path.realpath(__file__))
-
- for dirpath, _, files in os.walk(base_path):
- for f in files:
- if f.endswith("_unittest.py"):
- # Now transform it into a relative import path.
- full_file_path = os.path.join(dirpath, f)
- rel_file_path = os.path.relpath(full_file_path, base_path)
- rel_file_path, _ = os.path.splitext(rel_file_path)
- rel_file_path = rel_file_path.replace(os.sep, ".")
- testable_modules.append(rel_file_path)
-
- return testable_modules
-
-def main(_):
- """Main unittest entry.
-
- Args:
- argv: A list of system arguments. (unused)
-
- Returns:
- 0 if success. None-zero if fails.
- """
- test_modules = get_test_modules()
- for mod in test_modules:
- import_module(mod)
-
- loader = unittest.defaultTestLoader
- test_suite = loader.loadTestsFromNames(test_modules)
- runner = unittest.TextTestRunner(verbosity=2)
- result = runner.run(test_suite)
- sys.exit(not result.wasSuccessful())
-
-
-if __name__ == '__main__':
- main(sys.argv[1:])
diff --git a/atest-py2/atest_unittest.py b/atest-py2/atest_unittest.py
deleted file mode 100755
index 84f640c..0000000
--- a/atest-py2/atest_unittest.py
+++ /dev/null
@@ -1,298 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2017, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittests for atest."""
-
-import datetime
-import os
-import sys
-import tempfile
-import unittest
-import mock
-
-import atest
-import constants
-import module_info
-
-from metrics import metrics_utils
-from test_finders import test_info
-
-if sys.version_info[0] == 2:
- from StringIO import StringIO
-else:
- from io import StringIO
-
-#pylint: disable=protected-access
-class AtestUnittests(unittest.TestCase):
- """Unit tests for atest.py"""
-
- @mock.patch('os.environ.get', return_value=None)
- def test_missing_environment_variables_uninitialized(self, _):
- """Test _has_environment_variables when no env vars."""
- self.assertTrue(atest._missing_environment_variables())
-
- @mock.patch('os.environ.get', return_value='out/testcases/')
- def test_missing_environment_variables_initialized(self, _):
- """Test _has_environment_variables when env vars."""
- self.assertFalse(atest._missing_environment_variables())
-
- def test_parse_args(self):
- """Test _parse_args parses command line args."""
- test_one = 'test_name_one'
- test_two = 'test_name_two'
- custom_arg = '--custom_arg'
- custom_arg_val = 'custom_arg_val'
- pos_custom_arg = 'pos_custom_arg'
-
- # Test out test and custom args are properly retrieved.
- args = [test_one, test_two, '--', custom_arg, custom_arg_val]
- parsed_args = atest._parse_args(args)
- self.assertEqual(parsed_args.tests, [test_one, test_two])
- self.assertEqual(parsed_args.custom_args, [custom_arg, custom_arg_val])
-
- # Test out custom positional args with no test args.
- args = ['--', pos_custom_arg, custom_arg_val]
- parsed_args = atest._parse_args(args)
- self.assertEqual(parsed_args.tests, [])
- self.assertEqual(parsed_args.custom_args, [pos_custom_arg,
- custom_arg_val])
-
- def test_has_valid_test_mapping_args(self):
- """Test _has_valid_test_mapping_args mehod."""
- # Test test mapping related args are not mixed with incompatible args.
- options_no_tm_support = [
- ('--generate-baseline', '5'),
- ('--detect-regression', 'path'),
- ('--generate-new-metrics', '5')
- ]
- tm_options = [
- '--test-mapping',
- '--include-subdirs'
- ]
-
- for tm_option in tm_options:
- for no_tm_option, no_tm_option_value in options_no_tm_support:
- args = [tm_option, no_tm_option]
- if no_tm_option_value != None:
- args.append(no_tm_option_value)
- parsed_args = atest._parse_args(args)
- self.assertFalse(
- atest._has_valid_test_mapping_args(parsed_args),
- 'Failed to validate: %s' % args)
-
- @mock.patch('json.load', return_value={})
- @mock.patch('__builtin__.open', new_callable=mock.mock_open)
- @mock.patch('os.path.isfile', return_value=True)
- @mock.patch('atest_utils._has_colors', return_value=True)
- @mock.patch.object(module_info.ModuleInfo, 'get_module_info',)
- def test_print_module_info_from_module_name(self, mock_get_module_info,
- _mock_has_colors, _isfile,
- _open, _json):
- """Test _print_module_info_from_module_name mehod."""
- mod_one_name = 'mod1'
- mod_one_path = ['src/path/mod1']
- mod_one_installed = ['installed/path/mod1']
- mod_one_suites = ['device_test_mod1', 'native_test_mod1']
- mod_one = {constants.MODULE_NAME: mod_one_name,
- constants.MODULE_PATH: mod_one_path,
- constants.MODULE_INSTALLED: mod_one_installed,
- constants.MODULE_COMPATIBILITY_SUITES: mod_one_suites}
-
- # Case 1: The testing_module('mod_one') can be found in module_info.
- mock_get_module_info.return_value = mod_one
- capture_output = StringIO()
- sys.stdout = capture_output
- mod_info = module_info.ModuleInfo()
- # Check return value = True, since 'mod_one' can be found.
- self.assertTrue(
- atest._print_module_info_from_module_name(mod_info, mod_one_name))
- # Assign sys.stdout back to default.
- sys.stdout = sys.__stdout__
- correct_output = ('\x1b[1;32mmod1\x1b[0m\n'
- '\x1b[1;36m\tCompatibility suite\x1b[0m\n'
- '\t\tdevice_test_mod1\n'
- '\t\tnative_test_mod1\n'
- '\x1b[1;36m\tSource code path\x1b[0m\n'
- '\t\tsrc/path/mod1\n'
- '\x1b[1;36m\tInstalled path\x1b[0m\n'
- '\t\tinstalled/path/mod1\n')
- # Check the function correctly printed module_info in color to stdout
- self.assertEqual(capture_output.getvalue(), correct_output)
-
- # Case 2: The testing_module('mod_one') can NOT be found in module_info.
- mock_get_module_info.return_value = None
- capture_output = StringIO()
- sys.stdout = capture_output
- # Check return value = False, since 'mod_one' can NOT be found.
- self.assertFalse(
- atest._print_module_info_from_module_name(mod_info, mod_one_name))
- # Assign sys.stdout back to default.
- sys.stdout = sys.__stdout__
- null_output = ''
- # Check if no module_info, then nothing printed to screen.
- self.assertEqual(capture_output.getvalue(), null_output)
-
- @mock.patch('json.load', return_value={})
- @mock.patch('__builtin__.open', new_callable=mock.mock_open)
- @mock.patch('os.path.isfile', return_value=True)
- @mock.patch('atest_utils._has_colors', return_value=True)
- @mock.patch.object(module_info.ModuleInfo, 'get_module_info',)
- def test_print_test_info(self, mock_get_module_info, _mock_has_colors,
- _isfile, _open, _json):
- """Test _print_test_info mehod."""
- mod_one_name = 'mod1'
- mod_one = {constants.MODULE_NAME: mod_one_name,
- constants.MODULE_PATH: ['path/mod1'],
- constants.MODULE_INSTALLED: ['installed/mod1'],
- constants.MODULE_COMPATIBILITY_SUITES: ['suite_mod1']}
- mod_two_name = 'mod2'
- mod_two = {constants.MODULE_NAME: mod_two_name,
- constants.MODULE_PATH: ['path/mod2'],
- constants.MODULE_INSTALLED: ['installed/mod2'],
- constants.MODULE_COMPATIBILITY_SUITES: ['suite_mod2']}
- mod_three_name = 'mod3'
- mod_three = {constants.MODULE_NAME: mod_two_name,
- constants.MODULE_PATH: ['path/mod3'],
- constants.MODULE_INSTALLED: ['installed/mod3'],
- constants.MODULE_COMPATIBILITY_SUITES: ['suite_mod3']}
- test_name = mod_one_name
- build_targets = set([mod_one_name, mod_two_name, mod_three_name])
- t_info = test_info.TestInfo(test_name, 'mock_runner', build_targets)
- test_infos = set([t_info])
-
- # The _print_test_info() will print the module_info of the test_info's
- # test_name first. Then, print its related build targets. If the build
- # target be printed before(e.g. build_target == test_info's test_name),
- # it will skip it and print the next build_target.
- # Since the build_targets of test_info are mod_one, mod_two, and
- # mod_three, it will print mod_one first, then mod_two, and mod_three.
- #
- # _print_test_info() calls _print_module_info_from_module_name() to
- # print the module_info. And _print_module_info_from_module_name()
- # calls get_module_info() to get the module_info. So we can mock
- # get_module_info() to achieve that.
- mock_get_module_info.side_effect = [mod_one, mod_two, mod_three]
-
- capture_output = StringIO()
- sys.stdout = capture_output
- mod_info = module_info.ModuleInfo()
- atest._print_test_info(mod_info, test_infos)
- # Assign sys.stdout back to default.
- sys.stdout = sys.__stdout__
- correct_output = ('\x1b[1;32mmod1\x1b[0m\n'
- '\x1b[1;36m\tCompatibility suite\x1b[0m\n'
- '\t\tsuite_mod1\n'
- '\x1b[1;36m\tSource code path\x1b[0m\n'
- '\t\tpath/mod1\n'
- '\x1b[1;36m\tInstalled path\x1b[0m\n'
- '\t\tinstalled/mod1\n'
- '\x1b[1;35m\tRelated build targets\x1b[0m\n'
- '\t\tmod1, mod2, mod3\n'
- '\x1b[1;32mmod2\x1b[0m\n'
- '\x1b[1;36m\tCompatibility suite\x1b[0m\n'
- '\t\tsuite_mod2\n'
- '\x1b[1;36m\tSource code path\x1b[0m\n'
- '\t\tpath/mod2\n'
- '\x1b[1;36m\tInstalled path\x1b[0m\n'
- '\t\tinstalled/mod2\n'
- '\x1b[1;32mmod3\x1b[0m\n'
- '\x1b[1;36m\tCompatibility suite\x1b[0m\n'
- '\t\tsuite_mod3\n'
- '\x1b[1;36m\tSource code path\x1b[0m\n'
- '\t\tpath/mod3\n'
- '\x1b[1;36m\tInstalled path\x1b[0m\n'
- '\t\tinstalled/mod3\n'
- '\x1b[1;37m\x1b[0m\n')
- self.assertEqual(capture_output.getvalue(), correct_output)
-
- @mock.patch.object(metrics_utils, 'send_exit_event')
- def test_validate_exec_mode(self, _send_exit):
- """Test _validate_exec_mode."""
- args = []
- parsed_args = atest._parse_args(args)
- no_install_test_info = test_info.TestInfo(
- 'mod', '', set(), data={}, module_class=["JAVA_LIBRARIES"],
- install_locations=set(['device']))
- host_test_info = test_info.TestInfo(
- 'mod', '', set(), data={}, module_class=["NATIVE_TESTS"],
- install_locations=set(['host']))
- device_test_info = test_info.TestInfo(
- 'mod', '', set(), data={}, module_class=["NATIVE_TESTS"],
- install_locations=set(['device']))
- both_test_info = test_info.TestInfo(
- 'mod', '', set(), data={}, module_class=["NATIVE_TESTS"],
- install_locations=set(['host', 'device']))
-
- # $atest <Both-support>
- test_infos = [host_test_info]
- atest._validate_exec_mode(parsed_args, test_infos)
- self.assertFalse(parsed_args.host)
-
- # $atest <Both-support> with host_tests set to True
- parsed_args = atest._parse_args([])
- test_infos = [host_test_info]
- atest._validate_exec_mode(parsed_args, test_infos, host_tests=True)
- # Make sure the host option is not set.
- self.assertFalse(parsed_args.host)
-
- # $atest <Both-support> with host_tests set to False
- parsed_args = atest._parse_args([])
- test_infos = [host_test_info]
- atest._validate_exec_mode(parsed_args, test_infos, host_tests=False)
- self.assertFalse(parsed_args.host)
-
- # $atest <device-only> with host_tests set to False
- parsed_args = atest._parse_args([])
- test_infos = [device_test_info]
- atest._validate_exec_mode(parsed_args, test_infos, host_tests=False)
- # Make sure the host option is not set.
- self.assertFalse(parsed_args.host)
-
- # $atest <device-only> with host_tests set to True
- parsed_args = atest._parse_args([])
- test_infos = [device_test_info]
- self.assertRaises(SystemExit, atest._validate_exec_mode,
- parsed_args, test_infos, host_tests=True)
-
- # $atest <Both-support>
- parsed_args = atest._parse_args([])
- test_infos = [both_test_info]
- atest._validate_exec_mode(parsed_args, test_infos)
- self.assertFalse(parsed_args.host)
-
- # $atest <no_install_test_info>
- parsed_args = atest._parse_args([])
- test_infos = [no_install_test_info]
- atest._validate_exec_mode(parsed_args, test_infos)
- self.assertFalse(parsed_args.host)
-
- def test_make_test_run_dir(self):
- """Test make_test_run_dir."""
- tmp_dir = tempfile.mkdtemp()
- constants.ATEST_RESULT_ROOT = tmp_dir
- date_time = None
-
- work_dir = atest.make_test_run_dir()
- folder_name = os.path.basename(work_dir)
- date_time = datetime.datetime.strptime('_'.join(folder_name.split('_')[0:2]),
- atest.TEST_RUN_DIR_PREFIX)
-
- reload(constants)
- self.assertIsNotNone(date_time)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest-py2/atest_utils.py b/atest-py2/atest_utils.py
deleted file mode 100644
index f1be007..0000000
--- a/atest-py2/atest_utils.py
+++ /dev/null
@@ -1,629 +0,0 @@
-# Copyright 2017, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Utility functions for atest.
-"""
-
-
-from __future__ import print_function
-
-import hashlib
-import itertools
-import json
-import logging
-import os
-import pickle
-import re
-import shutil
-import subprocess
-import sys
-
-import atest_decorator
-import atest_error
-import constants
-
-from metrics import metrics_base
-from metrics import metrics_utils
-
-
-_BASH_RESET_CODE = '\033[0m\n'
-# Arbitrary number to limit stdout for failed runs in _run_limited_output.
-# Reason for its use is that the make command itself has its own carriage
-# return output mechanism that when collected line by line causes the streaming
-# full_output list to be extremely large.
-_FAILED_OUTPUT_LINE_LIMIT = 100
-# Regular expression to match the start of a ninja compile:
-# ex: [ 99% 39710/39711]
-_BUILD_COMPILE_STATUS = re.compile(r'\[\s*(\d{1,3}%\s+)?\d+/\d+\]')
-_BUILD_FAILURE = 'FAILED: '
-CMD_RESULT_PATH = os.path.join(os.environ.get(constants.ANDROID_BUILD_TOP,
- os.getcwd()),
- 'tools/tradefederation/core/atest/test_data',
- 'test_commands.json')
-BUILD_TOP_HASH = hashlib.md5(os.environ.get(constants.ANDROID_BUILD_TOP, '').
- encode()).hexdigest()
-TEST_INFO_CACHE_ROOT = os.path.join(os.path.expanduser('~'), '.atest',
- 'info_cache', BUILD_TOP_HASH[:8])
-_DEFAULT_TERMINAL_WIDTH = 80
-_DEFAULT_TERMINAL_HEIGHT = 25
-_BUILD_CMD = 'build/soong/soong_ui.bash'
-_FIND_MODIFIED_FILES_CMDS = (
- "cd {};"
- "local_branch=$(git rev-parse --abbrev-ref HEAD);"
- "remote_branch=$(git branch -r | grep '\\->' | awk '{{print $1}}');"
- # Get the number of commits from local branch to remote branch.
- "ahead=$(git rev-list --left-right --count $local_branch...$remote_branch "
- "| awk '{{print $1}}');"
- # Get the list of modified files from HEAD to previous $ahead generation.
- "git diff HEAD~$ahead --name-only")
-
-
-def get_build_cmd():
- """Compose build command with relative path and flag "--make-mode".
-
- Returns:
- A list of soong build command.
- """
- make_cmd = ('%s/%s' %
- (os.path.relpath(os.environ.get(
- constants.ANDROID_BUILD_TOP, os.getcwd()), os.getcwd()),
- _BUILD_CMD))
- return [make_cmd, '--make-mode']
-
-
-def _capture_fail_section(full_log):
- """Return the error message from the build output.
-
- Args:
- full_log: List of strings representing full output of build.
-
- Returns:
- capture_output: List of strings that are build errors.
- """
- am_capturing = False
- capture_output = []
- for line in full_log:
- if am_capturing and _BUILD_COMPILE_STATUS.match(line):
- break
- if am_capturing or line.startswith(_BUILD_FAILURE):
- capture_output.append(line)
- am_capturing = True
- continue
- return capture_output
-
-
-def _run_limited_output(cmd, env_vars=None):
- """Runs a given command and streams the output on a single line in stdout.
-
- Args:
- cmd: A list of strings representing the command to run.
- env_vars: Optional arg. Dict of env vars to set during build.
-
- Raises:
- subprocess.CalledProcessError: When the command exits with a non-0
- exitcode.
- """
- # Send stderr to stdout so we only have to deal with a single pipe.
- proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT, env=env_vars)
- sys.stdout.write('\n')
- term_width, _ = get_terminal_size()
- white_space = " " * int(term_width)
- full_output = []
- while proc.poll() is None:
- line = proc.stdout.readline()
- # Readline will often return empty strings.
- if not line:
- continue
- full_output.append(line.decode('utf-8'))
- # Trim the line to the width of the terminal.
- # Note: Does not handle terminal resizing, which is probably not worth
- # checking the width every loop.
- if len(line) >= term_width:
- line = line[:term_width - 1]
- # Clear the last line we outputted.
- sys.stdout.write('\r%s\r' % white_space)
- sys.stdout.write('%s' % line.strip())
- sys.stdout.flush()
- # Reset stdout (on bash) to remove any custom formatting and newline.
- sys.stdout.write(_BASH_RESET_CODE)
- sys.stdout.flush()
- # Wait for the Popen to finish completely before checking the returncode.
- proc.wait()
- if proc.returncode != 0:
- # Parse out the build error to output.
- output = _capture_fail_section(full_output)
- if not output:
- output = full_output
- if len(output) >= _FAILED_OUTPUT_LINE_LIMIT:
- output = output[-_FAILED_OUTPUT_LINE_LIMIT:]
- output = 'Output (may be trimmed):\n%s' % ''.join(output)
- raise subprocess.CalledProcessError(proc.returncode, cmd, output)
-
-
-def build(build_targets, verbose=False, env_vars=None):
- """Shell out and make build_targets.
-
- Args:
- build_targets: A set of strings of build targets to make.
- verbose: Optional arg. If True output is streamed to the console.
- If False, only the last line of the build output is outputted.
- env_vars: Optional arg. Dict of env vars to set during build.
-
- Returns:
- Boolean of whether build command was successful, True if nothing to
- build.
- """
- if not build_targets:
- logging.debug('No build targets, skipping build.')
- return True
- full_env_vars = os.environ.copy()
- if env_vars:
- full_env_vars.update(env_vars)
- print('\n%s\n%s' % (colorize("Building Dependencies...", constants.CYAN),
- ', '.join(build_targets)))
- logging.debug('Building Dependencies: %s', ' '.join(build_targets))
- cmd = get_build_cmd() + list(build_targets)
- logging.debug('Executing command: %s', cmd)
- try:
- if verbose:
- subprocess.check_call(cmd, stderr=subprocess.STDOUT,
- env=full_env_vars)
- else:
- # TODO: Save output to a log file.
- _run_limited_output(cmd, env_vars=full_env_vars)
- logging.info('Build successful')
- return True
- except subprocess.CalledProcessError as err:
- logging.error('Error building: %s', build_targets)
- if err.output:
- logging.error(err.output)
- return False
-
-
-def _can_upload_to_result_server():
- """Return True if we can talk to result server."""
- # TODO: Also check if we have a slow connection to result server.
- if constants.RESULT_SERVER:
- try:
- try:
- # If PYTHON2
- from urllib2 import urlopen
- except ImportError:
- metrics_utils.handle_exc_and_send_exit_event(
- constants.IMPORT_FAILURE)
- from urllib.request import urlopen
- urlopen(constants.RESULT_SERVER,
- timeout=constants.RESULT_SERVER_TIMEOUT).close()
- return True
- # pylint: disable=broad-except
- except Exception as err:
- logging.debug('Talking to result server raised exception: %s', err)
- return False
-
-
-def get_result_server_args(for_test_mapping=False):
- """Return list of args for communication with result server.
-
- Args:
- for_test_mapping: True if the test run is for Test Mapping to include
- additional reporting args. Default is False.
- """
- # TODO (b/147644460) Temporarily disable Sponge V1 since it will be turned
- # down.
- if _can_upload_to_result_server():
- if for_test_mapping:
- return (constants.RESULT_SERVER_ARGS +
- constants.TEST_MAPPING_RESULT_SERVER_ARGS)
- return constants.RESULT_SERVER_ARGS
- return []
-
-
-def sort_and_group(iterable, key):
- """Sort and group helper function."""
- return itertools.groupby(sorted(iterable, key=key), key=key)
-
-
-def is_test_mapping(args):
- """Check if the atest command intends to run tests in test mapping.
-
- When atest runs tests in test mapping, it must have at most one test
- specified. If a test is specified, it must be started with `:`,
- which means the test value is a test group name in TEST_MAPPING file, e.g.,
- `:postsubmit`.
-
- If any test mapping options is specified, the atest command must also be
- set to run tests in test mapping files.
-
- Args:
- args: arg parsed object.
-
- Returns:
- True if the args indicates atest shall run tests in test mapping. False
- otherwise.
- """
- return (
- args.test_mapping or
- args.include_subdirs or
- not args.tests or
- (len(args.tests) == 1 and args.tests[0][0] == ':'))
-
-@atest_decorator.static_var("cached_has_colors", {})
-def _has_colors(stream):
- """Check the output stream is colorful.
-
- Args:
- stream: The standard file stream.
-
- Returns:
- True if the file stream can interpreter the ANSI color code.
- """
- cached_has_colors = _has_colors.cached_has_colors
- if stream in cached_has_colors:
- return cached_has_colors[stream]
- else:
- cached_has_colors[stream] = True
- # Following from Python cookbook, #475186
- if not hasattr(stream, "isatty"):
- cached_has_colors[stream] = False
- return False
- if not stream.isatty():
- # Auto color only on TTYs
- cached_has_colors[stream] = False
- return False
- try:
- import curses
- curses.setupterm()
- cached_has_colors[stream] = curses.tigetnum("colors") > 2
- # pylint: disable=broad-except
- except Exception as err:
- logging.debug('Checking colorful raised exception: %s', err)
- cached_has_colors[stream] = False
- return cached_has_colors[stream]
-
-
-def colorize(text, color, highlight=False):
- """ Convert to colorful string with ANSI escape code.
-
- Args:
- text: A string to print.
- color: ANSI code shift for colorful print. They are defined
- in constants_default.py.
- highlight: True to print with highlight.
-
- Returns:
- Colorful string with ANSI escape code.
- """
- clr_pref = '\033[1;'
- clr_suff = '\033[0m'
- has_colors = _has_colors(sys.stdout)
- if has_colors:
- if highlight:
- ansi_shift = 40 + color
- else:
- ansi_shift = 30 + color
- clr_str = "%s%dm%s%s" % (clr_pref, ansi_shift, text, clr_suff)
- else:
- clr_str = text
- return clr_str
-
-
-def colorful_print(text, color, highlight=False, auto_wrap=True):
- """Print out the text with color.
-
- Args:
- text: A string to print.
- color: ANSI code shift for colorful print. They are defined
- in constants_default.py.
- highlight: True to print with highlight.
- auto_wrap: If True, Text wraps while print.
- """
- output = colorize(text, color, highlight)
- if auto_wrap:
- print(output)
- else:
- print(output, end="")
-
-
-# pylint: disable=no-member
-# TODO: remove the above disable when migrating to python3.
-def get_terminal_size():
- """Get terminal size and return a tuple.
-
- Returns:
- 2 integers: the size of X(columns) and Y(lines/rows).
- """
- # Determine the width of the terminal. We'll need to clear this many
- # characters when carriage returning. Set default value as 80.
- try:
- if sys.version_info[0] == 2:
- _y, _x = subprocess.check_output(['stty', 'size']).decode().split()
- return int(_x), int(_y)
- return (shutil.get_terminal_size().columns,
- shutil.get_terminal_size().lines)
- # b/137521782 stty size could have changed for reasones.
- except subprocess.CalledProcessError:
- return _DEFAULT_TERMINAL_WIDTH, _DEFAULT_TERMINAL_HEIGHT
-
-
-def is_external_run():
- # TODO(b/133905312): remove this function after aidegen calling
- # metrics_base.get_user_type directly.
- """Check is external run or not.
-
- Determine the internal user by passing at least one check:
- - whose git mail domain is from google
- - whose hostname is from google
- Otherwise is external user.
-
- Returns:
- True if this is an external run, False otherwise.
- """
- return metrics_base.get_user_type() == metrics_base.EXTERNAL_USER
-
-
-def print_data_collection_notice():
- """Print the data collection notice."""
- anonymous = ''
- user_type = 'INTERNAL'
- if metrics_base.get_user_type() == metrics_base.EXTERNAL_USER:
- anonymous = ' anonymous'
- user_type = 'EXTERNAL'
- notice = (' We collect%s usage statistics in accordance with our Content '
- 'Licenses (%s), Contributor License Agreement (%s), Privacy '
- 'Policy (%s) and Terms of Service (%s).'
- ) % (anonymous,
- constants.CONTENT_LICENSES_URL,
- constants.CONTRIBUTOR_AGREEMENT_URL[user_type],
- constants.PRIVACY_POLICY_URL,
- constants.TERMS_SERVICE_URL
- )
- print('\n==================')
- colorful_print("Notice:", constants.RED)
- colorful_print("%s" % notice, constants.GREEN)
- print('==================\n')
-
-
-def handle_test_runner_cmd(input_test, test_cmds, do_verification=False,
- result_path=CMD_RESULT_PATH):
- """Handle the runner command of input tests.
-
- Args:
- input_test: A string of input tests pass to atest.
- test_cmds: A list of strings for running input tests.
- do_verification: A boolean to indicate the action of this method.
- True: Do verification without updating result map and
- raise DryRunVerificationError if verifying fails.
- False: Update result map, if the former command is
- different with current command, it will confirm
- with user if they want to update or not.
- result_path: The file path for saving result.
- """
- full_result_content = {}
- if os.path.isfile(result_path):
- with open(result_path) as json_file:
- full_result_content = json.load(json_file)
- former_test_cmds = full_result_content.get(input_test, [])
- if not _are_identical_cmds(test_cmds, former_test_cmds):
- if do_verification:
- raise atest_error.DryRunVerificationError('Dry run verification failed,'
- ' former commands: %s' %
- former_test_cmds)
- if former_test_cmds:
- # If former_test_cmds is different from test_cmds, ask users if they
- # are willing to update the result.
- print('Former cmds = %s' % former_test_cmds)
- print('Current cmds = %s' % test_cmds)
- try:
- # TODO(b/137156054):
- # Move the import statement into a method for that distutils is
- # not a built-in lib in older python3(b/137017806). Will move it
- # back when embedded_launcher fully supports Python3.
- from distutils.util import strtobool
- if not strtobool(raw_input('Do you want to update former result'
- 'with the latest one?(Y/n)')):
- print('SKIP updating result!!!')
- return
- except ValueError:
- # Default action is updating the command result of the input_test.
- # If the user input is unrecognizable telling yes or no,
- # "Y" is implicitly applied.
- pass
- else:
- # If current commands are the same as the formers, no need to update
- # result.
- return
- full_result_content[input_test] = test_cmds
- with open(result_path, 'w') as outfile:
- json.dump(full_result_content, outfile, indent=0)
- print('Save result mapping to %s' % result_path)
-
-
-def _are_identical_cmds(current_cmds, former_cmds):
- """Tell two commands are identical. Note that '--atest-log-file-path' is not
- considered a critical argument, therefore, it will be removed during
- the comparison. Also, atest can be ran in any place, so verifying relative
- path is regardless as well.
-
- Args:
- current_cmds: A list of strings for running input tests.
- former_cmds: A list of strings recorded from the previous run.
-
- Returns:
- True if both commands are identical, False otherwise.
- """
- def _normalize(cmd_list):
- """Method that normalize commands.
-
- Args:
- cmd_list: A list with one element. E.g. ['cmd arg1 arg2 True']
-
- Returns:
- A list with elements. E.g. ['cmd', 'arg1', 'arg2', 'True']
- """
- _cmd = ''.join(cmd_list).encode('utf-8').split()
- for cmd in _cmd:
- if cmd.startswith('--atest-log-file-path'):
- _cmd.remove(cmd)
- continue
- if _BUILD_CMD in cmd:
- _cmd.remove(cmd)
- _cmd.append(os.path.join('./', _BUILD_CMD))
- continue
- return _cmd
-
- _current_cmds = _normalize(current_cmds)
- _former_cmds = _normalize(former_cmds)
- # Always sort cmd list to make it comparable.
- _current_cmds.sort()
- _former_cmds.sort()
- return _current_cmds == _former_cmds
-
-def _get_hashed_file_name(main_file_name):
- """Convert the input string to a md5-hashed string. If file_extension is
- given, returns $(hashed_string).$(file_extension), otherwise
- $(hashed_string).cache.
-
- Args:
- main_file_name: The input string need to be hashed.
-
- Returns:
- A string as hashed file name with .cache file extension.
- """
- hashed_fn = hashlib.md5(str(main_file_name).encode())
- hashed_name = hashed_fn.hexdigest()
- return hashed_name + '.cache'
-
-def get_test_info_cache_path(test_reference, cache_root=TEST_INFO_CACHE_ROOT):
- """Get the cache path of the desired test_infos.
-
- Args:
- test_reference: A string of the test.
- cache_root: Folder path where stores caches.
-
- Returns:
- A string of the path of test_info cache.
- """
- return os.path.join(cache_root,
- _get_hashed_file_name(test_reference))
-
-def update_test_info_cache(test_reference, test_infos,
- cache_root=TEST_INFO_CACHE_ROOT):
- """Update cache content which stores a set of test_info objects through
- pickle module, each test_reference will be saved as a cache file.
-
- Args:
- test_reference: A string referencing a test.
- test_infos: A set of TestInfos.
- cache_root: Folder path for saving caches.
- """
- if not os.path.isdir(cache_root):
- os.makedirs(cache_root)
- cache_path = get_test_info_cache_path(test_reference, cache_root)
- # Save test_info to files.
- try:
- with open(cache_path, 'wb') as test_info_cache_file:
- logging.debug('Saving cache %s.', cache_path)
- pickle.dump(test_infos, test_info_cache_file, protocol=2)
- except (pickle.PicklingError, TypeError, IOError) as err:
- # Won't break anything, just log this error, and collect the exception
- # by metrics.
- logging.debug('Exception raised: %s', err)
- metrics_utils.handle_exc_and_send_exit_event(
- constants.ACCESS_CACHE_FAILURE)
-
-
-def load_test_info_cache(test_reference, cache_root=TEST_INFO_CACHE_ROOT):
- """Load cache by test_reference to a set of test_infos object.
-
- Args:
- test_reference: A string referencing a test.
- cache_root: Folder path for finding caches.
-
- Returns:
- A list of TestInfo namedtuple if cache found, else None.
- """
- cache_file = get_test_info_cache_path(test_reference, cache_root)
- if os.path.isfile(cache_file):
- logging.debug('Loading cache %s.', cache_file)
- try:
- with open(cache_file, 'rb') as config_dictionary_file:
- return pickle.load(config_dictionary_file)
- except (pickle.UnpicklingError, ValueError, EOFError, IOError) as err:
- # Won't break anything, just remove the old cache, log this error, and
- # collect the exception by metrics.
- logging.debug('Exception raised: %s', err)
- os.remove(cache_file)
- metrics_utils.handle_exc_and_send_exit_event(
- constants.ACCESS_CACHE_FAILURE)
- return None
-
-def clean_test_info_caches(tests, cache_root=TEST_INFO_CACHE_ROOT):
- """Clean caches of input tests.
-
- Args:
- tests: A list of test references.
- cache_root: Folder path for finding caches.
- """
- for test in tests:
- cache_file = get_test_info_cache_path(test, cache_root)
- if os.path.isfile(cache_file):
- logging.debug('Removing cache: %s', cache_file)
- try:
- os.remove(cache_file)
- except IOError as err:
- logging.debug('Exception raised: %s', err)
- metrics_utils.handle_exc_and_send_exit_event(
- constants.ACCESS_CACHE_FAILURE)
-
-def get_modified_files(root_dir):
- """Get the git modified files. The git path here is git top level of
- the root_dir. It's inevitable to utilise different commands to fulfill
- 2 scenario:
- 1. locate unstaged/staged files
- 2. locate committed files but not yet merged.
- the 'git_status_cmd' fulfils the former while the 'find_modified_files'
- fulfils the latter.
-
- Args:
- root_dir: the root where it starts finding.
-
- Returns:
- A set of modified files altered since last commit.
- """
- modified_files = set()
- try:
- find_git_cmd = 'cd {}; git rev-parse --show-toplevel'.format(root_dir)
- git_paths = subprocess.check_output(
- find_git_cmd, shell=True).splitlines()
- for git_path in git_paths:
- # Find modified files from git working tree status.
- git_status_cmd = ("repo forall {} -c git status --short | "
- "awk '{{print $NF}}'").format(git_path)
- modified_wo_commit = subprocess.check_output(
- git_status_cmd, shell=True).rstrip().splitlines()
- for change in modified_wo_commit:
- modified_files.add(
- os.path.normpath('{}/{}'.format(git_path, change)))
- # Find modified files that are committed but not yet merged.
- find_modified_files = _FIND_MODIFIED_FILES_CMDS.format(git_path)
- commit_modified_files = subprocess.check_output(
- find_modified_files, shell=True).splitlines()
- for line in commit_modified_files:
- modified_files.add(os.path.normpath('{}/{}'.format(
- git_path, line)))
- except (OSError, subprocess.CalledProcessError) as err:
- logging.debug('Exception raised: %s', err)
- return modified_files
diff --git a/atest-py2/atest_utils_unittest.py b/atest-py2/atest_utils_unittest.py
deleted file mode 100755
index eb89427..0000000
--- a/atest-py2/atest_utils_unittest.py
+++ /dev/null
@@ -1,409 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittests for atest_utils."""
-
-import hashlib
-import os
-import subprocess
-import sys
-import tempfile
-import unittest
-import mock
-
-import atest_error
-import atest_utils
-import constants
-import unittest_utils
-from test_finders import test_info
-
-if sys.version_info[0] == 2:
- from StringIO import StringIO
-else:
- from io import StringIO
-
-TEST_MODULE_NAME_A = 'ModuleNameA'
-TEST_RUNNER_A = 'FakeTestRunnerA'
-TEST_BUILD_TARGET_A = set(['bt1', 'bt2'])
-TEST_DATA_A = {'test_data_a_1': 'a1',
- 'test_data_a_2': 'a2'}
-TEST_SUITE_A = 'FakeSuiteA'
-TEST_MODULE_CLASS_A = 'FAKE_MODULE_CLASS_A'
-TEST_INSTALL_LOC_A = set(['host', 'device'])
-TEST_FINDER_A = 'MODULE'
-TEST_INFO_A = test_info.TestInfo(TEST_MODULE_NAME_A, TEST_RUNNER_A,
- TEST_BUILD_TARGET_A, TEST_DATA_A,
- TEST_SUITE_A, TEST_MODULE_CLASS_A,
- TEST_INSTALL_LOC_A)
-TEST_INFO_A.test_finder = TEST_FINDER_A
-
-#pylint: disable=protected-access
-class AtestUtilsUnittests(unittest.TestCase):
- """Unit tests for atest_utils.py"""
-
- def test_capture_fail_section_has_fail_section(self):
- """Test capture_fail_section when has fail section."""
- test_list = ['AAAAAA', 'FAILED: Error1', '^\n', 'Error2\n',
- '[ 6% 191/2997] BBBBBB\n', 'CCCCC',
- '[ 20% 322/2997] DDDDDD\n', 'EEEEE']
- want_list = ['FAILED: Error1', '^\n', 'Error2\n']
- self.assertEqual(want_list,
- atest_utils._capture_fail_section(test_list))
-
- def test_capture_fail_section_no_fail_section(self):
- """Test capture_fail_section when no fail section."""
- test_list = ['[ 6% 191/2997] XXXXX', 'YYYYY: ZZZZZ']
- want_list = []
- self.assertEqual(want_list,
- atest_utils._capture_fail_section(test_list))
-
- def test_is_test_mapping(self):
- """Test method is_test_mapping."""
- tm_option_attributes = [
- 'test_mapping',
- 'include_subdirs'
- ]
- for attr_to_test in tm_option_attributes:
- args = mock.Mock()
- for attr in tm_option_attributes:
- setattr(args, attr, attr == attr_to_test)
- args.tests = []
- self.assertTrue(
- atest_utils.is_test_mapping(args),
- 'Failed to validate option %s' % attr_to_test)
-
- args = mock.Mock()
- for attr in tm_option_attributes:
- setattr(args, attr, False)
- args.tests = [':group_name']
- self.assertTrue(atest_utils.is_test_mapping(args))
-
- args = mock.Mock()
- for attr in tm_option_attributes:
- setattr(args, attr, False)
- args.tests = [':test1', 'test2']
- self.assertFalse(atest_utils.is_test_mapping(args))
-
- args = mock.Mock()
- for attr in tm_option_attributes:
- setattr(args, attr, False)
- args.tests = ['test2']
- self.assertFalse(atest_utils.is_test_mapping(args))
-
- @mock.patch('curses.tigetnum')
- def test_has_colors(self, mock_curses_tigetnum):
- """Test method _has_colors."""
- # stream is file I/O
- stream = open('/tmp/test_has_colors.txt', 'wb')
- self.assertFalse(atest_utils._has_colors(stream))
- stream.close()
-
- # stream is not a tty(terminal).
- stream = mock.Mock()
- stream.isatty.return_value = False
- self.assertFalse(atest_utils._has_colors(stream))
-
- # stream is a tty(terminal) and colors < 2.
- stream = mock.Mock()
- stream.isatty.return_value = True
- mock_curses_tigetnum.return_value = 1
- self.assertFalse(atest_utils._has_colors(stream))
-
- # stream is a tty(terminal) and colors > 2.
- stream = mock.Mock()
- stream.isatty.return_value = True
- mock_curses_tigetnum.return_value = 256
- self.assertTrue(atest_utils._has_colors(stream))
-
-
- @mock.patch('atest_utils._has_colors')
- def test_colorize(self, mock_has_colors):
- """Test method colorize."""
- original_str = "test string"
- green_no = 2
-
- # _has_colors() return False.
- mock_has_colors.return_value = False
- converted_str = atest_utils.colorize(original_str, green_no,
- highlight=True)
- self.assertEqual(original_str, converted_str)
-
- # Green with highlight.
- mock_has_colors.return_value = True
- converted_str = atest_utils.colorize(original_str, green_no,
- highlight=True)
- green_highlight_string = '\x1b[1;42m%s\x1b[0m' % original_str
- self.assertEqual(green_highlight_string, converted_str)
-
- # Green, no highlight.
- mock_has_colors.return_value = True
- converted_str = atest_utils.colorize(original_str, green_no,
- highlight=False)
- green_no_highlight_string = '\x1b[1;32m%s\x1b[0m' % original_str
- self.assertEqual(green_no_highlight_string, converted_str)
-
-
- @mock.patch('atest_utils._has_colors')
- def test_colorful_print(self, mock_has_colors):
- """Test method colorful_print."""
- testing_str = "color_print_test"
- green_no = 2
-
- # _has_colors() return False.
- mock_has_colors.return_value = False
- capture_output = StringIO()
- sys.stdout = capture_output
- atest_utils.colorful_print(testing_str, green_no, highlight=True,
- auto_wrap=False)
- sys.stdout = sys.__stdout__
- uncolored_string = testing_str
- self.assertEqual(capture_output.getvalue(), uncolored_string)
-
- # Green with highlight, but no wrap.
- mock_has_colors.return_value = True
- capture_output = StringIO()
- sys.stdout = capture_output
- atest_utils.colorful_print(testing_str, green_no, highlight=True,
- auto_wrap=False)
- sys.stdout = sys.__stdout__
- green_highlight_no_wrap_string = '\x1b[1;42m%s\x1b[0m' % testing_str
- self.assertEqual(capture_output.getvalue(),
- green_highlight_no_wrap_string)
-
- # Green, no highlight, no wrap.
- mock_has_colors.return_value = True
- capture_output = StringIO()
- sys.stdout = capture_output
- atest_utils.colorful_print(testing_str, green_no, highlight=False,
- auto_wrap=False)
- sys.stdout = sys.__stdout__
- green_no_high_no_wrap_string = '\x1b[1;32m%s\x1b[0m' % testing_str
- self.assertEqual(capture_output.getvalue(),
- green_no_high_no_wrap_string)
-
- # Green with highlight and wrap.
- mock_has_colors.return_value = True
- capture_output = StringIO()
- sys.stdout = capture_output
- atest_utils.colorful_print(testing_str, green_no, highlight=True,
- auto_wrap=True)
- sys.stdout = sys.__stdout__
- green_highlight_wrap_string = '\x1b[1;42m%s\x1b[0m\n' % testing_str
- self.assertEqual(capture_output.getvalue(), green_highlight_wrap_string)
-
- # Green with wrap, but no highlight.
- mock_has_colors.return_value = True
- capture_output = StringIO()
- sys.stdout = capture_output
- atest_utils.colorful_print(testing_str, green_no, highlight=False,
- auto_wrap=True)
- sys.stdout = sys.__stdout__
- green_wrap_no_highlight_string = '\x1b[1;32m%s\x1b[0m\n' % testing_str
- self.assertEqual(capture_output.getvalue(),
- green_wrap_no_highlight_string)
-
- @mock.patch('socket.gethostname')
- @mock.patch('subprocess.check_output')
- def test_is_external_run(self, mock_output, mock_hostname):
- """Test method is_external_run."""
- mock_output.return_value = ''
- mock_hostname.return_value = ''
- self.assertTrue(atest_utils.is_external_run())
-
- mock_output.return_value = 'test@other.com'
- mock_hostname.return_value = 'abc.com'
- self.assertTrue(atest_utils.is_external_run())
-
- mock_output.return_value = 'test@other.com'
- mock_hostname.return_value = 'abc.google.com'
- self.assertFalse(atest_utils.is_external_run())
-
- mock_output.return_value = 'test@other.com'
- mock_hostname.return_value = 'abc.google.def.com'
- self.assertTrue(atest_utils.is_external_run())
-
- mock_output.return_value = 'test@google.com'
- self.assertFalse(atest_utils.is_external_run())
-
- mock_output.return_value = 'test@other.com'
- mock_hostname.return_value = 'c.googlers.com'
- self.assertFalse(atest_utils.is_external_run())
-
- mock_output.return_value = 'test@other.com'
- mock_hostname.return_value = 'a.googlers.com'
- self.assertTrue(atest_utils.is_external_run())
-
- mock_output.side_effect = OSError()
- self.assertTrue(atest_utils.is_external_run())
-
- mock_output.side_effect = subprocess.CalledProcessError(1, 'cmd')
- self.assertTrue(atest_utils.is_external_run())
-
- @mock.patch('metrics.metrics_base.get_user_type')
- def test_print_data_collection_notice(self, mock_get_user_type):
- """Test method print_data_collection_notice."""
-
- # get_user_type return 1(external).
- mock_get_user_type.return_value = 1
- notice_str = ('\n==================\nNotice:\n'
- ' We collect anonymous usage statistics'
- ' in accordance with our'
- ' Content Licenses (https://source.android.com/setup/start/licenses),'
- ' Contributor License Agreement (https://opensource.google.com/docs/cla/),'
- ' Privacy Policy (https://policies.google.com/privacy) and'
- ' Terms of Service (https://policies.google.com/terms).'
- '\n==================\n\n')
- capture_output = StringIO()
- sys.stdout = capture_output
- atest_utils.print_data_collection_notice()
- sys.stdout = sys.__stdout__
- uncolored_string = notice_str
- self.assertEqual(capture_output.getvalue(), uncolored_string)
-
- # get_user_type return 0(internal).
- mock_get_user_type.return_value = 0
- notice_str = ('\n==================\nNotice:\n'
- ' We collect usage statistics'
- ' in accordance with our'
- ' Content Licenses (https://source.android.com/setup/start/licenses),'
- ' Contributor License Agreement (https://cla.developers.google.com/),'
- ' Privacy Policy (https://policies.google.com/privacy) and'
- ' Terms of Service (https://policies.google.com/terms).'
- '\n==================\n\n')
- capture_output = StringIO()
- sys.stdout = capture_output
- atest_utils.print_data_collection_notice()
- sys.stdout = sys.__stdout__
- uncolored_string = notice_str
- self.assertEqual(capture_output.getvalue(), uncolored_string)
-
- @mock.patch('__builtin__.raw_input')
- @mock.patch('json.load')
- def test_update_test_runner_cmd(self, mock_json_load_data, mock_raw_input):
- """Test method handle_test_runner_cmd without enable do_verification."""
- former_cmd_str = 'Former cmds ='
- write_result_str = 'Save result mapping to test_result'
- tmp_file = tempfile.NamedTemporaryFile()
- input_cmd = 'atest_args'
- runner_cmds = ['cmd1', 'cmd2']
- capture_output = StringIO()
- sys.stdout = capture_output
- # Previous data is empty. Should not enter strtobool.
- # If entered, exception will be raised cause test fail.
- mock_json_load_data.return_value = {}
- atest_utils.handle_test_runner_cmd(input_cmd,
- runner_cmds,
- do_verification=False,
- result_path=tmp_file.name)
- sys.stdout = sys.__stdout__
- self.assertEqual(capture_output.getvalue().find(former_cmd_str), -1)
- # Previous data is the same as the new input. Should not enter strtobool.
- # If entered, exception will be raised cause test fail
- capture_output = StringIO()
- sys.stdout = capture_output
- mock_json_load_data.return_value = {input_cmd:runner_cmds}
- atest_utils.handle_test_runner_cmd(input_cmd,
- runner_cmds,
- do_verification=False,
- result_path=tmp_file.name)
- sys.stdout = sys.__stdout__
- self.assertEqual(capture_output.getvalue().find(former_cmd_str), -1)
- self.assertEqual(capture_output.getvalue().find(write_result_str), -1)
- # Previous data has different cmds. Should enter strtobool not update,
- # should not find write_result_str.
- prev_cmds = ['cmd1']
- mock_raw_input.return_value = 'n'
- capture_output = StringIO()
- sys.stdout = capture_output
- mock_json_load_data.return_value = {input_cmd:prev_cmds}
- atest_utils.handle_test_runner_cmd(input_cmd,
- runner_cmds,
- do_verification=False,
- result_path=tmp_file.name)
- sys.stdout = sys.__stdout__
- self.assertEqual(capture_output.getvalue().find(write_result_str), -1)
-
- @mock.patch('json.load')
- def test_verify_test_runner_cmd(self, mock_json_load_data):
- """Test method handle_test_runner_cmd without enable update_result."""
- tmp_file = tempfile.NamedTemporaryFile()
- input_cmd = 'atest_args'
- runner_cmds = ['cmd1', 'cmd2']
- # Previous data is the same as the new input. Should not raise exception.
- mock_json_load_data.return_value = {input_cmd:runner_cmds}
- atest_utils.handle_test_runner_cmd(input_cmd,
- runner_cmds,
- do_verification=True,
- result_path=tmp_file.name)
- # Previous data has different cmds. Should enter strtobool and hit
- # exception.
- prev_cmds = ['cmd1']
- mock_json_load_data.return_value = {input_cmd:prev_cmds}
- self.assertRaises(atest_error.DryRunVerificationError,
- atest_utils.handle_test_runner_cmd,
- input_cmd,
- runner_cmds,
- do_verification=True,
- result_path=tmp_file.name)
-
- def test_get_test_info_cache_path(self):
- """Test method get_test_info_cache_path."""
- input_file_name = 'mytest_name'
- cache_root = '/a/b/c'
- expect_hashed_name = ('%s.cache' % hashlib.md5(str(input_file_name).
- encode()).hexdigest())
- self.assertEqual(os.path.join(cache_root, expect_hashed_name),
- atest_utils.get_test_info_cache_path(input_file_name,
- cache_root))
-
- def test_get_and_load_cache(self):
- """Test method update_test_info_cache and load_test_info_cache."""
- test_reference = 'myTestRefA'
- test_cache_dir = tempfile.mkdtemp()
- atest_utils.update_test_info_cache(test_reference, [TEST_INFO_A],
- test_cache_dir)
- unittest_utils.assert_equal_testinfo_sets(
- self, set([TEST_INFO_A]),
- atest_utils.load_test_info_cache(test_reference, test_cache_dir))
-
- @mock.patch('os.getcwd')
- def test_get_build_cmd(self, mock_cwd):
- """Test method get_build_cmd."""
- build_top = '/home/a/b/c'
- rel_path = 'd/e'
- mock_cwd.return_value = os.path.join(build_top, rel_path)
- os_environ_mock = {constants.ANDROID_BUILD_TOP: build_top}
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- expected_cmd = ['../../build/soong/soong_ui.bash', '--make-mode']
- self.assertEqual(expected_cmd, atest_utils.get_build_cmd())
-
- @mock.patch('subprocess.check_output')
- def test_get_modified_files(self, mock_co):
- """Test method get_modified_files"""
- mock_co.side_effect = ['/a/b/',
- '\n',
- 'test_fp1.java\nc/test_fp2.java']
- self.assertEqual({'/a/b/test_fp1.java', '/a/b/c/test_fp2.java'},
- atest_utils.get_modified_files(''))
- mock_co.side_effect = ['/a/b/',
- 'test_fp4',
- '/test_fp3.java']
- self.assertEqual({'/a/b/test_fp4', '/a/b/test_fp3.java'},
- atest_utils.get_modified_files(''))
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/atest-py2/bug_detector.py b/atest-py2/bug_detector.py
deleted file mode 100644
index 25438d2..0000000
--- a/atest-py2/bug_detector.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# Copyright 2019, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Classes for bug events history
-"""
-
-import datetime
-import logging
-import json
-import os
-
-import constants
-
-from metrics import metrics_utils
-
-_META_FILE = os.path.join(os.path.expanduser('~'),
- '.config', 'asuite', 'atest_history.json')
-_DETECT_OPTION_FILTER = ['-v', '--verbose']
-_DETECTED_SUCCESS = 1
-_DETECTED_FAIL = 0
-# constants of history key
-_LATEST_EXIT_CODE = 'latest_exit_code'
-_UPDATED_AT = 'updated_at'
-
-class BugDetector(object):
- """Class for handling if a bug is detected by comparing test history."""
-
- def __init__(self, argv, exit_code, history_file=None):
- """BugDetector constructor
-
- Args:
- argv: A list of arguments.
- exit_code: An integer of exit code.
- history_file: A string of a given history file path.
- """
- self.detect_key = self.get_detect_key(argv)
- self.exit_code = exit_code
- self.file = history_file if history_file else _META_FILE
- self.history = self.get_history()
- self.caught_result = self.detect_bug_caught()
- self.update_history()
-
- def get_detect_key(self, argv):
- """Get the key for history searching.
-
- 1. remove '-v' in argv to argv_no_verbose
- 2. sort the argv_no_verbose
-
- Args:
- argv: A list of arguments.
-
- Returns:
- A string of ordered command line.
- """
- argv_without_option = [x for x in argv if x not in _DETECT_OPTION_FILTER]
- argv_without_option.sort()
- return ' '.join(argv_without_option)
-
- def get_history(self):
- """Get a history object from a history file.
-
- e.g.
- {
- "SystemUITests:.ScrimControllerTest":{
- "latest_exit_code": 5, "updated_at": "2019-01-26T15:33:08.305026"},
- "--host hello_world_test ":{
- "latest_exit_code": 0, "updated_at": "2019-02-26T15:33:08.305026"},
- }
-
- Returns:
- An object of loading from a history.
- """
- history = {}
- if os.path.exists(self.file):
- with open(self.file) as json_file:
- try:
- history = json.load(json_file)
- except ValueError as e:
- logging.debug(e)
- metrics_utils.handle_exc_and_send_exit_event(
- constants.ACCESS_HISTORY_FAILURE)
- return history
-
- def detect_bug_caught(self):
- """Detection of catching bugs.
-
- When latest_exit_code and current exit_code are different, treat it
- as a bug caught.
-
- Returns:
- A integer of detecting result, e.g.
- 1: success
- 0: fail
- """
- if not self.history:
- return _DETECTED_FAIL
- latest = self.history.get(self.detect_key, {})
- if latest.get(_LATEST_EXIT_CODE, self.exit_code) == self.exit_code:
- return _DETECTED_FAIL
- return _DETECTED_SUCCESS
-
- def update_history(self):
- """Update the history file.
-
- 1. update latest_bug result to history cache.
- 2. trim history cache to size from oldest updated time.
- 3. write to the file.
- """
- latest_bug = {
- self.detect_key: {
- _LATEST_EXIT_CODE: self.exit_code,
- _UPDATED_AT: datetime.datetime.now().isoformat()
- }
- }
- self.history.update(latest_bug)
- num_history = len(self.history)
- if num_history > constants.UPPER_LIMIT:
- sorted_history = sorted(self.history.items(),
- key=lambda kv: kv[1][_UPDATED_AT])
- self.history = dict(
- sorted_history[(num_history - constants.TRIM_TO_SIZE):])
- with open(self.file, 'w') as outfile:
- try:
- json.dump(self.history, outfile, indent=0)
- except ValueError as e:
- logging.debug(e)
- metrics_utils.handle_exc_and_send_exit_event(
- constants.ACCESS_HISTORY_FAILURE)
diff --git a/atest-py2/bug_detector_unittest.py b/atest-py2/bug_detector_unittest.py
deleted file mode 100644
index a9356fc..0000000
--- a/atest-py2/bug_detector_unittest.py
+++ /dev/null
@@ -1,137 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2019, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittests for bug_detector."""
-
-import datetime
-import json
-import os
-import unittest
-import mock
-
-import bug_detector
-import constants
-import unittest_constants as uc
-
-TEST_DICT = {
- 'test1': {
- 'latest_exit_code': 5,
- 'updated_at': ''
- },
- 'test2': {
- 'latest_exit_code': 0,
- 'updated_at': ''
- }
-}
-
-class BugDetectorUnittest(unittest.TestCase):
- """Unit test for bug_detector.py"""
-
- def setUp(self):
- """Set up stuff for testing."""
- self.history_file = os.path.join(uc.TEST_DATA_DIR, 'bug_detector.json')
- self.detector = bug_detector.BugDetector(['test1'], 5, self.history_file)
- self._reset_history_file()
- self.history_file2 = os.path.join(uc.TEST_DATA_DIR, 'bug_detector2.json')
-
- def tearDown(self):
- """Run after execution of every test"""
- if os.path.isfile(self.history_file):
- os.remove(self.history_file)
- if os.path.isfile(self.history_file2):
- os.remove(self.history_file2)
-
- def _reset_history_file(self):
- """Reset test history file."""
- with open(self.history_file, 'w') as outfile:
- json.dump(TEST_DICT, outfile)
-
- def _make_test_file(self, file_size):
- temp_history = {}
- for i in range(file_size):
- latest_bug = {
- i: {
- 'latest_exit_code': i,
- 'updated_at': datetime.datetime.now().isoformat()
- }
- }
- temp_history.update(latest_bug)
- with open(self.history_file2, 'w') as outfile:
- json.dump(temp_history, outfile, indent=0)
-
- @mock.patch.object(bug_detector.BugDetector, 'update_history')
- def test_get_detect_key(self, _):
- """Test get_detect_key."""
- # argv without -v
- argv = ['test2', 'test1']
- want_key = 'test1 test2'
- dtr = bug_detector.BugDetector(argv, 0)
- self.assertEqual(dtr.get_detect_key(argv), want_key)
-
- # argv with -v
- argv = ['-v', 'test2', 'test1']
- want_key = 'test1 test2'
- dtr = bug_detector.BugDetector(argv, 0)
- self.assertEqual(dtr.get_detect_key(argv), want_key)
-
- # argv with --verbose
- argv = ['--verbose', 'test2', 'test3', 'test1']
- want_key = 'test1 test2 test3'
- dtr = bug_detector.BugDetector(argv, 0)
- self.assertEqual(dtr.get_detect_key(argv), want_key)
-
- def test_get_history(self):
- """Test get_history."""
- self.assertEqual(self.detector.get_history(), TEST_DICT)
-
- @mock.patch.object(bug_detector.BugDetector, 'update_history')
- def test_detect_bug_caught(self, _):
- """Test detect_bug_caught."""
- self._reset_history_file()
- dtr = bug_detector.BugDetector(['test1'], 0, self.history_file)
- success = 1
- self.assertEqual(dtr.detect_bug_caught(), success)
-
- def test_update_history(self):
- """Test update_history."""
- constants.UPPER_LIMIT = 10
- constants.TRIM_TO_SIZE = 3
-
- mock_file_size = 0
- self._make_test_file(mock_file_size)
- dtr = bug_detector.BugDetector(['test1'], 0, self.history_file2)
- self.assertTrue(dtr.history.has_key('test1'))
-
- # History is larger than constants.UPPER_LIMIT. Trim to size.
- mock_file_size = 10
- self._make_test_file(mock_file_size)
- dtr = bug_detector.BugDetector(['test1'], 0, self.history_file2)
- self.assertEqual(len(dtr.history), constants.TRIM_TO_SIZE)
- keys = ['test1', '9', '8']
- for key in keys:
- self.assertTrue(dtr.history.has_key(key))
-
- # History is not larger than constants.UPPER_LIMIT.
- mock_file_size = 5
- self._make_test_file(mock_file_size)
- dtr = bug_detector.BugDetector(['test1'], 0, self.history_file2)
- self.assertEqual(len(dtr.history), mock_file_size+1)
- keys = ['test1', '4', '3', '2', '1', '0']
- for key in keys:
- self.assertTrue(dtr.history.has_key(key))
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest-py2/cli_translator.py b/atest-py2/cli_translator.py
deleted file mode 100644
index f11b34b..0000000
--- a/atest-py2/cli_translator.py
+++ /dev/null
@@ -1,516 +0,0 @@
-# Copyright 2017, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#pylint: disable=too-many-lines
-"""
-Command Line Translator for atest.
-"""
-
-from __future__ import print_function
-
-import fnmatch
-import json
-import logging
-import os
-import re
-import sys
-import time
-
-import atest_error
-import atest_utils
-import constants
-import test_finder_handler
-import test_mapping
-
-from metrics import metrics
-from metrics import metrics_utils
-from test_finders import module_finder
-
-FUZZY_FINDER = 'FUZZY'
-CACHE_FINDER = 'CACHE'
-
-# Pattern used to identify comments start with '//' or '#' in TEST_MAPPING.
-_COMMENTS_RE = re.compile(r'(?m)[\s\t]*(#|//).*|(\".*?\")')
-_COMMENTS = frozenset(['//', '#'])
-
-#pylint: disable=no-self-use
-class CLITranslator(object):
- """
- CLITranslator class contains public method translate() and some private
- helper methods. The atest tool can call the translate() method with a list
- of strings, each string referencing a test to run. Translate() will
- "translate" this list of test strings into a list of build targets and a
- list of TradeFederation run commands.
-
- Translation steps for a test string reference:
- 1. Narrow down the type of reference the test string could be, i.e.
- whether it could be referencing a Module, Class, Package, etc.
- 2. Try to find the test files assuming the test string is one of these
- types of reference.
- 3. If test files found, generate Build Targets and the Run Command.
- """
-
- def __init__(self, module_info=None, print_cache_msg=True):
- """CLITranslator constructor
-
- Args:
- module_info: ModuleInfo class that has cached module-info.json.
- print_cache_msg: Boolean whether printing clear cache message or not.
- True will print message while False won't print.
- """
- self.mod_info = module_info
- self.enable_file_patterns = False
- self.msg = ''
- if print_cache_msg:
- self.msg = ('(Test info has been cached for speeding up the next '
- 'run, if test info need to be updated, please add -c '
- 'to clean the old cache.)')
-
- # pylint: disable=too-many-locals
- def _find_test_infos(self, test, tm_test_detail):
- """Return set of TestInfos based on a given test.
-
- Args:
- test: A string representing test references.
- tm_test_detail: The TestDetail of test configured in TEST_MAPPING
- files.
-
- Returns:
- Set of TestInfos based on the given test.
- """
- test_infos = set()
- test_find_starts = time.time()
- test_found = False
- test_finders = []
- test_info_str = ''
- find_test_err_msg = None
- for finder in test_finder_handler.get_find_methods_for_test(
- self.mod_info, test):
- # For tests in TEST_MAPPING, find method is only related to
- # test name, so the details can be set after test_info object
- # is created.
- try:
- found_test_infos = finder.find_method(
- finder.test_finder_instance, test)
- except atest_error.TestDiscoveryException as e:
- find_test_err_msg = e
- if found_test_infos:
- finder_info = finder.finder_info
- for test_info in found_test_infos:
- if tm_test_detail:
- test_info.data[constants.TI_MODULE_ARG] = (
- tm_test_detail.options)
- test_info.from_test_mapping = True
- test_info.host = tm_test_detail.host
- if finder_info != CACHE_FINDER:
- test_info.test_finder = finder_info
- test_infos.add(test_info)
- test_found = True
- print("Found '%s' as %s" % (
- atest_utils.colorize(test, constants.GREEN),
- finder_info))
- if finder_info == CACHE_FINDER and test_infos:
- test_finders.append(list(test_infos)[0].test_finder)
- test_finders.append(finder_info)
- test_info_str = ','.join([str(x) for x in found_test_infos])
- break
- if not test_found:
- f_results = self._fuzzy_search_and_msg(test, find_test_err_msg)
- if f_results:
- test_infos.update(f_results)
- test_found = True
- test_finders.append(FUZZY_FINDER)
- metrics.FindTestFinishEvent(
- duration=metrics_utils.convert_duration(
- time.time() - test_find_starts),
- success=test_found,
- test_reference=test,
- test_finders=test_finders,
- test_info=test_info_str)
- # Cache test_infos by default except running with TEST_MAPPING which may
- # include customized flags and they are likely to mess up other
- # non-test_mapping tests.
- if test_infos and not tm_test_detail:
- atest_utils.update_test_info_cache(test, test_infos)
- print(self.msg)
- return test_infos
-
- def _fuzzy_search_and_msg(self, test, find_test_err_msg):
- """ Fuzzy search and print message.
-
- Args:
- test: A string representing test references
- find_test_err_msg: A string of find test error message.
-
- Returns:
- A list of TestInfos if found, otherwise None.
- """
- print('No test found for: %s' %
- atest_utils.colorize(test, constants.RED))
- # Currently we focus on guessing module names. Append names on
- # results if more finders support fuzzy searching.
- mod_finder = module_finder.ModuleFinder(self.mod_info)
- results = mod_finder.get_fuzzy_searching_results(test)
- if len(results) == 1 and self._confirm_running(results):
- found_test_infos = mod_finder.find_test_by_module_name(results[0])
- # found_test_infos is a list with at most 1 element.
- if found_test_infos:
- return found_test_infos
- elif len(results) > 1:
- self._print_fuzzy_searching_results(results)
- else:
- print('No matching result for {0}.'.format(test))
- if find_test_err_msg:
- print('%s\n' % (atest_utils.colorize(
- find_test_err_msg, constants.MAGENTA)))
- else:
- print('(This can happen after a repo sync or if the test'
- ' is new. Running: with "%s" may resolve the issue.)'
- '\n' % (atest_utils.colorize(
- constants.REBUILD_MODULE_INFO_FLAG,
- constants.RED)))
- return None
-
- def _get_test_infos(self, tests, test_mapping_test_details=None):
- """Return set of TestInfos based on passed in tests.
-
- Args:
- tests: List of strings representing test references.
- test_mapping_test_details: List of TestDetail for tests configured
- in TEST_MAPPING files.
-
- Returns:
- Set of TestInfos based on the passed in tests.
- """
- test_infos = set()
- if not test_mapping_test_details:
- test_mapping_test_details = [None] * len(tests)
- for test, tm_test_detail in zip(tests, test_mapping_test_details):
- found_test_infos = self._find_test_infos(test, tm_test_detail)
- test_infos.update(found_test_infos)
- return test_infos
-
- def _confirm_running(self, results):
- """Listen to an answer from raw input.
-
- Args:
- results: A list of results.
-
- Returns:
- True is the answer is affirmative.
- """
- decision = raw_input('Did you mean {0}? [Y/n] '.format(
- atest_utils.colorize(results[0], constants.GREEN)))
- return decision in constants.AFFIRMATIVES
-
- def _print_fuzzy_searching_results(self, results):
- """Print modules when fuzzy searching gives multiple results.
-
- If the result is lengthy, just print the first 10 items only since we
- have already given enough-accurate result.
-
- Args:
- results: A list of guessed testable module names.
-
- """
- atest_utils.colorful_print('Did you mean the following modules?',
- constants.WHITE)
- for mod in results[:10]:
- atest_utils.colorful_print(mod, constants.GREEN)
-
- def filter_comments(self, test_mapping_file):
- """Remove comments in TEST_MAPPING file to valid format. Only '//' and
- '#' are regarded as comments.
-
- Args:
- test_mapping_file: Path to a TEST_MAPPING file.
-
- Returns:
- Valid json string without comments.
- """
- def _replace(match):
- """Replace comments if found matching the defined regular expression.
-
- Args:
- match: The matched regex pattern
-
- Returns:
- "" if it matches _COMMENTS, otherwise original string.
- """
- line = match.group(0).strip()
- return "" if any(map(line.startswith, _COMMENTS)) else line
- with open(test_mapping_file) as json_file:
- return re.sub(_COMMENTS_RE, _replace, json_file.read())
-
- def _read_tests_in_test_mapping(self, test_mapping_file):
- """Read tests from a TEST_MAPPING file.
-
- Args:
- test_mapping_file: Path to a TEST_MAPPING file.
-
- Returns:
- A tuple of (all_tests, imports), where
- all_tests is a dictionary of all tests in the TEST_MAPPING file,
- grouped by test group.
- imports is a list of test_mapping.Import to include other test
- mapping files.
- """
- all_tests = {}
- imports = []
- test_mapping_dict = json.loads(self.filter_comments(test_mapping_file))
- for test_group_name, test_list in test_mapping_dict.items():
- if test_group_name == constants.TEST_MAPPING_IMPORTS:
- for import_detail in test_list:
- imports.append(
- test_mapping.Import(test_mapping_file, import_detail))
- else:
- grouped_tests = all_tests.setdefault(test_group_name, set())
- tests = []
- for test in test_list:
- if (self.enable_file_patterns and
- not test_mapping.is_match_file_patterns(
- test_mapping_file, test)):
- continue
- test_mod_info = self.mod_info.name_to_module_info.get(
- test['name'])
- if not test_mod_info:
- print('WARNING: %s is not a valid build target and '
- 'may not be discoverable by TreeHugger. If you '
- 'want to specify a class or test-package, '
- 'please set \'name\' to the test module and use '
- '\'options\' to specify the right tests via '
- '\'include-filter\'.\nNote: this can also occur '
- 'if the test module is not built for your '
- 'current lunch target.\n' %
- atest_utils.colorize(test['name'], constants.RED))
- elif not any(x in test_mod_info['compatibility_suites'] for
- x in constants.TEST_MAPPING_SUITES):
- print('WARNING: Please add %s to either suite: %s for '
- 'this TEST_MAPPING file to work with TreeHugger.' %
- (atest_utils.colorize(test['name'],
- constants.RED),
- atest_utils.colorize(constants.TEST_MAPPING_SUITES,
- constants.GREEN)))
- tests.append(test_mapping.TestDetail(test))
- grouped_tests.update(tests)
- return all_tests, imports
-
- def _find_files(self, path, file_name=constants.TEST_MAPPING):
- """Find all files with given name under the given path.
-
- Args:
- path: A string of path in source.
-
- Returns:
- A list of paths of the files with the matching name under the given
- path.
- """
- test_mapping_files = []
- for root, _, filenames in os.walk(path):
- for filename in fnmatch.filter(filenames, file_name):
- test_mapping_files.append(os.path.join(root, filename))
- return test_mapping_files
-
- def _get_tests_from_test_mapping_files(
- self, test_group, test_mapping_files):
- """Get tests in the given test mapping files with the match group.
-
- Args:
- test_group: Group of tests to run. Default is set to `presubmit`.
- test_mapping_files: A list of path of TEST_MAPPING files.
-
- Returns:
- A tuple of (tests, all_tests, imports), where,
- tests is a set of tests (test_mapping.TestDetail) defined in
- TEST_MAPPING file of the given path, and its parent directories,
- with matching test_group.
- all_tests is a dictionary of all tests in TEST_MAPPING files,
- grouped by test group.
- imports is a list of test_mapping.Import objects that contains the
- details of where to import a TEST_MAPPING file.
- """
- all_imports = []
- # Read and merge the tests in all TEST_MAPPING files.
- merged_all_tests = {}
- for test_mapping_file in test_mapping_files:
- all_tests, imports = self._read_tests_in_test_mapping(
- test_mapping_file)
- all_imports.extend(imports)
- for test_group_name, test_list in all_tests.items():
- grouped_tests = merged_all_tests.setdefault(
- test_group_name, set())
- grouped_tests.update(test_list)
-
- tests = set(merged_all_tests.get(test_group, []))
- if test_group == constants.TEST_GROUP_ALL:
- for grouped_tests in merged_all_tests.values():
- tests.update(grouped_tests)
- return tests, merged_all_tests, all_imports
-
- # pylint: disable=too-many-arguments
- # pylint: disable=too-many-locals
- def _find_tests_by_test_mapping(
- self, path='', test_group=constants.TEST_GROUP_PRESUBMIT,
- file_name=constants.TEST_MAPPING, include_subdirs=False,
- checked_files=None):
- """Find tests defined in TEST_MAPPING in the given path.
-
- Args:
- path: A string of path in source. Default is set to '', i.e., CWD.
- test_group: Group of tests to run. Default is set to `presubmit`.
- file_name: Name of TEST_MAPPING file. Default is set to
- `TEST_MAPPING`. The argument is added for testing purpose.
- include_subdirs: True to include tests in TEST_MAPPING files in sub
- directories.
- checked_files: Paths of TEST_MAPPING files that have been checked.
-
- Returns:
- A tuple of (tests, all_tests), where,
- tests is a set of tests (test_mapping.TestDetail) defined in
- TEST_MAPPING file of the given path, and its parent directories,
- with matching test_group.
- all_tests is a dictionary of all tests in TEST_MAPPING files,
- grouped by test group.
- """
- path = os.path.realpath(path)
- test_mapping_files = set()
- all_tests = {}
- test_mapping_file = os.path.join(path, file_name)
- if os.path.exists(test_mapping_file):
- test_mapping_files.add(test_mapping_file)
- # Include all TEST_MAPPING files in sub-directories if `include_subdirs`
- # is set to True.
- if include_subdirs:
- test_mapping_files.update(self._find_files(path, file_name))
- # Include all possible TEST_MAPPING files in parent directories.
- root_dir = os.environ.get(constants.ANDROID_BUILD_TOP, os.sep)
- while path != root_dir and path != os.sep:
- path = os.path.dirname(path)
- test_mapping_file = os.path.join(path, file_name)
- if os.path.exists(test_mapping_file):
- test_mapping_files.add(test_mapping_file)
-
- if checked_files is None:
- checked_files = set()
- test_mapping_files.difference_update(checked_files)
- checked_files.update(test_mapping_files)
- if not test_mapping_files:
- return test_mapping_files, all_tests
-
- tests, all_tests, imports = self._get_tests_from_test_mapping_files(
- test_group, test_mapping_files)
-
- # Load TEST_MAPPING files from imports recursively.
- if imports:
- for import_detail in imports:
- path = import_detail.get_path()
- # (b/110166535 #19) Import path might not exist if a project is
- # located in different directory in different branches.
- if path is None:
- logging.warn(
- 'Failed to import TEST_MAPPING at %s', import_detail)
- continue
- # Search for tests based on the imported search path.
- import_tests, import_all_tests = (
- self._find_tests_by_test_mapping(
- path, test_group, file_name, include_subdirs,
- checked_files))
- # Merge the collections
- tests.update(import_tests)
- for group, grouped_tests in import_all_tests.items():
- all_tests.setdefault(group, set()).update(grouped_tests)
-
- return tests, all_tests
-
- def _gather_build_targets(self, test_infos):
- targets = set()
- for test_info in test_infos:
- targets |= test_info.build_targets
- return targets
-
- def _get_test_mapping_tests(self, args):
- """Find the tests in TEST_MAPPING files.
-
- Args:
- args: arg parsed object.
-
- Returns:
- A tuple of (test_names, test_details_list), where
- test_names: a list of test name
- test_details_list: a list of test_mapping.TestDetail objects for
- the tests in TEST_MAPPING files with matching test group.
- """
- # Pull out tests from test mapping
- src_path = ''
- test_group = constants.TEST_GROUP_PRESUBMIT
- if args.tests:
- if ':' in args.tests[0]:
- src_path, test_group = args.tests[0].split(':')
- else:
- src_path = args.tests[0]
-
- test_details, all_test_details = self._find_tests_by_test_mapping(
- path=src_path, test_group=test_group,
- include_subdirs=args.include_subdirs, checked_files=set())
- test_details_list = list(test_details)
- if not test_details_list:
- logging.warn(
- 'No tests of group `%s` found in TEST_MAPPING at %s or its '
- 'parent directories.\nYou might be missing atest arguments,'
- ' try `atest --help` for more information',
- test_group, os.path.realpath(''))
- if all_test_details:
- tests = ''
- for test_group, test_list in all_test_details.items():
- tests += '%s:\n' % test_group
- for test_detail in sorted(test_list):
- tests += '\t%s\n' % test_detail
- logging.warn(
- 'All available tests in TEST_MAPPING files are:\n%s',
- tests)
- metrics_utils.send_exit_event(constants.EXIT_CODE_TEST_NOT_FOUND)
- sys.exit(constants.EXIT_CODE_TEST_NOT_FOUND)
-
- logging.debug(
- 'Test details:\n%s',
- '\n'.join([str(detail) for detail in test_details_list]))
- test_names = [detail.name for detail in test_details_list]
- return test_names, test_details_list
-
-
- def translate(self, args):
- """Translate atest command line into build targets and run commands.
-
- Args:
- args: arg parsed object.
-
- Returns:
- A tuple with set of build_target strings and list of TestInfos.
- """
- tests = args.tests
- # Test details from TEST_MAPPING files
- test_details_list = None
- if atest_utils.is_test_mapping(args):
- if args.enable_file_patterns:
- self.enable_file_patterns = True
- tests, test_details_list = self._get_test_mapping_tests(args)
- atest_utils.colorful_print("\nFinding Tests...", constants.CYAN)
- logging.debug('Finding Tests: %s', tests)
- start = time.time()
- test_infos = self._get_test_infos(tests, test_details_list)
- logging.debug('Found tests in %ss', time.time() - start)
- for test_info in test_infos:
- logging.debug('%s\n', test_info)
- build_targets = self._gather_build_targets(test_infos)
- return build_targets, test_infos
diff --git a/atest-py2/cli_translator_unittest.py b/atest-py2/cli_translator_unittest.py
deleted file mode 100755
index 0b39be2..0000000
--- a/atest-py2/cli_translator_unittest.py
+++ /dev/null
@@ -1,375 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2017, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittests for cli_translator."""
-
-import unittest
-import json
-import os
-import re
-import sys
-import mock
-
-import cli_translator as cli_t
-import constants
-import test_finder_handler
-import test_mapping
-import unittest_constants as uc
-import unittest_utils
-from metrics import metrics
-from test_finders import module_finder
-from test_finders import test_finder_base
-
-# Import StringIO in Python2/3 compatible way.
-if sys.version_info[0] == 2:
- from StringIO import StringIO
-else:
- from io import StringIO
-
-# TEST_MAPPING related consts
-TEST_MAPPING_TOP_DIR = os.path.join(uc.TEST_DATA_DIR, 'test_mapping')
-TEST_MAPPING_DIR = os.path.join(TEST_MAPPING_TOP_DIR, 'folder1')
-TEST_1 = test_mapping.TestDetail({'name': 'test1', 'host': True})
-TEST_2 = test_mapping.TestDetail({'name': 'test2'})
-TEST_3 = test_mapping.TestDetail({'name': 'test3'})
-TEST_4 = test_mapping.TestDetail({'name': 'test4'})
-TEST_5 = test_mapping.TestDetail({'name': 'test5'})
-TEST_6 = test_mapping.TestDetail({'name': 'test6'})
-TEST_7 = test_mapping.TestDetail({'name': 'test7'})
-TEST_8 = test_mapping.TestDetail({'name': 'test8'})
-TEST_9 = test_mapping.TestDetail({'name': 'test9'})
-TEST_10 = test_mapping.TestDetail({'name': 'test10'})
-
-SEARCH_DIR_RE = re.compile(r'^find ([^ ]*).*$')
-
-
-#pylint: disable=unused-argument
-def gettestinfos_side_effect(test_names, test_mapping_test_details=None):
- """Mock return values for _get_test_info."""
- test_infos = set()
- for test_name in test_names:
- if test_name == uc.MODULE_NAME:
- test_infos.add(uc.MODULE_INFO)
- if test_name == uc.CLASS_NAME:
- test_infos.add(uc.CLASS_INFO)
- return test_infos
-
-
-#pylint: disable=protected-access
-#pylint: disable=no-self-use
-class CLITranslatorUnittests(unittest.TestCase):
- """Unit tests for cli_t.py"""
-
- def setUp(self):
- """Run before execution of every test"""
- self.ctr = cli_t.CLITranslator()
-
- # Create a mock of args.
- self.args = mock.Mock
- self.args.tests = []
- # Test mapping related args
- self.args.test_mapping = False
- self.args.include_subdirs = False
- self.args.enable_file_patterns = False
- # Cache finder related args
- self.args.clear_cache = False
- self.ctr.mod_info = mock.Mock
- self.ctr.mod_info.name_to_module_info = {}
-
- def tearDown(self):
- """Run after execution of every test"""
- reload(uc)
-
- @mock.patch('__builtin__.raw_input', return_value='n')
- @mock.patch.object(module_finder.ModuleFinder, 'find_test_by_module_name')
- @mock.patch.object(module_finder.ModuleFinder, 'get_fuzzy_searching_results')
- @mock.patch.object(metrics, 'FindTestFinishEvent')
- @mock.patch.object(test_finder_handler, 'get_find_methods_for_test')
- # pylint: disable=too-many-locals
- def test_get_test_infos(self, mock_getfindmethods, _metrics, mock_getfuzzyresults,
- mock_findtestbymodule, mock_raw_input):
- """Test _get_test_infos method."""
- ctr = cli_t.CLITranslator()
- find_method_return_module_info = lambda x, y: uc.MODULE_INFOS
- # pylint: disable=invalid-name
- find_method_return_module_class_info = (lambda x, test: uc.MODULE_INFOS
- if test == uc.MODULE_NAME
- else uc.CLASS_INFOS)
- find_method_return_nothing = lambda x, y: None
- one_test = [uc.MODULE_NAME]
- mult_test = [uc.MODULE_NAME, uc.CLASS_NAME]
-
- # Let's make sure we return what we expect.
- expected_test_infos = {uc.MODULE_INFO}
- mock_getfindmethods.return_value = [
- test_finder_base.Finder(None, find_method_return_module_info, None)]
- unittest_utils.assert_strict_equal(
- self, ctr._get_test_infos(one_test), expected_test_infos)
-
- # Check we receive multiple test infos.
- expected_test_infos = {uc.MODULE_INFO, uc.CLASS_INFO}
- mock_getfindmethods.return_value = [
- test_finder_base.Finder(None, find_method_return_module_class_info,
- None)]
- unittest_utils.assert_strict_equal(
- self, ctr._get_test_infos(mult_test), expected_test_infos)
-
- # Check return null set when we have no tests found or multiple results.
- mock_getfindmethods.return_value = [
- test_finder_base.Finder(None, find_method_return_nothing, None)]
- null_test_info = set()
- mock_getfuzzyresults.return_value = []
- self.assertEqual(null_test_info, ctr._get_test_infos(one_test))
- self.assertEqual(null_test_info, ctr._get_test_infos(mult_test))
-
- # Check returning test_info when the user says Yes.
- mock_raw_input.return_value = "Y"
- mock_getfindmethods.return_value = [
- test_finder_base.Finder(None, find_method_return_module_info, None)]
- mock_getfuzzyresults.return_value = one_test
- mock_findtestbymodule.return_value = uc.MODULE_INFO
- unittest_utils.assert_strict_equal(
- self, ctr._get_test_infos([uc.TYPO_MODULE_NAME]), {uc.MODULE_INFO})
-
- # Check the method works for test mapping.
- test_detail1 = test_mapping.TestDetail(uc.TEST_MAPPING_TEST)
- test_detail2 = test_mapping.TestDetail(uc.TEST_MAPPING_TEST_WITH_OPTION)
- expected_test_infos = {uc.MODULE_INFO, uc.CLASS_INFO}
- mock_getfindmethods.return_value = [
- test_finder_base.Finder(None, find_method_return_module_class_info,
- None)]
- test_infos = ctr._get_test_infos(
- mult_test, [test_detail1, test_detail2])
- unittest_utils.assert_strict_equal(
- self, test_infos, expected_test_infos)
- for test_info in test_infos:
- if test_info == uc.MODULE_INFO:
- self.assertEqual(
- test_detail1.options,
- test_info.data[constants.TI_MODULE_ARG])
- else:
- self.assertEqual(
- test_detail2.options,
- test_info.data[constants.TI_MODULE_ARG])
-
- @mock.patch.object(metrics, 'FindTestFinishEvent')
- @mock.patch.object(test_finder_handler, 'get_find_methods_for_test')
- def test_get_test_infos_2(self, mock_getfindmethods, _metrics):
- """Test _get_test_infos method."""
- ctr = cli_t.CLITranslator()
- find_method_return_module_info2 = lambda x, y: uc.MODULE_INFOS2
- find_method_ret_mod_cls_info2 = (
- lambda x, test: uc.MODULE_INFOS2
- if test == uc.MODULE_NAME else uc.CLASS_INFOS2)
- one_test = [uc.MODULE_NAME]
- mult_test = [uc.MODULE_NAME, uc.CLASS_NAME]
- # Let's make sure we return what we expect.
- expected_test_infos = {uc.MODULE_INFO, uc.MODULE_INFO2}
- mock_getfindmethods.return_value = [
- test_finder_base.Finder(None, find_method_return_module_info2,
- None)]
- unittest_utils.assert_strict_equal(
- self, ctr._get_test_infos(one_test), expected_test_infos)
- # Check we receive multiple test infos.
- expected_test_infos = {uc.MODULE_INFO, uc.CLASS_INFO, uc.MODULE_INFO2,
- uc.CLASS_INFO2}
- mock_getfindmethods.return_value = [
- test_finder_base.Finder(None, find_method_ret_mod_cls_info2,
- None)]
- unittest_utils.assert_strict_equal(
- self, ctr._get_test_infos(mult_test), expected_test_infos)
- # Check the method works for test mapping.
- test_detail1 = test_mapping.TestDetail(uc.TEST_MAPPING_TEST)
- test_detail2 = test_mapping.TestDetail(uc.TEST_MAPPING_TEST_WITH_OPTION)
- expected_test_infos = {uc.MODULE_INFO, uc.CLASS_INFO, uc.MODULE_INFO2,
- uc.CLASS_INFO2}
- mock_getfindmethods.return_value = [
- test_finder_base.Finder(None, find_method_ret_mod_cls_info2,
- None)]
- test_infos = ctr._get_test_infos(
- mult_test, [test_detail1, test_detail2])
- unittest_utils.assert_strict_equal(
- self, test_infos, expected_test_infos)
- for test_info in test_infos:
- if test_info in [uc.MODULE_INFO, uc.MODULE_INFO2]:
- self.assertEqual(
- test_detail1.options,
- test_info.data[constants.TI_MODULE_ARG])
- elif test_info in [uc.CLASS_INFO, uc.CLASS_INFO2]:
- self.assertEqual(
- test_detail2.options,
- test_info.data[constants.TI_MODULE_ARG])
-
- @mock.patch.object(cli_t.CLITranslator, '_get_test_infos',
- side_effect=gettestinfos_side_effect)
- def test_translate_class(self, _info):
- """Test translate method for tests by class name."""
- # Check that we can find a class.
- self.args.tests = [uc.CLASS_NAME]
- targets, test_infos = self.ctr.translate(self.args)
- unittest_utils.assert_strict_equal(
- self, targets, uc.CLASS_BUILD_TARGETS)
- unittest_utils.assert_strict_equal(self, test_infos, {uc.CLASS_INFO})
-
- @mock.patch.object(cli_t.CLITranslator, '_get_test_infos',
- side_effect=gettestinfos_side_effect)
- def test_translate_module(self, _info):
- """Test translate method for tests by module or class name."""
- # Check that we get all the build targets we expect.
- self.args.tests = [uc.MODULE_NAME, uc.CLASS_NAME]
- targets, test_infos = self.ctr.translate(self.args)
- unittest_utils.assert_strict_equal(
- self, targets, uc.MODULE_CLASS_COMBINED_BUILD_TARGETS)
- unittest_utils.assert_strict_equal(self, test_infos, {uc.MODULE_INFO,
- uc.CLASS_INFO})
-
- @mock.patch.object(cli_t.CLITranslator, '_find_tests_by_test_mapping')
- @mock.patch.object(cli_t.CLITranslator, '_get_test_infos',
- side_effect=gettestinfos_side_effect)
- def test_translate_test_mapping(self, _info, mock_testmapping):
- """Test translate method for tests in test mapping."""
- # Check that test mappings feeds into get_test_info properly.
- test_detail1 = test_mapping.TestDetail(uc.TEST_MAPPING_TEST)
- test_detail2 = test_mapping.TestDetail(uc.TEST_MAPPING_TEST_WITH_OPTION)
- mock_testmapping.return_value = ([test_detail1, test_detail2], None)
- self.args.tests = []
- targets, test_infos = self.ctr.translate(self.args)
- unittest_utils.assert_strict_equal(
- self, targets, uc.MODULE_CLASS_COMBINED_BUILD_TARGETS)
- unittest_utils.assert_strict_equal(self, test_infos, {uc.MODULE_INFO,
- uc.CLASS_INFO})
-
- @mock.patch.object(cli_t.CLITranslator, '_find_tests_by_test_mapping')
- @mock.patch.object(cli_t.CLITranslator, '_get_test_infos',
- side_effect=gettestinfos_side_effect)
- def test_translate_test_mapping_all(self, _info, mock_testmapping):
- """Test translate method for tests in test mapping."""
- # Check that test mappings feeds into get_test_info properly.
- test_detail1 = test_mapping.TestDetail(uc.TEST_MAPPING_TEST)
- test_detail2 = test_mapping.TestDetail(uc.TEST_MAPPING_TEST_WITH_OPTION)
- mock_testmapping.return_value = ([test_detail1, test_detail2], None)
- self.args.tests = ['src_path:all']
- self.args.test_mapping = True
- targets, test_infos = self.ctr.translate(self.args)
- unittest_utils.assert_strict_equal(
- self, targets, uc.MODULE_CLASS_COMBINED_BUILD_TARGETS)
- unittest_utils.assert_strict_equal(self, test_infos, {uc.MODULE_INFO,
- uc.CLASS_INFO})
-
- def test_find_tests_by_test_mapping_presubmit(self):
- """Test _find_tests_by_test_mapping method to locate presubmit tests."""
- os_environ_mock = {constants.ANDROID_BUILD_TOP: uc.TEST_DATA_DIR}
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- tests, all_tests = self.ctr._find_tests_by_test_mapping(
- path=TEST_MAPPING_DIR, file_name='test_mapping_sample',
- checked_files=set())
- expected = set([TEST_1, TEST_2, TEST_5, TEST_7, TEST_9])
- expected_all_tests = {'presubmit': expected,
- 'postsubmit': set(
- [TEST_3, TEST_6, TEST_8, TEST_10]),
- 'other_group': set([TEST_4])}
- self.assertEqual(expected, tests)
- self.assertEqual(expected_all_tests, all_tests)
-
- def test_find_tests_by_test_mapping_postsubmit(self):
- """Test _find_tests_by_test_mapping method to locate postsubmit tests.
- """
- os_environ_mock = {constants.ANDROID_BUILD_TOP: uc.TEST_DATA_DIR}
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- tests, all_tests = self.ctr._find_tests_by_test_mapping(
- path=TEST_MAPPING_DIR,
- test_group=constants.TEST_GROUP_POSTSUBMIT,
- file_name='test_mapping_sample', checked_files=set())
- expected_presubmit = set([TEST_1, TEST_2, TEST_5, TEST_7, TEST_9])
- expected = set([TEST_3, TEST_6, TEST_8, TEST_10])
- expected_all_tests = {'presubmit': expected_presubmit,
- 'postsubmit': set(
- [TEST_3, TEST_6, TEST_8, TEST_10]),
- 'other_group': set([TEST_4])}
- self.assertEqual(expected, tests)
- self.assertEqual(expected_all_tests, all_tests)
-
- def test_find_tests_by_test_mapping_all_group(self):
- """Test _find_tests_by_test_mapping method to locate postsubmit tests.
- """
- os_environ_mock = {constants.ANDROID_BUILD_TOP: uc.TEST_DATA_DIR}
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- tests, all_tests = self.ctr._find_tests_by_test_mapping(
- path=TEST_MAPPING_DIR, test_group=constants.TEST_GROUP_ALL,
- file_name='test_mapping_sample', checked_files=set())
- expected_presubmit = set([TEST_1, TEST_2, TEST_5, TEST_7, TEST_9])
- expected = set([
- TEST_1, TEST_2, TEST_3, TEST_4, TEST_5, TEST_6, TEST_7, TEST_8,
- TEST_9, TEST_10])
- expected_all_tests = {'presubmit': expected_presubmit,
- 'postsubmit': set(
- [TEST_3, TEST_6, TEST_8, TEST_10]),
- 'other_group': set([TEST_4])}
- self.assertEqual(expected, tests)
- self.assertEqual(expected_all_tests, all_tests)
-
- def test_find_tests_by_test_mapping_include_subdir(self):
- """Test _find_tests_by_test_mapping method to include sub directory."""
- os_environ_mock = {constants.ANDROID_BUILD_TOP: uc.TEST_DATA_DIR}
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- tests, all_tests = self.ctr._find_tests_by_test_mapping(
- path=TEST_MAPPING_TOP_DIR, file_name='test_mapping_sample',
- include_subdirs=True, checked_files=set())
- expected = set([TEST_1, TEST_2, TEST_5, TEST_7, TEST_9])
- expected_all_tests = {'presubmit': expected,
- 'postsubmit': set([
- TEST_3, TEST_6, TEST_8, TEST_10]),
- 'other_group': set([TEST_4])}
- self.assertEqual(expected, tests)
- self.assertEqual(expected_all_tests, all_tests)
-
- @mock.patch('__builtin__.raw_input', return_value='')
- def test_confirm_running(self, mock_raw_input):
- """Test _confirm_running method."""
- self.assertTrue(self.ctr._confirm_running([TEST_1]))
- mock_raw_input.return_value = 'N'
- self.assertFalse(self.ctr._confirm_running([TEST_2]))
-
- def test_print_fuzzy_searching_results(self):
- """Test _print_fuzzy_searching_results"""
- modules = [uc.MODULE_NAME, uc.MODULE2_NAME]
- capture_output = StringIO()
- sys.stdout = capture_output
- self.ctr._print_fuzzy_searching_results(modules)
- sys.stdout = sys.__stdout__
- output = 'Did you mean the following modules?\n{0}\n{1}\n'.format(
- uc.MODULE_NAME, uc.MODULE2_NAME)
- self.assertEqual(capture_output.getvalue(), output)
-
- def test_filter_comments(self):
- """Test filter_comments method"""
- file_with_comments = os.path.join(TEST_MAPPING_TOP_DIR,
- 'folder6',
- 'test_mapping_sample_with_comments')
- file_with_comments_golden = os.path.join(TEST_MAPPING_TOP_DIR,
- 'folder6',
- 'test_mapping_sample_golden')
- test_mapping_dict = json.loads(
- self.ctr.filter_comments(file_with_comments))
- test_mapping_dict_gloden = None
- with open(file_with_comments_golden) as json_file:
- test_mapping_dict_gloden = json.load(json_file)
-
- self.assertEqual(test_mapping_dict, test_mapping_dict_gloden)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest-py2/constants.py b/atest-py2/constants.py
deleted file mode 100644
index fad8ef5..0000000
--- a/atest-py2/constants.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2017, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Imports the various constant files that are available (default, google, etc).
-"""
-#pylint: disable=wildcard-import
-#pylint: disable=unused-wildcard-import
-
-from constants_default import *
-
-
-# Now try to import the various constant files outside this repo to overwrite
-# the globals as desired.
-try:
- from constants_google import *
-except ImportError:
- pass
diff --git a/atest-py2/constants_default.py b/atest-py2/constants_default.py
deleted file mode 100644
index adfba98..0000000
--- a/atest-py2/constants_default.py
+++ /dev/null
@@ -1,242 +0,0 @@
-# Copyright 2017, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Various globals used by atest.
-"""
-
-import os
-import re
-
-MODE = 'DEFAULT'
-
-# Result server constants for atest_utils.
-RESULT_SERVER = ''
-RESULT_SERVER_ARGS = []
-RESULT_SERVER_TIMEOUT = 5
-# Result arguments if tests are configured in TEST_MAPPING.
-TEST_MAPPING_RESULT_SERVER_ARGS = []
-
-# Google service key for gts tests.
-GTS_GOOGLE_SERVICE_ACCOUNT = ''
-
-# Arg constants.
-WAIT_FOR_DEBUGGER = 'WAIT_FOR_DEBUGGER'
-DISABLE_INSTALL = 'DISABLE_INSTALL'
-DISABLE_TEARDOWN = 'DISABLE_TEARDOWN'
-PRE_PATCH_ITERATIONS = 'PRE_PATCH_ITERATIONS'
-POST_PATCH_ITERATIONS = 'POST_PATCH_ITERATIONS'
-PRE_PATCH_FOLDER = 'PRE_PATCH_FOLDER'
-POST_PATCH_FOLDER = 'POST_PATCH_FOLDER'
-SERIAL = 'SERIAL'
-SHARDING = 'SHARDING'
-ALL_ABI = 'ALL_ABI'
-HOST = 'HOST'
-CUSTOM_ARGS = 'CUSTOM_ARGS'
-DRY_RUN = 'DRY_RUN'
-ANDROID_SERIAL = 'ANDROID_SERIAL'
-INSTANT = 'INSTANT'
-USER_TYPE = 'USER_TYPE'
-ITERATIONS = 'ITERATIONS'
-RERUN_UNTIL_FAILURE = 'RERUN_UNTIL_FAILURE'
-RETRY_ANY_FAILURE = 'RETRY_ANY_FAILURE'
-TF_DEBUG = 'TF_DEBUG'
-TF_TEMPLATE = 'TF_TEMPLATE'
-COLLECT_TESTS_ONLY = 'COLLECT_TESTS_ONLY'
-
-# Application exit codes.
-EXIT_CODE_SUCCESS = 0
-EXIT_CODE_ENV_NOT_SETUP = 1
-EXIT_CODE_BUILD_FAILURE = 2
-EXIT_CODE_ERROR = 3
-EXIT_CODE_TEST_NOT_FOUND = 4
-EXIT_CODE_TEST_FAILURE = 5
-EXIT_CODE_VERIFY_FAILURE = 6
-EXIT_CODE_OUTSIDE_ROOT = 7
-
-# Codes of specific events. These are exceptions that don't stop anything
-# but sending metrics.
-ACCESS_CACHE_FAILURE = 101
-ACCESS_HISTORY_FAILURE = 102
-IMPORT_FAILURE = 103
-MLOCATEDB_LOCKED = 104
-
-# Test finder constants.
-MODULE_CONFIG = 'AndroidTest.xml'
-MODULE_COMPATIBILITY_SUITES = 'compatibility_suites'
-MODULE_NAME = 'module_name'
-MODULE_PATH = 'path'
-MODULE_CLASS = 'class'
-MODULE_INSTALLED = 'installed'
-MODULE_CLASS_ROBOLECTRIC = 'ROBOLECTRIC'
-MODULE_CLASS_NATIVE_TESTS = 'NATIVE_TESTS'
-MODULE_CLASS_JAVA_LIBRARIES = 'JAVA_LIBRARIES'
-MODULE_TEST_CONFIG = 'test_config'
-
-# Env constants
-ANDROID_BUILD_TOP = 'ANDROID_BUILD_TOP'
-ANDROID_OUT = 'OUT'
-ANDROID_OUT_DIR = 'OUT_DIR'
-ANDROID_HOST_OUT = 'ANDROID_HOST_OUT'
-ANDROID_PRODUCT_OUT = 'ANDROID_PRODUCT_OUT'
-USER_FROM_TOOL = 'USER_FROM_TOOL'
-
-# Test Info data keys
-# Value of include-filter option.
-TI_FILTER = 'filter'
-TI_REL_CONFIG = 'rel_config'
-TI_MODULE_CLASS = 'module_class'
-# Value of module-arg option
-TI_MODULE_ARG = 'module-arg'
-
-# Google TF
-GTF_MODULE = 'google-tradefed'
-GTF_TARGET = 'google-tradefed-core'
-
-# TEST_MAPPING filename
-TEST_MAPPING = 'TEST_MAPPING'
-# Test group for tests in TEST_MAPPING
-TEST_GROUP_PRESUBMIT = 'presubmit'
-TEST_GROUP_POSTSUBMIT = 'postsubmit'
-TEST_GROUP_ALL = 'all'
-# Key in TEST_MAPPING file for a list of imported TEST_MAPPING file
-TEST_MAPPING_IMPORTS = 'imports'
-
-# TradeFed command line args
-TF_INCLUDE_FILTER_OPTION = 'include-filter'
-TF_EXCLUDE_FILTER_OPTION = 'exclude-filter'
-TF_INCLUDE_FILTER = '--include-filter'
-TF_EXCLUDE_FILTER = '--exclude-filter'
-TF_ATEST_INCLUDE_FILTER = '--atest-include-filter'
-TF_ATEST_INCLUDE_FILTER_VALUE_FMT = '{test_name}:{test_filter}'
-TF_MODULE_ARG = '--module-arg'
-TF_MODULE_ARG_VALUE_FMT = '{test_name}:{option_name}:{option_value}'
-TF_SUITE_FILTER_ARG_VALUE_FMT = '"{test_name} {option_value}"'
-TF_SKIP_LOADING_CONFIG_JAR = '--skip-loading-config-jar'
-
-# Suite Plans
-SUITE_PLANS = frozenset(['cts'])
-
-# Constants of Steps
-REBUILD_MODULE_INFO_FLAG = '--rebuild-module-info'
-BUILD_STEP = 'build'
-INSTALL_STEP = 'install'
-TEST_STEP = 'test'
-ALL_STEPS = [BUILD_STEP, INSTALL_STEP, TEST_STEP]
-
-# ANSI code shift for colorful print
-BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
-
-# Answers equivalent to YES!
-AFFIRMATIVES = ['y', 'Y', 'yes', 'Yes', 'YES', '']
-LD_RANGE = 2
-
-# Types of Levenshetine Distance Cost
-COST_TYPO = (1, 1, 1)
-COST_SEARCH = (8, 1, 5)
-
-# Value of TestInfo install_locations.
-DEVICELESS_TEST = 'host'
-DEVICE_TEST = 'device'
-BOTH_TEST = 'both'
-
-# Metrics
-METRICS_URL = 'http://asuite-218222.appspot.com/atest/metrics'
-EXTERNAL = 'EXTERNAL_RUN'
-INTERNAL = 'INTERNAL_RUN'
-INTERNAL_EMAIL = '@google.com'
-INTERNAL_HOSTNAME = ['.google.com', 'c.googlers.com']
-CONTENT_LICENSES_URL = 'https://source.android.com/setup/start/licenses'
-CONTRIBUTOR_AGREEMENT_URL = {
- 'INTERNAL': 'https://cla.developers.google.com/',
- 'EXTERNAL': 'https://opensource.google.com/docs/cla/'
-}
-PRIVACY_POLICY_URL = 'https://policies.google.com/privacy'
-TERMS_SERVICE_URL = 'https://policies.google.com/terms'
-TOOL_NAME = 'atest'
-TF_PREPARATION = 'tf-preparation'
-
-# Detect type for local_detect_event.
-# Next expansion : DETECT_TYPE_XXX = 1
-DETECT_TYPE_BUG_DETECTED = 0
-# Considering a trade-off between speed and size, we set UPPER_LIMIT to 100000
-# to make maximum file space 10M(100000(records)*100(byte/record)) at most.
-# Therefore, to update history file will spend 1 sec at most in each run.
-UPPER_LIMIT = 100000
-TRIM_TO_SIZE = 50000
-
-# VTS plans
-VTS_STAGING_PLAN = 'vts-staging-default'
-
-# TreeHugger TEST_MAPPING SUITE_PLANS
-TEST_MAPPING_SUITES = ['device-tests', 'general-tests']
-
-# VTS10 TF
-VTS_TF_MODULE = 'vts10-tradefed'
-
-# VTS TF
-VTS_CORE_TF_MODULE = 'vts-tradefed'
-
-# VTS suite set
-VTS_CORE_SUITE = 'vts'
-
-# ATest TF
-ATEST_TF_MODULE = 'atest-tradefed'
-
-# Build environment variable for each build on ATest
-# With RECORD_ALL_DEPS enabled, ${ANDROID_PRODUCT_OUT}/module-info.json will
-# generate modules' dependencies info when make.
-# With SOONG_COLLECT_JAVA_DEPS enabled, out/soong/module_bp_java_deps.json will
-# be generated when make.
-ATEST_BUILD_ENV = {'RECORD_ALL_DEPS':'true', 'SOONG_COLLECT_JAVA_DEPS':'true'}
-
-# Atest index path and relative dirs/caches.
-INDEX_DIR = os.path.join(os.getenv(ANDROID_HOST_OUT, ''), 'indexes')
-LOCATE_CACHE = os.path.join(INDEX_DIR, 'mlocate.db')
-INT_INDEX = os.path.join(INDEX_DIR, 'integration.idx')
-CLASS_INDEX = os.path.join(INDEX_DIR, 'classes.idx')
-CC_CLASS_INDEX = os.path.join(INDEX_DIR, 'cc_classes.idx')
-PACKAGE_INDEX = os.path.join(INDEX_DIR, 'packages.idx')
-QCLASS_INDEX = os.path.join(INDEX_DIR, 'fqcn.idx')
-MODULE_INDEX = os.path.join(INDEX_DIR, 'modules.idx')
-VERSION_FILE = os.path.join(os.path.dirname(__file__), 'VERSION')
-
-# Regeular Expressions
-CC_EXT_RE = re.compile(r'.*\.(cc|cpp)$')
-JAVA_EXT_RE = re.compile(r'.*\.(java|kt)$')
-# e.g. /path/to/ccfile.cc: TEST_F(test_name, method_name){
-CC_OUTPUT_RE = re.compile(r'(?P<file_path>/.*):\s*TEST(_F|_P)?[ ]*\('
- r'(?P<test_name>\w+)\s*,\s*(?P<method_name>\w+)\)'
- r'\s*\{')
-CC_GREP_RE = r'^[ ]*TEST(_P|_F)?[ ]*\([[:alnum:]].*,'
-# e.g. /path/to/Javafile.java:package com.android.settings.accessibility
-# grab the path, Javafile(class) and com.android.settings.accessibility(package)
-CLASS_OUTPUT_RE = re.compile(r'(?P<java_path>.*/(?P<class>[A-Z]\w+)\.\w+)[:].*')
-QCLASS_OUTPUT_RE = re.compile(r'(?P<java_path>.*/(?P<class>[A-Z]\w+)\.\w+)'
- r'[:]\s*package\s+(?P<package>[^(;|\s)]+)\s*')
-PACKAGE_OUTPUT_RE = re.compile(r'(?P<java_dir>/.*/).*[.](java|kt)[:]\s*package\s+'
- r'(?P<package>[^(;|\s)]+)\s*')
-
-ATEST_RESULT_ROOT = '/tmp/atest_result'
-LATEST_RESULT_FILE = os.path.join(ATEST_RESULT_ROOT, 'LATEST', 'test_result')
-
-# Tests list which need vts_kernel_tests as test dependency
-REQUIRED_KERNEL_TEST_MODULES = [
- 'vts_ltp_test_arm',
- 'vts_ltp_test_arm_64',
- 'vts_linux_kselftest_arm_32',
- 'vts_linux_kselftest_arm_64',
- 'vts_linux_kselftest_x86_32',
- 'vts_linux_kselftest_x86_64'
-]
diff --git a/atest-py2/docs/atest_structure.md b/atest-py2/docs/atest_structure.md
deleted file mode 100644
index 1ff7b90..0000000
--- a/atest-py2/docs/atest_structure.md
+++ /dev/null
@@ -1,116 +0,0 @@
-# Atest Developer Guide
-
-You're here because you'd like to contribute to atest. To start off, we'll
-explain how atest is structured and where the major pieces live and what they
-do. If you're more interested in how to use atest, go to the [README](../README.md).
-
-##### Table of Contents
-1. [Overall Structure](#overall-structure)
-2. [Major Files and Dirs](#major-files-and-dirs)
-3. [Test Finders](#test-finders)
-4. [Test Runners](#test-runners)
-5. [Constants Override](#constants-override)
-
-## <a name="overall-structure">Overall Structure</a>
-
-Atest is primarily composed of 2 components: [test finders](#test-finders) and
-[test runners](#test-runners). At a high level, atest does the following:
-1. Parse args and verify environment
-2. Find test(s) based on user input
-3. Build test dependencies
-4. Run test(s)
-
-Let's walk through an example run and highlight what happens under the covers.
-
-> ```# atest hello_world_test```
-
-Atest will first check the environment is setup and then load up the
-module-info.json file (and build it if it's not detected or we want to rebuild
-it). That is a critical piece that atest depends on. Module-info.json contains a
-list of all modules in the android repo and some relevant info (e.g.
-compatibility_suite, auto_gen_config, etc) that is used during the test finding
-process. We create the results dir for our test runners to dump results in and
-proceed to the first juicy part of atest, finding tests.
-
-The tests specified by the user are passed into the ```CLITranslator``` to
-transform the user input into a ```TestInfo``` object that contains all of the
-required and optional bits used to run the test as how the user intended.
-Required info would be the test name, test dependencies, and the test runner
-used to run the test. Optional bits would be additional args for the test and
-method/class filters.
-
-Once ```TestInfo``` objects have been constructed for all the tests passed in
-by the user, all of the test dependencies are built. This step can by bypassed
-if the user specifies only _-t_ or _-i_.
-
-The final step is to run the tests which is where the test runners do their job.
-All of the ```TestInfo``` objects get passed into the ```test_runner_handler```
-which invokes each ```TestInfo``` specified test runner. In this specific case,
-the ```AtestTradefedTestRunner``` is used to kick off ```hello_world_test```.
-
-Read on to learn more about the classes mentioned.
-
-## <a name="major-files-and-dirs">Major Files and Dirs</a>
-
-Here is a list of major files and dirs that are important to point out:
-* ```atest.py``` - Main entry point.
-* ```cli_translator.py``` - Home of ```CLITranslator``` class. Translates the
- user input into something the test runners can understand.
-* ```test_finder_handler.py``` - Module that collects all test finders,
- determines which test finder methods to use and returns them for
- ```CLITranslator``` to utilize.
-* ```test_finders/``` - Location of test finder classes. More details on test
- finders [below](#test-finders).
-* ```test_finders/test_info.py``` - Module that defines ```TestInfo``` class.
-* ```test_runner_handler.py``` - Module that collects all test runners and
- contains logic to determine what test runner to use for a particular
- ```TestInfo```.
-* ```test_runners/``` - Location of test runner classes. More details on test
- runners [below](#test-runners).
-* ```constants_default.py``` - Location of constant defaults. Need to override
- some of these constants for your private repo? [Instructions below](#constants-override).
-
-## <a name="test-finders">Test Finders</a>
-
-Test finders are classes that host find methods. The find methods are called by
-atest to find tests in the android repo based on the user's input (path,
-filename, class, etc). Find methods will also find the corresponding test
-dependencies for the supplied test, translating it into a form that a test
-runner can understand, and specifying the test runner.
-
-For more details and instructions on how to create new test finders,
-[go here](./develop_test_finders.md)
-
-## <a name="test-runners">Test Runners</a>
-
-Test Runners are classes that execute the tests. They consume a ```TestInfo```
-and execute the test as specified.
-
-For more details and instructions on how to create new test runners, [go here](./develop_test_runners.md)
-
-## <a name="constants-override">Constants Override</a>
-
-You'd like to override some constants but not sure how? Override them with your
-own constants_override.py that lives in your own private repo.
-
-1. Create new ```constants_override.py``` (or whatever you'd like to name it) in
- your own repo. It can live anywhere but just for example purposes, let's
- specify the path to be ```<private repo>/path/to/constants_override/constants_override.py```.
-2. Add a ```vendorsetup.sh``` script in ```//vendor/<somewhere>``` to export the
- path of ```constants_override.py``` base path into ```PYTHONPATH```.
-```bash
-# This file is executed by build/envsetup.sh
-_path_to_constants_override="$(gettop)/path/to/constants_override"
-if [[ ! $PYTHONPATH == *${_path_to_constants_override}* ]]; then
- export PYTHONPATH=${_path_to_constants_override}:$PYTHONPATH
-fi
-```
-3. Try-except import ```constants_override``` in ```constants.py```.
-```python
-try:
- from constants_override import *
-except ImportError:
- pass
-```
-4. You're done! To pick up the override, rerun build/envsetup.sh to kick off the
- vendorsetup.sh script.
diff --git a/atest-py2/docs/develop_test_finders.md b/atest-py2/docs/develop_test_finders.md
deleted file mode 100644
index 5235ef7..0000000
--- a/atest-py2/docs/develop_test_finders.md
+++ /dev/null
@@ -1,64 +0,0 @@
-# Test Finder Developer Guide
-
-Learn about test finders and how to create a new test finder class.
-
-##### Table of Contents
-1. [Test Finder Details](#test-finder-details)
-2. [Creating a Test Finder](#creating-a-test-finder)
-
-## <a name="test-finder-details">Test Finder Details</a>
-
-A test finder class holds find methods. A find method is given a string (the
-user input) and should try to resolve that string into a ```TestInfo``` object.
-A ```TestInfo``` object holds the test name, test dependencies, test runner, and
-a data field to hold misc bits like filters and extra args for the test. The
-test finder class can hold multiple find methods. The find methods are grouped
-together in a class so they can share metadata for optimal test finding.
-Examples of metadata would be the ```ModuleInfo``` object or the dirs that hold
-the test configs for the ```TFIntegrationFinder```.
-
-**When should I create a new test finder class?**
-
-If the metadata used to find a test is unlike existing test finder classes,
-that is the right time to create a new class. Metadata can be anything like
-file name patterns, a special file in a dir to indicate it's a test, etc. The
-default test finder classes use the module-info.json and specific dir paths
-metadata (```ModuleFinder``` and ```TFIntegrationFinder``` respectively).
-
-## <a name="creating-a-test-finder">Creating a Test Finder</a>
-
-First thing to choose is where to put the test finder. This will primarily
-depend on if the test finder will be public or private. If public,
-```test_finders/``` is the default location.
-
-> If it will be private, then you can
-> follow the same instructions for ```vendorsetup.sh``` in
-> [constants override](atest_structure.md#constants-override) where you will
-> add the path of where the test finder lives into ```$PYTHONPATH```. Same
-> rules apply, rerun ```build/envsetup.sh``` to update ```$PYTHONPATH```.
-
-Now define your class and decorate it with the
-```test_finder_base.find_method_register``` decorator. This decorator will
-create a list of find methods that ```test_finder_handler``` will use to collect
-the find methods from your test finder class. Take a look at
-```test_finders/example_test_finder.py``` as an example.
-
-Define the find methods in your test finder class. These find methods must
-return a ```TestInfo``` object. Extra bits of info can be stored in the data
-field as a dict. Check out ```ExampleFinder``` to see how the data field is
-used.
-
-Decorate each find method with the ```test_finder_base.register``` decorator.
-This is used by the class decorator to identify the find methods of the class.
-
-Final bit is to add your test finder class to ```test_finder_handler```.
-Try-except import it in the ```_get_test_finders``` method and that should be
-it. The find methods will be collected and executed before the default find
-methods.
-```python
-try:
- from test_finders import new_test_finder
- test_finders_list.add(new_test_finder.NewTestFinder)
-except ImportError:
- pass
-```
diff --git a/atest-py2/docs/develop_test_runners.md b/atest-py2/docs/develop_test_runners.md
deleted file mode 100644
index 80388ac..0000000
--- a/atest-py2/docs/develop_test_runners.md
+++ /dev/null
@@ -1,64 +0,0 @@
-# Test Runner Developer Guide
-
-Learn about test runners and how to create a new test runner class.
-
-##### Table of Contents
-1. [Test Runner Details](#test-runner-details)
-2. [Creating a Test Runner](#creating-a-test-runner)
-
-## <a name="test-runner-details">Test Runner Details</a>
-
-The test runner class is responsible for test execution. Its primary logic
-involve construction of the commandline given a ```TestInfo``` and
-```extra_args``` passed into the ```run_tests``` method. The extra_args are
-top-level args consumed by atest passed onto the test runner. It is up to the
-test runner to translate those args into the specific args the test runner
-accepts. In this way, you can think of the test runner as a translator between
-the atest CLI and your test runner's CLI. The reason for this is so that atest
-can have a consistent CLI for args instead of requiring the users to remember
-the differing CLIs of various test runners. The test runner should also
-determine its specific dependencies that need to be built prior to any test
-execution.
-
-## <a name="creating-a-test-runner">Creating a Test Runner</a>
-
-First thing to choose is where to put the test runner. This will primarily
-depend on if the test runner will be public or private. If public,
-```test_runners/``` is the default location.
-
-> If it will be private, then you can
-> follow the same instructions for ```vendorsetup.sh``` in
-> [constants override](atest_structure.md#constants-override) where you will
-> add the path of where the test runner lives into ```$PYTHONPATH```. Same
-> rules apply, rerun ```build/envsetup.sh``` to update ```$PYTHONPATH```.
-
-To create a new test runner, create a new class that inherits
-```TestRunnerBase```. Take a look at ```test_runners/example_test_runner.py```
-to see what a simple test runner will look like.
-
-**Important Notes**
-You'll need to override the following parent methods:
-* ```host_env_check()```: Check if host environment is properly setup for the
- test runner. Raise an expception if not.
-* ```get_test_runner_build_reqs()```: Return a set of build targets that need
- to be built prior to test execution.
-* ```run_tests()```: Execute the test(s).
-
-And define the following class vars:
-* ```NAME```: Unique name of the test runner.
-* ```EXECUTABLE```: Test runner command, should be an absolute path if the
- command can not be found in ```$PATH```.
-
-There is a parent helper method (```run```) that should be used to execute the
-actual test command.
-
-Once the test runner class is created, you'll need to add it in
-```test_runner_handler``` so that atest is aware of it. Try-except import the
-test runner in ```_get_test_runners``` like how ```ExampleTestRunner``` is.
-```python
-try:
- from test_runners import new_test_runner
- test_runners_dict[new_test_runner.NewTestRunner.NAME] = new_test_runner.NewTestRunner
-except ImportError:
- pass
-```
diff --git a/atest-py2/docs/developer_workflow.md b/atest-py2/docs/developer_workflow.md
deleted file mode 100644
index d3c2a32..0000000
--- a/atest-py2/docs/developer_workflow.md
+++ /dev/null
@@ -1,154 +0,0 @@
-# Atest Developer Workflow
-
-This document explains the practical steps for contributing code to atest.
-
-##### Table of Contents
-1. [Identify the code you should work on](#identify-the-code-you-should-work-on)
-2. [Working on the Python Code](#working-on-the-python-code)
-3. [Working on the TradeFed Code](#working-on-the-tradefed-code)
-4. [Working on the VTS10-TradeFed Code](#working-on-the-vts10-tradefed-code)
-5. [Working on the Robolectric Code](#working-on-the-robolectric-code)
-
-
-## <a name="what-code">Identify the code you should work on</a>
-
-Atest is essentially a wrapper around various test runners. Because of
-this division, your first step should be to identify the code
-involved with your change. This will help determine what tests you write
-and run. Note that the wrapper code is written in python, so we'll be
-referring to it as the "Python Code".
-
-##### The Python Code
-
-This code defines atest's command line interface.
-Its job is to translate user inputs into (1) build targets and (2)
-information needed for the test runner to run the test. It then invokes
-the appropriate test runner code to run the tests. As the tests
-are run it also parses the test runner's output into the output seen by
-the user. It uses Test Finder and Test Runner classes to do this work.
-If your contribution involves any of this functionality, this is the
-code you'll want to work on.
-
-<p>For more details on how this code works, checkout the following docs:
-
- - [General Structure](./atest_structure.md)
- - [Test Finders](./develop_test_finders.md)
- - [Test Runners](./develop_test_runners.md)
-
-##### The Test Runner Code
-
-This is the code that actually runs the test. If your change
-involves how the test is actually run, you'll need to work with this
-code.
-
-Each test runner will have a different workflow. Atest currently
-supports the following test runners:
-- TradeFed
-- VTS10-TradeFed
-- Robolectric
-
-
-## <a name="working-on-the-python-code">Working on the Python Code</a>
-
-##### Where does the Python code live?
-
-The python code lives here: `tools/tradefederation/core/atest/`
-(path relative to android repo root)
-
-##### Writing tests
-
-Test files go in the same directory as the file being tested. The test
-file should have the same name as the file it's testing, except it
-should have "_unittests" appended to the name. For example, tests
-for the logic in `cli_translator.py` go in the file
-`cli_translator_unittests.py` in the same directory.
-
-
-##### Running tests
-
-Python tests are just python files executable by the Python interpreter.
-You can run ALL the python tests by executing this bash script in the
-atest root dir: `./run_atest_unittests.sh`. Alternatively, you can
-directly execute any individual unittest file. However, you'll need to
-first add atest to your PYTHONPATH via entering in your terminal:
-`PYTHONPATH=<atest_dir>:$PYTHONPATH`.
-
-All tests should be passing before you submit your change.
-
-## <a name="working-on-the-tradefed-code">Working on the TradeFed Code</a>
-
-##### Where does the TradeFed code live?
-
-The TradeFed code lives here:
-`tools/tradefederation/core/src/com/android/tradefed/` (path relative
-to android repo root).
-
-The `testtype/suite/AtestRunner.java` is the most important file in
-the TradeFed Code. It defines the TradeFed API used
-by the Python Code, specifically by
-`test_runners/atest_tf_test_runner.py`. This is the file you'll want
-to edit if you need to make changes to the TradeFed code.
-
-
-##### Writing tests
-
-Tradefed test files live in a parallel `/tests/` file tree here:
-`tools/tradefederation/core/tests/src/com/android/tradefed/`.
-A test file should have the same name as the file it's testing,
-except with the word "Test" appended to the end. <p>
-For example, the tests for `tools/tradefederation/core/src/com/android/tradefed/testtype/suite/AtestRunner.java`
-can be found in `tools/tradefederation/core/tests/src/com/android/tradefed/testtype/suite/AtestRunnerTest.java`.
-
-##### Running tests
-
-TradeFed itself is used to run the TradeFed unittests so you'll need
-to build TradeFed first. See the
-[TradeFed README](../../README.md) for information about setting up
-TradeFed. <p>
-There are so many TradeFed tests that you'll probably want to
-first run the test file your code change affected individually. The
-command to run an individual test file is:<br>
-
-`tradefed.sh run host -n --class <fully.qualified.ClassName>`
-
-Thus, to run all the tests in AtestRunnerTest.java, you'd enter:
-
-`tradefed.sh run host -n --class com.android.tradefed.testtype.suite.AtestRunnerTest`
-
-To run ALL the TradeFed unittests, enter:
-`./tools/tradefederation/core/tests/run_tradefed_tests.sh`
-(from android repo root)
-
-Before submitting code you should run all the TradeFed tests.
-
-## <a name="working-on-the-vts10-tradefed-code">Working on the VTS10-TradeFed Code</a>
-
-##### Where does the VTS10-TradeFed code live?
-
-The VTS10-Tradefed code lives here: `test/vts/tools/vts-tradefed/`
-(path relative to android repo root)
-
-##### Writing tests
-
-You shouldn't need to edit vts10-tradefed code, so there is no
-need to write vts10 tests. Reach out to the vts team
-if you need information on their unittests.
-
-##### Running tests
-
-Again, you shouldn't need to change vts10-tradefed code.
-
-## <a name="working-on-the-robolectric-code">Working on the Robolectric Code</a>
-
-##### Where does the Robolectric code live?
-
-The Robolectric code lives here: `prebuilts/misc/common/robolectric/3.6.1/`
-(path relative to android repo root)
-
-##### Writing tests
-
-You shouldn't need to edit this code, so no need to write tests.
-
-##### Running tests
-
-Again, you shouldn't need to edit this code, so no need to run test.
diff --git a/atest-py2/metrics/__init__.py b/atest-py2/metrics/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/atest-py2/metrics/__init__.py
+++ /dev/null
diff --git a/atest-py2/metrics/clearcut_client.py b/atest-py2/metrics/clearcut_client.py
deleted file mode 100644
index ecb83c3..0000000
--- a/atest-py2/metrics/clearcut_client.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Python client library to write logs to Clearcut.
-
-This class is intended to be general-purpose, usable for any Clearcut LogSource.
-
- Typical usage example:
-
- client = clearcut.Clearcut(clientanalytics_pb2.LogRequest.MY_LOGSOURCE)
- client.log(my_event)
- client.flush_events()
-"""
-
-import logging
-import threading
-import time
-try:
- # PYTHON2
- from urllib2 import urlopen
- from urllib2 import Request
- from urllib2 import HTTPError
- from urllib2 import URLError
-except ImportError:
- # PYTHON3
- from urllib.request import urlopen
- from urllib.request import Request
- from urllib.request import HTTPError
- from urllib.request import URLError
-
-from proto import clientanalytics_pb2
-
-_CLEARCUT_PROD_URL = 'https://play.googleapis.com/log'
-_DEFAULT_BUFFER_SIZE = 100 # Maximum number of events to be buffered.
-_DEFAULT_FLUSH_INTERVAL_SEC = 60 # 1 Minute.
-_BUFFER_FLUSH_RATIO = 0.5 # Flush buffer when we exceed this ratio.
-_CLIENT_TYPE = 6
-
-class Clearcut(object):
- """Handles logging to Clearcut."""
-
- def __init__(self, log_source, url=None, buffer_size=None,
- flush_interval_sec=None):
- """Initializes a Clearcut client.
-
- Args:
- log_source: The log source.
- url: The Clearcut url to connect to.
- buffer_size: The size of the client buffer in number of events.
- flush_interval_sec: The flush interval in seconds.
- """
- self._clearcut_url = url if url else _CLEARCUT_PROD_URL
- self._log_source = log_source
- self._buffer_size = buffer_size if buffer_size else _DEFAULT_BUFFER_SIZE
- self._pending_events = []
- if flush_interval_sec:
- self._flush_interval_sec = flush_interval_sec
- else:
- self._flush_interval_sec = _DEFAULT_FLUSH_INTERVAL_SEC
- self._pending_events_lock = threading.Lock()
- self._scheduled_flush_thread = None
- self._scheduled_flush_time = float('inf')
- self._min_next_request_time = 0
-
- def log(self, event):
- """Logs events to Clearcut.
-
- Logging an event can potentially trigger a flush of queued events. Flushing
- is triggered when the buffer is more than half full or after the flush
- interval has passed.
-
- Args:
- event: A LogEvent to send to Clearcut.
- """
- self._append_events_to_buffer([event])
-
- def flush_events(self):
- """ Cancel whatever is scheduled and schedule an immediate flush."""
- if self._scheduled_flush_thread:
- self._scheduled_flush_thread.cancel()
- self._min_next_request_time = 0
- self._schedule_flush_thread(0)
-
- def _serialize_events_to_proto(self, events):
- log_request = clientanalytics_pb2.LogRequest()
- log_request.request_time_ms = int(time.time() * 1000)
- # pylint: disable=no-member
- log_request.client_info.client_type = _CLIENT_TYPE
- log_request.log_source = self._log_source
- log_request.log_event.extend(events)
- return log_request
-
- def _append_events_to_buffer(self, events, retry=False):
- with self._pending_events_lock:
- self._pending_events.extend(events)
- if len(self._pending_events) > self._buffer_size:
- index = len(self._pending_events) - self._buffer_size
- del self._pending_events[:index]
- self._schedule_flush(retry)
-
- def _schedule_flush(self, retry):
- if (not retry
- and len(self._pending_events) >= int(self._buffer_size *
- _BUFFER_FLUSH_RATIO)
- and self._scheduled_flush_time > time.time()):
- # Cancel whatever is scheduled and schedule an immediate flush.
- if self._scheduled_flush_thread:
- self._scheduled_flush_thread.cancel()
- self._schedule_flush_thread(0)
- elif self._pending_events and not self._scheduled_flush_thread:
- # Schedule a flush to run later.
- self._schedule_flush_thread(self._flush_interval_sec)
-
- def _schedule_flush_thread(self, time_from_now):
- min_wait_sec = self._min_next_request_time - time.time()
- if min_wait_sec > time_from_now:
- time_from_now = min_wait_sec
- logging.debug('Scheduling thread to run in %f seconds', time_from_now)
- self._scheduled_flush_thread = threading.Timer(time_from_now, self._flush)
- self._scheduled_flush_time = time.time() + time_from_now
- self._scheduled_flush_thread.start()
-
- def _flush(self):
- """Flush buffered events to Clearcut.
-
- If the sent request is unsuccessful, the events will be appended to
- buffer and rescheduled for next flush.
- """
- with self._pending_events_lock:
- self._scheduled_flush_time = float('inf')
- self._scheduled_flush_thread = None
- events = self._pending_events
- self._pending_events = []
- if self._min_next_request_time > time.time():
- self._append_events_to_buffer(events, retry=True)
- return
- log_request = self._serialize_events_to_proto(events)
- self._send_to_clearcut(log_request.SerializeToString())
-
- #pylint: disable=broad-except
- def _send_to_clearcut(self, data):
- """Sends a POST request with data as the body.
-
- Args:
- data: The serialized proto to send to Clearcut.
- """
- request = Request(self._clearcut_url, data=data)
- try:
- response = urlopen(request)
- msg = response.read()
- logging.debug('LogRequest successfully sent to Clearcut.')
- log_response = clientanalytics_pb2.LogResponse()
- log_response.ParseFromString(msg)
- # pylint: disable=no-member
- # Throttle based on next_request_wait_millis value.
- self._min_next_request_time = (log_response.next_request_wait_millis
- / 1000 + time.time())
- logging.debug('LogResponse: %s', log_response)
- except HTTPError as e:
- logging.debug('Failed to push events to Clearcut. Error code: %d',
- e.code)
- except URLError:
- logging.debug('Failed to push events to Clearcut.')
- except Exception as e:
- logging.debug(e)
diff --git a/atest-py2/metrics/metrics.py b/atest-py2/metrics/metrics.py
deleted file mode 100644
index f6446a6..0000000
--- a/atest-py2/metrics/metrics.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Metrics class.
-"""
-
-import constants
-
-from . import metrics_base
-
-class AtestStartEvent(metrics_base.MetricsBase):
- """
- Create Atest start event and send to clearcut.
-
- Usage:
- metrics.AtestStartEvent(
- command_line='example_atest_command',
- test_references=['example_test_reference'],
- cwd='example/working/dir',
- os='example_os')
- """
- _EVENT_NAME = 'atest_start_event'
- command_line = constants.INTERNAL
- test_references = constants.INTERNAL
- cwd = constants.INTERNAL
- os = constants.INTERNAL
-
-class AtestExitEvent(metrics_base.MetricsBase):
- """
- Create Atest exit event and send to clearcut.
-
- Usage:
- metrics.AtestExitEvent(
- duration=metrics_utils.convert_duration(end-start),
- exit_code=0,
- stacktrace='some_trace',
- logs='some_logs')
- """
- _EVENT_NAME = 'atest_exit_event'
- duration = constants.EXTERNAL
- exit_code = constants.EXTERNAL
- stacktrace = constants.INTERNAL
- logs = constants.INTERNAL
-
-class FindTestFinishEvent(metrics_base.MetricsBase):
- """
- Create find test finish event and send to clearcut.
-
- Occurs after a SINGLE test reference has been resolved to a test or
- not found.
-
- Usage:
- metrics.FindTestFinishEvent(
- duration=metrics_utils.convert_duration(end-start),
- success=true,
- test_reference='hello_world_test',
- test_finders=['example_test_reference', 'ref2'],
- test_info="test_name: hello_world_test -
- test_runner:AtestTradefedTestRunner -
- build_targets:
- set(['MODULES-IN-platform_testing-tests-example-native']) -
- data:{'rel_config':
- 'platform_testing/tests/example/native/AndroidTest.xml',
- 'filter': frozenset([])} -
- suite:None - module_class: ['NATIVE_TESTS'] -
- install_locations:set(['device', 'host'])")
- """
- _EVENT_NAME = 'find_test_finish_event'
- duration = constants.EXTERNAL
- success = constants.EXTERNAL
- test_reference = constants.INTERNAL
- test_finders = constants.INTERNAL
- test_info = constants.INTERNAL
-
-class BuildFinishEvent(metrics_base.MetricsBase):
- """
- Create build finish event and send to clearcut.
-
- Occurs after the build finishes, either successfully or not.
-
- Usage:
- metrics.BuildFinishEvent(
- duration=metrics_utils.convert_duration(end-start),
- success=true,
- targets=['target1', 'target2'])
- """
- _EVENT_NAME = 'build_finish_event'
- duration = constants.EXTERNAL
- success = constants.EXTERNAL
- targets = constants.INTERNAL
-
-class RunnerFinishEvent(metrics_base.MetricsBase):
- """
- Create run finish event and send to clearcut.
-
- Occurs when a single test runner has completed.
-
- Usage:
- metrics.RunnerFinishEvent(
- duration=metrics_utils.convert_duration(end-start),
- success=true,
- runner_name='AtestTradefedTestRunner'
- test=[{name:'hello_world_test', result:0, stacktrace:''},
- {name:'test2', result:1, stacktrace:'xxx'}])
- """
- _EVENT_NAME = 'runner_finish_event'
- duration = constants.EXTERNAL
- success = constants.EXTERNAL
- runner_name = constants.EXTERNAL
- test = constants.INTERNAL
-
-class RunTestsFinishEvent(metrics_base.MetricsBase):
- """
- Create run tests finish event and send to clearcut.
-
- Occurs after all test runners and tests have finished.
-
- Usage:
- metrics.RunTestsFinishEvent(
- duration=metrics_utils.convert_duration(end-start))
- """
- _EVENT_NAME = 'run_tests_finish_event'
- duration = constants.EXTERNAL
-
-class LocalDetectEvent(metrics_base.MetricsBase):
- """
- Create local detection event and send it to clearcut.
-
- Usage:
- metrics.LocalDetectEvent(
- detect_type=0,
- result=0)
- """
- _EVENT_NAME = 'local_detect_event'
- detect_type = constants.EXTERNAL
- result = constants.EXTERNAL
diff --git a/atest-py2/metrics/metrics_base.py b/atest-py2/metrics/metrics_base.py
deleted file mode 100644
index 44b3819..0000000
--- a/atest-py2/metrics/metrics_base.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Metrics base class.
-"""
-
-from __future__ import print_function
-
-import logging
-import random
-import socket
-import subprocess
-import time
-import uuid
-
-import asuite_metrics
-import constants
-
-from proto import clientanalytics_pb2
-from proto import external_user_log_pb2
-from proto import internal_user_log_pb2
-
-from . import clearcut_client
-
-INTERNAL_USER = 0
-EXTERNAL_USER = 1
-
-ATEST_EVENTS = {
- INTERNAL_USER: internal_user_log_pb2.AtestLogEventInternal,
- EXTERNAL_USER: external_user_log_pb2.AtestLogEventExternal
-}
-# log source
-ATEST_LOG_SOURCE = {
- INTERNAL_USER: 971,
- EXTERNAL_USER: 934
-}
-
-
-def get_user_type():
- """Get user type.
-
- Determine the internal user by passing at least one check:
- - whose git mail domain is from google
- - whose hostname is from google
- Otherwise is external user.
-
- Returns:
- INTERNAL_USER if user is internal, EXTERNAL_USER otherwise.
- """
- try:
- output = subprocess.check_output(['git', 'config', '--get', 'user.email'],
- universal_newlines=True)
- if output and output.strip().endswith(constants.INTERNAL_EMAIL):
- return INTERNAL_USER
- except OSError:
- # OSError can be raised when running atest_unittests on a host
- # without git being set up.
- logging.debug('Unable to determine if this is an external run, git is '
- 'not found.')
- except subprocess.CalledProcessError:
- logging.debug('Unable to determine if this is an external run, email '
- 'is not found in git config.')
- try:
- hostname = socket.getfqdn()
- if (hostname and
- any([(x in hostname) for x in constants.INTERNAL_HOSTNAME])):
- return INTERNAL_USER
- except IOError:
- logging.debug('Unable to determine if this is an external run, '
- 'hostname is not found.')
- return EXTERNAL_USER
-
-
-class MetricsBase(object):
- """Class for separating allowed fields and sending metric."""
-
- _run_id = str(uuid.uuid4())
- try:
- #pylint: disable=protected-access
- _user_key = str(asuite_metrics._get_grouping_key())
- #pylint: disable=broad-except
- except Exception:
- _user_key = asuite_metrics.UNUSED_UUID
- _user_type = get_user_type()
- _log_source = ATEST_LOG_SOURCE[_user_type]
- cc = clearcut_client.Clearcut(_log_source)
- tool_name = None
-
- def __new__(cls, **kwargs):
- """Send metric event to clearcut.
-
- Args:
- cls: this class object.
- **kwargs: A dict of named arguments.
-
- Returns:
- A Clearcut instance.
- """
- # pylint: disable=no-member
- if not cls.tool_name:
- logging.debug('There is no tool_name, and metrics stops sending.')
- return None
- allowed = ({constants.EXTERNAL} if cls._user_type == EXTERNAL_USER
- else {constants.EXTERNAL, constants.INTERNAL})
- fields = [k for k, v in vars(cls).items()
- if not k.startswith('_') and v in allowed]
- fields_and_values = {}
- for field in fields:
- if field in kwargs:
- fields_and_values[field] = kwargs.pop(field)
- params = {'user_key': cls._user_key,
- 'run_id': cls._run_id,
- 'user_type': cls._user_type,
- 'tool_name': cls.tool_name,
- cls._EVENT_NAME: fields_and_values}
- log_event = cls._build_full_event(ATEST_EVENTS[cls._user_type](**params))
- cls.cc.log(log_event)
- return cls.cc
-
- @classmethod
- def _build_full_event(cls, atest_event):
- """This is all protobuf building you can ignore.
-
- Args:
- cls: this class object.
- atest_event: A client_pb2.AtestLogEvent instance.
-
- Returns:
- A clientanalytics_pb2.LogEvent instance.
- """
- log_event = clientanalytics_pb2.LogEvent()
- log_event.event_time_ms = int((time.time() - random.randint(1, 600)) * 1000)
- log_event.source_extension = atest_event.SerializeToString()
- return log_event
diff --git a/atest-py2/metrics/metrics_utils.py b/atest-py2/metrics/metrics_utils.py
deleted file mode 100644
index a43b8f6..0000000
--- a/atest-py2/metrics/metrics_utils.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Utility functions for metrics.
-"""
-
-import os
-import platform
-import sys
-import time
-import traceback
-
-from . import metrics
-from . import metrics_base
-
-
-def static_var(varname, value):
- """Decorator to cache static variable."""
- def fun_var_decorate(func):
- """Set the static variable in a function."""
- setattr(func, varname, value)
- return func
- return fun_var_decorate
-
-
-@static_var("start_time", [])
-def get_start_time():
- """Get start time.
-
- Return:
- start_time: Start time in seconds. Return cached start_time if exists,
- time.time() otherwise.
- """
- if not get_start_time.start_time:
- get_start_time.start_time = time.time()
- return get_start_time.start_time
-
-
-def convert_duration(diff_time_sec):
- """Compute duration from time difference.
-
- A Duration represents a signed, fixed-length span of time represented
- as a count of seconds and fractions of seconds at nanosecond
- resolution.
-
- Args:
- dur_time_sec: The time in seconds as a floating point number.
-
- Returns:
- A dict of Duration.
- """
- seconds = int(diff_time_sec)
- nanos = int((diff_time_sec - seconds)*10**9)
- return {'seconds': seconds, 'nanos': nanos}
-
-
-# pylint: disable=broad-except
-def handle_exc_and_send_exit_event(exit_code):
- """handle exceptions and send exit event.
-
- Args:
- exit_code: An integer of exit code.
- """
- stacktrace = logs = ''
- try:
- exc_type, exc_msg, _ = sys.exc_info()
- stacktrace = traceback.format_exc()
- if exc_type:
- logs = '{etype}: {value}'.format(etype=exc_type.__name__,
- value=exc_msg)
- except Exception:
- pass
- send_exit_event(exit_code, stacktrace=stacktrace, logs=logs)
-
-
-def send_exit_event(exit_code, stacktrace='', logs=''):
- """Log exit event and flush all events to clearcut.
-
- Args:
- exit_code: An integer of exit code.
- stacktrace: A string of stacktrace.
- logs: A string of logs.
- """
- clearcut = metrics.AtestExitEvent(
- duration=convert_duration(time.time()-get_start_time()),
- exit_code=exit_code,
- stacktrace=stacktrace,
- logs=logs)
- # pylint: disable=no-member
- if clearcut:
- clearcut.flush_events()
-
-
-def send_start_event(tool_name, command_line='', test_references='',
- cwd=None, operating_system=None):
- """Log start event of clearcut.
-
- Args:
- tool_name: A string of the asuite product name.
- command_line: A string of the user input command.
- test_references: A string of the input tests.
- cwd: A string of current path.
- operating_system: A string of user's operating system.
- """
- if not cwd:
- cwd = os.getcwd()
- if not operating_system:
- operating_system = platform.platform()
- # Without tool_name information, asuite's clearcut client will not send
- # event to server.
- metrics_base.MetricsBase.tool_name = tool_name
- get_start_time()
- metrics.AtestStartEvent(command_line=command_line,
- test_references=test_references,
- cwd=cwd,
- os=operating_system)
diff --git a/atest-py2/module_info.py b/atest-py2/module_info.py
deleted file mode 100644
index d925548..0000000
--- a/atest-py2/module_info.py
+++ /dev/null
@@ -1,336 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Module Info class used to hold cached module-info.json.
-"""
-
-import json
-import logging
-import os
-
-import atest_utils
-import constants
-
-# JSON file generated by build system that lists all buildable targets.
-_MODULE_INFO = 'module-info.json'
-
-
-class ModuleInfo(object):
- """Class that offers fast/easy lookup for Module related details."""
-
- def __init__(self, force_build=False, module_file=None):
- """Initialize the ModuleInfo object.
-
- Load up the module-info.json file and initialize the helper vars.
-
- Args:
- force_build: Boolean to indicate if we should rebuild the
- module_info file regardless if it's created or not.
- module_file: String of path to file to load up. Used for testing.
- """
- module_info_target, name_to_module_info = self._load_module_info_file(
- force_build, module_file)
- self.name_to_module_info = name_to_module_info
- self.module_info_target = module_info_target
- self.path_to_module_info = self._get_path_to_module_info(
- self.name_to_module_info)
- self.root_dir = os.environ.get(constants.ANDROID_BUILD_TOP)
-
- @staticmethod
- def _discover_mod_file_and_target(force_build):
- """Find the module file.
-
- Args:
- force_build: Boolean to indicate if we should rebuild the
- module_info file regardless if it's created or not.
-
- Returns:
- Tuple of module_info_target and path to module file.
- """
- module_info_target = None
- root_dir = os.environ.get(constants.ANDROID_BUILD_TOP, '/')
- out_dir = os.environ.get(constants.ANDROID_PRODUCT_OUT, root_dir)
- module_file_path = os.path.join(out_dir, _MODULE_INFO)
-
- # Check if the user set a custom out directory by comparing the out_dir
- # to the root_dir.
- if out_dir.find(root_dir) == 0:
- # Make target is simply file path relative to root
- module_info_target = os.path.relpath(module_file_path, root_dir)
- else:
- # If the user has set a custom out directory, generate an absolute
- # path for module info targets.
- logging.debug('User customized out dir!')
- module_file_path = os.path.join(
- os.environ.get(constants.ANDROID_PRODUCT_OUT), _MODULE_INFO)
- module_info_target = module_file_path
- if not os.path.isfile(module_file_path) or force_build:
- logging.debug('Generating %s - this is required for '
- 'initial runs.', _MODULE_INFO)
- build_env = dict(constants.ATEST_BUILD_ENV)
- atest_utils.build([module_info_target],
- verbose=logging.getLogger().isEnabledFor(logging.DEBUG),
- env_vars=build_env)
- return module_info_target, module_file_path
-
- def _load_module_info_file(self, force_build, module_file):
- """Load the module file.
-
- Args:
- force_build: Boolean to indicate if we should rebuild the
- module_info file regardless if it's created or not.
- module_file: String of path to file to load up. Used for testing.
-
- Returns:
- Tuple of module_info_target and dict of json.
- """
- # If module_file is specified, we're testing so we don't care if
- # module_info_target stays None.
- module_info_target = None
- file_path = module_file
- if not file_path:
- module_info_target, file_path = self._discover_mod_file_and_target(
- force_build)
- with open(file_path) as json_file:
- mod_info = json.load(json_file)
- return module_info_target, mod_info
-
- @staticmethod
- def _get_path_to_module_info(name_to_module_info):
- """Return the path_to_module_info dict.
-
- Args:
- name_to_module_info: Dict of module name to module info dict.
-
- Returns:
- Dict of module path to module info dict.
- """
- path_to_module_info = {}
- for mod_name, mod_info in name_to_module_info.items():
- # Cross-compiled and multi-arch modules actually all belong to
- # a single target so filter out these extra modules.
- if mod_name != mod_info.get(constants.MODULE_NAME, ''):
- continue
- for path in mod_info.get(constants.MODULE_PATH, []):
- mod_info[constants.MODULE_NAME] = mod_name
- # There could be multiple modules in a path.
- if path in path_to_module_info:
- path_to_module_info[path].append(mod_info)
- else:
- path_to_module_info[path] = [mod_info]
- return path_to_module_info
-
- def is_module(self, name):
- """Return True if name is a module, False otherwise."""
- return name in self.name_to_module_info
-
- def get_paths(self, name):
- """Return paths of supplied module name, Empty list if non-existent."""
- info = self.name_to_module_info.get(name)
- if info:
- return info.get(constants.MODULE_PATH, [])
- return []
-
- def get_module_names(self, rel_module_path):
- """Get the modules that all have module_path.
-
- Args:
- rel_module_path: path of module in module-info.json
-
- Returns:
- List of module names.
- """
- return [m.get(constants.MODULE_NAME)
- for m in self.path_to_module_info.get(rel_module_path, [])]
-
- def get_module_info(self, mod_name):
- """Return dict of info for given module name, None if non-existent."""
- module_info = self.name_to_module_info.get(mod_name)
- # Android's build system will automatically adding 2nd arch bitness
- # string at the end of the module name which will make atest could not
- # finding matched module. Rescan the module-info with matched module
- # name without bitness.
- if not module_info:
- for _, module_info in self.name_to_module_info.items():
- if mod_name == module_info.get(constants.MODULE_NAME, ''):
- break
- return module_info
-
- def is_suite_in_compatibility_suites(self, suite, mod_info):
- """Check if suite exists in the compatibility_suites of module-info.
-
- Args:
- suite: A string of suite name.
- mod_info: Dict of module info to check.
-
- Returns:
- True if it exists in mod_info, False otherwise.
- """
- return suite in mod_info.get(constants.MODULE_COMPATIBILITY_SUITES, [])
-
- def get_testable_modules(self, suite=None):
- """Return the testable modules of the given suite name.
-
- Args:
- suite: A string of suite name. Set to None to return all testable
- modules.
-
- Returns:
- List of testable modules. Empty list if non-existent.
- If suite is None, return all the testable modules in module-info.
- """
- modules = set()
- for _, info in self.name_to_module_info.items():
- if self.is_testable_module(info):
- if suite:
- if self.is_suite_in_compatibility_suites(suite, info):
- modules.add(info.get(constants.MODULE_NAME))
- else:
- modules.add(info.get(constants.MODULE_NAME))
- return modules
-
- def is_testable_module(self, mod_info):
- """Check if module is something we can test.
-
- A module is testable if:
- - it's installed, or
- - it's a robolectric module (or shares path with one).
-
- Args:
- mod_info: Dict of module info to check.
-
- Returns:
- True if we can test this module, False otherwise.
- """
- if not mod_info:
- return False
- if mod_info.get(constants.MODULE_INSTALLED) and self.has_test_config(mod_info):
- return True
- if self.is_robolectric_test(mod_info.get(constants.MODULE_NAME)):
- return True
- return False
-
- def has_test_config(self, mod_info):
- """Validate if this module has a test config.
-
- A module can have a test config in the following manner:
- - AndroidTest.xml at the module path.
- - test_config be set in module-info.json.
- - Auto-generated config via the auto_test_config key in module-info.json.
-
- Args:
- mod_info: Dict of module info to check.
-
- Returns:
- True if this module has a test config, False otherwise.
- """
- # Check if test_config in module-info is set.
- for test_config in mod_info.get(constants.MODULE_TEST_CONFIG, []):
- if os.path.isfile(os.path.join(self.root_dir, test_config)):
- return True
- # Check for AndroidTest.xml at the module path.
- for path in mod_info.get(constants.MODULE_PATH, []):
- if os.path.isfile(os.path.join(self.root_dir, path,
- constants.MODULE_CONFIG)):
- return True
- # Check if the module has an auto-generated config.
- return self.is_auto_gen_test_config(mod_info.get(constants.MODULE_NAME))
-
- def get_robolectric_test_name(self, module_name):
- """Returns runnable robolectric module name.
-
- There are at least 2 modules in every robolectric module path, return
- the module that we can run as a build target.
-
- Arg:
- module_name: String of module.
-
- Returns:
- String of module that is the runnable robolectric module, None if
- none could be found.
- """
- module_name_info = self.name_to_module_info.get(module_name)
- if not module_name_info:
- return None
- module_paths = module_name_info.get(constants.MODULE_PATH, [])
- if module_paths:
- for mod in self.get_module_names(module_paths[0]):
- mod_info = self.get_module_info(mod)
- if self.is_robolectric_module(mod_info):
- return mod
- return None
-
- def is_robolectric_test(self, module_name):
- """Check if module is a robolectric test.
-
- A module can be a robolectric test if the specified module has their
- class set as ROBOLECTRIC (or shares their path with a module that does).
-
- Args:
- module_name: String of module to check.
-
- Returns:
- True if the module is a robolectric module, else False.
- """
- # Check 1, module class is ROBOLECTRIC
- mod_info = self.get_module_info(module_name)
- if self.is_robolectric_module(mod_info):
- return True
- # Check 2, shared modules in the path have class ROBOLECTRIC_CLASS.
- if self.get_robolectric_test_name(module_name):
- return True
- return False
-
- def is_auto_gen_test_config(self, module_name):
- """Check if the test config file will be generated automatically.
-
- Args:
- module_name: A string of the module name.
-
- Returns:
- True if the test config file will be generated automatically.
- """
- if self.is_module(module_name):
- mod_info = self.name_to_module_info.get(module_name)
- auto_test_config = mod_info.get('auto_test_config', [])
- return auto_test_config and auto_test_config[0]
- return False
-
- def is_robolectric_module(self, mod_info):
- """Check if a module is a robolectric module.
-
- Args:
- mod_info: ModuleInfo to check.
-
- Returns:
- True if module is a robolectric module, False otherwise.
- """
- if mod_info:
- return (mod_info.get(constants.MODULE_CLASS, [None])[0] ==
- constants.MODULE_CLASS_ROBOLECTRIC)
- return False
-
- def is_native_test(self, module_name):
- """Check if the input module is a native test.
-
- Args:
- module_name: A string of the module name.
-
- Returns:
- True if the test is a native test, False otherwise.
- """
- mod_info = self.get_module_info(module_name)
- return constants.MODULE_CLASS_NATIVE_TESTS in mod_info.get(
- constants.MODULE_CLASS, [])
diff --git a/atest-py2/module_info_unittest.py b/atest-py2/module_info_unittest.py
deleted file mode 100755
index 4e48977..0000000
--- a/atest-py2/module_info_unittest.py
+++ /dev/null
@@ -1,287 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittests for module_info."""
-
-import os
-import unittest
-import mock
-
-import constants
-import module_info
-import unittest_constants as uc
-
-JSON_FILE_PATH = os.path.join(uc.TEST_DATA_DIR, uc.JSON_FILE)
-EXPECTED_MOD_TARGET = 'tradefed'
-EXPECTED_MOD_TARGET_PATH = ['tf/core']
-UNEXPECTED_MOD_TARGET = 'this_should_not_be_in_module-info.json'
-MOD_NO_PATH = 'module-no-path'
-PATH_TO_MULT_MODULES = 'shared/path/to/be/used'
-MULT_MOODULES_WITH_SHARED_PATH = ['module2', 'module1']
-PATH_TO_MULT_MODULES_WITH_MULTI_ARCH = 'shared/path/to/be/used2'
-TESTABLE_MODULES_WITH_SHARED_PATH = ['multiarch1', 'multiarch2', 'multiarch3', 'multiarch3_32']
-
-ROBO_MOD_PATH = ['/shared/robo/path']
-NON_RUN_ROBO_MOD_NAME = 'robo_mod'
-RUN_ROBO_MOD_NAME = 'run_robo_mod'
-NON_RUN_ROBO_MOD = {constants.MODULE_NAME: NON_RUN_ROBO_MOD_NAME,
- constants.MODULE_PATH: ROBO_MOD_PATH,
- constants.MODULE_CLASS: ['random_class']}
-RUN_ROBO_MOD = {constants.MODULE_NAME: RUN_ROBO_MOD_NAME,
- constants.MODULE_PATH: ROBO_MOD_PATH,
- constants.MODULE_CLASS: [constants.MODULE_CLASS_ROBOLECTRIC]}
-MOD_PATH_INFO_DICT = {ROBO_MOD_PATH[0]: [RUN_ROBO_MOD, NON_RUN_ROBO_MOD]}
-MOD_NAME_INFO_DICT = {
- RUN_ROBO_MOD_NAME: RUN_ROBO_MOD,
- NON_RUN_ROBO_MOD_NAME: NON_RUN_ROBO_MOD}
-MOD_NAME1 = 'mod1'
-MOD_NAME2 = 'mod2'
-MOD_NAME3 = 'mod3'
-MOD_NAME4 = 'mod4'
-MOD_INFO_DICT = {}
-MODULE_INFO = {constants.MODULE_NAME: 'random_name',
- constants.MODULE_PATH: 'a/b/c/path',
- constants.MODULE_CLASS: ['random_class']}
-NAME_TO_MODULE_INFO = {'random_name' : MODULE_INFO}
-
-#pylint: disable=protected-access
-class ModuleInfoUnittests(unittest.TestCase):
- """Unit tests for module_info.py"""
-
- @mock.patch('json.load', return_value={})
- @mock.patch('__builtin__.open', new_callable=mock.mock_open)
- @mock.patch('os.path.isfile', return_value=True)
- def test_load_mode_info_file_out_dir_handling(self, _isfile, _open, _json):
- """Test _load_module_info_file out dir handling."""
- # Test out default out dir is used.
- build_top = '/path/to/top'
- default_out_dir = os.path.join(build_top, 'out/dir/here')
- os_environ_mock = {'ANDROID_PRODUCT_OUT': default_out_dir,
- constants.ANDROID_BUILD_TOP: build_top}
- default_out_dir_mod_targ = 'out/dir/here/module-info.json'
- # Make sure module_info_target is what we think it is.
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- mod_info = module_info.ModuleInfo()
- self.assertEqual(default_out_dir_mod_targ,
- mod_info.module_info_target)
-
- # Test out custom out dir is used (OUT_DIR=dir2).
- custom_out_dir = os.path.join(build_top, 'out2/dir/here')
- os_environ_mock = {'ANDROID_PRODUCT_OUT': custom_out_dir,
- constants.ANDROID_BUILD_TOP: build_top}
- custom_out_dir_mod_targ = 'out2/dir/here/module-info.json'
- # Make sure module_info_target is what we think it is.
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- mod_info = module_info.ModuleInfo()
- self.assertEqual(custom_out_dir_mod_targ,
- mod_info.module_info_target)
-
- # Test out custom abs out dir is used (OUT_DIR=/tmp/out/dir2).
- abs_custom_out_dir = '/tmp/out/dir'
- os_environ_mock = {'ANDROID_PRODUCT_OUT': abs_custom_out_dir,
- constants.ANDROID_BUILD_TOP: build_top}
- custom_abs_out_dir_mod_targ = '/tmp/out/dir/module-info.json'
- # Make sure module_info_target is what we think it is.
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- mod_info = module_info.ModuleInfo()
- self.assertEqual(custom_abs_out_dir_mod_targ,
- mod_info.module_info_target)
-
- @mock.patch.object(module_info.ModuleInfo, '_load_module_info_file',)
- def test_get_path_to_module_info(self, mock_load_module):
- """Test that we correctly create the path to module info dict."""
- mod_one = 'mod1'
- mod_two = 'mod2'
- mod_path_one = '/path/to/mod1'
- mod_path_two = '/path/to/mod2'
- mod_info_dict = {mod_one: {constants.MODULE_PATH: [mod_path_one],
- constants.MODULE_NAME: mod_one},
- mod_two: {constants.MODULE_PATH: [mod_path_two],
- constants.MODULE_NAME: mod_two}}
- mock_load_module.return_value = ('mod_target', mod_info_dict)
- path_to_mod_info = {mod_path_one: [{constants.MODULE_NAME: mod_one,
- constants.MODULE_PATH: [mod_path_one]}],
- mod_path_two: [{constants.MODULE_NAME: mod_two,
- constants.MODULE_PATH: [mod_path_two]}]}
- mod_info = module_info.ModuleInfo()
- self.assertDictEqual(path_to_mod_info,
- mod_info._get_path_to_module_info(mod_info_dict))
-
- def test_is_module(self):
- """Test that we get the module when it's properly loaded."""
- # Load up the test json file and check that module is in it
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
- self.assertTrue(mod_info.is_module(EXPECTED_MOD_TARGET))
- self.assertFalse(mod_info.is_module(UNEXPECTED_MOD_TARGET))
-
- def test_get_path(self):
- """Test that we get the module path when it's properly loaded."""
- # Load up the test json file and check that module is in it
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
- self.assertEqual(mod_info.get_paths(EXPECTED_MOD_TARGET),
- EXPECTED_MOD_TARGET_PATH)
- self.assertEqual(mod_info.get_paths(MOD_NO_PATH), [])
-
- def test_get_module_names(self):
- """test that we get the module name properly."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
- self.assertEqual(mod_info.get_module_names(EXPECTED_MOD_TARGET_PATH[0]),
- [EXPECTED_MOD_TARGET])
- self.assertEqual(mod_info.get_module_names(PATH_TO_MULT_MODULES),
- MULT_MOODULES_WITH_SHARED_PATH)
-
- def test_path_to_mod_info(self):
- """test that we get the module name properly."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
- module_list = []
- for path_to_mod_info in mod_info.path_to_module_info[PATH_TO_MULT_MODULES_WITH_MULTI_ARCH]:
- module_list.append(path_to_mod_info.get(constants.MODULE_NAME))
- module_list.sort()
- TESTABLE_MODULES_WITH_SHARED_PATH.sort()
- self.assertEqual(module_list, TESTABLE_MODULES_WITH_SHARED_PATH)
-
- def test_is_suite_in_compatibility_suites(self):
- """Test is_suite_in_compatibility_suites."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
- info = {'compatibility_suites': []}
- self.assertFalse(mod_info.is_suite_in_compatibility_suites("cts", info))
- info2 = {'compatibility_suites': ["cts"]}
- self.assertTrue(mod_info.is_suite_in_compatibility_suites("cts", info2))
- self.assertFalse(mod_info.is_suite_in_compatibility_suites("vts10", info2))
- info3 = {'compatibility_suites': ["cts", "vts10"]}
- self.assertTrue(mod_info.is_suite_in_compatibility_suites("cts", info3))
- self.assertTrue(mod_info.is_suite_in_compatibility_suites("vts10", info3))
- self.assertFalse(mod_info.is_suite_in_compatibility_suites("ats", info3))
-
- @mock.patch.object(module_info.ModuleInfo, 'is_testable_module')
- @mock.patch.object(module_info.ModuleInfo, 'is_suite_in_compatibility_suites')
- def test_get_testable_modules(self, mock_is_suite_exist, mock_is_testable):
- """Test get_testable_modules."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
- mock_is_testable.return_value = False
- self.assertEqual(mod_info.get_testable_modules(), set())
- mod_info.name_to_module_info = NAME_TO_MODULE_INFO
- mock_is_testable.return_value = True
- mock_is_suite_exist.return_value = True
- self.assertEqual(1, len(mod_info.get_testable_modules('test_suite')))
- mock_is_suite_exist.return_value = False
- self.assertEqual(0, len(mod_info.get_testable_modules('test_suite')))
- self.assertEqual(1, len(mod_info.get_testable_modules()))
-
- @mock.patch.object(module_info.ModuleInfo, 'has_test_config')
- @mock.patch.object(module_info.ModuleInfo, 'is_robolectric_test')
- def test_is_testable_module(self, mock_is_robo_test, mock_has_test_config):
- """Test is_testable_module."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
- mock_is_robo_test.return_value = False
- mock_has_test_config.return_value = True
- installed_module_info = {constants.MODULE_INSTALLED:
- uc.DEFAULT_INSTALL_PATH}
- non_installed_module_info = {constants.MODULE_NAME: 'rand_name'}
- # Empty mod_info or a non-installed module.
- self.assertFalse(mod_info.is_testable_module(non_installed_module_info))
- self.assertFalse(mod_info.is_testable_module({}))
- # Testable Module or is a robo module for non-installed module.
- self.assertTrue(mod_info.is_testable_module(installed_module_info))
- mock_has_test_config.return_value = False
- self.assertFalse(mod_info.is_testable_module(installed_module_info))
- mock_is_robo_test.return_value = True
- self.assertTrue(mod_info.is_testable_module(non_installed_module_info))
-
- @mock.patch.object(module_info.ModuleInfo, 'is_auto_gen_test_config')
- def test_has_test_config(self, mock_is_auto_gen):
- """Test has_test_config."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
- info = {constants.MODULE_PATH:[uc.TEST_DATA_DIR]}
- mock_is_auto_gen.return_value = True
- # Validate we see the config when it's auto-generated.
- self.assertTrue(mod_info.has_test_config(info))
- self.assertTrue(mod_info.has_test_config({}))
- # Validate when actual config exists and there's no auto-generated config.
- mock_is_auto_gen.return_value = False
- self.assertTrue(mod_info.has_test_config(info))
- self.assertFalse(mod_info.has_test_config({}))
- # Validate the case mod_info MODULE_TEST_CONFIG be set
- info2 = {constants.MODULE_PATH:[uc.TEST_CONFIG_DATA_DIR],
- constants.MODULE_TEST_CONFIG:[os.path.join(uc.TEST_CONFIG_DATA_DIR, "a.xml")]}
- self.assertTrue(mod_info.has_test_config(info2))
-
- @mock.patch.object(module_info.ModuleInfo, 'get_module_names')
- def test_get_robolectric_test_name(self, mock_get_module_names):
- """Test get_robolectric_test_name."""
- # Happy path testing, make sure we get the run robo target.
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
- mod_info.name_to_module_info = MOD_NAME_INFO_DICT
- mod_info.path_to_module_info = MOD_PATH_INFO_DICT
- mock_get_module_names.return_value = [RUN_ROBO_MOD_NAME, NON_RUN_ROBO_MOD_NAME]
- self.assertEqual(mod_info.get_robolectric_test_name(
- NON_RUN_ROBO_MOD_NAME), RUN_ROBO_MOD_NAME)
- # Let's also make sure we don't return anything when we're not supposed
- # to.
- mock_get_module_names.return_value = [NON_RUN_ROBO_MOD_NAME]
- self.assertEqual(mod_info.get_robolectric_test_name(
- NON_RUN_ROBO_MOD_NAME), None)
-
- @mock.patch.object(module_info.ModuleInfo, 'get_module_info')
- @mock.patch.object(module_info.ModuleInfo, 'get_module_names')
- def test_is_robolectric_test(self, mock_get_module_names, mock_get_module_info):
- """Test is_robolectric_test."""
- # Happy path testing, make sure we get the run robo target.
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
- mod_info.name_to_module_info = MOD_NAME_INFO_DICT
- mod_info.path_to_module_info = MOD_PATH_INFO_DICT
- mock_get_module_names.return_value = [RUN_ROBO_MOD_NAME, NON_RUN_ROBO_MOD_NAME]
- mock_get_module_info.return_value = RUN_ROBO_MOD
- # Test on a run robo module.
- self.assertTrue(mod_info.is_robolectric_test(RUN_ROBO_MOD_NAME))
- # Test on a non-run robo module but shares with a run robo module.
- self.assertTrue(mod_info.is_robolectric_test(NON_RUN_ROBO_MOD_NAME))
- # Make sure we don't find robo tests where they don't exist.
- mock_get_module_info.return_value = None
- self.assertFalse(mod_info.is_robolectric_test('rand_mod'))
-
- @mock.patch.object(module_info.ModuleInfo, 'is_module')
- def test_is_auto_gen_test_config(self, mock_is_module):
- """Test is_auto_gen_test_config correctly detects the module."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
- mock_is_module.return_value = True
- is_auto_test_config = {'auto_test_config': [True]}
- is_not_auto_test_config = {'auto_test_config': [False]}
- is_not_auto_test_config_again = {'auto_test_config': []}
- MOD_INFO_DICT[MOD_NAME1] = is_auto_test_config
- MOD_INFO_DICT[MOD_NAME2] = is_not_auto_test_config
- MOD_INFO_DICT[MOD_NAME3] = is_not_auto_test_config_again
- MOD_INFO_DICT[MOD_NAME4] = {}
- mod_info.name_to_module_info = MOD_INFO_DICT
- self.assertTrue(mod_info.is_auto_gen_test_config(MOD_NAME1))
- self.assertFalse(mod_info.is_auto_gen_test_config(MOD_NAME2))
- self.assertFalse(mod_info.is_auto_gen_test_config(MOD_NAME3))
- self.assertFalse(mod_info.is_auto_gen_test_config(MOD_NAME4))
-
- def test_is_robolectric_module(self):
- """Test is_robolectric_module correctly detects the module."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
- is_robolectric_module = {'class': ['ROBOLECTRIC']}
- is_not_robolectric_module = {'class': ['OTHERS']}
- MOD_INFO_DICT[MOD_NAME1] = is_robolectric_module
- MOD_INFO_DICT[MOD_NAME2] = is_not_robolectric_module
- mod_info.name_to_module_info = MOD_INFO_DICT
- self.assertTrue(mod_info.is_robolectric_module(MOD_INFO_DICT[MOD_NAME1]))
- self.assertFalse(mod_info.is_robolectric_module(MOD_INFO_DICT[MOD_NAME2]))
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest-py2/proto/__init__.py b/atest-py2/proto/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/atest-py2/proto/__init__.py
+++ /dev/null
diff --git a/atest-py2/proto/clientanalytics.proto b/atest-py2/proto/clientanalytics.proto
deleted file mode 100644
index e75bf78..0000000
--- a/atest-py2/proto/clientanalytics.proto
+++ /dev/null
@@ -1,22 +0,0 @@
-syntax = "proto2";
-
-option java_package = "com.android.asuite.clearcut";
-
-message LogRequest {
- optional ClientInfo client_info = 1;
- optional int32 log_source = 2;
- optional int64 request_time_ms = 4;
- repeated LogEvent log_event = 3;
-}
-message ClientInfo {
- optional int32 client_type = 1;
-}
-
-message LogResponse {
- optional int64 next_request_wait_millis = 1 ;
-}
-
-message LogEvent {
- optional int64 event_time_ms = 1 ;
- optional bytes source_extension = 6;
-}
diff --git a/atest-py2/proto/clientanalytics_pb2.py b/atest-py2/proto/clientanalytics_pb2.py
deleted file mode 100644
index b58dcc7..0000000
--- a/atest-py2/proto/clientanalytics_pb2.py
+++ /dev/null
@@ -1,217 +0,0 @@
-# pylint: skip-file
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/clientanalytics.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
- name='proto/clientanalytics.proto',
- package='',
- syntax='proto2',
- serialized_pb=_b('\n\x1bproto/clientanalytics.proto\"y\n\nLogRequest\x12 \n\x0b\x63lient_info\x18\x01 \x01(\x0b\x32\x0b.ClientInfo\x12\x12\n\nlog_source\x18\x02 \x01(\x05\x12\x17\n\x0frequest_time_ms\x18\x04 \x01(\x03\x12\x1c\n\tlog_event\x18\x03 \x03(\x0b\x32\t.LogEvent\"!\n\nClientInfo\x12\x13\n\x0b\x63lient_type\x18\x01 \x01(\x05\"/\n\x0bLogResponse\x12 \n\x18next_request_wait_millis\x18\x01 \x01(\x03\";\n\x08LogEvent\x12\x15\n\revent_time_ms\x18\x01 \x01(\x03\x12\x18\n\x10source_extension\x18\x06 \x01(\x0c')
-)
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-
-
-
-_LOGREQUEST = _descriptor.Descriptor(
- name='LogRequest',
- full_name='LogRequest',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='client_info', full_name='LogRequest.client_info', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='log_source', full_name='LogRequest.log_source', index=1,
- number=2, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='request_time_ms', full_name='LogRequest.request_time_ms', index=2,
- number=4, type=3, cpp_type=2, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='log_event', full_name='LogRequest.log_event', index=3,
- number=3, type=11, cpp_type=10, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=31,
- serialized_end=152,
-)
-
-
-_CLIENTINFO = _descriptor.Descriptor(
- name='ClientInfo',
- full_name='ClientInfo',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='client_type', full_name='ClientInfo.client_type', index=0,
- number=1, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=154,
- serialized_end=187,
-)
-
-
-_LOGRESPONSE = _descriptor.Descriptor(
- name='LogResponse',
- full_name='LogResponse',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='next_request_wait_millis', full_name='LogResponse.next_request_wait_millis', index=0,
- number=1, type=3, cpp_type=2, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=189,
- serialized_end=236,
-)
-
-
-_LOGEVENT = _descriptor.Descriptor(
- name='LogEvent',
- full_name='LogEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='event_time_ms', full_name='LogEvent.event_time_ms', index=0,
- number=1, type=3, cpp_type=2, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='source_extension', full_name='LogEvent.source_extension', index=1,
- number=6, type=12, cpp_type=9, label=1,
- has_default_value=False, default_value=_b(""),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=238,
- serialized_end=297,
-)
-
-_LOGREQUEST.fields_by_name['client_info'].message_type = _CLIENTINFO
-_LOGREQUEST.fields_by_name['log_event'].message_type = _LOGEVENT
-DESCRIPTOR.message_types_by_name['LogRequest'] = _LOGREQUEST
-DESCRIPTOR.message_types_by_name['ClientInfo'] = _CLIENTINFO
-DESCRIPTOR.message_types_by_name['LogResponse'] = _LOGRESPONSE
-DESCRIPTOR.message_types_by_name['LogEvent'] = _LOGEVENT
-
-LogRequest = _reflection.GeneratedProtocolMessageType('LogRequest', (_message.Message,), dict(
- DESCRIPTOR = _LOGREQUEST,
- __module__ = 'proto.clientanalytics_pb2'
- # @@protoc_insertion_point(class_scope:LogRequest)
- ))
-_sym_db.RegisterMessage(LogRequest)
-
-ClientInfo = _reflection.GeneratedProtocolMessageType('ClientInfo', (_message.Message,), dict(
- DESCRIPTOR = _CLIENTINFO,
- __module__ = 'proto.clientanalytics_pb2'
- # @@protoc_insertion_point(class_scope:ClientInfo)
- ))
-_sym_db.RegisterMessage(ClientInfo)
-
-LogResponse = _reflection.GeneratedProtocolMessageType('LogResponse', (_message.Message,), dict(
- DESCRIPTOR = _LOGRESPONSE,
- __module__ = 'proto.clientanalytics_pb2'
- # @@protoc_insertion_point(class_scope:LogResponse)
- ))
-_sym_db.RegisterMessage(LogResponse)
-
-LogEvent = _reflection.GeneratedProtocolMessageType('LogEvent', (_message.Message,), dict(
- DESCRIPTOR = _LOGEVENT,
- __module__ = 'proto.clientanalytics_pb2'
- # @@protoc_insertion_point(class_scope:LogEvent)
- ))
-_sym_db.RegisterMessage(LogEvent)
-
-
-# @@protoc_insertion_point(module_scope)
diff --git a/atest-py2/proto/common.proto b/atest-py2/proto/common.proto
deleted file mode 100644
index 49cc48c..0000000
--- a/atest-py2/proto/common.proto
+++ /dev/null
@@ -1,16 +0,0 @@
-syntax = "proto2";
-
-option java_package = "com.android.asuite.clearcut";
-
-message Duration {
- required int64 seconds = 1;
- required int32 nanos = 2;
-}
-
-// ----------------
-// ENUM DEFINITIONS
-// ----------------
-enum UserType {
- GOOGLE = 0;
- EXTERNAL = 1;
-}
diff --git a/atest-py2/proto/common_pb2.py b/atest-py2/proto/common_pb2.py
deleted file mode 100644
index 5b7bd2e..0000000
--- a/atest-py2/proto/common_pb2.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# pylint: skip-file
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/common.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf.internal import enum_type_wrapper
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
- name='proto/common.proto',
- package='',
- syntax='proto2',
- serialized_pb=_b('\n\x12proto/common.proto\"*\n\x08\x44uration\x12\x0f\n\x07seconds\x18\x01 \x02(\x03\x12\r\n\x05nanos\x18\x02 \x02(\x05*$\n\x08UserType\x12\n\n\x06GOOGLE\x10\x00\x12\x0c\n\x08\x45XTERNAL\x10\x01')
-)
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-_USERTYPE = _descriptor.EnumDescriptor(
- name='UserType',
- full_name='UserType',
- filename=None,
- file=DESCRIPTOR,
- values=[
- _descriptor.EnumValueDescriptor(
- name='GOOGLE', index=0, number=0,
- options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='EXTERNAL', index=1, number=1,
- options=None,
- type=None),
- ],
- containing_type=None,
- options=None,
- serialized_start=66,
- serialized_end=102,
-)
-_sym_db.RegisterEnumDescriptor(_USERTYPE)
-
-UserType = enum_type_wrapper.EnumTypeWrapper(_USERTYPE)
-GOOGLE = 0
-EXTERNAL = 1
-
-
-
-_DURATION = _descriptor.Descriptor(
- name='Duration',
- full_name='Duration',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='seconds', full_name='Duration.seconds', index=0,
- number=1, type=3, cpp_type=2, label=2,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='nanos', full_name='Duration.nanos', index=1,
- number=2, type=5, cpp_type=1, label=2,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=22,
- serialized_end=64,
-)
-
-DESCRIPTOR.message_types_by_name['Duration'] = _DURATION
-DESCRIPTOR.enum_types_by_name['UserType'] = _USERTYPE
-
-Duration = _reflection.GeneratedProtocolMessageType('Duration', (_message.Message,), dict(
- DESCRIPTOR = _DURATION,
- __module__ = 'proto.common_pb2'
- # @@protoc_insertion_point(class_scope:Duration)
- ))
-_sym_db.RegisterMessage(Duration)
-
-
-# @@protoc_insertion_point(module_scope)
diff --git a/atest-py2/proto/external_user_log.proto b/atest-py2/proto/external_user_log.proto
deleted file mode 100644
index 533ff0a..0000000
--- a/atest-py2/proto/external_user_log.proto
+++ /dev/null
@@ -1,70 +0,0 @@
-syntax = "proto2";
-
-import "proto/common.proto";
-
-option java_package = "com.android.asuite.clearcut";
-
-// Proto used by Atest CLI Tool for External Non-PII Users
-message AtestLogEventExternal {
-
- // ------------------------
- // EVENT DEFINITIONS
- // ------------------------
- // Occurs immediately upon execution of atest
- message AtestStartEvent {
- }
-
- // Occurs when atest exits for any reason
- message AtestExitEvent {
- optional Duration duration = 1;
- optional int32 exit_code = 2;
- }
-
- // Occurs after a SINGLE test reference has been resolved to a test or
- // not found
- message FindTestFinishEvent {
- optional Duration duration = 1;
- optional bool success = 2;
- }
-
- // Occurs after the build finishes, either successfully or not.
- message BuildFinishEvent {
- optional Duration duration = 1;
- optional bool success = 2;
- }
-
- // Occurs when a single test runner has completed
- message RunnerFinishEvent {
- optional Duration duration = 1;
- optional bool success = 2;
- optional string runner_name = 3;
- }
-
- // Occurs after all test runners and tests have finished
- message RunTestsFinishEvent {
- optional Duration duration = 1;
- }
-
- // Occurs after detection of catching bug by atest have finished
- message LocalDetectEvent {
- optional int32 detect_type = 1;
- optional int32 result = 2;
- }
-
- // ------------------------
- // FIELDS FOR ATESTLOGEVENT
- // ------------------------
- optional string user_key = 1;
- optional string run_id = 2;
- optional UserType user_type = 3;
- optional string tool_name = 10;
- oneof event {
- AtestStartEvent atest_start_event = 4;
- AtestExitEvent atest_exit_event = 5;
- FindTestFinishEvent find_test_finish_event= 6;
- BuildFinishEvent build_finish_event = 7;
- RunnerFinishEvent runner_finish_event = 8;
- RunTestsFinishEvent run_tests_finish_event = 9;
- LocalDetectEvent local_detect_event = 11;
- }
-}
diff --git a/atest-py2/proto/external_user_log_pb2.py b/atest-py2/proto/external_user_log_pb2.py
deleted file mode 100644
index ba33fd4..0000000
--- a/atest-py2/proto/external_user_log_pb2.py
+++ /dev/null
@@ -1,487 +0,0 @@
-# pylint: skip-file
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/external_user_log.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-from proto import common_pb2 as proto_dot_common__pb2
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
- name='proto/external_user_log.proto',
- package='',
- syntax='proto2',
- serialized_pb=_b('\n\x1dproto/external_user_log.proto\x1a\x12proto/common.proto\"\x8f\x08\n\x15\x41testLogEventExternal\x12\x10\n\x08user_key\x18\x01 \x01(\t\x12\x0e\n\x06run_id\x18\x02 \x01(\t\x12\x1c\n\tuser_type\x18\x03 \x01(\x0e\x32\t.UserType\x12\x11\n\ttool_name\x18\n \x01(\t\x12\x43\n\x11\x61test_start_event\x18\x04 \x01(\x0b\x32&.AtestLogEventExternal.AtestStartEventH\x00\x12\x41\n\x10\x61test_exit_event\x18\x05 \x01(\x0b\x32%.AtestLogEventExternal.AtestExitEventH\x00\x12L\n\x16\x66ind_test_finish_event\x18\x06 \x01(\x0b\x32*.AtestLogEventExternal.FindTestFinishEventH\x00\x12\x45\n\x12\x62uild_finish_event\x18\x07 \x01(\x0b\x32\'.AtestLogEventExternal.BuildFinishEventH\x00\x12G\n\x13runner_finish_event\x18\x08 \x01(\x0b\x32(.AtestLogEventExternal.RunnerFinishEventH\x00\x12L\n\x16run_tests_finish_event\x18\t \x01(\x0b\x32*.AtestLogEventExternal.RunTestsFinishEventH\x00\x12\x45\n\x12local_detect_event\x18\x0b \x01(\x0b\x32\'.AtestLogEventExternal.LocalDetectEventH\x00\x1a\x11\n\x0f\x41testStartEvent\x1a@\n\x0e\x41testExitEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x12\x11\n\texit_code\x18\x02 \x01(\x05\x1a\x43\n\x13\x46indTestFinishEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x12\x0f\n\x07success\x18\x02 \x01(\x08\x1a@\n\x10\x42uildFinishEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x12\x0f\n\x07success\x18\x02 \x01(\x08\x1aV\n\x11RunnerFinishEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x13\n\x0brunner_name\x18\x03 \x01(\t\x1a\x32\n\x13RunTestsFinishEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x1a\x37\n\x10LocalDetectEvent\x12\x13\n\x0b\x64\x65tect_type\x18\x01 \x01(\x05\x12\x0e\n\x06result\x18\x02 \x01(\x05\x42\x07\n\x05\x65vent')
- ,
- dependencies=[proto_dot_common__pb2.DESCRIPTOR,])
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-
-
-
-_ATESTLOGEVENTEXTERNAL_ATESTSTARTEVENT = _descriptor.Descriptor(
- name='AtestStartEvent',
- full_name='AtestLogEventExternal.AtestStartEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=669,
- serialized_end=686,
-)
-
-_ATESTLOGEVENTEXTERNAL_ATESTEXITEVENT = _descriptor.Descriptor(
- name='AtestExitEvent',
- full_name='AtestLogEventExternal.AtestExitEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventExternal.AtestExitEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='exit_code', full_name='AtestLogEventExternal.AtestExitEvent.exit_code', index=1,
- number=2, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=688,
- serialized_end=752,
-)
-
-_ATESTLOGEVENTEXTERNAL_FINDTESTFINISHEVENT = _descriptor.Descriptor(
- name='FindTestFinishEvent',
- full_name='AtestLogEventExternal.FindTestFinishEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventExternal.FindTestFinishEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='success', full_name='AtestLogEventExternal.FindTestFinishEvent.success', index=1,
- number=2, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=754,
- serialized_end=821,
-)
-
-_ATESTLOGEVENTEXTERNAL_BUILDFINISHEVENT = _descriptor.Descriptor(
- name='BuildFinishEvent',
- full_name='AtestLogEventExternal.BuildFinishEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventExternal.BuildFinishEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='success', full_name='AtestLogEventExternal.BuildFinishEvent.success', index=1,
- number=2, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=823,
- serialized_end=887,
-)
-
-_ATESTLOGEVENTEXTERNAL_RUNNERFINISHEVENT = _descriptor.Descriptor(
- name='RunnerFinishEvent',
- full_name='AtestLogEventExternal.RunnerFinishEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventExternal.RunnerFinishEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='success', full_name='AtestLogEventExternal.RunnerFinishEvent.success', index=1,
- number=2, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='runner_name', full_name='AtestLogEventExternal.RunnerFinishEvent.runner_name', index=2,
- number=3, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=889,
- serialized_end=975,
-)
-
-_ATESTLOGEVENTEXTERNAL_RUNTESTSFINISHEVENT = _descriptor.Descriptor(
- name='RunTestsFinishEvent',
- full_name='AtestLogEventExternal.RunTestsFinishEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventExternal.RunTestsFinishEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=977,
- serialized_end=1027,
-)
-
-_ATESTLOGEVENTEXTERNAL_LOCALDETECTEVENT = _descriptor.Descriptor(
- name='LocalDetectEvent',
- full_name='AtestLogEventExternal.LocalDetectEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='detect_type', full_name='AtestLogEventExternal.LocalDetectEvent.detect_type', index=0,
- number=1, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='result', full_name='AtestLogEventExternal.LocalDetectEvent.result', index=1,
- number=2, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1029,
- serialized_end=1084,
-)
-
-_ATESTLOGEVENTEXTERNAL = _descriptor.Descriptor(
- name='AtestLogEventExternal',
- full_name='AtestLogEventExternal',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='user_key', full_name='AtestLogEventExternal.user_key', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='run_id', full_name='AtestLogEventExternal.run_id', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='user_type', full_name='AtestLogEventExternal.user_type', index=2,
- number=3, type=14, cpp_type=8, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='tool_name', full_name='AtestLogEventExternal.tool_name', index=3,
- number=10, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='atest_start_event', full_name='AtestLogEventExternal.atest_start_event', index=4,
- number=4, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='atest_exit_event', full_name='AtestLogEventExternal.atest_exit_event', index=5,
- number=5, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='find_test_finish_event', full_name='AtestLogEventExternal.find_test_finish_event', index=6,
- number=6, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='build_finish_event', full_name='AtestLogEventExternal.build_finish_event', index=7,
- number=7, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='runner_finish_event', full_name='AtestLogEventExternal.runner_finish_event', index=8,
- number=8, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='run_tests_finish_event', full_name='AtestLogEventExternal.run_tests_finish_event', index=9,
- number=9, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='local_detect_event', full_name='AtestLogEventExternal.local_detect_event', index=10,
- number=11, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[_ATESTLOGEVENTEXTERNAL_ATESTSTARTEVENT, _ATESTLOGEVENTEXTERNAL_ATESTEXITEVENT, _ATESTLOGEVENTEXTERNAL_FINDTESTFINISHEVENT, _ATESTLOGEVENTEXTERNAL_BUILDFINISHEVENT, _ATESTLOGEVENTEXTERNAL_RUNNERFINISHEVENT, _ATESTLOGEVENTEXTERNAL_RUNTESTSFINISHEVENT, _ATESTLOGEVENTEXTERNAL_LOCALDETECTEVENT, ],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- _descriptor.OneofDescriptor(
- name='event', full_name='AtestLogEventExternal.event',
- index=0, containing_type=None, fields=[]),
- ],
- serialized_start=54,
- serialized_end=1093,
-)
-
-_ATESTLOGEVENTEXTERNAL_ATESTSTARTEVENT.containing_type = _ATESTLOGEVENTEXTERNAL
-_ATESTLOGEVENTEXTERNAL_ATESTEXITEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTEXTERNAL_ATESTEXITEVENT.containing_type = _ATESTLOGEVENTEXTERNAL
-_ATESTLOGEVENTEXTERNAL_FINDTESTFINISHEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTEXTERNAL_FINDTESTFINISHEVENT.containing_type = _ATESTLOGEVENTEXTERNAL
-_ATESTLOGEVENTEXTERNAL_BUILDFINISHEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTEXTERNAL_BUILDFINISHEVENT.containing_type = _ATESTLOGEVENTEXTERNAL
-_ATESTLOGEVENTEXTERNAL_RUNNERFINISHEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTEXTERNAL_RUNNERFINISHEVENT.containing_type = _ATESTLOGEVENTEXTERNAL
-_ATESTLOGEVENTEXTERNAL_RUNTESTSFINISHEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTEXTERNAL_RUNTESTSFINISHEVENT.containing_type = _ATESTLOGEVENTEXTERNAL
-_ATESTLOGEVENTEXTERNAL_LOCALDETECTEVENT.containing_type = _ATESTLOGEVENTEXTERNAL
-_ATESTLOGEVENTEXTERNAL.fields_by_name['user_type'].enum_type = proto_dot_common__pb2._USERTYPE
-_ATESTLOGEVENTEXTERNAL.fields_by_name['atest_start_event'].message_type = _ATESTLOGEVENTEXTERNAL_ATESTSTARTEVENT
-_ATESTLOGEVENTEXTERNAL.fields_by_name['atest_exit_event'].message_type = _ATESTLOGEVENTEXTERNAL_ATESTEXITEVENT
-_ATESTLOGEVENTEXTERNAL.fields_by_name['find_test_finish_event'].message_type = _ATESTLOGEVENTEXTERNAL_FINDTESTFINISHEVENT
-_ATESTLOGEVENTEXTERNAL.fields_by_name['build_finish_event'].message_type = _ATESTLOGEVENTEXTERNAL_BUILDFINISHEVENT
-_ATESTLOGEVENTEXTERNAL.fields_by_name['runner_finish_event'].message_type = _ATESTLOGEVENTEXTERNAL_RUNNERFINISHEVENT
-_ATESTLOGEVENTEXTERNAL.fields_by_name['run_tests_finish_event'].message_type = _ATESTLOGEVENTEXTERNAL_RUNTESTSFINISHEVENT
-_ATESTLOGEVENTEXTERNAL.fields_by_name['local_detect_event'].message_type = _ATESTLOGEVENTEXTERNAL_LOCALDETECTEVENT
-_ATESTLOGEVENTEXTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTEXTERNAL.fields_by_name['atest_start_event'])
-_ATESTLOGEVENTEXTERNAL.fields_by_name['atest_start_event'].containing_oneof = _ATESTLOGEVENTEXTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTEXTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTEXTERNAL.fields_by_name['atest_exit_event'])
-_ATESTLOGEVENTEXTERNAL.fields_by_name['atest_exit_event'].containing_oneof = _ATESTLOGEVENTEXTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTEXTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTEXTERNAL.fields_by_name['find_test_finish_event'])
-_ATESTLOGEVENTEXTERNAL.fields_by_name['find_test_finish_event'].containing_oneof = _ATESTLOGEVENTEXTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTEXTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTEXTERNAL.fields_by_name['build_finish_event'])
-_ATESTLOGEVENTEXTERNAL.fields_by_name['build_finish_event'].containing_oneof = _ATESTLOGEVENTEXTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTEXTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTEXTERNAL.fields_by_name['runner_finish_event'])
-_ATESTLOGEVENTEXTERNAL.fields_by_name['runner_finish_event'].containing_oneof = _ATESTLOGEVENTEXTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTEXTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTEXTERNAL.fields_by_name['run_tests_finish_event'])
-_ATESTLOGEVENTEXTERNAL.fields_by_name['run_tests_finish_event'].containing_oneof = _ATESTLOGEVENTEXTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTEXTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTEXTERNAL.fields_by_name['local_detect_event'])
-_ATESTLOGEVENTEXTERNAL.fields_by_name['local_detect_event'].containing_oneof = _ATESTLOGEVENTEXTERNAL.oneofs_by_name['event']
-DESCRIPTOR.message_types_by_name['AtestLogEventExternal'] = _ATESTLOGEVENTEXTERNAL
-
-AtestLogEventExternal = _reflection.GeneratedProtocolMessageType('AtestLogEventExternal', (_message.Message,), dict(
-
- AtestStartEvent = _reflection.GeneratedProtocolMessageType('AtestStartEvent', (_message.Message,), dict(
- DESCRIPTOR = _ATESTLOGEVENTEXTERNAL_ATESTSTARTEVENT,
- __module__ = 'proto.external_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventExternal.AtestStartEvent)
- ))
- ,
-
- AtestExitEvent = _reflection.GeneratedProtocolMessageType('AtestExitEvent', (_message.Message,), dict(
- DESCRIPTOR = _ATESTLOGEVENTEXTERNAL_ATESTEXITEVENT,
- __module__ = 'proto.external_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventExternal.AtestExitEvent)
- ))
- ,
-
- FindTestFinishEvent = _reflection.GeneratedProtocolMessageType('FindTestFinishEvent', (_message.Message,), dict(
- DESCRIPTOR = _ATESTLOGEVENTEXTERNAL_FINDTESTFINISHEVENT,
- __module__ = 'proto.external_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventExternal.FindTestFinishEvent)
- ))
- ,
-
- BuildFinishEvent = _reflection.GeneratedProtocolMessageType('BuildFinishEvent', (_message.Message,), dict(
- DESCRIPTOR = _ATESTLOGEVENTEXTERNAL_BUILDFINISHEVENT,
- __module__ = 'proto.external_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventExternal.BuildFinishEvent)
- ))
- ,
-
- RunnerFinishEvent = _reflection.GeneratedProtocolMessageType('RunnerFinishEvent', (_message.Message,), dict(
- DESCRIPTOR = _ATESTLOGEVENTEXTERNAL_RUNNERFINISHEVENT,
- __module__ = 'proto.external_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventExternal.RunnerFinishEvent)
- ))
- ,
-
- RunTestsFinishEvent = _reflection.GeneratedProtocolMessageType('RunTestsFinishEvent', (_message.Message,), dict(
- DESCRIPTOR = _ATESTLOGEVENTEXTERNAL_RUNTESTSFINISHEVENT,
- __module__ = 'proto.external_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventExternal.RunTestsFinishEvent)
- ))
- ,
-
- LocalDetectEvent = _reflection.GeneratedProtocolMessageType('LocalDetectEvent', (_message.Message,), dict(
- DESCRIPTOR = _ATESTLOGEVENTEXTERNAL_LOCALDETECTEVENT,
- __module__ = 'proto.external_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventExternal.LocalDetectEvent)
- ))
- ,
- DESCRIPTOR = _ATESTLOGEVENTEXTERNAL,
- __module__ = 'proto.external_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventExternal)
- ))
-_sym_db.RegisterMessage(AtestLogEventExternal)
-_sym_db.RegisterMessage(AtestLogEventExternal.AtestStartEvent)
-_sym_db.RegisterMessage(AtestLogEventExternal.AtestExitEvent)
-_sym_db.RegisterMessage(AtestLogEventExternal.FindTestFinishEvent)
-_sym_db.RegisterMessage(AtestLogEventExternal.BuildFinishEvent)
-_sym_db.RegisterMessage(AtestLogEventExternal.RunnerFinishEvent)
-_sym_db.RegisterMessage(AtestLogEventExternal.RunTestsFinishEvent)
-_sym_db.RegisterMessage(AtestLogEventExternal.LocalDetectEvent)
-
-
-# @@protoc_insertion_point(module_scope)
diff --git a/atest-py2/proto/internal_user_log.proto b/atest-py2/proto/internal_user_log.proto
deleted file mode 100644
index 05d4dee..0000000
--- a/atest-py2/proto/internal_user_log.proto
+++ /dev/null
@@ -1,86 +0,0 @@
-syntax = "proto2";
-
-import "proto/common.proto";
-
-option java_package = "com.android.asuite.clearcut";
-
-// Proto used by Atest CLI Tool for internal Users
-message AtestLogEventInternal {
-
- // ------------------------
- // EVENT DEFINITIONS
- // ------------------------
- // Occurs immediately upon execution of atest
- message AtestStartEvent {
- optional string command_line = 1;
- repeated string test_references = 2;
- optional string cwd = 3;
- optional string os = 4;
- }
-
- // Occurs when atest exits for any reason
- message AtestExitEvent {
- optional Duration duration = 1;
- optional int32 exit_code = 2;
- optional string stacktrace = 3;
- optional string logs = 4;
- }
-
- // Occurs after a SINGLE test reference has been resolved to a test or
- // not found
- message FindTestFinishEvent {
- optional Duration duration = 1;
- optional bool success = 2;
- optional string test_reference = 3;
- repeated string test_finders = 4;
- optional string test_info = 5;
- }
-
- // Occurs after the build finishes, either successfully or not.
- message BuildFinishEvent {
- optional Duration duration = 1;
- optional bool success = 2;
- repeated string targets = 3;
- }
-
- // Occurs when a single test runner has completed
- message RunnerFinishEvent {
- optional Duration duration = 1;
- optional bool success = 2;
- optional string runner_name = 3;
- message Test {
- optional string name = 1;
- optional int32 result = 2;
- optional string stacktrace = 3;
- }
- repeated Test test = 4;
- }
-
- // Occurs after all test runners and tests have finished
- message RunTestsFinishEvent {
- optional Duration duration = 1;
- }
-
- // Occurs after detection of catching bug by atest have finished
- message LocalDetectEvent {
- optional int32 detect_type = 1;
- optional int32 result = 2;
- }
-
- // ------------------------
- // FIELDS FOR ATESTLOGEVENT
- // ------------------------
- optional string user_key = 1;
- optional string run_id = 2;
- optional UserType user_type = 3;
- optional string tool_name = 10;
- oneof event {
- AtestStartEvent atest_start_event = 4;
- AtestExitEvent atest_exit_event = 5;
- FindTestFinishEvent find_test_finish_event= 6;
- BuildFinishEvent build_finish_event = 7;
- RunnerFinishEvent runner_finish_event = 8;
- RunTestsFinishEvent run_tests_finish_event = 9;
- LocalDetectEvent local_detect_event = 11;
- }
-}
diff --git a/atest-py2/proto/internal_user_log_pb2.py b/atest-py2/proto/internal_user_log_pb2.py
deleted file mode 100644
index e8585dc..0000000
--- a/atest-py2/proto/internal_user_log_pb2.py
+++ /dev/null
@@ -1,618 +0,0 @@
-# pylint: skip-file
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/internal_user_log.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-from proto import common_pb2 as proto_dot_common__pb2
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
- name='proto/internal_user_log.proto',
- package='',
- syntax='proto2',
- serialized_pb=_b('\n\x1dproto/internal_user_log.proto\x1a\x12proto/common.proto\"\xc4\n\n\x15\x41testLogEventInternal\x12\x10\n\x08user_key\x18\x01 \x01(\t\x12\x0e\n\x06run_id\x18\x02 \x01(\t\x12\x1c\n\tuser_type\x18\x03 \x01(\x0e\x32\t.UserType\x12\x11\n\ttool_name\x18\n \x01(\t\x12\x43\n\x11\x61test_start_event\x18\x04 \x01(\x0b\x32&.AtestLogEventInternal.AtestStartEventH\x00\x12\x41\n\x10\x61test_exit_event\x18\x05 \x01(\x0b\x32%.AtestLogEventInternal.AtestExitEventH\x00\x12L\n\x16\x66ind_test_finish_event\x18\x06 \x01(\x0b\x32*.AtestLogEventInternal.FindTestFinishEventH\x00\x12\x45\n\x12\x62uild_finish_event\x18\x07 \x01(\x0b\x32\'.AtestLogEventInternal.BuildFinishEventH\x00\x12G\n\x13runner_finish_event\x18\x08 \x01(\x0b\x32(.AtestLogEventInternal.RunnerFinishEventH\x00\x12L\n\x16run_tests_finish_event\x18\t \x01(\x0b\x32*.AtestLogEventInternal.RunTestsFinishEventH\x00\x12\x45\n\x12local_detect_event\x18\x0b \x01(\x0b\x32\'.AtestLogEventInternal.LocalDetectEventH\x00\x1aY\n\x0f\x41testStartEvent\x12\x14\n\x0c\x63ommand_line\x18\x01 \x01(\t\x12\x17\n\x0ftest_references\x18\x02 \x03(\t\x12\x0b\n\x03\x63wd\x18\x03 \x01(\t\x12\n\n\x02os\x18\x04 \x01(\t\x1a\x62\n\x0e\x41testExitEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x12\x11\n\texit_code\x18\x02 \x01(\x05\x12\x12\n\nstacktrace\x18\x03 \x01(\t\x12\x0c\n\x04logs\x18\x04 \x01(\t\x1a\x84\x01\n\x13\x46indTestFinishEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x16\n\x0etest_reference\x18\x03 \x01(\t\x12\x14\n\x0ctest_finders\x18\x04 \x03(\t\x12\x11\n\ttest_info\x18\x05 \x01(\t\x1aQ\n\x10\x42uildFinishEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x0f\n\x07targets\x18\x03 \x03(\t\x1a\xcd\x01\n\x11RunnerFinishEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x13\n\x0brunner_name\x18\x03 \x01(\t\x12;\n\x04test\x18\x04 
\x03(\x0b\x32-.AtestLogEventInternal.RunnerFinishEvent.Test\x1a\x38\n\x04Test\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06result\x18\x02 \x01(\x05\x12\x12\n\nstacktrace\x18\x03 \x01(\t\x1a\x32\n\x13RunTestsFinishEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x1a\x37\n\x10LocalDetectEvent\x12\x13\n\x0b\x64\x65tect_type\x18\x01 \x01(\x05\x12\x0e\n\x06result\x18\x02 \x01(\x05\x42\x07\n\x05\x65vent')
- ,
- dependencies=[proto_dot_common__pb2.DESCRIPTOR,])
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-
-
-
-_ATESTLOGEVENTINTERNAL_ATESTSTARTEVENT = _descriptor.Descriptor(
- name='AtestStartEvent',
- full_name='AtestLogEventInternal.AtestStartEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='command_line', full_name='AtestLogEventInternal.AtestStartEvent.command_line', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='test_references', full_name='AtestLogEventInternal.AtestStartEvent.test_references', index=1,
- number=2, type=9, cpp_type=9, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='cwd', full_name='AtestLogEventInternal.AtestStartEvent.cwd', index=2,
- number=3, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='os', full_name='AtestLogEventInternal.AtestStartEvent.os', index=3,
- number=4, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=669,
- serialized_end=758,
-)
-
-_ATESTLOGEVENTINTERNAL_ATESTEXITEVENT = _descriptor.Descriptor(
- name='AtestExitEvent',
- full_name='AtestLogEventInternal.AtestExitEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventInternal.AtestExitEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='exit_code', full_name='AtestLogEventInternal.AtestExitEvent.exit_code', index=1,
- number=2, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='stacktrace', full_name='AtestLogEventInternal.AtestExitEvent.stacktrace', index=2,
- number=3, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='logs', full_name='AtestLogEventInternal.AtestExitEvent.logs', index=3,
- number=4, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=760,
- serialized_end=858,
-)
-
-_ATESTLOGEVENTINTERNAL_FINDTESTFINISHEVENT = _descriptor.Descriptor(
- name='FindTestFinishEvent',
- full_name='AtestLogEventInternal.FindTestFinishEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventInternal.FindTestFinishEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='success', full_name='AtestLogEventInternal.FindTestFinishEvent.success', index=1,
- number=2, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='test_reference', full_name='AtestLogEventInternal.FindTestFinishEvent.test_reference', index=2,
- number=3, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='test_finders', full_name='AtestLogEventInternal.FindTestFinishEvent.test_finders', index=3,
- number=4, type=9, cpp_type=9, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='test_info', full_name='AtestLogEventInternal.FindTestFinishEvent.test_info', index=4,
- number=5, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=861,
- serialized_end=993,
-)
-
-_ATESTLOGEVENTINTERNAL_BUILDFINISHEVENT = _descriptor.Descriptor(
- name='BuildFinishEvent',
- full_name='AtestLogEventInternal.BuildFinishEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventInternal.BuildFinishEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='success', full_name='AtestLogEventInternal.BuildFinishEvent.success', index=1,
- number=2, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='targets', full_name='AtestLogEventInternal.BuildFinishEvent.targets', index=2,
- number=3, type=9, cpp_type=9, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=995,
- serialized_end=1076,
-)
-
-_ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT_TEST = _descriptor.Descriptor(
- name='Test',
- full_name='AtestLogEventInternal.RunnerFinishEvent.Test',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='name', full_name='AtestLogEventInternal.RunnerFinishEvent.Test.name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='result', full_name='AtestLogEventInternal.RunnerFinishEvent.Test.result', index=1,
- number=2, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='stacktrace', full_name='AtestLogEventInternal.RunnerFinishEvent.Test.stacktrace', index=2,
- number=3, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1228,
- serialized_end=1284,
-)
-
-_ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT = _descriptor.Descriptor(
- name='RunnerFinishEvent',
- full_name='AtestLogEventInternal.RunnerFinishEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventInternal.RunnerFinishEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='success', full_name='AtestLogEventInternal.RunnerFinishEvent.success', index=1,
- number=2, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='runner_name', full_name='AtestLogEventInternal.RunnerFinishEvent.runner_name', index=2,
- number=3, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='test', full_name='AtestLogEventInternal.RunnerFinishEvent.test', index=3,
- number=4, type=11, cpp_type=10, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[_ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT_TEST, ],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1079,
- serialized_end=1284,
-)
-
-_ATESTLOGEVENTINTERNAL_RUNTESTSFINISHEVENT = _descriptor.Descriptor(
- name='RunTestsFinishEvent',
- full_name='AtestLogEventInternal.RunTestsFinishEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventInternal.RunTestsFinishEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1286,
- serialized_end=1336,
-)
-
-_ATESTLOGEVENTINTERNAL_LOCALDETECTEVENT = _descriptor.Descriptor(
- name='LocalDetectEvent',
- full_name='AtestLogEventInternal.LocalDetectEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='detect_type', full_name='AtestLogEventInternal.LocalDetectEvent.detect_type', index=0,
- number=1, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='result', full_name='AtestLogEventInternal.LocalDetectEvent.result', index=1,
- number=2, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1338,
- serialized_end=1393,
-)
-
-_ATESTLOGEVENTINTERNAL = _descriptor.Descriptor(
- name='AtestLogEventInternal',
- full_name='AtestLogEventInternal',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='user_key', full_name='AtestLogEventInternal.user_key', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='run_id', full_name='AtestLogEventInternal.run_id', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='user_type', full_name='AtestLogEventInternal.user_type', index=2,
- number=3, type=14, cpp_type=8, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='tool_name', full_name='AtestLogEventInternal.tool_name', index=3,
- number=10, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='atest_start_event', full_name='AtestLogEventInternal.atest_start_event', index=4,
- number=4, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='atest_exit_event', full_name='AtestLogEventInternal.atest_exit_event', index=5,
- number=5, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='find_test_finish_event', full_name='AtestLogEventInternal.find_test_finish_event', index=6,
- number=6, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='build_finish_event', full_name='AtestLogEventInternal.build_finish_event', index=7,
- number=7, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='runner_finish_event', full_name='AtestLogEventInternal.runner_finish_event', index=8,
- number=8, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='run_tests_finish_event', full_name='AtestLogEventInternal.run_tests_finish_event', index=9,
- number=9, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='local_detect_event', full_name='AtestLogEventInternal.local_detect_event', index=10,
- number=11, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[_ATESTLOGEVENTINTERNAL_ATESTSTARTEVENT, _ATESTLOGEVENTINTERNAL_ATESTEXITEVENT, _ATESTLOGEVENTINTERNAL_FINDTESTFINISHEVENT, _ATESTLOGEVENTINTERNAL_BUILDFINISHEVENT, _ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT, _ATESTLOGEVENTINTERNAL_RUNTESTSFINISHEVENT, _ATESTLOGEVENTINTERNAL_LOCALDETECTEVENT, ],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- _descriptor.OneofDescriptor(
- name='event', full_name='AtestLogEventInternal.event',
- index=0, containing_type=None, fields=[]),
- ],
- serialized_start=54,
- serialized_end=1402,
-)
-
-_ATESTLOGEVENTINTERNAL_ATESTSTARTEVENT.containing_type = _ATESTLOGEVENTINTERNAL
-_ATESTLOGEVENTINTERNAL_ATESTEXITEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTINTERNAL_ATESTEXITEVENT.containing_type = _ATESTLOGEVENTINTERNAL
-_ATESTLOGEVENTINTERNAL_FINDTESTFINISHEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTINTERNAL_FINDTESTFINISHEVENT.containing_type = _ATESTLOGEVENTINTERNAL
-_ATESTLOGEVENTINTERNAL_BUILDFINISHEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTINTERNAL_BUILDFINISHEVENT.containing_type = _ATESTLOGEVENTINTERNAL
-_ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT_TEST.containing_type = _ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT
-_ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT.fields_by_name['test'].message_type = _ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT_TEST
-_ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT.containing_type = _ATESTLOGEVENTINTERNAL
-_ATESTLOGEVENTINTERNAL_RUNTESTSFINISHEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTINTERNAL_RUNTESTSFINISHEVENT.containing_type = _ATESTLOGEVENTINTERNAL
-_ATESTLOGEVENTINTERNAL_LOCALDETECTEVENT.containing_type = _ATESTLOGEVENTINTERNAL
-_ATESTLOGEVENTINTERNAL.fields_by_name['user_type'].enum_type = proto_dot_common__pb2._USERTYPE
-_ATESTLOGEVENTINTERNAL.fields_by_name['atest_start_event'].message_type = _ATESTLOGEVENTINTERNAL_ATESTSTARTEVENT
-_ATESTLOGEVENTINTERNAL.fields_by_name['atest_exit_event'].message_type = _ATESTLOGEVENTINTERNAL_ATESTEXITEVENT
-_ATESTLOGEVENTINTERNAL.fields_by_name['find_test_finish_event'].message_type = _ATESTLOGEVENTINTERNAL_FINDTESTFINISHEVENT
-_ATESTLOGEVENTINTERNAL.fields_by_name['build_finish_event'].message_type = _ATESTLOGEVENTINTERNAL_BUILDFINISHEVENT
-_ATESTLOGEVENTINTERNAL.fields_by_name['runner_finish_event'].message_type = _ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT
-_ATESTLOGEVENTINTERNAL.fields_by_name['run_tests_finish_event'].message_type = _ATESTLOGEVENTINTERNAL_RUNTESTSFINISHEVENT
-_ATESTLOGEVENTINTERNAL.fields_by_name['local_detect_event'].message_type = _ATESTLOGEVENTINTERNAL_LOCALDETECTEVENT
-_ATESTLOGEVENTINTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTINTERNAL.fields_by_name['atest_start_event'])
-_ATESTLOGEVENTINTERNAL.fields_by_name['atest_start_event'].containing_oneof = _ATESTLOGEVENTINTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTINTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTINTERNAL.fields_by_name['atest_exit_event'])
-_ATESTLOGEVENTINTERNAL.fields_by_name['atest_exit_event'].containing_oneof = _ATESTLOGEVENTINTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTINTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTINTERNAL.fields_by_name['find_test_finish_event'])
-_ATESTLOGEVENTINTERNAL.fields_by_name['find_test_finish_event'].containing_oneof = _ATESTLOGEVENTINTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTINTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTINTERNAL.fields_by_name['build_finish_event'])
-_ATESTLOGEVENTINTERNAL.fields_by_name['build_finish_event'].containing_oneof = _ATESTLOGEVENTINTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTINTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTINTERNAL.fields_by_name['runner_finish_event'])
-_ATESTLOGEVENTINTERNAL.fields_by_name['runner_finish_event'].containing_oneof = _ATESTLOGEVENTINTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTINTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTINTERNAL.fields_by_name['run_tests_finish_event'])
-_ATESTLOGEVENTINTERNAL.fields_by_name['run_tests_finish_event'].containing_oneof = _ATESTLOGEVENTINTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTINTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTINTERNAL.fields_by_name['local_detect_event'])
-_ATESTLOGEVENTINTERNAL.fields_by_name['local_detect_event'].containing_oneof = _ATESTLOGEVENTINTERNAL.oneofs_by_name['event']
-DESCRIPTOR.message_types_by_name['AtestLogEventInternal'] = _ATESTLOGEVENTINTERNAL
-
-AtestLogEventInternal = _reflection.GeneratedProtocolMessageType('AtestLogEventInternal', (_message.Message,), dict(
-
- AtestStartEvent = _reflection.GeneratedProtocolMessageType('AtestStartEvent', (_message.Message,), dict(
- DESCRIPTOR = _ATESTLOGEVENTINTERNAL_ATESTSTARTEVENT,
- __module__ = 'proto.internal_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventInternal.AtestStartEvent)
- ))
- ,
-
- AtestExitEvent = _reflection.GeneratedProtocolMessageType('AtestExitEvent', (_message.Message,), dict(
- DESCRIPTOR = _ATESTLOGEVENTINTERNAL_ATESTEXITEVENT,
- __module__ = 'proto.internal_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventInternal.AtestExitEvent)
- ))
- ,
-
- FindTestFinishEvent = _reflection.GeneratedProtocolMessageType('FindTestFinishEvent', (_message.Message,), dict(
- DESCRIPTOR = _ATESTLOGEVENTINTERNAL_FINDTESTFINISHEVENT,
- __module__ = 'proto.internal_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventInternal.FindTestFinishEvent)
- ))
- ,
-
- BuildFinishEvent = _reflection.GeneratedProtocolMessageType('BuildFinishEvent', (_message.Message,), dict(
- DESCRIPTOR = _ATESTLOGEVENTINTERNAL_BUILDFINISHEVENT,
- __module__ = 'proto.internal_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventInternal.BuildFinishEvent)
- ))
- ,
-
- RunnerFinishEvent = _reflection.GeneratedProtocolMessageType('RunnerFinishEvent', (_message.Message,), dict(
-
- Test = _reflection.GeneratedProtocolMessageType('Test', (_message.Message,), dict(
- DESCRIPTOR = _ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT_TEST,
- __module__ = 'proto.internal_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventInternal.RunnerFinishEvent.Test)
- ))
- ,
- DESCRIPTOR = _ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT,
- __module__ = 'proto.internal_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventInternal.RunnerFinishEvent)
- ))
- ,
-
- RunTestsFinishEvent = _reflection.GeneratedProtocolMessageType('RunTestsFinishEvent', (_message.Message,), dict(
- DESCRIPTOR = _ATESTLOGEVENTINTERNAL_RUNTESTSFINISHEVENT,
- __module__ = 'proto.internal_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventInternal.RunTestsFinishEvent)
- ))
- ,
-
- LocalDetectEvent = _reflection.GeneratedProtocolMessageType('LocalDetectEvent', (_message.Message,), dict(
- DESCRIPTOR = _ATESTLOGEVENTINTERNAL_LOCALDETECTEVENT,
- __module__ = 'proto.internal_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventInternal.LocalDetectEvent)
- ))
- ,
- DESCRIPTOR = _ATESTLOGEVENTINTERNAL,
- __module__ = 'proto.internal_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventInternal)
- ))
-_sym_db.RegisterMessage(AtestLogEventInternal)
-_sym_db.RegisterMessage(AtestLogEventInternal.AtestStartEvent)
-_sym_db.RegisterMessage(AtestLogEventInternal.AtestExitEvent)
-_sym_db.RegisterMessage(AtestLogEventInternal.FindTestFinishEvent)
-_sym_db.RegisterMessage(AtestLogEventInternal.BuildFinishEvent)
-_sym_db.RegisterMessage(AtestLogEventInternal.RunnerFinishEvent)
-_sym_db.RegisterMessage(AtestLogEventInternal.RunnerFinishEvent.Test)
-_sym_db.RegisterMessage(AtestLogEventInternal.RunTestsFinishEvent)
-_sym_db.RegisterMessage(AtestLogEventInternal.LocalDetectEvent)
-
-
-# @@protoc_insertion_point(module_scope)
diff --git a/atest-py2/result_reporter.py b/atest-py2/result_reporter.py
deleted file mode 100644
index 17032cd..0000000
--- a/atest-py2/result_reporter.py
+++ /dev/null
@@ -1,524 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Result Reporter
-
-The result reporter formats and prints test results.
-
-----
-Example Output for command to run following tests:
-CtsAnimationTestCases:EvaluatorTest, HelloWorldTests, and WmTests
-
-Running Tests ...
-
-CtsAnimationTestCases
----------------------
-
-android.animation.cts.EvaluatorTest.UnitTests (7 Tests)
-[1/7] android.animation.cts.EvaluatorTest#testRectEvaluator: PASSED (153ms)
-[2/7] android.animation.cts.EvaluatorTest#testIntArrayEvaluator: PASSED (0ms)
-[3/7] android.animation.cts.EvaluatorTest#testIntEvaluator: PASSED (0ms)
-[4/7] android.animation.cts.EvaluatorTest#testFloatArrayEvaluator: PASSED (1ms)
-[5/7] android.animation.cts.EvaluatorTest#testPointFEvaluator: PASSED (1ms)
-[6/7] android.animation.cts.EvaluatorTest#testArgbEvaluator: PASSED (0ms)
-[7/7] android.animation.cts.EvaluatorTest#testFloatEvaluator: PASSED (1ms)
-
-HelloWorldTests
----------------
-
-android.test.example.helloworld.UnitTests(2 Tests)
-[1/2] android.test.example.helloworld.HelloWorldTest#testHalloWelt: PASSED (0ms)
-[2/2] android.test.example.helloworld.HelloWorldTest#testHelloWorld: PASSED (1ms)
-
-WmTests
--------
-
-com.android.tradefed.targetprep.UnitTests (1 Test)
-RUNNER ERROR: com.android.tradefed.targetprep.TargetSetupError:
-Failed to install WmTests.apk on 127.0.0.1:54373. Reason:
- error message ...
-
-
-Summary
--------
-CtsAnimationTestCases: Passed: 7, Failed: 0
-HelloWorldTests: Passed: 2, Failed: 0
-WmTests: Passed: 0, Failed: 0 (Completed With ERRORS)
-
-1 test failed
-"""
-
-from __future__ import print_function
-from collections import OrderedDict
-
-import constants
-import atest_utils as au
-
-from test_runners import test_runner_base
-
-UNSUPPORTED_FLAG = 'UNSUPPORTED_RUNNER'
-FAILURE_FLAG = 'RUNNER_FAILURE'
-BENCHMARK_ESSENTIAL_KEYS = {'repetition_index', 'cpu_time', 'name', 'repetitions',
- 'run_type', 'threads', 'time_unit', 'iterations',
- 'run_name', 'real_time'}
-# TODO(b/146875480): handle the optional benchmark events
-BENCHMARK_OPTIONAL_KEYS = {'bytes_per_second', 'label'}
-BENCHMARK_EVENT_KEYS = BENCHMARK_ESSENTIAL_KEYS.union(BENCHMARK_OPTIONAL_KEYS)
-INT_KEYS = {'cpu_time', 'real_time'}
-
-class PerfInfo(object):
- """Class for storing performance test of a test run."""
-
- def __init__(self):
- """Initialize a new instance of PerfInfo class."""
- # perf_info: A list of benchmark_info(dict).
- self.perf_info = []
-
- def update_perf_info(self, test):
- """Update perf_info with the given result of a single test.
-
- Args:
- test: A TestResult namedtuple.
- """
- all_additional_keys = set(test.additional_info.keys())
- # Ensure every key is in all_additional_keys.
- if not BENCHMARK_ESSENTIAL_KEYS.issubset(all_additional_keys):
- return
- benchmark_info = {}
- benchmark_info['test_name'] = test.test_name
- for key, data in test.additional_info.items():
- if key in INT_KEYS:
- data_to_int = data.split('.')[0]
- benchmark_info[key] = data_to_int
- elif key in BENCHMARK_EVENT_KEYS:
- benchmark_info[key] = data
- if benchmark_info:
- self.perf_info.append(benchmark_info)
-
- def print_perf_info(self):
- """Print summary of a perf_info."""
- if not self.perf_info:
- return
- classify_perf_info, max_len = self._classify_perf_info()
- separator = '-' * au.get_terminal_size()[0]
- print(separator)
- print("{:{name}} {:^{real_time}} {:^{cpu_time}} "
- "{:>{iterations}}".format(
- 'Benchmark', 'Time', 'CPU', 'Iteration',
- name=max_len['name']+3,
- real_time=max_len['real_time']+max_len['time_unit']+1,
- cpu_time=max_len['cpu_time']+max_len['time_unit']+1,
- iterations=max_len['iterations']))
- print(separator)
- for module_name, module_perf_info in classify_perf_info.items():
- print("{}:".format(module_name))
- for benchmark_info in module_perf_info:
- # BpfBenchMark/MapWriteNewEntry/1 1530 ns 1522 ns 460517
- print(" #{:{name}} {:>{real_time}} {:{time_unit}} "
- "{:>{cpu_time}} {:{time_unit}} "
- "{:>{iterations}}".format(benchmark_info['name'],
- benchmark_info['real_time'],
- benchmark_info['time_unit'],
- benchmark_info['cpu_time'],
- benchmark_info['time_unit'],
- benchmark_info['iterations'],
- name=max_len['name'],
- real_time=max_len['real_time'],
- time_unit=max_len['time_unit'],
- cpu_time=max_len['cpu_time'],
- iterations=max_len['iterations']))
-
- def _classify_perf_info(self):
- """Classify the perf_info by test module name.
-
- Returns:
- A tuple of (classified_perf_info, max_len), where
- classified_perf_info: A dict of perf_info and each perf_info are
- belong to different modules.
- e.g.
- { module_name_01: [perf_info of module_1],
- module_name_02: [perf_info of module_2], ...}
- max_len: A dict which stores the max length of each event.
- It contains the max string length of 'name', real_time',
- 'time_unit', 'cpu_time', 'iterations'.
- e.g.
- {name: 56, real_time: 9, time_unit: 2, cpu_time: 8,
- iterations: 12}
- """
- module_categories = set()
- max_len = {}
- all_name = []
- all_real_time = []
- all_time_unit = []
- all_cpu_time = []
- all_iterations = ['Iteration']
- for benchmark_info in self.perf_info:
- module_categories.add(benchmark_info['test_name'].split('#')[0])
- all_name.append(benchmark_info['name'])
- all_real_time.append(benchmark_info['real_time'])
- all_time_unit.append(benchmark_info['time_unit'])
- all_cpu_time.append(benchmark_info['cpu_time'])
- all_iterations.append(benchmark_info['iterations'])
- classified_perf_info = {}
- for module_name in module_categories:
- module_perf_info = []
- for benchmark_info in self.perf_info:
- if benchmark_info['test_name'].split('#')[0] == module_name:
- module_perf_info.append(benchmark_info)
- classified_perf_info[module_name] = module_perf_info
- max_len = {'name': len(max(all_name, key=len)),
- 'real_time': len(max(all_real_time, key=len)),
- 'time_unit': len(max(all_time_unit, key=len)),
- 'cpu_time': len(max(all_cpu_time, key=len)),
- 'iterations': len(max(all_iterations, key=len))}
- return classified_perf_info, max_len
-
-
-class RunStat(object):
- """Class for storing stats of a test run."""
-
- def __init__(self, passed=0, failed=0, ignored=0, run_errors=False,
- assumption_failed=0):
- """Initialize a new instance of RunStat class.
-
- Args:
- passed: Count of passing tests.
- failed: Count of failed tests.
- ignored: Count of ignored tests.
- assumption_failed: Count of assumption failure tests.
- run_errors: A boolean if there were run errors
- """
- # TODO(b/109822985): Track group and run estimated totals for updating
- # summary line
- self.passed = passed
- self.failed = failed
- self.ignored = ignored
- self.assumption_failed = assumption_failed
- self.perf_info = PerfInfo()
- # Run errors are not for particular tests, they are runner errors.
- self.run_errors = run_errors
-
- @property
- def total(self):
- """Getter for total tests actually ran. Accessed via self.total"""
- return self.passed + self.failed
-
-
-class ResultReporter(object):
- """Result Reporter class.
-
- As each test is run, the test runner will call self.process_test_result()
- with a TestResult namedtuple that contains the following information:
- - runner_name: Name of the test runner
- - group_name: Name of the test group if any.
- In Tradefed that's the Module name.
- - test_name: Name of the test.
- In Tradefed that's qualified.class#Method
- - status: The strings FAILED or PASSED.
- - stacktrace: The stacktrace if the test failed.
- - group_total: The total tests scheduled to be run for a group.
- In Tradefed this is provided when the Module starts.
- - runner_total: The total tests scheduled to be run for the runner.
- In Tradefed this is not available so is None.
-
- The Result Reporter will print the results of this test and then update
- its stats state.
-
- Test stats are stored in the following structure:
- - self.run_stats: Is RunStat instance containing stats for the overall run.
- This include pass/fail counts across ALL test runners.
-
- - self.runners: Is of the form: {RunnerName: {GroupName: RunStat Instance}}
- Where {} is an ordered dict.
-
- The stats instance contains stats for each test group.
- If the runner doesn't support groups, then the group
- name will be None.
-
- For example this could be a state of ResultReporter:
-
- run_stats: RunStat(passed:10, failed:5)
- runners: {'AtestTradefedTestRunner':
- {'Module1': RunStat(passed:1, failed:1),
- 'Module2': RunStat(passed:0, failed:4)},
- 'RobolectricTestRunner': {None: RunStat(passed:5, failed:0)},
- 'VtsTradefedTestRunner': {'Module1': RunStat(passed:4, failed:0)}}
- """
-
- def __init__(self, silent=False):
- """Init ResultReporter.
-
- Args:
- silent: A boolean of silence or not.
- """
- self.run_stats = RunStat()
- self.runners = OrderedDict()
- self.failed_tests = []
- self.all_test_results = []
- self.pre_test = None
- self.log_path = None
- self.silent = silent
- self.rerun_options = ''
-
- def process_test_result(self, test):
- """Given the results of a single test, update stats and print results.
-
- Args:
- test: A TestResult namedtuple.
- """
- if test.runner_name not in self.runners:
- self.runners[test.runner_name] = OrderedDict()
- assert self.runners[test.runner_name] != FAILURE_FLAG
- self.all_test_results.append(test)
- if test.group_name not in self.runners[test.runner_name]:
- self.runners[test.runner_name][test.group_name] = RunStat()
- self._print_group_title(test)
- self._update_stats(test,
- self.runners[test.runner_name][test.group_name])
- self._print_result(test)
-
- def runner_failure(self, runner_name, failure_msg):
- """Report a runner failure.
-
- Use instead of process_test_result() when runner fails separate from
- any particular test, e.g. during setup of runner.
-
- Args:
- runner_name: A string of the name of the runner.
- failure_msg: A string of the failure message to pass to user.
- """
- self.runners[runner_name] = FAILURE_FLAG
- print('\n', runner_name, '\n', '-' * len(runner_name), sep='')
- print('Runner encountered a critical failure. Skipping.\n'
- 'FAILURE: %s' % failure_msg)
-
- def register_unsupported_runner(self, runner_name):
- """Register an unsupported runner.
-
- Prints the following to the screen:
-
- RunnerName
- ----------
- This runner does not support normal results formatting.
- Below is the raw output of the test runner.
-
- RAW OUTPUT:
- <Raw Runner Output>
-
- Args:
- runner_name: A String of the test runner's name.
- """
- assert runner_name not in self.runners
- self.runners[runner_name] = UNSUPPORTED_FLAG
- print('\n', runner_name, '\n', '-' * len(runner_name), sep='')
- print('This runner does not support normal results formatting. Below '
- 'is the raw output of the test runner.\n\nRAW OUTPUT:')
-
- def print_starting_text(self):
- """Print starting text for running tests."""
- print(au.colorize('\nRunning Tests...', constants.CYAN))
-
- def print_summary(self):
- """Print summary of all test runs.
-
- Returns:
- 0 if all tests pass, non-zero otherwise.
-
- """
- tests_ret = constants.EXIT_CODE_SUCCESS
- if not self.runners:
- return tests_ret
- print('\n%s' % au.colorize('Summary', constants.CYAN))
- print('-------')
- if self.rerun_options:
- print(self.rerun_options)
- failed_sum = len(self.failed_tests)
- for runner_name, groups in self.runners.items():
- if groups == UNSUPPORTED_FLAG:
- print(runner_name, 'Unsupported. See raw output above.')
- continue
- if groups == FAILURE_FLAG:
- tests_ret = constants.EXIT_CODE_TEST_FAILURE
- print(runner_name, 'Crashed. No results to report.')
- failed_sum += 1
- continue
- for group_name, stats in groups.items():
- name = group_name if group_name else runner_name
- summary = self.process_summary(name, stats)
- if stats.failed > 0:
- tests_ret = constants.EXIT_CODE_TEST_FAILURE
- if stats.run_errors:
- tests_ret = constants.EXIT_CODE_TEST_FAILURE
- failed_sum += 1 if not stats.failed else 0
- print(summary)
- self.run_stats.perf_info.print_perf_info()
- print()
- if tests_ret == constants.EXIT_CODE_SUCCESS:
- print(au.colorize('All tests passed!', constants.GREEN))
- else:
- message = '%d %s failed' % (failed_sum,
- 'tests' if failed_sum > 1 else 'test')
- print(au.colorize(message, constants.RED))
- print('-'*len(message))
- self.print_failed_tests()
- if self.log_path:
- print('Test Logs have saved in %s' % self.log_path)
- return tests_ret
-
- def print_failed_tests(self):
- """Print the failed tests if existed."""
- if self.failed_tests:
- for test_name in self.failed_tests:
- print('%s' % test_name)
-
- def process_summary(self, name, stats):
- """Process the summary line.
-
- Strategy:
- Error status happens ->
- SomeTests: Passed: 2, Failed: 0 <red>(Completed With ERRORS)</red>
- SomeTests: Passed: 2, <red>Failed</red>: 2 <red>(Completed With ERRORS)</red>
- More than 1 test fails ->
- SomeTests: Passed: 2, <red>Failed</red>: 5
- No test fails ->
- SomeTests: <green>Passed</green>: 2, Failed: 0
-
- Args:
- name: A string of test name.
- stats: A RunStat instance for a test group.
-
- Returns:
- A summary of the test result.
- """
- passed_label = 'Passed'
- failed_label = 'Failed'
- ignored_label = 'Ignored'
- assumption_failed_label = 'Assumption Failed'
- error_label = ''
- if stats.failed > 0:
- failed_label = au.colorize(failed_label, constants.RED)
- if stats.run_errors:
- error_label = au.colorize('(Completed With ERRORS)', constants.RED)
- elif stats.failed == 0:
- passed_label = au.colorize(passed_label, constants.GREEN)
- summary = '%s: %s: %s, %s: %s, %s: %s, %s: %s %s' % (name,
- passed_label,
- stats.passed,
- failed_label,
- stats.failed,
- ignored_label,
- stats.ignored,
- assumption_failed_label,
- stats.assumption_failed,
- error_label)
- return summary
-
- def _update_stats(self, test, group):
- """Given the results of a single test, update test run stats.
-
- Args:
- test: a TestResult namedtuple.
- group: a RunStat instance for a test group.
- """
- # TODO(109822985): Track group and run estimated totals for updating
- # summary line
- if test.status == test_runner_base.PASSED_STATUS:
- self.run_stats.passed += 1
- group.passed += 1
- elif test.status == test_runner_base.IGNORED_STATUS:
- self.run_stats.ignored += 1
- group.ignored += 1
- elif test.status == test_runner_base.ASSUMPTION_FAILED:
- self.run_stats.assumption_failed += 1
- group.assumption_failed += 1
- elif test.status == test_runner_base.FAILED_STATUS:
- self.run_stats.failed += 1
- self.failed_tests.append(test.test_name)
- group.failed += 1
- elif test.status == test_runner_base.ERROR_STATUS:
- self.run_stats.run_errors = True
- group.run_errors = True
- self.run_stats.perf_info.update_perf_info(test)
-
- def _print_group_title(self, test):
- """Print the title line for a test group.
-
- Test Group/Runner Name
- ----------------------
-
- Args:
- test: A TestResult namedtuple.
- """
- if self.silent:
- return
- title = test.group_name or test.runner_name
- underline = '-' * (len(title))
- print('\n%s\n%s' % (title, underline))
-
- def _print_result(self, test):
- """Print the results of a single test.
-
- Looks like:
- fully.qualified.class#TestMethod: PASSED/FAILED
-
- Args:
- test: a TestResult namedtuple.
- """
- if self.silent:
- return
- if not self.pre_test or (test.test_run_name !=
- self.pre_test.test_run_name):
- print('%s (%s %s)' % (au.colorize(test.test_run_name,
- constants.BLUE),
- test.group_total,
- 'Test' if test.group_total <= 1 else 'Tests'))
- if test.status == test_runner_base.ERROR_STATUS:
- print('RUNNER ERROR: %s\n' % test.details)
- self.pre_test = test
- return
- if test.test_name:
- if test.status == test_runner_base.PASSED_STATUS:
- # Example of output:
- # [78/92] test_name: PASSED (92ms)
- print('[%s/%s] %s: %s %s' % (test.test_count,
- test.group_total,
- test.test_name,
- au.colorize(
- test.status,
- constants.GREEN),
- test.test_time))
- for key, data in test.additional_info.items():
- if key not in BENCHMARK_EVENT_KEYS:
- print('\t%s: %s' % (au.colorize(key, constants.BLUE), data))
- elif test.status == test_runner_base.IGNORED_STATUS:
- # Example: [33/92] test_name: IGNORED (12ms)
- print('[%s/%s] %s: %s %s' % (test.test_count, test.group_total,
- test.test_name, au.colorize(
- test.status, constants.MAGENTA),
- test.test_time))
- elif test.status == test_runner_base.ASSUMPTION_FAILED:
- # Example: [33/92] test_name: ASSUMPTION_FAILED (12ms)
- print('[%s/%s] %s: %s %s' % (test.test_count, test.group_total,
- test.test_name, au.colorize(
- test.status, constants.MAGENTA),
- test.test_time))
- else:
- # Example: [26/92] test_name: FAILED (32ms)
- print('[%s/%s] %s: %s %s' % (test.test_count, test.group_total,
- test.test_name, au.colorize(
- test.status, constants.RED),
- test.test_time))
- if test.status == test_runner_base.FAILED_STATUS:
- print('\nSTACKTRACE:\n%s' % test.details)
- self.pre_test = test
diff --git a/atest-py2/result_reporter_unittest.py b/atest-py2/result_reporter_unittest.py
deleted file mode 100755
index 9c56dc5..0000000
--- a/atest-py2/result_reporter_unittest.py
+++ /dev/null
@@ -1,546 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittests for result_reporter."""
-
-import sys
-import unittest
-import mock
-
-import result_reporter
-from test_runners import test_runner_base
-
-if sys.version_info[0] == 2:
- from StringIO import StringIO
-else:
- from io import StringIO
-
-RESULT_PASSED_TEST = test_runner_base.TestResult(
- runner_name='someTestRunner',
- group_name='someTestModule',
- test_name='someClassName#sostName',
- status=test_runner_base.PASSED_STATUS,
- details=None,
- test_count=1,
- test_time='(10ms)',
- runner_total=None,
- group_total=2,
- additional_info={},
- test_run_name='com.android.UnitTests'
-)
-
-RESULT_PASSED_TEST_MODULE_2 = test_runner_base.TestResult(
- runner_name='someTestRunner',
- group_name='someTestModule2',
- test_name='someClassName#sostName',
- status=test_runner_base.PASSED_STATUS,
- details=None,
- test_count=1,
- test_time='(10ms)',
- runner_total=None,
- group_total=2,
- additional_info={},
- test_run_name='com.android.UnitTests'
-)
-
-RESULT_PASSED_TEST_RUNNER_2_NO_MODULE = test_runner_base.TestResult(
- runner_name='someTestRunner2',
- group_name=None,
- test_name='someClassName#sostName',
- status=test_runner_base.PASSED_STATUS,
- details=None,
- test_count=1,
- test_time='(10ms)',
- runner_total=None,
- group_total=2,
- additional_info={},
- test_run_name='com.android.UnitTests'
-)
-
-RESULT_FAILED_TEST = test_runner_base.TestResult(
- runner_name='someTestRunner',
- group_name='someTestModule',
- test_name='someClassName2#sestName2',
- status=test_runner_base.FAILED_STATUS,
- details='someTrace',
- test_count=1,
- test_time='',
- runner_total=None,
- group_total=2,
- additional_info={},
- test_run_name='com.android.UnitTests'
-)
-
-RESULT_RUN_FAILURE = test_runner_base.TestResult(
- runner_name='someTestRunner',
- group_name='someTestModule',
- test_name='someClassName#sostName',
- status=test_runner_base.ERROR_STATUS,
- details='someRunFailureReason',
- test_count=1,
- test_time='',
- runner_total=None,
- group_total=2,
- additional_info={},
- test_run_name='com.android.UnitTests'
-)
-
-RESULT_INVOCATION_FAILURE = test_runner_base.TestResult(
- runner_name='someTestRunner',
- group_name=None,
- test_name=None,
- status=test_runner_base.ERROR_STATUS,
- details='someInvocationFailureReason',
- test_count=1,
- test_time='',
- runner_total=None,
- group_total=None,
- additional_info={},
- test_run_name='com.android.UnitTests'
-)
-
-RESULT_IGNORED_TEST = test_runner_base.TestResult(
- runner_name='someTestRunner',
- group_name='someTestModule',
- test_name='someClassName#sostName',
- status=test_runner_base.IGNORED_STATUS,
- details=None,
- test_count=1,
- test_time='(10ms)',
- runner_total=None,
- group_total=2,
- additional_info={},
- test_run_name='com.android.UnitTests'
-)
-
-RESULT_ASSUMPTION_FAILED_TEST = test_runner_base.TestResult(
- runner_name='someTestRunner',
- group_name='someTestModule',
- test_name='someClassName#sostName',
- status=test_runner_base.ASSUMPTION_FAILED,
- details=None,
- test_count=1,
- test_time='(10ms)',
- runner_total=None,
- group_total=2,
- additional_info={},
- test_run_name='com.android.UnitTests'
-)
-
-ADDITIONAL_INFO_PERF01_TEST01 = {u'repetition_index': u'0',
- u'cpu_time': u'10001.10001',
- u'name': u'perfName01',
- u'repetitions': u'0', u'run_type': u'iteration',
- u'label': u'2123', u'threads': u'1',
- u'time_unit': u'ns', u'iterations': u'1001',
- u'run_name': u'perfName01',
- u'real_time': u'11001.11001'}
-
-RESULT_PERF01_TEST01 = test_runner_base.TestResult(
- runner_name='someTestRunner',
- group_name='someTestModule',
- test_name='somePerfClass01#perfName01',
- status=test_runner_base.PASSED_STATUS,
- details=None,
- test_count=1,
- test_time='(10ms)',
- runner_total=None,
- group_total=2,
- additional_info=ADDITIONAL_INFO_PERF01_TEST01,
- test_run_name='com.android.UnitTests'
-)
-
-RESULT_PERF01_TEST02 = test_runner_base.TestResult(
- runner_name='someTestRunner',
- group_name='someTestModule',
- test_name='somePerfClass01#perfName02',
- status=test_runner_base.PASSED_STATUS,
- details=None,
- test_count=1,
- test_time='(10ms)',
- runner_total=None,
- group_total=2,
- additional_info={u'repetition_index': u'0', u'cpu_time': u'10002.10002',
- u'name': u'perfName02',
- u'repetitions': u'0', u'run_type': u'iteration',
- u'label': u'2123', u'threads': u'1',
- u'time_unit': u'ns', u'iterations': u'1002',
- u'run_name': u'perfName02',
- u'real_time': u'11002.11002'},
- test_run_name='com.android.UnitTests'
-)
-
-RESULT_PERF01_TEST03_NO_CPU_TIME = test_runner_base.TestResult(
- runner_name='someTestRunner',
- group_name='someTestModule',
- test_name='somePerfClass01#perfName03',
- status=test_runner_base.PASSED_STATUS,
- details=None,
- test_count=1,
- test_time='(10ms)',
- runner_total=None,
- group_total=2,
- additional_info={u'repetition_index': u'0',
- u'name': u'perfName03',
- u'repetitions': u'0', u'run_type': u'iteration',
- u'label': u'2123', u'threads': u'1',
- u'time_unit': u'ns', u'iterations': u'1003',
- u'run_name': u'perfName03',
- u'real_time': u'11003.11003'},
- test_run_name='com.android.UnitTests'
-)
-
-RESULT_PERF02_TEST01 = test_runner_base.TestResult(
- runner_name='someTestRunner',
- group_name='someTestModule',
- test_name='somePerfClass02#perfName11',
- status=test_runner_base.PASSED_STATUS,
- details=None,
- test_count=1,
- test_time='(10ms)',
- runner_total=None,
- group_total=2,
- additional_info={u'repetition_index': u'0', u'cpu_time': u'20001.20001',
- u'name': u'perfName11',
- u'repetitions': u'0', u'run_type': u'iteration',
- u'label': u'2123', u'threads': u'1',
- u'time_unit': u'ns', u'iterations': u'2001',
- u'run_name': u'perfName11',
- u'real_time': u'210001.21001'},
- test_run_name='com.android.UnitTests'
-)
-
-#pylint: disable=protected-access
-#pylint: disable=invalid-name
-class ResultReporterUnittests(unittest.TestCase):
- """Unit tests for result_reporter.py"""
-
- def setUp(self):
- self.rr = result_reporter.ResultReporter()
-
- def tearDown(self):
- mock.patch.stopall()
-
- @mock.patch.object(result_reporter.ResultReporter, '_print_group_title')
- @mock.patch.object(result_reporter.ResultReporter, '_update_stats')
- @mock.patch.object(result_reporter.ResultReporter, '_print_result')
- def test_process_test_result(self, mock_print, mock_update, mock_title):
- """Test process_test_result method."""
- # Passed Test
- self.assertTrue('someTestRunner' not in self.rr.runners)
- self.rr.process_test_result(RESULT_PASSED_TEST)
- self.assertTrue('someTestRunner' in self.rr.runners)
- group = self.rr.runners['someTestRunner'].get('someTestModule')
- self.assertIsNotNone(group)
- mock_title.assert_called_with(RESULT_PASSED_TEST)
- mock_update.assert_called_with(RESULT_PASSED_TEST, group)
- mock_print.assert_called_with(RESULT_PASSED_TEST)
- # Failed Test
- mock_title.reset_mock()
- self.rr.process_test_result(RESULT_FAILED_TEST)
- mock_title.assert_not_called()
- mock_update.assert_called_with(RESULT_FAILED_TEST, group)
- mock_print.assert_called_with(RESULT_FAILED_TEST)
- # Test with new Group
- mock_title.reset_mock()
- self.rr.process_test_result(RESULT_PASSED_TEST_MODULE_2)
- self.assertTrue('someTestModule2' in self.rr.runners['someTestRunner'])
- mock_title.assert_called_with(RESULT_PASSED_TEST_MODULE_2)
- # Test with new Runner
- mock_title.reset_mock()
- self.rr.process_test_result(RESULT_PASSED_TEST_RUNNER_2_NO_MODULE)
- self.assertTrue('someTestRunner2' in self.rr.runners)
- mock_title.assert_called_with(RESULT_PASSED_TEST_RUNNER_2_NO_MODULE)
-
- def test_print_result_run_name(self):
- """Test print run name function in print_result method."""
- try:
- rr = result_reporter.ResultReporter()
- capture_output = StringIO()
- sys.stdout = capture_output
- run_name = 'com.android.UnitTests'
- rr._print_result(test_runner_base.TestResult(
- runner_name='runner_name',
- group_name='someTestModule',
- test_name='someClassName#someTestName',
- status=test_runner_base.FAILED_STATUS,
- details='someTrace',
- test_count=2,
- test_time='(2h44m36.402s)',
- runner_total=None,
- group_total=2,
- additional_info={},
- test_run_name=run_name
- ))
- # Make sure run name in the first line.
- capture_output_str = capture_output.getvalue().strip()
- self.assertTrue(run_name in capture_output_str.split('\n')[0])
- run_name2 = 'com.android.UnitTests2'
- capture_output = StringIO()
- sys.stdout = capture_output
- rr._print_result(test_runner_base.TestResult(
- runner_name='runner_name',
- group_name='someTestModule',
- test_name='someClassName#someTestName',
- status=test_runner_base.FAILED_STATUS,
- details='someTrace',
- test_count=2,
- test_time='(2h43m36.402s)',
- runner_total=None,
- group_total=2,
- additional_info={},
- test_run_name=run_name2
- ))
- # Make sure run name in the first line.
- capture_output_str = capture_output.getvalue().strip()
- self.assertTrue(run_name2 in capture_output_str.split('\n')[0])
- finally:
- sys.stdout = sys.__stdout__
-
- def test_register_unsupported_runner(self):
- """Test register_unsupported_runner method."""
- self.rr.register_unsupported_runner('NotSupported')
- runner = self.rr.runners['NotSupported']
- self.assertIsNotNone(runner)
- self.assertEquals(runner, result_reporter.UNSUPPORTED_FLAG)
-
- def test_update_stats_passed(self):
- """Test _update_stats method."""
- # Passed Test
- group = result_reporter.RunStat()
- self.rr._update_stats(RESULT_PASSED_TEST, group)
- self.assertEquals(self.rr.run_stats.passed, 1)
- self.assertEquals(self.rr.run_stats.failed, 0)
- self.assertEquals(self.rr.run_stats.run_errors, False)
- self.assertEquals(self.rr.failed_tests, [])
- self.assertEquals(group.passed, 1)
- self.assertEquals(group.failed, 0)
- self.assertEquals(group.ignored, 0)
- self.assertEquals(group.run_errors, False)
- # Passed Test New Group
- group2 = result_reporter.RunStat()
- self.rr._update_stats(RESULT_PASSED_TEST_MODULE_2, group2)
- self.assertEquals(self.rr.run_stats.passed, 2)
- self.assertEquals(self.rr.run_stats.failed, 0)
- self.assertEquals(self.rr.run_stats.run_errors, False)
- self.assertEquals(self.rr.failed_tests, [])
- self.assertEquals(group2.passed, 1)
- self.assertEquals(group2.failed, 0)
- self.assertEquals(group.ignored, 0)
- self.assertEquals(group2.run_errors, False)
-
- def test_update_stats_failed(self):
- """Test _update_stats method."""
- # Passed Test
- group = result_reporter.RunStat()
- self.rr._update_stats(RESULT_PASSED_TEST, group)
- # Passed Test New Group
- group2 = result_reporter.RunStat()
- self.rr._update_stats(RESULT_PASSED_TEST_MODULE_2, group2)
- # Failed Test Old Group
- self.rr._update_stats(RESULT_FAILED_TEST, group)
- self.assertEquals(self.rr.run_stats.passed, 2)
- self.assertEquals(self.rr.run_stats.failed, 1)
- self.assertEquals(self.rr.run_stats.run_errors, False)
- self.assertEquals(self.rr.failed_tests, [RESULT_FAILED_TEST.test_name])
- self.assertEquals(group.passed, 1)
- self.assertEquals(group.failed, 1)
- self.assertEquals(group.ignored, 0)
- self.assertEquals(group.total, 2)
- self.assertEquals(group2.total, 1)
- self.assertEquals(group.run_errors, False)
- # Test Run Failure
- self.rr._update_stats(RESULT_RUN_FAILURE, group)
- self.assertEquals(self.rr.run_stats.passed, 2)
- self.assertEquals(self.rr.run_stats.failed, 1)
- self.assertEquals(self.rr.run_stats.run_errors, True)
- self.assertEquals(self.rr.failed_tests, [RESULT_FAILED_TEST.test_name])
- self.assertEquals(group.passed, 1)
- self.assertEquals(group.failed, 1)
- self.assertEquals(group.ignored, 0)
- self.assertEquals(group.run_errors, True)
- self.assertEquals(group2.run_errors, False)
- # Invocation Failure
- self.rr._update_stats(RESULT_INVOCATION_FAILURE, group)
- self.assertEquals(self.rr.run_stats.passed, 2)
- self.assertEquals(self.rr.run_stats.failed, 1)
- self.assertEquals(self.rr.run_stats.run_errors, True)
- self.assertEquals(self.rr.failed_tests, [RESULT_FAILED_TEST.test_name])
- self.assertEquals(group.passed, 1)
- self.assertEquals(group.failed, 1)
- self.assertEquals(group.ignored, 0)
- self.assertEquals(group.run_errors, True)
-
- def test_update_stats_ignored_and_assumption_failure(self):
- """Test _update_stats method."""
- # Passed Test
- group = result_reporter.RunStat()
- self.rr._update_stats(RESULT_PASSED_TEST, group)
- # Passed Test New Group
- group2 = result_reporter.RunStat()
- self.rr._update_stats(RESULT_PASSED_TEST_MODULE_2, group2)
- # Failed Test Old Group
- self.rr._update_stats(RESULT_FAILED_TEST, group)
- # Test Run Failure
- self.rr._update_stats(RESULT_RUN_FAILURE, group)
- # Invocation Failure
- self.rr._update_stats(RESULT_INVOCATION_FAILURE, group)
- # Ignored Test
- self.rr._update_stats(RESULT_IGNORED_TEST, group)
- self.assertEquals(self.rr.run_stats.passed, 2)
- self.assertEquals(self.rr.run_stats.failed, 1)
- self.assertEquals(self.rr.run_stats.run_errors, True)
- self.assertEquals(self.rr.failed_tests, [RESULT_FAILED_TEST.test_name])
- self.assertEquals(group.passed, 1)
- self.assertEquals(group.failed, 1)
- self.assertEquals(group.ignored, 1)
- self.assertEquals(group.run_errors, True)
- # 2nd Ignored Test
- self.rr._update_stats(RESULT_IGNORED_TEST, group)
- self.assertEquals(self.rr.run_stats.passed, 2)
- self.assertEquals(self.rr.run_stats.failed, 1)
- self.assertEquals(self.rr.run_stats.run_errors, True)
- self.assertEquals(self.rr.failed_tests, [RESULT_FAILED_TEST.test_name])
- self.assertEquals(group.passed, 1)
- self.assertEquals(group.failed, 1)
- self.assertEquals(group.ignored, 2)
- self.assertEquals(group.run_errors, True)
-
- # Assumption_Failure test
- self.rr._update_stats(RESULT_ASSUMPTION_FAILED_TEST, group)
- self.assertEquals(group.assumption_failed, 1)
- # 2nd Assumption_Failure test
- self.rr._update_stats(RESULT_ASSUMPTION_FAILED_TEST, group)
- self.assertEquals(group.assumption_failed, 2)
-
- def test_print_summary_ret_val(self):
- """Test print_summary method's return value."""
- # PASS Case
- self.rr.process_test_result(RESULT_PASSED_TEST)
- self.assertEquals(0, self.rr.print_summary())
- # PASS Case + Fail Case
- self.rr.process_test_result(RESULT_FAILED_TEST)
- self.assertNotEqual(0, self.rr.print_summary())
- # PASS Case + Fail Case + PASS Case
- self.rr.process_test_result(RESULT_PASSED_TEST_MODULE_2)
- self.assertNotEqual(0, self.rr.print_summary())
-
- def test_print_summary_ret_val_err_stat(self):
- """Test print_summary method's return value."""
- # PASS Case
- self.rr.process_test_result(RESULT_PASSED_TEST)
- self.assertEquals(0, self.rr.print_summary())
- # PASS Case + Fail Case
- self.rr.process_test_result(RESULT_RUN_FAILURE)
- self.assertNotEqual(0, self.rr.print_summary())
- # PASS Case + Fail Case + PASS Case
- self.rr.process_test_result(RESULT_PASSED_TEST_MODULE_2)
- self.assertNotEqual(0, self.rr.print_summary())
-
- def test_update_perf_info(self):
- """Test update_perf_info method."""
- group = result_reporter.RunStat()
- # 1. Test PerfInfo after RESULT_PERF01_TEST01
- # _update_stats() will call _update_perf_info()
- self.rr._update_stats(RESULT_PERF01_TEST01, group)
- correct_perf_info = []
- # trim the time form 10001.10001 to 10001
- trim_perf01_test01 = {u'repetition_index': u'0', u'cpu_time': u'10001',
- u'name': u'perfName01',
- u'repetitions': u'0', u'run_type': u'iteration',
- u'label': u'2123', u'threads': u'1',
- u'time_unit': u'ns', u'iterations': u'1001',
- u'run_name': u'perfName01',
- u'real_time': u'11001',
- 'test_name': 'somePerfClass01#perfName01'}
- correct_perf_info.append(trim_perf01_test01)
- self.assertEquals(self.rr.run_stats.perf_info.perf_info,
- correct_perf_info)
- # 2. Test PerfInfo after RESULT_PERF01_TEST01
- self.rr._update_stats(RESULT_PERF01_TEST02, group)
- trim_perf01_test02 = {u'repetition_index': u'0', u'cpu_time': u'10002',
- u'name': u'perfName02',
- u'repetitions': u'0', u'run_type': u'iteration',
- u'label': u'2123', u'threads': u'1',
- u'time_unit': u'ns', u'iterations': u'1002',
- u'run_name': u'perfName02',
- u'real_time': u'11002',
- 'test_name': 'somePerfClass01#perfName02'}
- correct_perf_info.append(trim_perf01_test02)
- self.assertEquals(self.rr.run_stats.perf_info.perf_info,
- correct_perf_info)
- # 3. Test PerfInfo after RESULT_PERF02_TEST01
- self.rr._update_stats(RESULT_PERF02_TEST01, group)
- trim_perf02_test01 = {u'repetition_index': u'0', u'cpu_time': u'20001',
- u'name': u'perfName11',
- u'repetitions': u'0', u'run_type': u'iteration',
- u'label': u'2123', u'threads': u'1',
- u'time_unit': u'ns', u'iterations': u'2001',
- u'run_name': u'perfName11',
- u'real_time': u'210001',
- 'test_name': 'somePerfClass02#perfName11'}
- correct_perf_info.append(trim_perf02_test01)
- self.assertEquals(self.rr.run_stats.perf_info.perf_info,
- correct_perf_info)
- # 4. Test PerfInfo after RESULT_PERF01_TEST03_NO_CPU_TIME
- self.rr._update_stats(RESULT_PERF01_TEST03_NO_CPU_TIME, group)
- # Nothing added since RESULT_PERF01_TEST03_NO_CPU_TIME lack of cpu_time
- self.assertEquals(self.rr.run_stats.perf_info.perf_info,
- correct_perf_info)
-
- def test_classify_perf_info(self):
- """Test _classify_perf_info method."""
- group = result_reporter.RunStat()
- self.rr._update_stats(RESULT_PERF01_TEST01, group)
- self.rr._update_stats(RESULT_PERF01_TEST02, group)
- self.rr._update_stats(RESULT_PERF02_TEST01, group)
- # trim the time form 10001.10001 to 10001
- trim_perf01_test01 = {u'repetition_index': u'0', u'cpu_time': u'10001',
- u'name': u'perfName01',
- u'repetitions': u'0', u'run_type': u'iteration',
- u'label': u'2123', u'threads': u'1',
- u'time_unit': u'ns', u'iterations': u'1001',
- u'run_name': u'perfName01',
- u'real_time': u'11001',
- 'test_name': 'somePerfClass01#perfName01'}
- trim_perf01_test02 = {u'repetition_index': u'0', u'cpu_time': u'10002',
- u'name': u'perfName02',
- u'repetitions': u'0', u'run_type': u'iteration',
- u'label': u'2123', u'threads': u'1',
- u'time_unit': u'ns', u'iterations': u'1002',
- u'run_name': u'perfName02',
- u'real_time': u'11002',
- 'test_name': 'somePerfClass01#perfName02'}
- trim_perf02_test01 = {u'repetition_index': u'0', u'cpu_time': u'20001',
- u'name': u'perfName11',
- u'repetitions': u'0', u'run_type': u'iteration',
- u'label': u'2123', u'threads': u'1',
- u'time_unit': u'ns', u'iterations': u'2001',
- u'run_name': u'perfName11',
- u'real_time': u'210001',
- 'test_name': 'somePerfClass02#perfName11'}
- correct_classify_perf_info = {"somePerfClass01":[trim_perf01_test01,
- trim_perf01_test02],
- "somePerfClass02":[trim_perf02_test01]}
- classify_perf_info, max_len = self.rr.run_stats.perf_info._classify_perf_info()
- correct_max_len = {'real_time': 6, 'cpu_time': 5, 'name': 10,
- 'iterations': 9, 'time_unit': 2}
- self.assertEquals(max_len, correct_max_len)
- self.assertEquals(classify_perf_info, correct_classify_perf_info)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest-py2/run_atest_unittests.sh b/atest-py2/run_atest_unittests.sh
deleted file mode 100755
index db28ac5..0000000
--- a/atest-py2/run_atest_unittests.sh
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/bin/bash
-
-# Copyright (C) 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# A simple helper script that runs all of the atest unit tests.
-# There are 2 situations that we take care of:
-# 1. User wants to invoke this script directly.
-# 2. PREUPLOAD hook invokes this script.
-
-ATEST_DIR=$(dirname $0)
-[ "$(uname -s)" == "Darwin" ] && { realpath(){ echo "$(cd $(dirname $1);pwd -P)/$(basename $1)"; }; }
-ATEST_REAL_PATH=$(realpath $ATEST_DIR)
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-NC='\033[0m' # No Color
-COVERAGE=false
-
-function get_pythonpath() {
- echo "$ATEST_REAL_PATH:$PYTHONPATH"
-}
-
-function print_summary() {
- local test_results=$1
- if [[ $COVERAGE == true ]]; then
- coverage report -m
- coverage html
- fi
- if [[ $test_results -eq 0 ]]; then
- echo -e "${GREEN}All unittests pass${NC}!"
- else
- echo -e "${RED}There was a unittest failure${NC}"
- fi
-}
-
-function run_atest_unittests() {
- echo "Running tests..."
- local run_cmd="python"
- local rc=0
- if [[ $COVERAGE == true ]]; then
- # Clear previously coverage data.
- python -m coverage erase
- # Collect coverage data.
- run_cmd="coverage run --source $ATEST_REAL_PATH --append"
- fi
-
- for test_file in $(find $ATEST_DIR -name "*_unittest.py"); do
- if ! PYTHONPATH=$(get_pythonpath) $run_cmd $test_file; then
- rc=1
- echo -e "${RED}$t failed${NC}"
- fi
- done
- echo
- print_summary $rc
- return $rc
-}
-
-# Let's check if anything is passed in, if not we assume the user is invoking
-# script, but if we get a list of files, assume it's the PREUPLOAD hook.
-read -ra PREUPLOAD_FILES <<< "$@"
-if [[ ${#PREUPLOAD_FILES[@]} -eq 0 ]]; then
- run_atest_unittests; exit $?
-elif [[ "${#PREUPLOAD_FILES[@]}" -eq 1 && "${PREUPLOAD_FILES}" == "coverage" ]]; then
- COVERAGE=true run_atest_unittests; exit $?
-else
- for f in ${PREUPLOAD_FILES[@]}; do
- # We only want to run this unittest if atest files have been touched.
- if [[ $f == atest/* ]]; then
- run_atest_unittests; exit $?
- fi
- done
-fi
diff --git a/atest-py2/test_data/test_commands.json b/atest-py2/test_data/test_commands.json
deleted file mode 100644
index ec64e16..0000000
--- a/atest-py2/test_data/test_commands.json
+++ /dev/null
@@ -1,59 +0,0 @@
-{
-"hello_world_test": [
-"atest_tradefed.sh template/atest_local_min --template:map test=atest --include-filter hello_world_test --log-level WARN --skip-loading-config-jar --logcat-on-failure --no-enable-granular-attempts"
-],
-"packages/apps/Car/Messenger/tests/robotests/src/com/android/car/messenger/MessengerDelegateTest.java": [
-"./build/soong/soong_ui.bash --make-mode RunCarMessengerRoboTests"
-],
-"CtsAnimationTestCases:AnimatorTest": [
-"atest_tradefed.sh template/atest_local_min --template:map test=atest --include-filter CtsAnimationTestCases --atest-include-filter CtsAnimationTestCases:android.animation.cts.AnimatorTest --log-level WARN --skip-loading-config-jar --logcat-on-failure --no-enable-granular-attempts"
-],
-"CtsSampleDeviceTestCases:android.sample.cts.SampleDeviceReportLogTest": [
-"atest_tradefed.sh template/atest_local_min --template:map test=atest --include-filter CtsSampleDeviceTestCases --atest-include-filter CtsSampleDeviceTestCases:android.sample.cts.SampleDeviceReportLogTest --log-level WARN --skip-loading-config-jar --logcat-on-failure --no-enable-granular-attempts"
-],
-"CtsAnimationTestCases CtsSampleDeviceTestCases": [
-"atest_tradefed.sh template/atest_local_min --template:map test=atest --include-filter CtsAnimationTestCases --include-filter CtsSampleDeviceTestCases --log-level WARN --skip-loading-config-jar --logcat-on-failure --no-enable-granular-attempts"
-],
-"AnimatorTest": [
-"atest_tradefed.sh template/atest_local_min --template:map test=atest --include-filter CtsAnimationTestCases --atest-include-filter CtsAnimationTestCases:android.animation.cts.AnimatorTest --log-level WARN --skip-loading-config-jar --logcat-on-failure --no-enable-granular-attempts"
-],
-"PacketFragmenterTest": [
-"atest_tradefed.sh template/atest_local_min --template:map test=atest --include-filter net_test_hci --atest-include-filter net_test_hci:PacketFragmenterTest.* --log-level WARN --skip-loading-config-jar --logcat-on-failure --no-enable-granular-attempts"
-],
-"android.animation.cts": [
-"atest_tradefed.sh template/atest_local_min --template:map test=atest --include-filter CtsAnimationTestCases --atest-include-filter CtsAnimationTestCases:android.animation.cts --log-level WARN --skip-loading-config-jar --logcat-on-failure --no-enable-granular-attempts"
-],
-"platform_testing/tests/example/native/Android.bp": [
-"atest_tradefed.sh template/atest_local_min --template:map test=atest --include-filter hello_world_test --log-level WARN --skip-loading-config-jar --logcat-on-failure --no-enable-granular-attempts"
-],
-"tools/tradefederation/core/res/config/native-benchmark.xml": [
-"atest_tradefed.sh template/atest_local_min --template:map test=atest --include-filter native-benchmark --log-level WARN --logcat-on-failure --no-enable-granular-attempts"
-],
-"native-benchmark": [
-"atest_tradefed.sh template/atest_local_min --template:map test=atest --include-filter native-benchmark --log-level WARN --logcat-on-failure --no-enable-granular-attempts"
-],
-"platform_testing/tests/example/native": [
-"atest_tradefed.sh template/atest_local_min --template:map test=atest --include-filter hello_world_test --log-level WARN --skip-loading-config-jar --logcat-on-failure --no-enable-granular-attempts"
-],
-"VtsCodelabHelloWorldTest": [
-"vts10-tradefed run commandAndExit vts-staging-default -m VtsCodelabHelloWorldTest --skip-all-system-status-check --skip-preconditions --primary-abi-only"
-],
-"aidegen_unittests": [
-"atest_tradefed.sh template/atest_local_min --template:map test=atest --atest-log-file-path=/tmp/atest_run_1568627341_v33kdA/log --include-filter aidegen_unittests --log-level WARN"
-],
-"HelloWorldTests": [
-"atest_tradefed.sh template/atest_local_min --template:map test=atest --include-filter HelloWorldTests --log-level WARN --skip-loading-config-jar --logcat-on-failure --no-enable-granular-attempts"
-],
-"CtsSampleDeviceTestCases:SampleDeviceTest#testSharedPreferences": [
-"atest_tradefed.sh template/atest_local_min --template:map test=atest --include-filter CtsSampleDeviceTestCases --atest-include-filter CtsSampleDeviceTestCases:android.sample.cts.SampleDeviceTest#testSharedPreferences --log-level WARN --skip-loading-config-jar --logcat-on-failure --no-enable-granular-attempts"
-],
-"CtsSampleDeviceTestCases:android.sample.cts": [
-"atest_tradefed.sh template/atest_local_min --template:map test=atest --include-filter CtsSampleDeviceTestCases --atest-include-filter CtsSampleDeviceTestCases:android.sample.cts --log-level WARN --skip-loading-config-jar --logcat-on-failure --no-enable-granular-attempts"
-],
-"PacketFragmenterTest#test_no_fragment_necessary,test_ble_fragment_necessary": [
-"atest_tradefed.sh template/atest_local_min --template:map test=atest --include-filter net_test_hci --atest-include-filter net_test_hci:PacketFragmenterTest.test_ble_fragment_necessary:PacketFragmenterTest.test_no_fragment_necessary --log-level WARN --skip-loading-config-jar --logcat-on-failure --no-enable-granular-attempts"
-],
-"CarMessengerRoboTests": [
-"./build/soong/soong_ui.bash --make-mode RunCarMessengerRoboTests"
-]
-}
diff --git a/atest-py2/test_finder_handler.py b/atest-py2/test_finder_handler.py
deleted file mode 100644
index 5736e1d..0000000
--- a/atest-py2/test_finder_handler.py
+++ /dev/null
@@ -1,257 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Test Finder Handler module.
-"""
-
-import logging
-
-import atest_enum
-from test_finders import cache_finder
-from test_finders import test_finder_base
-from test_finders import suite_plan_finder
-from test_finders import tf_integration_finder
-from test_finders import module_finder
-
-# List of default test finder classes.
-_TEST_FINDERS = {
- suite_plan_finder.SuitePlanFinder,
- tf_integration_finder.TFIntegrationFinder,
- module_finder.ModuleFinder,
- cache_finder.CacheFinder,
-}
-
-# Explanation of REFERENCE_TYPEs:
-# ----------------------------------
-# 0. MODULE: LOCAL_MODULE or LOCAL_PACKAGE_NAME value in Android.mk/Android.bp.
-# 1. CLASS: Names which the same with a ClassName.java/kt file.
-# 2. QUALIFIED_CLASS: String like "a.b.c.ClassName".
-# 3. MODULE_CLASS: Combo of MODULE and CLASS as "module:class".
-# 4. PACKAGE: Package in java file. Same as file path to java file.
-# 5. MODULE_PACKAGE: Combo of MODULE and PACKAGE as "module:package".
-# 6. MODULE_FILE_PATH: File path to dir of tests or test itself.
-# 7. INTEGRATION_FILE_PATH: File path to config xml in one of the 4 integration
-# config directories.
-# 8. INTEGRATION: xml file name in one of the 4 integration config directories.
-# 9. SUITE: Value of the "run-suite-tag" in xml config file in 4 config dirs.
-# Same as value of "test-suite-tag" in AndroidTest.xml files.
-# 10. CC_CLASS: Test case in cc file.
-# 11. SUITE_PLAN: Suite name such as cts.
-# 12. SUITE_PLAN_FILE_PATH: File path to config xml in the suite config directories.
-# 13. CACHE: A pseudo type that runs cache_finder without finding test in real.
-_REFERENCE_TYPE = atest_enum.AtestEnum(['MODULE', 'CLASS', 'QUALIFIED_CLASS',
- 'MODULE_CLASS', 'PACKAGE',
- 'MODULE_PACKAGE', 'MODULE_FILE_PATH',
- 'INTEGRATION_FILE_PATH', 'INTEGRATION',
- 'SUITE', 'CC_CLASS', 'SUITE_PLAN',
- 'SUITE_PLAN_FILE_PATH', 'CACHE'])
-
-_REF_TYPE_TO_FUNC_MAP = {
- _REFERENCE_TYPE.MODULE: module_finder.ModuleFinder.find_test_by_module_name,
- _REFERENCE_TYPE.CLASS: module_finder.ModuleFinder.find_test_by_class_name,
- _REFERENCE_TYPE.MODULE_CLASS: module_finder.ModuleFinder.find_test_by_module_and_class,
- _REFERENCE_TYPE.QUALIFIED_CLASS: module_finder.ModuleFinder.find_test_by_class_name,
- _REFERENCE_TYPE.PACKAGE: module_finder.ModuleFinder.find_test_by_package_name,
- _REFERENCE_TYPE.MODULE_PACKAGE: module_finder.ModuleFinder.find_test_by_module_and_package,
- _REFERENCE_TYPE.MODULE_FILE_PATH: module_finder.ModuleFinder.find_test_by_path,
- _REFERENCE_TYPE.INTEGRATION_FILE_PATH:
- tf_integration_finder.TFIntegrationFinder.find_int_test_by_path,
- _REFERENCE_TYPE.INTEGRATION:
- tf_integration_finder.TFIntegrationFinder.find_test_by_integration_name,
- _REFERENCE_TYPE.CC_CLASS:
- module_finder.ModuleFinder.find_test_by_cc_class_name,
- _REFERENCE_TYPE.SUITE_PLAN:suite_plan_finder.SuitePlanFinder.find_test_by_suite_name,
- _REFERENCE_TYPE.SUITE_PLAN_FILE_PATH:
- suite_plan_finder.SuitePlanFinder.find_test_by_suite_path,
- _REFERENCE_TYPE.CACHE: cache_finder.CacheFinder.find_test_by_cache,
-}
-
-
-def _get_finder_instance_dict(module_info):
- """Return dict of finder instances.
-
- Args:
- module_info: ModuleInfo for finder classes to use.
-
- Returns:
- Dict of finder instances keyed by their name.
- """
- instance_dict = {}
- for finder in _get_test_finders():
- instance_dict[finder.NAME] = finder(module_info=module_info)
- return instance_dict
-
-
-def _get_test_finders():
- """Returns the test finders.
-
- If external test types are defined outside atest, they can be try-except
- imported into here.
-
- Returns:
- Set of test finder classes.
- """
- test_finders_list = _TEST_FINDERS
- # Example import of external test finder:
- try:
- from test_finders import example_finder
- test_finders_list.add(example_finder.ExampleFinder)
- except ImportError:
- pass
- return test_finders_list
-
-# pylint: disable=too-many-return-statements
-def _get_test_reference_types(ref):
- """Determine type of test reference based on the content of string.
-
- Examples:
- The string 'SequentialRWTest' could be a reference to
- a Module or a Class name.
-
- The string 'cts/tests/filesystem' could be a Path, Integration
- or Suite reference.
-
- Args:
- ref: A string referencing a test.
-
- Returns:
- A list of possible REFERENCE_TYPEs (ints) for reference string.
- """
- if ref.startswith('.') or '..' in ref:
- return [_REFERENCE_TYPE.CACHE,
- _REFERENCE_TYPE.INTEGRATION_FILE_PATH,
- _REFERENCE_TYPE.MODULE_FILE_PATH,
- _REFERENCE_TYPE.SUITE_PLAN_FILE_PATH]
- if '/' in ref:
- if ref.startswith('/'):
- return [_REFERENCE_TYPE.CACHE,
- _REFERENCE_TYPE.INTEGRATION_FILE_PATH,
- _REFERENCE_TYPE.MODULE_FILE_PATH,
- _REFERENCE_TYPE.SUITE_PLAN_FILE_PATH]
- return [_REFERENCE_TYPE.CACHE,
- _REFERENCE_TYPE.INTEGRATION_FILE_PATH,
- _REFERENCE_TYPE.MODULE_FILE_PATH,
- _REFERENCE_TYPE.INTEGRATION,
- _REFERENCE_TYPE.SUITE_PLAN_FILE_PATH,
- # TODO: Uncomment in SUITE when it's supported
- # _REFERENCE_TYPE.SUITE
- ]
- if '.' in ref:
- ref_end = ref.rsplit('.', 1)[-1]
- ref_end_is_upper = ref_end[0].isupper()
- if ':' in ref:
- if '.' in ref:
- if ref_end_is_upper:
- # Module:fully.qualified.Class or Integration:fully.q.Class
- return [_REFERENCE_TYPE.CACHE,
- _REFERENCE_TYPE.INTEGRATION,
- _REFERENCE_TYPE.MODULE_CLASS]
- # Module:some.package
- return [_REFERENCE_TYPE.CACHE, _REFERENCE_TYPE.MODULE_PACKAGE,
- _REFERENCE_TYPE.MODULE_CLASS]
- # Module:Class or IntegrationName:Class
- return [_REFERENCE_TYPE.CACHE,
- _REFERENCE_TYPE.INTEGRATION,
- _REFERENCE_TYPE.MODULE_CLASS]
- if '.' in ref:
- # The string of ref_end possibly includes specific mathods, e.g.
- # foo.java#method, so let ref_end be the first part of splitting '#'.
- if "#" in ref_end:
- ref_end = ref_end.split('#')[0]
- if ref_end in ('java', 'kt', 'bp', 'mk', 'cc', 'cpp'):
- return [_REFERENCE_TYPE.CACHE, _REFERENCE_TYPE.MODULE_FILE_PATH]
- if ref_end == 'xml':
- return [_REFERENCE_TYPE.CACHE,
- _REFERENCE_TYPE.INTEGRATION_FILE_PATH,
- _REFERENCE_TYPE.SUITE_PLAN_FILE_PATH]
- if ref_end_is_upper:
- return [_REFERENCE_TYPE.CACHE, _REFERENCE_TYPE.QUALIFIED_CLASS]
- return [_REFERENCE_TYPE.CACHE,
- _REFERENCE_TYPE.MODULE,
- _REFERENCE_TYPE.PACKAGE]
- # Note: We assume that if you're referencing a file in your cwd,
- # that file must have a '.' in its name, i.e. foo.java, foo.xml.
- # If this ever becomes not the case, then we need to include path below.
- return [_REFERENCE_TYPE.CACHE,
- _REFERENCE_TYPE.INTEGRATION,
- # TODO: Uncomment in SUITE when it's supported
- # _REFERENCE_TYPE.SUITE,
- _REFERENCE_TYPE.MODULE,
- _REFERENCE_TYPE.SUITE_PLAN,
- _REFERENCE_TYPE.CLASS,
- _REFERENCE_TYPE.CC_CLASS]
-
-
-def _get_registered_find_methods(module_info):
- """Return list of registered find methods.
-
- This is used to return find methods that were not listed in the
- default find methods but just registered in the finder classes. These
- find methods will run before the default find methods.
-
- Args:
- module_info: ModuleInfo for finder classes to instantiate with.
-
- Returns:
- List of registered find methods.
- """
- find_methods = []
- finder_instance_dict = _get_finder_instance_dict(module_info)
- for finder in _get_test_finders():
- finder_instance = finder_instance_dict[finder.NAME]
- for find_method_info in finder_instance.get_all_find_methods():
- find_methods.append(test_finder_base.Finder(
- finder_instance, find_method_info.find_method, finder.NAME))
- return find_methods
-
-
-def _get_default_find_methods(module_info, test):
- """Default find methods to be used based on the given test name.
-
- Args:
- module_info: ModuleInfo for finder instances to use.
- test: String of test name to help determine which find methods
- to utilize.
-
- Returns:
- List of find methods to use.
- """
- find_methods = []
- finder_instance_dict = _get_finder_instance_dict(module_info)
- test_ref_types = _get_test_reference_types(test)
- logging.debug('Resolved input to possible references: %s', [
- _REFERENCE_TYPE[t] for t in test_ref_types])
- for test_ref_type in test_ref_types:
- find_method = _REF_TYPE_TO_FUNC_MAP[test_ref_type]
- finder_instance = finder_instance_dict[find_method.im_class.NAME]
- finder_info = _REFERENCE_TYPE[test_ref_type]
- find_methods.append(test_finder_base.Finder(finder_instance,
- find_method,
- finder_info))
- return find_methods
-
-
-def get_find_methods_for_test(module_info, test):
- """Return a list of ordered find methods.
-
- Args:
- test: String of test name to get find methods for.
-
- Returns:
- List of ordered find methods.
- """
- registered_find_methods = _get_registered_find_methods(module_info)
- default_find_methods = _get_default_find_methods(module_info, test)
- return registered_find_methods + default_find_methods
diff --git a/atest-py2/test_finder_handler_unittest.py b/atest-py2/test_finder_handler_unittest.py
deleted file mode 100755
index 9fc1ef8..0000000
--- a/atest-py2/test_finder_handler_unittest.py
+++ /dev/null
@@ -1,265 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittests for test_finder_handler."""
-
-import unittest
-import mock
-
-import atest_error
-import test_finder_handler
-from test_finders import test_info
-from test_finders import test_finder_base
-
-#pylint: disable=protected-access
-REF_TYPE = test_finder_handler._REFERENCE_TYPE
-
-_EXAMPLE_FINDER_A = 'EXAMPLE_A'
-
-
-#pylint: disable=no-self-use
-@test_finder_base.find_method_register
-class ExampleFinderA(test_finder_base.TestFinderBase):
- """Example finder class A."""
- NAME = _EXAMPLE_FINDER_A
- _TEST_RUNNER = 'TEST_RUNNER'
-
- @test_finder_base.register()
- def registered_find_method_from_example_finder(self, test):
- """Registered Example find method."""
- if test == 'ExampleFinderATrigger':
- return test_info.TestInfo(test_name=test,
- test_runner=self._TEST_RUNNER,
- build_targets=set())
- return None
-
- def unregistered_find_method_from_example_finder(self, _test):
- """Unregistered Example find method, should never be called."""
- raise atest_error.ShouldNeverBeCalledError()
-
-
-_TEST_FINDERS_PATCH = {
- ExampleFinderA,
-}
-
-
-_FINDER_INSTANCES = {
- _EXAMPLE_FINDER_A: ExampleFinderA(),
-}
-
-
-class TestFinderHandlerUnittests(unittest.TestCase):
- """Unit tests for test_finder_handler.py"""
-
- def setUp(self):
- """Set up for testing."""
- # pylint: disable=invalid-name
- # This is so we can see the full diffs when there are mismatches.
- self.maxDiff = None
- self.empty_mod_info = None
- # We want to control the finders we return.
- mock.patch('test_finder_handler._get_test_finders',
- lambda: _TEST_FINDERS_PATCH).start()
- # Since we're going to be comparing instance objects, we'll need to keep
- # track of the objects so they align.
- mock.patch('test_finder_handler._get_finder_instance_dict',
- lambda x: _FINDER_INSTANCES).start()
- # We want to mock out the default find methods to make sure we got all
- # the methods we expect.
- mock.patch('test_finder_handler._get_default_find_methods',
- lambda x, y: [test_finder_base.Finder(
- _FINDER_INSTANCES[_EXAMPLE_FINDER_A],
- ExampleFinderA.unregistered_find_method_from_example_finder,
- _EXAMPLE_FINDER_A)]).start()
-
- def tearDown(self):
- """Tear down."""
- mock.patch.stopall()
-
- def test_get_test_reference_types(self):
- """Test _get_test_reference_types parses reference types correctly."""
- self.assertEqual(
- test_finder_handler._get_test_reference_types('ModuleOrClassName'),
- [REF_TYPE.CACHE, REF_TYPE.INTEGRATION, REF_TYPE.MODULE,
- REF_TYPE.SUITE_PLAN, REF_TYPE.CLASS, REF_TYPE.CC_CLASS]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('Module_or_Class_name'),
- [REF_TYPE.CACHE, REF_TYPE.INTEGRATION, REF_TYPE.MODULE,
- REF_TYPE.SUITE_PLAN, REF_TYPE.CLASS, REF_TYPE.CC_CLASS]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('SuiteName'),
- [REF_TYPE.CACHE, REF_TYPE.INTEGRATION, REF_TYPE.MODULE,
- REF_TYPE.SUITE_PLAN, REF_TYPE.CLASS, REF_TYPE.CC_CLASS]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('Suite-Name'),
- [REF_TYPE.CACHE, REF_TYPE.INTEGRATION, REF_TYPE.MODULE,
- REF_TYPE.SUITE_PLAN, REF_TYPE.CLASS, REF_TYPE.CC_CLASS]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('some.package'),
- [REF_TYPE.CACHE, REF_TYPE.MODULE, REF_TYPE.PACKAGE]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('fully.q.Class'),
- [REF_TYPE.CACHE, REF_TYPE.QUALIFIED_CLASS]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('Integration.xml'),
- [REF_TYPE.CACHE, REF_TYPE.INTEGRATION_FILE_PATH,
- REF_TYPE.SUITE_PLAN_FILE_PATH]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('SomeClass.java'),
- [REF_TYPE.CACHE, REF_TYPE.MODULE_FILE_PATH]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('SomeClass.kt'),
- [REF_TYPE.CACHE, REF_TYPE.MODULE_FILE_PATH]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('Android.mk'),
- [REF_TYPE.CACHE, REF_TYPE.MODULE_FILE_PATH]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('Android.bp'),
- [REF_TYPE.CACHE, REF_TYPE.MODULE_FILE_PATH]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('SomeTest.cc'),
- [REF_TYPE.CACHE, REF_TYPE.MODULE_FILE_PATH]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('SomeTest.cpp'),
- [REF_TYPE.CACHE, REF_TYPE.MODULE_FILE_PATH]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('SomeTest.cc#method'),
- [REF_TYPE.CACHE, REF_TYPE.MODULE_FILE_PATH]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('module:Class'),
- [REF_TYPE.CACHE, REF_TYPE.INTEGRATION, REF_TYPE.MODULE_CLASS]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('module:f.q.Class'),
- [REF_TYPE.CACHE, REF_TYPE.INTEGRATION, REF_TYPE.MODULE_CLASS]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('module:a.package'),
- [REF_TYPE.CACHE, REF_TYPE.MODULE_PACKAGE, REF_TYPE.MODULE_CLASS]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('.'),
- [REF_TYPE.CACHE, REF_TYPE.INTEGRATION_FILE_PATH,
- REF_TYPE.MODULE_FILE_PATH, REF_TYPE.SUITE_PLAN_FILE_PATH]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('..'),
- [REF_TYPE.CACHE, REF_TYPE.INTEGRATION_FILE_PATH,
- REF_TYPE.MODULE_FILE_PATH, REF_TYPE.SUITE_PLAN_FILE_PATH]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('./rel/path/to/test'),
- [REF_TYPE.CACHE, REF_TYPE.INTEGRATION_FILE_PATH,
- REF_TYPE.MODULE_FILE_PATH, REF_TYPE.SUITE_PLAN_FILE_PATH]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('rel/path/to/test'),
- [REF_TYPE.CACHE, REF_TYPE.INTEGRATION_FILE_PATH,
- REF_TYPE.MODULE_FILE_PATH, REF_TYPE.INTEGRATION,
- REF_TYPE.SUITE_PLAN_FILE_PATH]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('/abs/path/to/test'),
- [REF_TYPE.CACHE, REF_TYPE.INTEGRATION_FILE_PATH,
- REF_TYPE.MODULE_FILE_PATH, REF_TYPE.SUITE_PLAN_FILE_PATH]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('int/test'),
- [REF_TYPE.CACHE, REF_TYPE.INTEGRATION_FILE_PATH,
- REF_TYPE.MODULE_FILE_PATH, REF_TYPE.INTEGRATION,
- REF_TYPE.SUITE_PLAN_FILE_PATH]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('int/test:fully.qual.Class#m'),
- [REF_TYPE.CACHE, REF_TYPE.INTEGRATION_FILE_PATH,
- REF_TYPE.MODULE_FILE_PATH, REF_TYPE.INTEGRATION,
- REF_TYPE.SUITE_PLAN_FILE_PATH]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('int/test:Class#method'),
- [REF_TYPE.CACHE, REF_TYPE.INTEGRATION_FILE_PATH,
- REF_TYPE.MODULE_FILE_PATH, REF_TYPE.INTEGRATION,
- REF_TYPE.SUITE_PLAN_FILE_PATH]
- )
- self.assertEqual(
- test_finder_handler._get_test_reference_types('int_name_no_slash:Class#m'),
- [REF_TYPE.CACHE, REF_TYPE.INTEGRATION, REF_TYPE.MODULE_CLASS]
- )
-
- def test_get_registered_find_methods(self):
- """Test that we get the registered find methods."""
- empty_mod_info = None
- example_finder_a_instance = test_finder_handler._get_finder_instance_dict(
- empty_mod_info)[_EXAMPLE_FINDER_A]
- should_equal = [
- test_finder_base.Finder(
- example_finder_a_instance,
- ExampleFinderA.registered_find_method_from_example_finder,
- _EXAMPLE_FINDER_A)]
- should_not_equal = [
- test_finder_base.Finder(
- example_finder_a_instance,
- ExampleFinderA.unregistered_find_method_from_example_finder,
- _EXAMPLE_FINDER_A)]
- # Let's make sure we see the registered method.
- self.assertEqual(
- should_equal,
- test_finder_handler._get_registered_find_methods(empty_mod_info)
- )
- # Make sure we don't see the unregistered method here.
- self.assertNotEqual(
- should_not_equal,
- test_finder_handler._get_registered_find_methods(empty_mod_info)
- )
-
- def test_get_find_methods_for_test(self):
- """Test that we get the find methods we expect."""
- # Let's see that we get the unregistered and registered find methods in
- # the order we expect.
- test = ''
- registered_find_methods = [
- test_finder_base.Finder(
- _FINDER_INSTANCES[_EXAMPLE_FINDER_A],
- ExampleFinderA.registered_find_method_from_example_finder,
- _EXAMPLE_FINDER_A)]
- default_find_methods = [
- test_finder_base.Finder(
- _FINDER_INSTANCES[_EXAMPLE_FINDER_A],
- ExampleFinderA.unregistered_find_method_from_example_finder,
- _EXAMPLE_FINDER_A)]
- should_equal = registered_find_methods + default_find_methods
- self.assertEqual(
- should_equal,
- test_finder_handler.get_find_methods_for_test(self.empty_mod_info,
- test))
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest-py2/test_finders/__init__.py b/atest-py2/test_finders/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/atest-py2/test_finders/__init__.py
+++ /dev/null
diff --git a/atest-py2/test_finders/cache_finder.py b/atest-py2/test_finders/cache_finder.py
deleted file mode 100644
index 5b7bd07..0000000
--- a/atest-py2/test_finders/cache_finder.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2019, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Cache Finder class.
-"""
-
-import atest_utils
-from test_finders import test_finder_base
-from test_finders import test_info
-
-class CacheFinder(test_finder_base.TestFinderBase):
- """Cache Finder class."""
- NAME = 'CACHE'
-
- def __init__(self, **kwargs):
- super(CacheFinder, self).__init__()
-
- def _is_latest_testinfos(self, test_infos):
- """Check whether test_infos are up-to-date.
-
- Args:
- test_infos: A list of TestInfo.
-
- Returns:
- True if all keys in test_infos and TestInfo object are equal.
- Otherwise, False.
- """
- sorted_base_ti = sorted(
- vars(test_info.TestInfo(None, None, None)).keys())
- for cached_test_info in test_infos:
- sorted_cache_ti = sorted(vars(cached_test_info).keys())
- if not sorted_cache_ti == sorted_base_ti:
- return False
- return True
-
- def find_test_by_cache(self, test_reference):
- """Find the matched test_infos in saved caches.
-
- Args:
- test_reference: A string of the path to the test's file or dir.
-
- Returns:
- A list of TestInfo namedtuple if cache found and is in latest
- TestInfo format, else None.
- """
- test_infos = atest_utils.load_test_info_cache(test_reference)
- if test_infos and self._is_latest_testinfos(test_infos):
- return test_infos
- return None
diff --git a/atest-py2/test_finders/cache_finder_unittest.py b/atest-py2/test_finders/cache_finder_unittest.py
deleted file mode 100755
index 7797ea3..0000000
--- a/atest-py2/test_finders/cache_finder_unittest.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2019, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittests for cache_finder."""
-
-import unittest
-import os
-import mock
-
-# pylint: disable=import-error
-import atest_utils
-import unittest_constants as uc
-from test_finders import cache_finder
-
-
-#pylint: disable=protected-access
-class CacheFinderUnittests(unittest.TestCase):
- """Unit tests for cache_finder.py"""
- def setUp(self):
- """Set up stuff for testing."""
- self.cache_finder = cache_finder.CacheFinder()
-
- @mock.patch.object(atest_utils, 'get_test_info_cache_path')
- def test_find_test_by_cache(self, mock_get_cache_path):
- """Test find_test_by_cache method."""
- uncached_test = 'mytest1'
- cached_test = 'hello_world_test'
- uncached_test2 = 'mytest2'
- test_cache_root = os.path.join(uc.TEST_DATA_DIR, 'cache_root')
- # Hit matched cache file but no original_finder in it,
- # should return None.
- mock_get_cache_path.return_value = os.path.join(
- test_cache_root,
- 'cd66f9f5ad63b42d0d77a9334de6bb73.cache')
- self.assertIsNone(self.cache_finder.find_test_by_cache(uncached_test))
- # Hit matched cache file and original_finder is in it,
- # should return cached test infos.
- mock_get_cache_path.return_value = os.path.join(
- test_cache_root,
- '78ea54ef315f5613f7c11dd1a87f10c7.cache')
- self.assertIsNotNone(self.cache_finder.find_test_by_cache(cached_test))
- # Does not hit matched cache file, should return cached test infos.
- mock_get_cache_path.return_value = os.path.join(
- test_cache_root,
- '39488b7ac83c56d5a7d285519fe3e3fd.cache')
- self.assertIsNone(self.cache_finder.find_test_by_cache(uncached_test2))
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest-py2/test_finders/example_finder.py b/atest-py2/test_finders/example_finder.py
deleted file mode 100644
index d1fc33b..0000000
--- a/atest-py2/test_finders/example_finder.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Example Finder class.
-"""
-
-# pylint: disable=import-error
-from test_finders import test_info
-from test_finders import test_finder_base
-from test_runners import example_test_runner
-
-
-@test_finder_base.find_method_register
-class ExampleFinder(test_finder_base.TestFinderBase):
- """Example finder class."""
- NAME = 'EXAMPLE'
- _TEST_RUNNER = example_test_runner.ExampleTestRunner.NAME
-
- @test_finder_base.register()
- def find_method_from_example_finder(self, test):
- """Example find method to demonstrate how to register it."""
- if test == 'ExampleFinderTest':
- return test_info.TestInfo(test_name=test,
- test_runner=self._TEST_RUNNER,
- build_targets=set())
- return None
diff --git a/atest-py2/test_finders/module_finder.py b/atest-py2/test_finders/module_finder.py
deleted file mode 100644
index 049658e..0000000
--- a/atest-py2/test_finders/module_finder.py
+++ /dev/null
@@ -1,652 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Module Finder class.
-"""
-
-import logging
-import os
-
-# pylint: disable=import-error
-import atest_error
-import atest_utils
-import constants
-from test_finders import test_info
-from test_finders import test_finder_base
-from test_finders import test_finder_utils
-from test_runners import atest_tf_test_runner
-from test_runners import robolectric_test_runner
-from test_runners import vts_tf_test_runner
-
-_MODULES_IN = 'MODULES-IN-%s'
-_ANDROID_MK = 'Android.mk'
-
-# These are suites in LOCAL_COMPATIBILITY_SUITE that aren't really suites so
-# we can ignore them.
-_SUITES_TO_IGNORE = frozenset({'general-tests', 'device-tests', 'tests'})
-
-class ModuleFinder(test_finder_base.TestFinderBase):
- """Module finder class."""
- NAME = 'MODULE'
- _TEST_RUNNER = atest_tf_test_runner.AtestTradefedTestRunner.NAME
- _ROBOLECTRIC_RUNNER = robolectric_test_runner.RobolectricTestRunner.NAME
- _VTS_TEST_RUNNER = vts_tf_test_runner.VtsTradefedTestRunner.NAME
-
- def __init__(self, module_info=None):
- super(ModuleFinder, self).__init__()
- self.root_dir = os.environ.get(constants.ANDROID_BUILD_TOP)
- self.module_info = module_info
-
- def _determine_testable_module(self, path):
- """Determine which module the user is trying to test.
-
- Returns the module to test. If there are multiple possibilities, will
- ask the user. Otherwise will return the only module found.
-
- Args:
- path: String path of module to look for.
-
- Returns:
- A list of the module names.
- """
- testable_modules = []
- for mod in self.module_info.get_module_names(path):
- mod_info = self.module_info.get_module_info(mod)
- # Robolectric tests always exist in pairs of 2, one module to build
- # the test and another to run it. For now, we are assuming they are
- # isolated in their own folders and will return if we find one.
- if self.module_info.is_robolectric_test(mod):
- # return a list with one module name if it is robolectric.
- return [mod]
- if self.module_info.is_testable_module(mod_info):
- testable_modules.append(mod_info.get(constants.MODULE_NAME))
- return test_finder_utils.extract_test_from_tests(testable_modules)
-
- def _is_vts_module(self, module_name):
- """Returns True if the module is a vts10 module, else False."""
- mod_info = self.module_info.get_module_info(module_name)
- suites = []
- if mod_info:
- suites = mod_info.get('compatibility_suites', [])
- # Pull out all *ts (cts, tvts, etc) suites.
- suites = [suite for suite in suites if suite not in _SUITES_TO_IGNORE]
- return len(suites) == 1 and 'vts10' in suites
-
- def _update_to_vts_test_info(self, test):
- """Fill in the fields with vts10 specific info.
-
- We need to update the runner to use the vts10 runner and also find the
- test specific dependencies.
-
- Args:
- test: TestInfo to update with vts10 specific details.
-
- Return:
- TestInfo that is ready for the vts10 test runner.
- """
- test.test_runner = self._VTS_TEST_RUNNER
- config_file = os.path.join(self.root_dir,
- test.data[constants.TI_REL_CONFIG])
- # Need to get out dir (special logic is to account for custom out dirs).
- # The out dir is used to construct the build targets for the test deps.
- out_dir = os.environ.get(constants.ANDROID_HOST_OUT)
- custom_out_dir = os.environ.get(constants.ANDROID_OUT_DIR)
- # If we're not an absolute custom out dir, get relative out dir path.
- if custom_out_dir is None or not os.path.isabs(custom_out_dir):
- out_dir = os.path.relpath(out_dir, self.root_dir)
- vts_out_dir = os.path.join(out_dir, 'vts10', 'android-vts10', 'testcases')
- # Parse dependency of default staging plans.
- xml_paths = test_finder_utils.search_integration_dirs(
- constants.VTS_STAGING_PLAN,
- self.module_info.get_paths(constants.VTS_TF_MODULE))
- vts_xmls = set()
- vts_xmls.add(config_file)
- for xml_path in xml_paths:
- vts_xmls |= test_finder_utils.get_plans_from_vts_xml(xml_path)
- for config_file in vts_xmls:
- # Add in vts10 test build targets.
- test.build_targets |= test_finder_utils.get_targets_from_vts_xml(
- config_file, vts_out_dir, self.module_info)
- test.build_targets.add('vts-test-core')
- test.build_targets.add(test.test_name)
- return test
-
- def _update_to_robolectric_test_info(self, test):
- """Update the fields for a robolectric test.
-
- Args:
- test: TestInfo to be updated with robolectric fields.
-
- Returns:
- TestInfo with robolectric fields.
- """
- test.test_runner = self._ROBOLECTRIC_RUNNER
- test.test_name = self.module_info.get_robolectric_test_name(test.test_name)
- return test
-
- def _process_test_info(self, test):
- """Process the test info and return some fields updated/changed.
-
- We need to check if the test found is a special module (like vts10) and
- update the test_info fields (like test_runner) appropriately.
-
- Args:
- test: TestInfo that has been filled out by a find method.
-
- Return:
- TestInfo that has been modified as needed and return None if
- this module can't be found in the module_info.
- """
- module_name = test.test_name
- mod_info = self.module_info.get_module_info(module_name)
- if not mod_info:
- return None
- test.module_class = mod_info['class']
- test.install_locations = test_finder_utils.get_install_locations(
- mod_info['installed'])
- # Check if this is only a vts10 module.
- if self._is_vts_module(test.test_name):
- return self._update_to_vts_test_info(test)
- elif self.module_info.is_robolectric_test(test.test_name):
- return self._update_to_robolectric_test_info(test)
- rel_config = test.data[constants.TI_REL_CONFIG]
- test.build_targets = self._get_build_targets(module_name, rel_config)
- return test
-
- def _get_build_targets(self, module_name, rel_config):
- """Get the test deps.
-
- Args:
- module_name: name of the test.
- rel_config: XML for the given test.
-
- Returns:
- Set of build targets.
- """
- targets = set()
- if not self.module_info.is_auto_gen_test_config(module_name):
- config_file = os.path.join(self.root_dir, rel_config)
- targets = test_finder_utils.get_targets_from_xml(config_file,
- self.module_info)
- if constants.VTS_CORE_SUITE in self.module_info.get_module_info(
- module_name).get(constants.MODULE_COMPATIBILITY_SUITES, []):
- targets.add(constants.VTS_CORE_TF_MODULE)
- for module_path in self.module_info.get_paths(module_name):
- mod_dir = module_path.replace('/', '-')
- targets.add(_MODULES_IN % mod_dir)
- # (b/156457698) Force add vts_kernel_tests as build target if our test
- # belong to REQUIRED_KERNEL_TEST_MODULES due to required_module option
- # not working for sh_test in soong.
- if module_name in constants.REQUIRED_KERNEL_TEST_MODULES:
- targets.add('vts_kernel_tests')
- return targets
-
- def _get_module_test_config(self, module_name, rel_config=None):
- """Get the value of test_config in module_info.
-
- Get the value of 'test_config' in module_info if its
- auto_test_config is not true.
- In this case, the test_config is specified by user.
- If not, return rel_config.
-
- Args:
- module_name: A string of the test's module name.
- rel_config: XML for the given test.
-
- Returns:
- A string of test_config path if found, else return rel_config.
- """
- mod_info = self.module_info.get_module_info(module_name)
- if mod_info:
- test_config = ''
- test_config_list = mod_info.get(constants.MODULE_TEST_CONFIG, [])
- if test_config_list:
- test_config = test_config_list[0]
- if not self.module_info.is_auto_gen_test_config(module_name) and test_config != '':
- return test_config
- return rel_config
-
- def _get_test_info_filter(self, path, methods, **kwargs):
- """Get test info filter.
-
- Args:
- path: A string of the test's path.
- methods: A set of method name strings.
- rel_module_dir: Optional. A string of the module dir relative to
- root.
- class_name: Optional. A string of the class name.
- is_native_test: Optional. A boolean variable of whether to search
- for a native test or not.
-
- Returns:
- A set of test info filter.
- """
- _, file_name = test_finder_utils.get_dir_path_and_filename(path)
- ti_filter = frozenset()
- if kwargs.get('is_native_test', None):
- ti_filter = frozenset([test_info.TestFilter(
- test_finder_utils.get_cc_filter(
- kwargs.get('class_name', '*'), methods), frozenset())])
- # Path to java file.
- elif file_name and constants.JAVA_EXT_RE.match(file_name):
- full_class_name = test_finder_utils.get_fully_qualified_class_name(
- path)
- ti_filter = frozenset(
- [test_info.TestFilter(full_class_name, methods)])
- # Path to cc file.
- elif file_name and constants.CC_EXT_RE.match(file_name):
- if not test_finder_utils.has_cc_class(path):
- raise atest_error.MissingCCTestCaseError(
- "Can't find CC class in %s" % path)
- if methods:
- ti_filter = frozenset(
- [test_info.TestFilter(test_finder_utils.get_cc_filter(
- kwargs.get('class_name', '*'), methods), frozenset())])
- # Path to non-module dir, treat as package.
- elif (not file_name
- and kwargs.get('rel_module_dir', None) !=
- os.path.relpath(path, self.root_dir)):
- dir_items = [os.path.join(path, f) for f in os.listdir(path)]
- for dir_item in dir_items:
- if constants.JAVA_EXT_RE.match(dir_item):
- package_name = test_finder_utils.get_package_name(dir_item)
- if package_name:
- # methods should be empty frozenset for package.
- if methods:
- raise atest_error.MethodWithoutClassError(
- '%s: Method filtering requires class'
- % str(methods))
- ti_filter = frozenset(
- [test_info.TestFilter(package_name, methods)])
- break
- return ti_filter
-
- def _get_rel_config(self, test_path):
- """Get config file's relative path.
-
- Args:
- test_path: A string of the test absolute path.
-
- Returns:
- A string of config's relative path, else None.
- """
- test_dir = os.path.dirname(test_path)
- rel_module_dir = test_finder_utils.find_parent_module_dir(
- self.root_dir, test_dir, self.module_info)
- if rel_module_dir:
- return os.path.join(rel_module_dir, constants.MODULE_CONFIG)
- return None
-
- def _get_test_infos(self, test_path, rel_config, module_name, test_filter):
- """Get test_info for test_path.
-
- Args:
- test_path: A string of the test path.
- rel_config: A string of rel path of config.
- module_name: A string of the module name to use.
- test_filter: A test info filter.
-
- Returns:
- A list of TestInfo namedtuple if found, else None.
- """
- if not rel_config:
- rel_config = self._get_rel_config(test_path)
- if not rel_config:
- return None
- if module_name:
- module_names = [module_name]
- else:
- module_names = self._determine_testable_module(
- os.path.dirname(rel_config))
- test_infos = []
- if module_names:
- for mname in module_names:
- # The real test config might be record in module-info.
- rel_config = self._get_module_test_config(mname,
- rel_config=rel_config)
- mod_info = self.module_info.get_module_info(mname)
- tinfo = self._process_test_info(test_info.TestInfo(
- test_name=mname,
- test_runner=self._TEST_RUNNER,
- build_targets=set(),
- data={constants.TI_FILTER: test_filter,
- constants.TI_REL_CONFIG: rel_config},
- compatibility_suites=mod_info.get(
- constants.MODULE_COMPATIBILITY_SUITES, [])))
- if tinfo:
- test_infos.append(tinfo)
- return test_infos
-
- def find_test_by_module_name(self, module_name):
- """Find test for the given module name.
-
- Args:
- module_name: A string of the test's module name.
-
- Returns:
- A list that includes only 1 populated TestInfo namedtuple
- if found, otherwise None.
- """
- mod_info = self.module_info.get_module_info(module_name)
- if self.module_info.is_testable_module(mod_info):
- # path is a list with only 1 element.
- rel_config = os.path.join(mod_info['path'][0],
- constants.MODULE_CONFIG)
- rel_config = self._get_module_test_config(module_name, rel_config=rel_config)
- tinfo = self._process_test_info(test_info.TestInfo(
- test_name=module_name,
- test_runner=self._TEST_RUNNER,
- build_targets=set(),
- data={constants.TI_REL_CONFIG: rel_config,
- constants.TI_FILTER: frozenset()},
- compatibility_suites=mod_info.get(
- constants.MODULE_COMPATIBILITY_SUITES, [])))
- if tinfo:
- return [tinfo]
- return None
-
- def find_test_by_kernel_class_name(self, module_name, class_name):
- """Find kernel test for the given class name.
-
- Args:
- module_name: A string of the module name to use.
- class_name: A string of the test's class name.
-
- Returns:
- A list of populated TestInfo namedtuple if test found, else None.
- """
- class_name, methods = test_finder_utils.split_methods(class_name)
- test_config = self._get_module_test_config(module_name)
- test_config_path = os.path.join(self.root_dir, test_config)
- mod_info = self.module_info.get_module_info(module_name)
- ti_filter = frozenset(
- [test_info.TestFilter(class_name, methods)])
- if test_finder_utils.is_test_from_kernel_xml(test_config_path, class_name):
- tinfo = self._process_test_info(test_info.TestInfo(
- test_name=module_name,
- test_runner=self._TEST_RUNNER,
- build_targets=set(),
- data={constants.TI_REL_CONFIG: test_config,
- constants.TI_FILTER: ti_filter},
- compatibility_suites=mod_info.get(
- constants.MODULE_COMPATIBILITY_SUITES, [])))
- if tinfo:
- return [tinfo]
- return None
-
- def find_test_by_class_name(self, class_name, module_name=None,
- rel_config=None, is_native_test=False):
- """Find test files given a class name.
-
- If module_name and rel_config not given it will calculate it determine
- it by looking up the tree from the class file.
-
- Args:
- class_name: A string of the test's class name.
- module_name: Optional. A string of the module name to use.
- rel_config: Optional. A string of module dir relative to repo root.
- is_native_test: A boolean variable of whether to search for a
- native test or not.
-
- Returns:
- A list of populated TestInfo namedtuple if test found, else None.
- """
- class_name, methods = test_finder_utils.split_methods(class_name)
- if rel_config:
- search_dir = os.path.join(self.root_dir,
- os.path.dirname(rel_config))
- else:
- search_dir = self.root_dir
- test_paths = test_finder_utils.find_class_file(search_dir, class_name,
- is_native_test, methods)
- if not test_paths and rel_config:
- logging.info('Did not find class (%s) under module path (%s), '
- 'researching from repo root.', class_name, rel_config)
- test_paths = test_finder_utils.find_class_file(self.root_dir,
- class_name,
- is_native_test,
- methods)
- if not test_paths:
- return None
- tinfos = []
- for test_path in test_paths:
- test_filter = self._get_test_info_filter(
- test_path, methods, class_name=class_name,
- is_native_test=is_native_test)
- tinfo = self._get_test_infos(test_path, rel_config,
- module_name, test_filter)
- if tinfo:
- tinfos.extend(tinfo)
- return tinfos
-
- def find_test_by_module_and_class(self, module_class):
- """Find the test info given a MODULE:CLASS string.
-
- Args:
- module_class: A string of form MODULE:CLASS or MODULE:CLASS#METHOD.
-
- Returns:
- A list of populated TestInfo namedtuple if found, else None.
- """
- if ':' not in module_class:
- return None
- module_name, class_name = module_class.split(':')
- # module_infos is a list with at most 1 element.
- module_infos = self.find_test_by_module_name(module_name)
- module_info = module_infos[0] if module_infos else None
- if not module_info:
- return None
- find_result = None
- # If the target module is NATIVE_TEST, search CC classes only.
- if not self.module_info.is_native_test(module_name):
- # Find by java class.
- find_result = self.find_test_by_class_name(
- class_name, module_info.test_name,
- module_info.data.get(constants.TI_REL_CONFIG))
- # kernel target test is also define as NATIVE_TEST in build system.
- # TODO (b/157210083) Update find_test_by_kernel_class_name method to
- # support gen_rule use case.
- if not find_result:
- find_result = self.find_test_by_kernel_class_name(
- module_name, class_name)
- # Find by cc class.
- if not find_result:
- find_result = self.find_test_by_cc_class_name(
- class_name, module_info.test_name,
- module_info.data.get(constants.TI_REL_CONFIG))
- return find_result
-
- def find_test_by_package_name(self, package, module_name=None,
- rel_config=None):
- """Find the test info given a PACKAGE string.
-
- Args:
- package: A string of the package name.
- module_name: Optional. A string of the module name.
- ref_config: Optional. A string of rel path of config.
-
- Returns:
- A list of populated TestInfo namedtuple if found, else None.
- """
- _, methods = test_finder_utils.split_methods(package)
- if methods:
- raise atest_error.MethodWithoutClassError('%s: Method filtering '
- 'requires class' % (
- methods))
- # Confirm that packages exists and get user input for multiples.
- if rel_config:
- search_dir = os.path.join(self.root_dir,
- os.path.dirname(rel_config))
- else:
- search_dir = self.root_dir
- package_paths = test_finder_utils.run_find_cmd(
- test_finder_utils.FIND_REFERENCE_TYPE.PACKAGE, search_dir, package)
- # Package path will be the full path to the dir represented by package.
- if not package_paths:
- return None
- test_filter = frozenset([test_info.TestFilter(package, frozenset())])
- test_infos = []
- for package_path in package_paths:
- tinfo = self._get_test_infos(package_path, rel_config,
- module_name, test_filter)
- if tinfo:
- test_infos.extend(tinfo)
- return test_infos
-
- def find_test_by_module_and_package(self, module_package):
- """Find the test info given a MODULE:PACKAGE string.
-
- Args:
- module_package: A string of form MODULE:PACKAGE
-
- Returns:
- A list of populated TestInfo namedtuple if found, else None.
- """
- module_name, package = module_package.split(':')
- # module_infos is a list with at most 1 element.
- module_infos = self.find_test_by_module_name(module_name)
- module_info = module_infos[0] if module_infos else None
- if not module_info:
- return None
- return self.find_test_by_package_name(
- package, module_info.test_name,
- module_info.data.get(constants.TI_REL_CONFIG))
-
- def find_test_by_path(self, path):
- """Find the first test info matching the given path.
-
- Strategy:
- path_to_java_file --> Resolve to CLASS
- path_to_cc_file --> Resolve to CC CLASS
- path_to_module_file -> Resolve to MODULE
- path_to_module_dir -> Resolve to MODULE
- path_to_dir_with_class_files--> Resolve to PACKAGE
- path_to_any_other_dir --> Resolve as MODULE
-
- Args:
- path: A string of the test's path.
-
- Returns:
- A list of populated TestInfo namedtuple if test found, else None
- """
- logging.debug('Finding test by path: %s', path)
- path, methods = test_finder_utils.split_methods(path)
- # TODO: See if this can be generalized and shared with methods above
- # create absolute path from cwd and remove symbolic links
- path = os.path.realpath(path)
- if not os.path.exists(path):
- return None
- if (methods and
- not test_finder_utils.has_method_in_file(path, methods)):
- return None
- dir_path, _ = test_finder_utils.get_dir_path_and_filename(path)
- # Module/Class
- rel_module_dir = test_finder_utils.find_parent_module_dir(
- self.root_dir, dir_path, self.module_info)
- if not rel_module_dir:
- return None
- rel_config = os.path.join(rel_module_dir, constants.MODULE_CONFIG)
- test_filter = self._get_test_info_filter(path, methods,
- rel_module_dir=rel_module_dir)
- return self._get_test_infos(path, rel_config, None, test_filter)
-
- def find_test_by_cc_class_name(self, class_name, module_name=None,
- rel_config=None):
- """Find test files given a cc class name.
-
- If module_name and rel_config not given, test will be determined
- by looking up the tree for files which has input class.
-
- Args:
- class_name: A string of the test's class name.
- module_name: Optional. A string of the module name to use.
- rel_config: Optional. A string of module dir relative to repo root.
-
- Returns:
- A list of populated TestInfo namedtuple if test found, else None.
- """
- # Check if class_name is prepended with file name. If so, trim the
- # prefix and keep only the class_name.
- if '.' in class_name:
- # Assume the class name has a format of file_name.class_name
- class_name = class_name[class_name.rindex('.')+1:]
- logging.info('Search with updated class name: %s', class_name)
- return self.find_test_by_class_name(
- class_name, module_name, rel_config, is_native_test=True)
-
- def get_testable_modules_with_ld(self, user_input, ld_range=0):
- """Calculate the edit distances of the input and testable modules.
-
- The user input will be calculated across all testable modules and
- results in integers generated by Levenshtein Distance algorithm.
- To increase the speed of the calculation, a bound can be applied to
- this method to prevent from calculating every testable modules.
-
- Guessing from typos, e.g. atest atest_unitests, implies a tangible range
- of length that Atest only needs to search within it, and the default of
- the bound is 2.
-
- Guessing from keywords however, e.g. atest --search Camera, means that
- the uncertainty of the module name is way higher, and Atest should walk
- through all testable modules and return the highest possibilities.
-
- Args:
- user_input: A string of the user input.
- ld_range: An integer that range the searching scope. If the length of
- user_input is 10, then Atest will calculate modules of which
- length is between 8 and 12. 0 is equivalent to unlimited.
-
- Returns:
- A List of LDs and possible module names. If the user_input is "fax",
- the output will be like:
- [[2, "fog"], [2, "Fix"], [4, "duck"], [7, "Duckies"]]
-
- Which means the most lilely names of "fax" are fog and Fix(LD=2),
- while Dickies is the most unlikely one(LD=7).
- """
- atest_utils.colorful_print('\nSearching for similar module names using '
- 'fuzzy search...', constants.CYAN)
- testable_modules = sorted(self.module_info.get_testable_modules(), key=len)
- lower_bound = len(user_input) - ld_range
- upper_bound = len(user_input) + ld_range
- testable_modules_with_ld = []
- for module_name in testable_modules:
- # Dispose those too short or too lengthy.
- if ld_range != 0:
- if len(module_name) < lower_bound:
- continue
- elif len(module_name) > upper_bound:
- break
- testable_modules_with_ld.append(
- [test_finder_utils.get_levenshtein_distance(
- user_input, module_name), module_name])
- return testable_modules_with_ld
-
- def get_fuzzy_searching_results(self, user_input):
- """Give results which have no more than allowance of edit distances.
-
- Args:
- user_input: the target module name for fuzzy searching.
-
- Return:
- A list of guessed modules.
- """
- modules_with_ld = self.get_testable_modules_with_ld(user_input,
- ld_range=constants.LD_RANGE)
- guessed_modules = []
- for _distance, _module in modules_with_ld:
- if _distance <= abs(constants.LD_RANGE):
- guessed_modules.append(_module)
- return guessed_modules
diff --git a/atest-py2/test_finders/module_finder_unittest.py b/atest-py2/test_finders/module_finder_unittest.py
deleted file mode 100755
index 20d99e4..0000000
--- a/atest-py2/test_finders/module_finder_unittest.py
+++ /dev/null
@@ -1,594 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittests for module_finder."""
-
-import re
-import unittest
-import os
-import mock
-
-# pylint: disable=import-error
-import atest_error
-import constants
-import module_info
-import unittest_constants as uc
-import unittest_utils
-from test_finders import module_finder
-from test_finders import test_finder_utils
-from test_finders import test_info
-from test_runners import atest_tf_test_runner as atf_tr
-
-MODULE_CLASS = '%s:%s' % (uc.MODULE_NAME, uc.CLASS_NAME)
-MODULE_PACKAGE = '%s:%s' % (uc.MODULE_NAME, uc.PACKAGE)
-CC_MODULE_CLASS = '%s:%s' % (uc.CC_MODULE_NAME, uc.CC_CLASS_NAME)
-KERNEL_TEST_CLASS = 'test_class_1'
-KERNEL_TEST_CONFIG = 'KernelTest.xml'
-KERNEL_MODULE_CLASS = '%s:%s' % (constants.REQUIRED_KERNEL_TEST_MODULES[0],
- KERNEL_TEST_CLASS)
-KERNEL_CONFIG_FILE = os.path.join(uc.TEST_DATA_DIR, KERNEL_TEST_CONFIG)
-KERNEL_CLASS_FILTER = test_info.TestFilter(KERNEL_TEST_CLASS, frozenset())
-KERNEL_MODULE_CLASS_DATA = {constants.TI_REL_CONFIG: KERNEL_CONFIG_FILE,
- constants.TI_FILTER: frozenset([KERNEL_CLASS_FILTER])}
-KERNEL_MODULE_CLASS_INFO = test_info.TestInfo(
- constants.REQUIRED_KERNEL_TEST_MODULES[0],
- atf_tr.AtestTradefedTestRunner.NAME,
- uc.CLASS_BUILD_TARGETS, KERNEL_MODULE_CLASS_DATA)
-FLAT_METHOD_INFO = test_info.TestInfo(
- uc.MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- uc.MODULE_BUILD_TARGETS,
- data={constants.TI_FILTER: frozenset([uc.FLAT_METHOD_FILTER]),
- constants.TI_REL_CONFIG: uc.CONFIG_FILE})
-MODULE_CLASS_METHOD = '%s#%s' % (MODULE_CLASS, uc.METHOD_NAME)
-CC_MODULE_CLASS_METHOD = '%s#%s' % (CC_MODULE_CLASS, uc.CC_METHOD_NAME)
-CLASS_INFO_MODULE_2 = test_info.TestInfo(
- uc.MODULE2_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- uc.CLASS_BUILD_TARGETS,
- data={constants.TI_FILTER: frozenset([uc.CLASS_FILTER]),
- constants.TI_REL_CONFIG: uc.CONFIG2_FILE})
-CC_CLASS_INFO_MODULE_2 = test_info.TestInfo(
- uc.CC_MODULE2_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- uc.CLASS_BUILD_TARGETS,
- data={constants.TI_FILTER: frozenset([uc.CC_CLASS_FILTER]),
- constants.TI_REL_CONFIG: uc.CC_CONFIG2_FILE})
-DEFAULT_INSTALL_PATH = ['/path/to/install']
-ROBO_MOD_PATH = ['/shared/robo/path']
-NON_RUN_ROBO_MOD_NAME = 'robo_mod'
-RUN_ROBO_MOD_NAME = 'run_robo_mod'
-NON_RUN_ROBO_MOD = {constants.MODULE_NAME: NON_RUN_ROBO_MOD_NAME,
- constants.MODULE_PATH: ROBO_MOD_PATH,
- constants.MODULE_CLASS: ['random_class']}
-RUN_ROBO_MOD = {constants.MODULE_NAME: RUN_ROBO_MOD_NAME,
- constants.MODULE_PATH: ROBO_MOD_PATH,
- constants.MODULE_CLASS: [constants.MODULE_CLASS_ROBOLECTRIC]}
-
-SEARCH_DIR_RE = re.compile(r'^find ([^ ]*).*$')
-
-#pylint: disable=unused-argument
-def classoutside_side_effect(find_cmd, shell=False):
- """Mock the check output of a find cmd where class outside module path."""
- search_dir = SEARCH_DIR_RE.match(find_cmd).group(1).strip()
- if search_dir == uc.ROOT:
- return uc.FIND_ONE
- return None
-
-
-#pylint: disable=protected-access
-class ModuleFinderUnittests(unittest.TestCase):
- """Unit tests for module_finder.py"""
-
- def setUp(self):
- """Set up stuff for testing."""
- self.mod_finder = module_finder.ModuleFinder()
- self.mod_finder.module_info = mock.Mock(spec=module_info.ModuleInfo)
- self.mod_finder.module_info.path_to_module_info = {}
- self.mod_finder.root_dir = uc.ROOT
-
- def test_is_vts_module(self):
- """Test _load_module_info_file regular operation."""
- mod_name = 'mod'
- is_vts_module_info = {'compatibility_suites': ['vts10', 'tests']}
- self.mod_finder.module_info.get_module_info.return_value = is_vts_module_info
- self.assertTrue(self.mod_finder._is_vts_module(mod_name))
-
- is_not_vts_module = {'compatibility_suites': ['vts10', 'cts']}
- self.mod_finder.module_info.get_module_info.return_value = is_not_vts_module
- self.assertFalse(self.mod_finder._is_vts_module(mod_name))
-
- # pylint: disable=unused-argument
- @mock.patch.object(module_finder.ModuleFinder, '_get_build_targets',
- return_value=uc.MODULE_BUILD_TARGETS)
- def test_find_test_by_module_name(self, _get_targ):
- """Test find_test_by_module_name."""
- self.mod_finder.module_info.is_robolectric_test.return_value = False
- self.mod_finder.module_info.has_test_config.return_value = True
- mod_info = {'installed': ['/path/to/install'],
- 'path': [uc.MODULE_DIR],
- constants.MODULE_CLASS: [],
- constants.MODULE_COMPATIBILITY_SUITES: []}
- self.mod_finder.module_info.get_module_info.return_value = mod_info
- t_infos = self.mod_finder.find_test_by_module_name(uc.MODULE_NAME)
- unittest_utils.assert_equal_testinfos(
- self,
- t_infos[0],
- uc.MODULE_INFO)
- self.mod_finder.module_info.get_module_info.return_value = None
- self.mod_finder.module_info.is_testable_module.return_value = False
- self.assertIsNone(self.mod_finder.find_test_by_module_name('Not_Module'))
-
- @mock.patch.object(test_finder_utils, 'has_method_in_file',
- return_value=True)
- @mock.patch.object(module_finder.ModuleFinder, '_is_vts_module',
- return_value=False)
- @mock.patch.object(module_finder.ModuleFinder, '_get_build_targets')
- @mock.patch('subprocess.check_output', return_value=uc.FIND_ONE)
- @mock.patch.object(test_finder_utils, 'get_fully_qualified_class_name',
- return_value=uc.FULL_CLASS_NAME)
- @mock.patch('os.path.isfile', side_effect=unittest_utils.isfile_side_effect)
- @mock.patch('os.path.isdir', return_value=True)
- #pylint: disable=unused-argument
- def test_find_test_by_class_name(self, _isdir, _isfile, _fqcn,
- mock_checkoutput, mock_build,
- _vts, _has_method_in_file):
- """Test find_test_by_class_name."""
- mock_build.return_value = uc.CLASS_BUILD_TARGETS
- self.mod_finder.module_info.is_auto_gen_test_config.return_value = False
- self.mod_finder.module_info.is_robolectric_test.return_value = False
- self.mod_finder.module_info.has_test_config.return_value = True
- self.mod_finder.module_info.get_module_names.return_value = [uc.MODULE_NAME]
- self.mod_finder.module_info.get_module_info.return_value = {
- constants.MODULE_INSTALLED: DEFAULT_INSTALL_PATH,
- constants.MODULE_NAME: uc.MODULE_NAME,
- constants.MODULE_CLASS: [],
- constants.MODULE_COMPATIBILITY_SUITES: []}
- t_infos = self.mod_finder.find_test_by_class_name(uc.CLASS_NAME)
- unittest_utils.assert_equal_testinfos(
- self, t_infos[0], uc.CLASS_INFO)
-
- # with method
- mock_build.return_value = uc.MODULE_BUILD_TARGETS
- class_with_method = '%s#%s' % (uc.CLASS_NAME, uc.METHOD_NAME)
- t_infos = self.mod_finder.find_test_by_class_name(class_with_method)
- unittest_utils.assert_equal_testinfos(
- self, t_infos[0], uc.METHOD_INFO)
- mock_build.return_value = uc.MODULE_BUILD_TARGETS
- class_methods = '%s,%s' % (class_with_method, uc.METHOD2_NAME)
- t_infos = self.mod_finder.find_test_by_class_name(class_methods)
- unittest_utils.assert_equal_testinfos(
- self, t_infos[0],
- FLAT_METHOD_INFO)
- # module and rel_config passed in
- mock_build.return_value = uc.CLASS_BUILD_TARGETS
- t_infos = self.mod_finder.find_test_by_class_name(
- uc.CLASS_NAME, uc.MODULE_NAME, uc.CONFIG_FILE)
- unittest_utils.assert_equal_testinfos(
- self, t_infos[0], uc.CLASS_INFO)
- # find output fails to find class file
- mock_checkoutput.return_value = ''
- self.assertIsNone(self.mod_finder.find_test_by_class_name('Not class'))
- # class is outside given module path
- mock_checkoutput.side_effect = classoutside_side_effect
- t_infos = self.mod_finder.find_test_by_class_name(uc.CLASS_NAME,
- uc.MODULE2_NAME,
- uc.CONFIG2_FILE)
- unittest_utils.assert_equal_testinfos(
- self, t_infos[0],
- CLASS_INFO_MODULE_2)
-
- @mock.patch.object(test_finder_utils, 'has_method_in_file',
- return_value=True)
- @mock.patch.object(module_finder.ModuleFinder, '_is_vts_module',
- return_value=False)
- @mock.patch.object(module_finder.ModuleFinder, '_get_build_targets')
- @mock.patch('subprocess.check_output', return_value=uc.FIND_ONE)
- @mock.patch.object(test_finder_utils, 'get_fully_qualified_class_name',
- return_value=uc.FULL_CLASS_NAME)
- @mock.patch('os.path.isfile', side_effect=unittest_utils.isfile_side_effect)
- #pylint: disable=unused-argument
- def test_find_test_by_module_and_class(self, _isfile, _fqcn,
- mock_checkoutput, mock_build,
- _vts, _has_method_in_file):
- """Test find_test_by_module_and_class."""
- # Native test was tested in test_find_test_by_cc_class_name().
- self.mod_finder.module_info.is_native_test.return_value = False
- self.mod_finder.module_info.is_auto_gen_test_config.return_value = False
- self.mod_finder.module_info.is_robolectric_test.return_value = False
- self.mod_finder.module_info.has_test_config.return_value = True
- mock_build.return_value = uc.CLASS_BUILD_TARGETS
- mod_info = {constants.MODULE_INSTALLED: DEFAULT_INSTALL_PATH,
- constants.MODULE_PATH: [uc.MODULE_DIR],
- constants.MODULE_CLASS: [],
- constants.MODULE_COMPATIBILITY_SUITES: []}
- self.mod_finder.module_info.get_module_info.return_value = mod_info
- t_infos = self.mod_finder.find_test_by_module_and_class(MODULE_CLASS)
- unittest_utils.assert_equal_testinfos(self, t_infos[0], uc.CLASS_INFO)
- # with method
- mock_build.return_value = uc.MODULE_BUILD_TARGETS
- t_infos = self.mod_finder.find_test_by_module_and_class(MODULE_CLASS_METHOD)
- unittest_utils.assert_equal_testinfos(self, t_infos[0], uc.METHOD_INFO)
- self.mod_finder.module_info.is_testable_module.return_value = False
- # bad module, good class, returns None
- bad_module = '%s:%s' % ('BadMod', uc.CLASS_NAME)
- self.mod_finder.module_info.get_module_info.return_value = None
- self.assertIsNone(self.mod_finder.find_test_by_module_and_class(bad_module))
- # find output fails to find class file
- mock_checkoutput.return_value = ''
- bad_class = '%s:%s' % (uc.MODULE_NAME, 'Anything')
- self.mod_finder.module_info.get_module_info.return_value = mod_info
- self.assertIsNone(self.mod_finder.find_test_by_module_and_class(bad_class))
-
- @mock.patch.object(module_finder.ModuleFinder, 'find_test_by_kernel_class_name',
- return_value=None)
- @mock.patch.object(module_finder.ModuleFinder, '_is_vts_module',
- return_value=False)
- @mock.patch.object(module_finder.ModuleFinder, '_get_build_targets')
- @mock.patch('subprocess.check_output', return_value=uc.FIND_CC_ONE)
- @mock.patch.object(test_finder_utils, 'find_class_file',
- side_effect=[None, None, '/'])
- @mock.patch('os.path.isfile', side_effect=unittest_utils.isfile_side_effect)
- #pylint: disable=unused-argument
- def test_find_test_by_module_and_class_part_2(self, _isfile, mock_fcf,
- mock_checkoutput, mock_build,
- _vts, _find_kernel):
- """Test find_test_by_module_and_class for MODULE:CC_CLASS."""
- # Native test was tested in test_find_test_by_cc_class_name()
- self.mod_finder.module_info.is_native_test.return_value = False
- self.mod_finder.module_info.is_auto_gen_test_config.return_value = False
- self.mod_finder.module_info.is_robolectric_test.return_value = False
- self.mod_finder.module_info.has_test_config.return_value = True
- mock_build.return_value = uc.CLASS_BUILD_TARGETS
- mod_info = {constants.MODULE_INSTALLED: DEFAULT_INSTALL_PATH,
- constants.MODULE_PATH: [uc.CC_MODULE_DIR],
- constants.MODULE_CLASS: [],
- constants.MODULE_COMPATIBILITY_SUITES: []}
- self.mod_finder.module_info.get_module_info.return_value = mod_info
- t_infos = self.mod_finder.find_test_by_module_and_class(CC_MODULE_CLASS)
- unittest_utils.assert_equal_testinfos(self, t_infos[0], uc.CC_MODULE_CLASS_INFO)
- # with method
- mock_build.return_value = uc.MODULE_BUILD_TARGETS
- mock_fcf.side_effect = [None, None, '/']
- t_infos = self.mod_finder.find_test_by_module_and_class(CC_MODULE_CLASS_METHOD)
- unittest_utils.assert_equal_testinfos(self, t_infos[0], uc.CC_METHOD_INFO)
- # bad module, good class, returns None
- bad_module = '%s:%s' % ('BadMod', uc.CC_CLASS_NAME)
- self.mod_finder.module_info.get_module_info.return_value = None
- self.mod_finder.module_info.is_testable_module.return_value = False
- self.assertIsNone(self.mod_finder.find_test_by_module_and_class(bad_module))
-
- @mock.patch.object(module_finder.ModuleFinder, '_get_module_test_config',
- return_value=KERNEL_CONFIG_FILE)
- @mock.patch.object(module_finder.ModuleFinder, '_is_vts_module',
- return_value=False)
- @mock.patch.object(module_finder.ModuleFinder, '_get_build_targets')
- @mock.patch('subprocess.check_output', return_value=uc.FIND_CC_ONE)
- @mock.patch.object(test_finder_utils, 'find_class_file',
- side_effect=[None, None, '/'])
- @mock.patch('os.path.isfile', side_effect=unittest_utils.isfile_side_effect)
- #pylint: disable=unused-argument
- def test_find_test_by_module_and_class_for_kernel_test(
- self, _isfile, mock_fcf, mock_checkoutput, mock_build, _vts,
- _test_config):
- """Test find_test_by_module_and_class for MODULE:CC_CLASS."""
- # Kernel test was tested in find_test_by_kernel_class_name()
- self.mod_finder.module_info.is_native_test.return_value = False
- self.mod_finder.module_info.is_auto_gen_test_config.return_value = False
- self.mod_finder.module_info.is_robolectric_test.return_value = False
- self.mod_finder.module_info.has_test_config.return_value = True
- mock_build.return_value = uc.CLASS_BUILD_TARGETS
- mod_info = {constants.MODULE_INSTALLED: DEFAULT_INSTALL_PATH,
- constants.MODULE_PATH: [uc.CC_MODULE_DIR],
- constants.MODULE_CLASS: [],
- constants.MODULE_COMPATIBILITY_SUITES: []}
- self.mod_finder.module_info.get_module_info.return_value = mod_info
- t_infos = self.mod_finder.find_test_by_module_and_class(KERNEL_MODULE_CLASS)
- unittest_utils.assert_equal_testinfos(self, t_infos[0], KERNEL_MODULE_CLASS_INFO)
-
- @mock.patch.object(module_finder.ModuleFinder, '_is_vts_module',
- return_value=False)
- @mock.patch.object(module_finder.ModuleFinder, '_get_build_targets')
- @mock.patch('subprocess.check_output', return_value=uc.FIND_PKG)
- @mock.patch('os.path.isfile', side_effect=unittest_utils.isfile_side_effect)
- @mock.patch('os.path.isdir', return_value=True)
- #pylint: disable=unused-argument
- def test_find_test_by_package_name(self, _isdir, _isfile, mock_checkoutput,
- mock_build, _vts):
- """Test find_test_by_package_name."""
- self.mod_finder.module_info.is_auto_gen_test_config.return_value = False
- self.mod_finder.module_info.is_robolectric_test.return_value = False
- self.mod_finder.module_info.has_test_config.return_value = True
- mock_build.return_value = uc.CLASS_BUILD_TARGETS
- self.mod_finder.module_info.get_module_names.return_value = [uc.MODULE_NAME]
- self.mod_finder.module_info.get_module_info.return_value = {
- constants.MODULE_INSTALLED: DEFAULT_INSTALL_PATH,
- constants.MODULE_NAME: uc.MODULE_NAME,
- constants.MODULE_CLASS: [],
- constants.MODULE_COMPATIBILITY_SUITES: []
- }
- t_infos = self.mod_finder.find_test_by_package_name(uc.PACKAGE)
- unittest_utils.assert_equal_testinfos(
- self, t_infos[0],
- uc.PACKAGE_INFO)
- # with method, should raise
- pkg_with_method = '%s#%s' % (uc.PACKAGE, uc.METHOD_NAME)
- self.assertRaises(atest_error.MethodWithoutClassError,
- self.mod_finder.find_test_by_package_name,
- pkg_with_method)
- # module and rel_config passed in
- t_infos = self.mod_finder.find_test_by_package_name(
- uc.PACKAGE, uc.MODULE_NAME, uc.CONFIG_FILE)
- unittest_utils.assert_equal_testinfos(
- self, t_infos[0], uc.PACKAGE_INFO)
- # find output fails to find class file
- mock_checkoutput.return_value = ''
- self.assertIsNone(self.mod_finder.find_test_by_package_name('Not pkg'))
-
- @mock.patch('os.path.isdir', return_value=False)
- @mock.patch.object(module_finder.ModuleFinder, '_is_vts_module',
- return_value=False)
- @mock.patch.object(module_finder.ModuleFinder, '_get_build_targets')
- @mock.patch('subprocess.check_output', return_value=uc.FIND_PKG)
- @mock.patch('os.path.isfile', side_effect=unittest_utils.isfile_side_effect)
- #pylint: disable=unused-argument
- def test_find_test_by_module_and_package(self, _isfile, mock_checkoutput,
- mock_build, _vts, _isdir):
- """Test find_test_by_module_and_package."""
- self.mod_finder.module_info.is_auto_gen_test_config.return_value = False
- self.mod_finder.module_info.is_robolectric_test.return_value = False
- self.mod_finder.module_info.has_test_config.return_value = True
- mock_build.return_value = uc.CLASS_BUILD_TARGETS
- mod_info = {constants.MODULE_INSTALLED: DEFAULT_INSTALL_PATH,
- constants.MODULE_PATH: [uc.MODULE_DIR],
- constants.MODULE_CLASS: [],
- constants.MODULE_COMPATIBILITY_SUITES: []}
- self.mod_finder.module_info.get_module_info.return_value = mod_info
- t_infos = self.mod_finder.find_test_by_module_and_package(MODULE_PACKAGE)
- self.assertEqual(t_infos, None)
- _isdir.return_value = True
- t_infos = self.mod_finder.find_test_by_module_and_package(MODULE_PACKAGE)
- unittest_utils.assert_equal_testinfos(self, t_infos[0], uc.PACKAGE_INFO)
-
- # with method, raises
- module_pkg_with_method = '%s:%s#%s' % (uc.MODULE2_NAME, uc.PACKAGE,
- uc.METHOD_NAME)
- self.assertRaises(atest_error.MethodWithoutClassError,
- self.mod_finder.find_test_by_module_and_package,
- module_pkg_with_method)
- # bad module, good pkg, returns None
- self.mod_finder.module_info.is_testable_module.return_value = False
- bad_module = '%s:%s' % ('BadMod', uc.PACKAGE)
- self.mod_finder.module_info.get_module_info.return_value = None
- self.assertIsNone(self.mod_finder.find_test_by_module_and_package(bad_module))
- # find output fails to find package path
- mock_checkoutput.return_value = ''
- bad_pkg = '%s:%s' % (uc.MODULE_NAME, 'Anything')
- self.mod_finder.module_info.get_module_info.return_value = mod_info
- self.assertIsNone(self.mod_finder.find_test_by_module_and_package(bad_pkg))
-
- @mock.patch.object(test_finder_utils, 'has_method_in_file',
- return_value=True)
- @mock.patch.object(test_finder_utils, 'has_cc_class',
- return_value=True)
- @mock.patch.object(module_finder.ModuleFinder, '_get_build_targets')
- @mock.patch.object(module_finder.ModuleFinder, '_is_vts_module',
- return_value=False)
- @mock.patch.object(test_finder_utils, 'get_fully_qualified_class_name',
- return_value=uc.FULL_CLASS_NAME)
- @mock.patch('os.path.realpath',
- side_effect=unittest_utils.realpath_side_effect)
- @mock.patch('os.path.isfile', side_effect=unittest_utils.isfile_side_effect)
- @mock.patch.object(test_finder_utils, 'find_parent_module_dir')
- @mock.patch('os.path.exists')
- #pylint: disable=unused-argument
- def test_find_test_by_path(self, mock_pathexists, mock_dir, _isfile, _real,
- _fqcn, _vts, mock_build, _has_cc_class,
- _has_method_in_file):
- """Test find_test_by_path."""
- self.mod_finder.module_info.is_robolectric_test.return_value = False
- self.mod_finder.module_info.has_test_config.return_value = True
- mock_build.return_value = set()
- # Check that we don't return anything with invalid test references.
- mock_pathexists.return_value = False
- unittest_utils.assert_equal_testinfos(
- self, None, self.mod_finder.find_test_by_path('bad/path'))
- mock_pathexists.return_value = True
- mock_dir.return_value = None
- unittest_utils.assert_equal_testinfos(
- self, None, self.mod_finder.find_test_by_path('no/module'))
- self.mod_finder.module_info.get_module_names.return_value = [uc.MODULE_NAME]
- self.mod_finder.module_info.get_module_info.return_value = {
- constants.MODULE_INSTALLED: DEFAULT_INSTALL_PATH,
- constants.MODULE_NAME: uc.MODULE_NAME,
- constants.MODULE_CLASS: [],
- constants.MODULE_COMPATIBILITY_SUITES: []}
-
- # Happy path testing.
- mock_dir.return_value = uc.MODULE_DIR
-
- class_path = '%s.kt' % uc.CLASS_NAME
- mock_build.return_value = uc.CLASS_BUILD_TARGETS
- t_infos = self.mod_finder.find_test_by_path(class_path)
- unittest_utils.assert_equal_testinfos(
- self, uc.CLASS_INFO, t_infos[0])
-
- class_path = '%s.java' % uc.CLASS_NAME
- mock_build.return_value = uc.CLASS_BUILD_TARGETS
- t_infos = self.mod_finder.find_test_by_path(class_path)
- unittest_utils.assert_equal_testinfos(
- self, uc.CLASS_INFO, t_infos[0])
-
- class_with_method = '%s#%s' % (class_path, uc.METHOD_NAME)
- mock_build.return_value = uc.MODULE_BUILD_TARGETS
- t_infos = self.mod_finder.find_test_by_path(class_with_method)
- unittest_utils.assert_equal_testinfos(
- self, t_infos[0], uc.METHOD_INFO)
-
- class_with_methods = '%s,%s' % (class_with_method, uc.METHOD2_NAME)
- mock_build.return_value = uc.MODULE_BUILD_TARGETS
- t_infos = self.mod_finder.find_test_by_path(class_with_methods)
- unittest_utils.assert_equal_testinfos(
- self, t_infos[0],
- FLAT_METHOD_INFO)
-
- # Cc path testing.
- self.mod_finder.module_info.get_module_names.return_value = [uc.CC_MODULE_NAME]
- self.mod_finder.module_info.get_module_info.return_value = {
- constants.MODULE_INSTALLED: DEFAULT_INSTALL_PATH,
- constants.MODULE_NAME: uc.CC_MODULE_NAME,
- constants.MODULE_CLASS: [],
- constants.MODULE_COMPATIBILITY_SUITES: []}
- mock_dir.return_value = uc.CC_MODULE_DIR
- class_path = '%s' % uc.CC_PATH
- mock_build.return_value = uc.CLASS_BUILD_TARGETS
- t_infos = self.mod_finder.find_test_by_path(class_path)
- unittest_utils.assert_equal_testinfos(
- self, uc.CC_PATH_INFO2, t_infos[0])
-
- @mock.patch.object(module_finder.ModuleFinder, '_get_build_targets',
- return_value=uc.MODULE_BUILD_TARGETS)
- @mock.patch.object(module_finder.ModuleFinder, '_is_vts_module',
- return_value=False)
- @mock.patch.object(test_finder_utils, 'find_parent_module_dir',
- return_value=os.path.relpath(uc.TEST_DATA_DIR, uc.ROOT))
- #pylint: disable=unused-argument
- def test_find_test_by_path_part_2(self, _find_parent, _is_vts, _get_build):
- """Test find_test_by_path for directories."""
- self.mod_finder.module_info.is_auto_gen_test_config.return_value = False
- self.mod_finder.module_info.is_robolectric_test.return_value = False
- self.mod_finder.module_info.has_test_config.return_value = True
- # Dir with java files in it, should run as package
- class_dir = os.path.join(uc.TEST_DATA_DIR, 'path_testing')
- self.mod_finder.module_info.get_module_names.return_value = [uc.MODULE_NAME]
- self.mod_finder.module_info.get_module_info.return_value = {
- constants.MODULE_INSTALLED: DEFAULT_INSTALL_PATH,
- constants.MODULE_NAME: uc.MODULE_NAME,
- constants.MODULE_CLASS: [],
- constants.MODULE_COMPATIBILITY_SUITES: []}
- t_infos = self.mod_finder.find_test_by_path(class_dir)
- unittest_utils.assert_equal_testinfos(
- self, uc.PATH_INFO, t_infos[0])
- # Dir with no java files in it, should run whole module
- empty_dir = os.path.join(uc.TEST_DATA_DIR, 'path_testing_empty')
- t_infos = self.mod_finder.find_test_by_path(empty_dir)
- unittest_utils.assert_equal_testinfos(
- self, uc.EMPTY_PATH_INFO,
- t_infos[0])
- # Dir with cc files in it, should run as cc class
- class_dir = os.path.join(uc.TEST_DATA_DIR, 'cc_path_testing')
- self.mod_finder.module_info.get_module_names.return_value = [uc.CC_MODULE_NAME]
- self.mod_finder.module_info.get_module_info.return_value = {
- constants.MODULE_INSTALLED: DEFAULT_INSTALL_PATH,
- constants.MODULE_NAME: uc.CC_MODULE_NAME,
- constants.MODULE_CLASS: [],
- constants.MODULE_COMPATIBILITY_SUITES: []}
- t_infos = self.mod_finder.find_test_by_path(class_dir)
- unittest_utils.assert_equal_testinfos(
- self, uc.CC_PATH_INFO, t_infos[0])
-
- @mock.patch.object(test_finder_utils, 'has_method_in_file',
- return_value=True)
- @mock.patch.object(module_finder.ModuleFinder, '_is_vts_module',
- return_value=False)
- @mock.patch.object(module_finder.ModuleFinder, '_get_build_targets')
- @mock.patch('subprocess.check_output', return_value=uc.CC_FIND_ONE)
- @mock.patch('os.path.isfile', side_effect=unittest_utils.isfile_side_effect)
- @mock.patch('os.path.isdir', return_value=True)
- #pylint: disable=unused-argument
- def test_find_test_by_cc_class_name(self, _isdir, _isfile,
- mock_checkoutput, mock_build,
- _vts, _has_method):
- """Test find_test_by_cc_class_name."""
- mock_build.return_value = uc.CLASS_BUILD_TARGETS
- self.mod_finder.module_info.is_auto_gen_test_config.return_value = False
- self.mod_finder.module_info.is_robolectric_test.return_value = False
- self.mod_finder.module_info.has_test_config.return_value = True
- self.mod_finder.module_info.get_module_names.return_value = [uc.CC_MODULE_NAME]
- self.mod_finder.module_info.get_module_info.return_value = {
- constants.MODULE_INSTALLED: DEFAULT_INSTALL_PATH,
- constants.MODULE_NAME: uc.CC_MODULE_NAME,
- constants.MODULE_CLASS: [],
- constants.MODULE_COMPATIBILITY_SUITES: []}
- t_infos = self.mod_finder.find_test_by_cc_class_name(uc.CC_CLASS_NAME)
- unittest_utils.assert_equal_testinfos(
- self, t_infos[0], uc.CC_CLASS_INFO)
-
- # with method
- mock_build.return_value = uc.MODULE_BUILD_TARGETS
- class_with_method = '%s#%s' % (uc.CC_CLASS_NAME, uc.CC_METHOD_NAME)
- t_infos = self.mod_finder.find_test_by_cc_class_name(class_with_method)
- unittest_utils.assert_equal_testinfos(
- self,
- t_infos[0],
- uc.CC_METHOD_INFO)
- mock_build.return_value = uc.MODULE_BUILD_TARGETS
- class_methods = '%s,%s' % (class_with_method, uc.CC_METHOD2_NAME)
- t_infos = self.mod_finder.find_test_by_cc_class_name(class_methods)
- unittest_utils.assert_equal_testinfos(
- self, t_infos[0],
- uc.CC_METHOD2_INFO)
- # module and rel_config passed in
- mock_build.return_value = uc.CLASS_BUILD_TARGETS
- t_infos = self.mod_finder.find_test_by_cc_class_name(
- uc.CC_CLASS_NAME, uc.CC_MODULE_NAME, uc.CC_CONFIG_FILE)
- unittest_utils.assert_equal_testinfos(
- self, t_infos[0], uc.CC_CLASS_INFO)
- # find output fails to find class file
- mock_checkoutput.return_value = ''
- self.assertIsNone(self.mod_finder.find_test_by_cc_class_name(
- 'Not class'))
- # class is outside given module path
- mock_checkoutput.return_value = uc.CC_FIND_ONE
- t_infos = self.mod_finder.find_test_by_cc_class_name(
- uc.CC_CLASS_NAME,
- uc.CC_MODULE2_NAME,
- uc.CC_CONFIG2_FILE)
- unittest_utils.assert_equal_testinfos(
- self, t_infos[0],
- CC_CLASS_INFO_MODULE_2)
-
- def test_get_testable_modules_with_ld(self):
- """Test get_testable_modules_with_ld"""
- self.mod_finder.module_info.get_testable_modules.return_value = [
- uc.MODULE_NAME, uc.MODULE2_NAME]
- # Without a misfit constraint
- ld1 = self.mod_finder.get_testable_modules_with_ld(uc.TYPO_MODULE_NAME)
- self.assertEqual([[16, uc.MODULE2_NAME], [1, uc.MODULE_NAME]], ld1)
- # With a misfit constraint
- ld2 = self.mod_finder.get_testable_modules_with_ld(uc.TYPO_MODULE_NAME, 2)
- self.assertEqual([[1, uc.MODULE_NAME]], ld2)
-
- def test_get_fuzzy_searching_modules(self):
- """Test get_fuzzy_searching_modules"""
- self.mod_finder.module_info.get_testable_modules.return_value = [
- uc.MODULE_NAME, uc.MODULE2_NAME]
- result = self.mod_finder.get_fuzzy_searching_results(uc.TYPO_MODULE_NAME)
- self.assertEqual(uc.MODULE_NAME, result[0])
-
- def test_get_build_targets_w_vts_core(self):
- """Test _get_build_targets."""
- self.mod_finder.module_info.is_auto_gen_test_config.return_value = True
- self.mod_finder.module_info.get_paths.return_value = []
- mod_info = {constants.MODULE_COMPATIBILITY_SUITES:
- [constants.VTS_CORE_SUITE]}
- self.mod_finder.module_info.get_module_info.return_value = mod_info
- self.assertEqual(self.mod_finder._get_build_targets('', ''),
- {constants.VTS_CORE_TF_MODULE})
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest-py2/test_finders/suite_plan_finder.py b/atest-py2/test_finders/suite_plan_finder.py
deleted file mode 100644
index a33da2d..0000000
--- a/atest-py2/test_finders/suite_plan_finder.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Suite Plan Finder class.
-"""
-
-import logging
-import os
-import re
-
-# pylint: disable=import-error
-import constants
-from test_finders import test_finder_base
-from test_finders import test_finder_utils
-from test_finders import test_info
-from test_runners import suite_plan_test_runner
-
-_SUITE_PLAN_NAME_RE = re.compile(r'^.*\/(?P<suite>.*)-tradefed\/res\/config\/'
- r'(?P<suite_plan_name>.*).xml$')
-
-
-class SuitePlanFinder(test_finder_base.TestFinderBase):
- """Suite Plan Finder class."""
- NAME = 'SUITE_PLAN'
- _SUITE_PLAN_TEST_RUNNER = suite_plan_test_runner.SuitePlanTestRunner.NAME
-
- def __init__(self, module_info=None):
- super(SuitePlanFinder, self).__init__()
- self.root_dir = os.environ.get(constants.ANDROID_BUILD_TOP)
- self.mod_info = module_info
- self.suite_plan_dirs = self._get_suite_plan_dirs()
-
- def _get_mod_paths(self, module_name):
- """Return the paths of the given module name."""
- if self.mod_info:
- return self.mod_info.get_paths(module_name)
- return []
-
- def _get_suite_plan_dirs(self):
- """Get suite plan dirs from MODULE_INFO based on targets.
-
- Strategy:
- Search module-info.json using SUITE_PLANS to get all the suite
- plan dirs.
-
- Returns:
- A tuple of lists of strings of suite plan dir rel to repo root.
- None if the path can not be found in module-info.json.
- """
- return [d for x in constants.SUITE_PLANS for d in
- self._get_mod_paths(x+'-tradefed') if d is not None]
-
- def _get_test_info_from_path(self, path, suite_name=None):
- """Get the test info from the result of using regular expression
- matching with the give path.
-
- Args:
- path: A string of the test's absolute or relative path.
- suite_name: A string of the suite name.
-
- Returns:
- A populated TestInfo namedtuple if regular expression
- matches, else None.
- """
- # Don't use names that simply match the path,
- # must be the actual name used by *TS to run the test.
- match = _SUITE_PLAN_NAME_RE.match(path)
- if not match:
- logging.error('Suite plan test outside config dir: %s', path)
- return None
- suite = match.group('suite')
- suite_plan_name = match.group('suite_plan_name')
- if suite_name:
- if suite_plan_name != suite_name:
- logging.warn('Input (%s) not valid suite plan name, '
- 'did you mean: %s?', suite_name, suite_plan_name)
- return None
- return test_info.TestInfo(
- test_name=suite_plan_name,
- test_runner=self._SUITE_PLAN_TEST_RUNNER,
- build_targets=set([suite]),
- suite=suite)
-
- def find_test_by_suite_path(self, suite_path):
- """Find the first test info matching the given path.
-
- Strategy:
- If suite_path is to file --> Return TestInfo if the file
- exists in the suite plan dirs, else return None.
- If suite_path is to dir --> Return None
-
- Args:
- suite_path: A string of the path to the test's file or dir.
-
- Returns:
- A list of populated TestInfo namedtuple if test found, else None.
- This is a list with at most 1 element.
- """
- path, _ = test_finder_utils.split_methods(suite_path)
- # Make sure we're looking for a config.
- if not path.endswith('.xml'):
- return None
- path = os.path.realpath(path)
- suite_plan_dir = test_finder_utils.get_int_dir_from_path(
- path, self.suite_plan_dirs)
- if suite_plan_dir:
- rel_config = os.path.relpath(path, self.root_dir)
- return [self._get_test_info_from_path(rel_config)]
- return None
-
- def find_test_by_suite_name(self, suite_name):
- """Find the test for the given suite name.
-
- Strategy:
- If suite_name is cts --> Return TestInfo to indicate suite runner
- to make cts and run test using cts-tradefed.
- If suite_name is cts-common --> Return TestInfo to indicate suite
- runner to make cts and run test using cts-tradefed if file exists
- in the suite plan dirs, else return None.
-
- Args:
- suite_name: A string of suite name.
-
- Returns:
- A list of populated TestInfo namedtuple if suite_name matches
- a suite in constants.SUITE_PLAN, else check if the file
- existing in the suite plan dirs, else return None.
- """
- logging.debug('Finding test by suite: %s', suite_name)
- test_infos = []
- if suite_name in constants.SUITE_PLANS:
- test_infos.append(test_info.TestInfo(
- test_name=suite_name,
- test_runner=self._SUITE_PLAN_TEST_RUNNER,
- build_targets=set([suite_name]),
- suite=suite_name))
- else:
- test_files = test_finder_utils.search_integration_dirs(
- suite_name, self.suite_plan_dirs)
- if not test_files:
- return None
- for test_file in test_files:
- _test_info = self._get_test_info_from_path(test_file, suite_name)
- if _test_info:
- test_infos.append(_test_info)
- return test_infos
diff --git a/atest-py2/test_finders/suite_plan_finder_unittest.py b/atest-py2/test_finders/suite_plan_finder_unittest.py
deleted file mode 100755
index 0fed2d2..0000000
--- a/atest-py2/test_finders/suite_plan_finder_unittest.py
+++ /dev/null
@@ -1,184 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Unittests for suite_plan_finder."""
-
-import os
-import unittest
-import mock
-
-# pylint: disable=import-error
-import unittest_constants as uc
-import unittest_utils
-from test_finders import test_finder_utils
-from test_finders import test_info
-from test_finders import suite_plan_finder
-from test_runners import suite_plan_test_runner
-
-
-# pylint: disable=protected-access
-class SuitePlanFinderUnittests(unittest.TestCase):
- """Unit tests for suite_plan_finder.py"""
-
- def setUp(self):
- """Set up stuff for testing."""
- self.suite_plan_finder = suite_plan_finder.SuitePlanFinder()
- self.suite_plan_finder.suite_plan_dirs = [os.path.join(uc.ROOT, uc.CTS_INT_DIR)]
- self.suite_plan_finder.root_dir = uc.ROOT
-
- def test_get_test_info_from_path(self):
- """Test _get_test_info_from_path.
- Strategy:
- If suite_path is to cts file -->
- test_info: test_name=cts,
- test_runner=TestSuiteTestRunner,
- build_target=set(['cts']
- suite='cts')
- If suite_path is to cts-common file -->
- test_info: test_name=cts-common,
- test_runner=TestSuiteTestRunner,
- build_target=set(['cts']
- suite='cts')
- If suite_path is to common file --> test_info: None
- If suite_path is to non-existing file --> test_info: None
- """
- suite_plan = 'cts'
- path = os.path.join(uc.ROOT, uc.CTS_INT_DIR, suite_plan+'.xml')
- want_info = test_info.TestInfo(test_name=suite_plan,
- test_runner=suite_plan_test_runner.SuitePlanTestRunner.NAME,
- build_targets={suite_plan},
- suite=suite_plan)
- unittest_utils.assert_equal_testinfos(
- self, want_info, self.suite_plan_finder._get_test_info_from_path(path))
-
- suite_plan = 'cts-common'
- path = os.path.join(uc.ROOT, uc.CTS_INT_DIR, suite_plan+'.xml')
- want_info = test_info.TestInfo(test_name=suite_plan,
- test_runner=suite_plan_test_runner.SuitePlanTestRunner.NAME,
- build_targets={'cts'},
- suite='cts')
- unittest_utils.assert_equal_testinfos(
- self, want_info, self.suite_plan_finder._get_test_info_from_path(path))
-
- suite_plan = 'common'
- path = os.path.join(uc.ROOT, uc.CTS_INT_DIR, 'cts-common.xml')
- want_info = None
- unittest_utils.assert_equal_testinfos(
- self, want_info, self.suite_plan_finder._get_test_info_from_path(path, suite_plan))
-
- path = os.path.join(uc.ROOT, 'cts-common.xml')
- want_info = None
- unittest_utils.assert_equal_testinfos(
- self, want_info, self.suite_plan_finder._get_test_info_from_path(path))
-
- @mock.patch.object(test_finder_utils, 'search_integration_dirs')
- def test_find_test_by_suite_name(self, _search):
- """Test find_test_by_suite_name.
- Strategy:
- suite_name: cts --> test_info: test_name=cts,
- test_runner=TestSuiteTestRunner,
- build_target=set(['cts']
- suite='cts')
- suite_name: CTS --> test_info: None
- suite_name: cts-common --> test_info: test_name=cts-common,
- test_runner=TestSuiteTestRunner,
- build_target=set(['cts'],
- suite='cts')
- """
- suite_name = 'cts'
- t_info = self.suite_plan_finder.find_test_by_suite_name(suite_name)
- want_info = test_info.TestInfo(test_name=suite_name,
- test_runner=suite_plan_test_runner.SuitePlanTestRunner.NAME,
- build_targets={suite_name},
- suite=suite_name)
- unittest_utils.assert_equal_testinfos(self, t_info[0], want_info)
-
- suite_name = 'CTS'
- _search.return_value = None
- t_info = self.suite_plan_finder.find_test_by_suite_name(suite_name)
- want_info = None
- unittest_utils.assert_equal_testinfos(self, t_info, want_info)
-
- suite_name = 'cts-common'
- suite = 'cts'
- _search.return_value = [os.path.join(uc.ROOT, uc.CTS_INT_DIR, suite_name + '.xml')]
- t_info = self.suite_plan_finder.find_test_by_suite_name(suite_name)
- want_info = test_info.TestInfo(test_name=suite_name,
- test_runner=suite_plan_test_runner.SuitePlanTestRunner.NAME,
- build_targets=set([suite]),
- suite=suite)
- unittest_utils.assert_equal_testinfos(self, t_info[0], want_info)
-
- @mock.patch('os.path.realpath',
- side_effect=unittest_utils.realpath_side_effect)
- @mock.patch('os.path.isdir', return_value=True)
- @mock.patch('os.path.isfile', return_value=True)
- @mock.patch.object(test_finder_utils, 'get_int_dir_from_path')
- @mock.patch('os.path.exists', return_value=True)
- def test_find_suite_plan_test_by_suite_path(self, _exists, _find, _isfile, _isdir, _real):
- """Test find_test_by_suite_name.
- Strategy:
- suite_name: cts.xml --> test_info:
- test_name=cts,
- test_runner=TestSuiteTestRunner,
- build_target=set(['cts']
- suite='cts')
- suite_name: cts-common.xml --> test_info:
- test_name=cts-common,
- test_runner=TestSuiteTestRunner,
- build_target=set(['cts'],
- suite='cts')
- suite_name: cts-camera.xml --> test_info:
- test_name=cts-camera,
- test_runner=TestSuiteTestRunner,
- build_target=set(['cts'],
- suite='cts')
- """
- suite_int_name = 'cts'
- suite = 'cts'
- path = os.path.join(uc.CTS_INT_DIR, suite_int_name + '.xml')
- _find.return_value = uc.CTS_INT_DIR
- t_info = self.suite_plan_finder.find_test_by_suite_path(path)
- want_info = test_info.TestInfo(test_name=suite_int_name,
- test_runner=suite_plan_test_runner.SuitePlanTestRunner.NAME,
- build_targets=set([suite]),
- suite=suite)
- unittest_utils.assert_equal_testinfos(self, t_info[0], want_info)
-
- suite_int_name = 'cts-common'
- suite = 'cts'
- path = os.path.join(uc.CTS_INT_DIR, suite_int_name + '.xml')
- _find.return_value = uc.CTS_INT_DIR
- t_info = self.suite_plan_finder.find_test_by_suite_path(path)
- want_info = test_info.TestInfo(test_name=suite_int_name,
- test_runner=suite_plan_test_runner.SuitePlanTestRunner.NAME,
- build_targets=set([suite]),
- suite=suite)
- unittest_utils.assert_equal_testinfos(self, t_info[0], want_info)
-
- suite_int_name = 'cts-camera'
- suite = 'cts'
- path = os.path.join(uc.CTS_INT_DIR, suite_int_name + '.xml')
- _find.return_value = uc.CTS_INT_DIR
- t_info = self.suite_plan_finder.find_test_by_suite_path(path)
- want_info = test_info.TestInfo(test_name=suite_int_name,
- test_runner=suite_plan_test_runner.SuitePlanTestRunner.NAME,
- build_targets=set([suite]),
- suite=suite)
- unittest_utils.assert_equal_testinfos(self, t_info[0], want_info)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest-py2/test_finders/test_finder_base.py b/atest-py2/test_finders/test_finder_base.py
deleted file mode 100644
index 14fc079..0000000
--- a/atest-py2/test_finders/test_finder_base.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Test finder base class.
-"""
-from collections import namedtuple
-
-
-Finder = namedtuple('Finder', ['test_finder_instance', 'find_method',
- 'finder_info'])
-
-
-def find_method_register(cls):
- """Class decorater to find all registered find methods."""
- cls.find_methods = []
- cls.get_all_find_methods = lambda x: x.find_methods
- for methodname in dir(cls):
- method = getattr(cls, methodname)
- if hasattr(method, '_registered'):
- cls.find_methods.append(Finder(None, method, None))
- return cls
-
-
-def register():
- """Decorator to register find methods."""
-
- def wrapper(func):
- """Wrapper for the register decorator."""
- #pylint: disable=protected-access
- func._registered = True
- return func
- return wrapper
-
-
-# This doesn't really do anything since there are no find methods defined but
-# it's here anyways as an example for other test type classes.
-@find_method_register
-class TestFinderBase(object):
- """Base class for test finder class."""
-
- def __init__(self, *args, **kwargs):
- pass
diff --git a/atest-py2/test_finders/test_finder_utils.py b/atest-py2/test_finders/test_finder_utils.py
deleted file mode 100644
index 681d77a..0000000
--- a/atest-py2/test_finders/test_finder_utils.py
+++ /dev/null
@@ -1,984 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Utils for finder classes.
-"""
-
-from __future__ import print_function
-import logging
-import multiprocessing
-import os
-import pickle
-import re
-import subprocess
-import time
-import xml.etree.ElementTree as ET
-
-# pylint: disable=import-error
-import atest_decorator
-import atest_error
-import atest_enum
-import constants
-
-from metrics import metrics_utils
-
-# Helps find apk files listed in a test config (AndroidTest.xml) file.
-# Matches "filename.apk" in <option name="foo", value="filename.apk" />
-# We want to make sure we don't grab apks with paths in their name since we
-# assume the apk name is the build target.
-_APK_RE = re.compile(r'^[^/]+\.apk$', re.I)
-# RE for checking if TEST or TEST_F is in a cc file or not.
-_CC_CLASS_RE = re.compile(r'^[ ]*TEST(_F|_P)?[ ]*\(', re.I)
-# RE for checking if there exists one of the methods in java file.
-_JAVA_METHODS_PATTERN = r'.*[ ]+({0})\(.*'
-# RE for checking if there exists one of the methods in cc file.
-_CC_METHODS_PATTERN = r'^[ ]*TEST(_F|_P)?[ ]*\(.*,[ ]*({0})\).*'
-# Parse package name from the package declaration line of a java or a kotlin file.
-# Group matches "foo.bar" of line "package foo.bar;" or "package foo.bar"
-_PACKAGE_RE = re.compile(r'\s*package\s+(?P<package>[^(;|\s)]+)\s*', re.I)
-# Matches install paths in module_info to install location(host or device).
-_HOST_PATH_RE = re.compile(r'.*\/host\/.*', re.I)
-_DEVICE_PATH_RE = re.compile(r'.*\/target\/.*', re.I)
-
-# Explanation of FIND_REFERENCE_TYPEs:
-# ----------------------------------
-# 0. CLASS: Name of a java/kotlin class, usually file is named the same
-# (HostTest lives in HostTest.java or HostTest.kt)
-# 1. QUALIFIED_CLASS: Like CLASS but also contains the package in front like
-# com.android.tradefed.testtype.HostTest.
-# 2. PACKAGE: Name of a java package.
-# 3. INTEGRATION: XML file name in one of the 4 integration config directories.
-# 4. CC_CLASS: Name of a cc class.
-
-FIND_REFERENCE_TYPE = atest_enum.AtestEnum(['CLASS', 'QUALIFIED_CLASS',
- 'PACKAGE', 'INTEGRATION', 'CC_CLASS'])
-# Get cpu count.
-_CPU_COUNT = 0 if os.uname()[0] == 'Linux' else multiprocessing.cpu_count()
-
-# Unix find commands for searching for test files based on test type input.
-# Note: Find (unlike grep) exits with status 0 if nothing found.
-FIND_CMDS = {
- FIND_REFERENCE_TYPE.CLASS: r"find {0} {1} -type f"
- r"| egrep '.*/{2}\.(kt|java)$' || true",
- FIND_REFERENCE_TYPE.QUALIFIED_CLASS: r"find {0} {1} -type f"
- r"| egrep '.*{2}\.(kt|java)$' || true",
- FIND_REFERENCE_TYPE.PACKAGE: r"find {0} {1} -wholename "
- r"'*{2}' -type d -print",
- FIND_REFERENCE_TYPE.INTEGRATION: r"find {0} {1} -wholename "
- r"'*{2}.xml' -print",
- # Searching a test among files where the absolute paths contain *test*.
- # If users complain atest couldn't find a CC_CLASS, ask them to follow the
- # convention that the filename or dirname must contain *test*, where *test*
- # is case-insensitive.
- FIND_REFERENCE_TYPE.CC_CLASS: r"find {0} {1} -type f -print"
- r"| egrep -i '/*test.*\.(cc|cpp)$'"
- r"| xargs -P" + str(_CPU_COUNT) +
- r" egrep -sH '^[ ]*TEST(_F|_P)?[ ]*\({2}' || true"
-}
-
-# Map ref_type with its index file.
-FIND_INDEXES = {
- FIND_REFERENCE_TYPE.CLASS: constants.CLASS_INDEX,
- FIND_REFERENCE_TYPE.QUALIFIED_CLASS: constants.QCLASS_INDEX,
- FIND_REFERENCE_TYPE.PACKAGE: constants.PACKAGE_INDEX,
- FIND_REFERENCE_TYPE.INTEGRATION: constants.INT_INDEX,
- FIND_REFERENCE_TYPE.CC_CLASS: constants.CC_CLASS_INDEX
-}
-
-# XML parsing related constants.
-_COMPATIBILITY_PACKAGE_PREFIX = "com.android.compatibility"
-_CTS_JAR = "cts-tradefed"
-_XML_PUSH_DELIM = '->'
-_APK_SUFFIX = '.apk'
-# Setup script for device perf tests.
-_PERF_SETUP_LABEL = 'perf-setup.sh'
-
-# XML tags.
-_XML_NAME = 'name'
-_XML_VALUE = 'value'
-
-# VTS xml parsing constants.
-_VTS_TEST_MODULE = 'test-module-name'
-_VTS_MODULE = 'module-name'
-_VTS_BINARY_SRC = 'binary-test-source'
-_VTS_PUSH_GROUP = 'push-group'
-_VTS_PUSH = 'push'
-_VTS_BINARY_SRC_DELIM = '::'
-_VTS_PUSH_DIR = os.path.join(os.environ.get(constants.ANDROID_BUILD_TOP, ''),
- 'test', 'vts', 'tools', 'vts-tradefed', 'res',
- 'push_groups')
-_VTS_PUSH_SUFFIX = '.push'
-_VTS_BITNESS = 'append-bitness'
-_VTS_BITNESS_TRUE = 'true'
-_VTS_BITNESS_32 = '32'
-_VTS_BITNESS_64 = '64'
-_VTS_TEST_FILE = 'test-file-name'
-_VTS_APK = 'apk'
-# Matches 'DATA/target' in '_32bit::DATA/target'
-_VTS_BINARY_SRC_DELIM_RE = re.compile(r'.*::(?P<target>.*)$')
-_VTS_OUT_DATA_APP_PATH = 'DATA/app'
-
-# pylint: disable=inconsistent-return-statements
-def split_methods(user_input):
- """Split user input string into test reference and list of methods.
-
- Args:
- user_input: A string of the user's input.
- Examples:
- class_name
- class_name#method1,method2
- path
- path#method1,method2
- Returns:
- A tuple. First element is String of test ref and second element is
- a set of method name strings or empty list if no methods included.
- Exception:
- atest_error.TooManyMethodsError raised when input string is trying to
- specify too many methods in a single positional argument.
-
- Examples of unsupported input strings:
- module:class#method,class#method
- class1#method,class2#method
- path1#method,path2#method
- """
- parts = user_input.split('#')
- if len(parts) == 1:
- return parts[0], frozenset()
- elif len(parts) == 2:
- return parts[0], frozenset(parts[1].split(','))
- raise atest_error.TooManyMethodsError(
- 'Too many methods specified with # character in user input: %s.'
- '\n\nOnly one class#method combination supported per positional'
- ' argument. Multiple classes should be separated by spaces: '
- 'class#method class#method')
-
-
-# pylint: disable=inconsistent-return-statements
-def get_fully_qualified_class_name(test_path):
- """Parse the fully qualified name from the class java file.
-
- Args:
- test_path: A string of absolute path to the java class file.
-
- Returns:
- A string of the fully qualified class name.
-
- Raises:
- atest_error.MissingPackageName if no class name can be found.
- """
- with open(test_path) as class_file:
- for line in class_file:
- match = _PACKAGE_RE.match(line)
- if match:
- package = match.group('package')
- cls = os.path.splitext(os.path.split(test_path)[1])[0]
- return '%s.%s' % (package, cls)
- raise atest_error.MissingPackageNameError('%s: Test class java file'
- 'does not contain a package'
- 'name.'% test_path)
-
-
-def has_cc_class(test_path):
- """Find out if there is any test case in the cc file.
-
- Args:
- test_path: A string of absolute path to the cc file.
-
- Returns:
- Boolean: has cc class in test_path or not.
- """
- with open(test_path) as class_file:
- for line in class_file:
- match = _CC_CLASS_RE.match(line)
- if match:
- return True
- return False
-
-
-def get_package_name(file_name):
- """Parse the package name from a java file.
-
- Args:
- file_name: A string of the absolute path to the java file.
-
- Returns:
- A string of the package name or None
- """
- with open(file_name) as data:
- for line in data:
- match = _PACKAGE_RE.match(line)
- if match:
- return match.group('package')
-
-
-def has_method_in_file(test_path, methods):
- """Find out if there is at least one method in the file.
-
- Note: This method doesn't handle if method is in comment sections or not.
- If the file has any method(even in comment sections), it will return True.
-
- Args:
- test_path: A string of absolute path to the test file.
- methods: A set of method names.
-
- Returns:
- Boolean: there is at least one method in test_path.
- """
- if not os.path.isfile(test_path):
- return False
- methods_re = None
- if constants.JAVA_EXT_RE.match(test_path):
- methods_re = re.compile(_JAVA_METHODS_PATTERN.format(
- '|'.join([r'%s' % x for x in methods])))
- elif constants.CC_EXT_RE.match(test_path):
- methods_re = re.compile(_CC_METHODS_PATTERN.format(
- '|'.join([r'%s' % x for x in methods])))
- if methods_re:
- with open(test_path) as test_file:
- for line in test_file:
- match = re.match(methods_re, line)
- if match:
- return True
- return False
-
-
-def extract_test_path(output, methods=None):
- """Extract the test path from the output of a unix 'find' command.
-
- Example of find output for CLASS find cmd:
- /<some_root>/cts/tests/jank/src/android/jank/cts/ui/CtsDeviceJankUi.java
-
- Args:
- output: A string or list output of a unix 'find' command.
- methods: A set of method names.
-
- Returns:
- A list of the test paths or None if output is '' or None.
- """
- if not output:
- return None
- verified_tests = set()
- if isinstance(output, str):
- output = output.splitlines()
- for test in output:
- # compare CC_OUTPUT_RE with output
- match_obj = constants.CC_OUTPUT_RE.match(test)
- if match_obj:
- # cc/cpp
- fpath = match_obj.group('file_path')
- if not methods or match_obj.group('method_name') in methods:
- verified_tests.add(fpath)
- else:
- # TODO (b/138997521) - Atest checks has_method_in_file of a class
- # without traversing its parent classes. A workaround for this is
- # do not check has_method_in_file. Uncomment below when a solution
- # to it is applied.
- # java/kt
- #if not methods or has_method_in_file(test, methods):
- verified_tests.add(test)
- return extract_test_from_tests(list(verified_tests))
-
-
-def extract_test_from_tests(tests):
- """Extract the test path from the tests.
-
- Return the test to run from tests. If more than one option, prompt the user
- to select multiple ones. Supporting formats:
- - An integer. E.g. 0
- - Comma-separated integers. E.g. 1,3,5
- - A range of integers denoted by the starting integer separated from
- the end integer by a dash, '-'. E.g. 1-3
-
- Args:
- tests: A string list which contains multiple test paths.
-
- Returns:
- A string list of paths.
- """
- count = len(tests)
- if count <= 1:
- return tests if count else None
- mtests = set()
- try:
- numbered_list = ['%s: %s' % (i, t) for i, t in enumerate(tests)]
- numbered_list.append('%s: All' % count)
- print('Multiple tests found:\n{0}'.format('\n'.join(numbered_list)))
- test_indices = raw_input("Please enter numbers of test to use. "
- "If none of above option matched, keep "
- "searching for other possible tests."
- "\n(multiple selection is supported,"
- " e.g. '1' or '0,1' or '0-2'): ")
- for idx in re.sub(r'(\s)', '', test_indices).split(','):
- indices = idx.split('-')
- len_indices = len(indices)
- if len_indices > 0:
- start_index = min(int(indices[0]), int(indices[len_indices-1]))
- end_index = max(int(indices[0]), int(indices[len_indices-1]))
- # One of input is 'All', return all options.
- if start_index == count or end_index == count:
- return tests
- mtests.update(tests[start_index:(end_index+1)])
- except (ValueError, IndexError, AttributeError, TypeError) as err:
- logging.debug('%s', err)
- print('None of above option matched, keep searching for other'
- ' possible tests...')
- return list(mtests)
-
-
-@atest_decorator.static_var("cached_ignore_dirs", [])
-def _get_ignored_dirs():
- """Get ignore dirs in find command.
-
- Since we can't construct a single find cmd to find the target and
- filter-out the dir with .out-dir, .find-ignore and $OUT-DIR. We have
- to run the 1st find cmd to find these dirs. Then, we can use these
- results to generate the real find cmd.
-
- Return:
- A list of the ignore dirs.
- """
- out_dirs = _get_ignored_dirs.cached_ignore_dirs
- if not out_dirs:
- build_top = os.environ.get(constants.ANDROID_BUILD_TOP)
- find_out_dir_cmd = (r'find %s -maxdepth 2 '
- r'-type f \( -name ".out-dir" -o -name '
- r'".find-ignore" \)') % build_top
- out_files = subprocess.check_output(find_out_dir_cmd, shell=True)
- # Get all dirs with .out-dir or .find-ignore
- if out_files:
- out_files = out_files.splitlines()
- for out_file in out_files:
- if out_file:
- out_dirs.append(os.path.dirname(out_file.strip()))
- # Get the out folder if user specified $OUT_DIR
- custom_out_dir = os.environ.get(constants.ANDROID_OUT_DIR)
- if custom_out_dir:
- user_out_dir = None
- if os.path.isabs(custom_out_dir):
- user_out_dir = custom_out_dir
- else:
- user_out_dir = os.path.join(build_top, custom_out_dir)
- # only ignore the out_dir when it under $ANDROID_BUILD_TOP
- if build_top in user_out_dir:
- if user_out_dir not in out_dirs:
- out_dirs.append(user_out_dir)
- _get_ignored_dirs.cached_ignore_dirs = out_dirs
- return out_dirs
-
-
-def _get_prune_cond_of_ignored_dirs():
- """Get the prune condition of ignore dirs.
-
- Generation a string of the prune condition in the find command.
- It will filter-out the dir with .out-dir, .find-ignore and $OUT-DIR.
- Because they are the out dirs, we don't have to find them.
-
- Return:
- A string of the prune condition of the ignore dirs.
- """
- out_dirs = _get_ignored_dirs()
- prune_cond = r'-type d \( -name ".*"'
- for out_dir in out_dirs:
- prune_cond += r' -o -path %s' % out_dir
- prune_cond += r' \) -prune -o'
- return prune_cond
-
-
-def run_find_cmd(ref_type, search_dir, target, methods=None):
- """Find a path to a target given a search dir and a target name.
-
- Args:
- ref_type: An AtestEnum of the reference type.
- search_dir: A string of the dirpath to search in.
- target: A string of what you're trying to find.
- methods: A set of method names.
-
- Return:
- A list of the path to the target.
- If the search_dir is inexistent, None will be returned.
- """
- # If module_info.json is outdated, finding in the search_dir can result in
- # raising exception. Return null immediately can guild users to run
- # --rebuild-module-info to resolve the problem.
- if not os.path.isdir(search_dir):
- logging.debug('\'%s\' does not exist!', search_dir)
- return None
- ref_name = FIND_REFERENCE_TYPE[ref_type]
- start = time.time()
- if os.path.isfile(FIND_INDEXES[ref_type]):
- _dict, out = {}, None
- with open(FIND_INDEXES[ref_type], 'rb') as index:
- try:
- _dict = pickle.load(index)
- except (IOError, EOFError, pickle.UnpicklingError) as err:
- logging.debug('Exception raised: %s', err)
- metrics_utils.handle_exc_and_send_exit_event(
- constants.ACCESS_CACHE_FAILURE)
- os.remove(FIND_INDEXES[ref_type])
- if _dict.get(target):
- logging.debug('Found %s in %s', target, FIND_INDEXES[ref_type])
- out = [path for path in _dict.get(target) if search_dir in path]
- else:
- prune_cond = _get_prune_cond_of_ignored_dirs()
- if '.' in target:
- target = target.replace('.', '/')
- find_cmd = FIND_CMDS[ref_type].format(search_dir, prune_cond, target)
- logging.debug('Executing %s find cmd: %s', ref_name, find_cmd)
- out = subprocess.check_output(find_cmd, shell=True)
- logging.debug('%s find cmd out: %s', ref_name, out)
- logging.debug('%s find completed in %ss', ref_name, time.time() - start)
- return extract_test_path(out, methods)
-
-
-def find_class_file(search_dir, class_name, is_native_test=False, methods=None):
- """Find a path to a class file given a search dir and a class name.
-
- Args:
- search_dir: A string of the dirpath to search in.
- class_name: A string of the class to search for.
- is_native_test: A boolean variable of whether to search for a native
- test or not.
- methods: A set of method names.
-
- Return:
- A list of the path to the java/cc file.
- """
- if is_native_test:
- ref_type = FIND_REFERENCE_TYPE.CC_CLASS
- elif '.' in class_name:
- ref_type = FIND_REFERENCE_TYPE.QUALIFIED_CLASS
- else:
- ref_type = FIND_REFERENCE_TYPE.CLASS
- return run_find_cmd(ref_type, search_dir, class_name, methods)
-
-
-def is_equal_or_sub_dir(sub_dir, parent_dir):
- """Return True sub_dir is sub dir or equal to parent_dir.
-
- Args:
- sub_dir: A string of the sub directory path.
- parent_dir: A string of the parent directory path.
-
- Returns:
- A boolean of whether both are dirs and sub_dir is sub of parent_dir
- or is equal to parent_dir.
- """
- # avoid symlink issues with real path
- parent_dir = os.path.realpath(parent_dir)
- sub_dir = os.path.realpath(sub_dir)
- if not os.path.isdir(sub_dir) or not os.path.isdir(parent_dir):
- return False
- return os.path.commonprefix([sub_dir, parent_dir]) == parent_dir
-
-
-def find_parent_module_dir(root_dir, start_dir, module_info):
- """From current dir search up file tree until root dir for module dir.
-
- Args:
- root_dir: A string of the dir that is the parent of the start dir.
- start_dir: A string of the dir to start searching up from.
- module_info: ModuleInfo object containing module information from the
- build system.
-
- Returns:
- A string of the module dir relative to root, None if no Module Dir
- found. There may be multiple testable modules at this level.
-
- Exceptions:
- ValueError: Raised if cur_dir not dir or not subdir of root dir.
- """
- if not is_equal_or_sub_dir(start_dir, root_dir):
- raise ValueError('%s not in repo %s' % (start_dir, root_dir))
- auto_gen_dir = None
- current_dir = start_dir
- while current_dir != root_dir:
- # TODO (b/112904944) - migrate module_finder functions to here and
- # reuse them.
- rel_dir = os.path.relpath(current_dir, root_dir)
- # Check if actual config file here
- if os.path.isfile(os.path.join(current_dir, constants.MODULE_CONFIG)):
- return rel_dir
- # Check module_info if auto_gen config or robo (non-config) here
- for mod in module_info.path_to_module_info.get(rel_dir, []):
- if module_info.is_robolectric_module(mod):
- return rel_dir
- for test_config in mod.get(constants.MODULE_TEST_CONFIG, []):
- if os.path.isfile(os.path.join(root_dir, test_config)):
- return rel_dir
- if mod.get('auto_test_config'):
- auto_gen_dir = rel_dir
- # Don't return for auto_gen, keep checking for real config, because
- # common in cts for class in apk that's in hostside test setup.
- current_dir = os.path.dirname(current_dir)
- return auto_gen_dir
-
-
-def get_targets_from_xml(xml_file, module_info):
- """Retrieve build targets from the given xml.
-
- Just a helper func on top of get_targets_from_xml_root.
-
- Args:
- xml_file: abs path to xml file.
- module_info: ModuleInfo class used to verify targets are valid modules.
-
- Returns:
- A set of build targets based on the signals found in the xml file.
- """
- xml_root = ET.parse(xml_file).getroot()
- return get_targets_from_xml_root(xml_root, module_info)
-
-
-def _get_apk_target(apk_target):
- """Return the sanitized apk_target string from the xml.
-
- The apk_target string can be of 2 forms:
- - apk_target.apk
- - apk_target.apk->/path/to/install/apk_target.apk
-
- We want to return apk_target in both cases.
-
- Args:
- apk_target: String of target name to clean.
-
- Returns:
- String of apk_target to build.
- """
- apk = apk_target.split(_XML_PUSH_DELIM, 1)[0].strip()
- return apk[:-len(_APK_SUFFIX)]
-
-
-def _is_apk_target(name, value):
- """Return True if XML option is an apk target.
-
- We have some scenarios where an XML option can be an apk target:
- - value is an apk file.
- - name is a 'push' option where value holds the apk_file + other stuff.
-
- Args:
- name: String name of XML option.
- value: String value of the XML option.
-
- Returns:
- True if it's an apk target we should build, False otherwise.
- """
- if _APK_RE.match(value):
- return True
- if name == 'push' and value.endswith(_APK_SUFFIX):
- return True
- return False
-
-
-def get_targets_from_xml_root(xml_root, module_info):
- """Retrieve build targets from the given xml root.
-
- We're going to pull the following bits of info:
- - Parse any .apk files listed in the config file.
- - Parse option value for "test-module-name" (for vts10 tests).
- - Look for the perf script.
-
- Args:
- module_info: ModuleInfo class used to verify targets are valid modules.
- xml_root: ElementTree xml_root for us to look through.
-
- Returns:
- A set of build targets based on the signals found in the xml file.
- """
- targets = set()
- option_tags = xml_root.findall('.//option')
- for tag in option_tags:
- target_to_add = None
- name = tag.attrib[_XML_NAME].strip()
- value = tag.attrib[_XML_VALUE].strip()
- if _is_apk_target(name, value):
- target_to_add = _get_apk_target(value)
- elif _PERF_SETUP_LABEL in value:
- targets.add(_PERF_SETUP_LABEL)
- continue
-
- # Let's make sure we can actually build the target.
- if target_to_add and module_info.is_module(target_to_add):
- targets.add(target_to_add)
- elif target_to_add:
- logging.warning('Build target (%s) not present in module info, '
- 'skipping build', target_to_add)
-
- # TODO (b/70813166): Remove this lookup once all runtime dependencies
- # can be listed as a build dependencies or are in the base test harness.
- nodes_with_class = xml_root.findall(".//*[@class]")
- for class_attr in nodes_with_class:
- fqcn = class_attr.attrib['class'].strip()
- if fqcn.startswith(_COMPATIBILITY_PACKAGE_PREFIX):
- targets.add(_CTS_JAR)
- logging.debug('Targets found in config file: %s', targets)
- return targets
-
-
-def _get_vts_push_group_targets(push_file, rel_out_dir):
- """Retrieve vts10 push group build targets.
-
- A push group file is a file that list out test dependencies and other push
- group files. Go through the push file and gather all the test deps we need.
-
- Args:
- push_file: Name of the push file in the VTS
- rel_out_dir: Abs path to the out dir to help create vts10 build targets.
-
- Returns:
- Set of string which represent build targets.
- """
- targets = set()
- full_push_file_path = os.path.join(_VTS_PUSH_DIR, push_file)
- # pylint: disable=invalid-name
- with open(full_push_file_path) as f:
- for line in f:
- target = line.strip()
- # Skip empty lines.
- if not target:
- continue
-
- # This is a push file, get the targets from it.
- if target.endswith(_VTS_PUSH_SUFFIX):
- targets |= _get_vts_push_group_targets(line.strip(),
- rel_out_dir)
- continue
- sanitized_target = target.split(_XML_PUSH_DELIM, 1)[0].strip()
- targets.add(os.path.join(rel_out_dir, sanitized_target))
- return targets
-
-
-def _specified_bitness(xml_root):
- """Check if the xml file contains the option append-bitness.
-
- Args:
- xml_root: abs path to xml file.
-
- Returns:
- True if xml specifies to append-bitness, False otherwise.
- """
- option_tags = xml_root.findall('.//option')
- for tag in option_tags:
- value = tag.attrib[_XML_VALUE].strip()
- name = tag.attrib[_XML_NAME].strip()
- if name == _VTS_BITNESS and value == _VTS_BITNESS_TRUE:
- return True
- return False
-
-
-def _get_vts_binary_src_target(value, rel_out_dir):
- """Parse out the vts10 binary src target.
-
- The value can be in the following pattern:
- - {_32bit,_64bit,_IPC32_32bit}::DATA/target (DATA/target)
- - DATA/target->/data/target (DATA/target)
- - out/host/linx-x86/bin/VtsSecuritySelinuxPolicyHostTest (the string as
- is)
-
- Args:
- value: String of the XML option value to parse.
- rel_out_dir: String path of out dir to prepend to target when required.
-
- Returns:
- String of the target to build.
- """
- # We'll assume right off the bat we can use the value as is and modify it if
- # necessary, e.g. out/host/linux-x86/bin...
- target = value
- # _32bit::DATA/target
- match = _VTS_BINARY_SRC_DELIM_RE.match(value)
- if match:
- target = os.path.join(rel_out_dir, match.group('target'))
- # DATA/target->/data/target
- elif _XML_PUSH_DELIM in value:
- target = value.split(_XML_PUSH_DELIM, 1)[0].strip()
- target = os.path.join(rel_out_dir, target)
- return target
-
-
-def get_plans_from_vts_xml(xml_file):
- """Get configs which are included by xml_file.
-
- We're looking for option(include) to get all dependency plan configs.
-
- Args:
- xml_file: Absolute path to xml file.
-
- Returns:
- A set of plan config paths which are depended by xml_file.
- """
- if not os.path.exists(xml_file):
- raise atest_error.XmlNotExistError('%s: The xml file does'
- 'not exist' % xml_file)
- plans = set()
- xml_root = ET.parse(xml_file).getroot()
- plans.add(xml_file)
- option_tags = xml_root.findall('.//include')
- if not option_tags:
- return plans
- # Currently, all vts10 xmls live in the same dir :
- # https://android.googlesource.com/platform/test/vts/+/master/tools/vts-tradefed/res/config/
- # If the vts10 plans start using folders to organize the plans, the logic here
- # should be changed.
- xml_dir = os.path.dirname(xml_file)
- for tag in option_tags:
- name = tag.attrib[_XML_NAME].strip()
- plans |= get_plans_from_vts_xml(os.path.join(xml_dir, name + ".xml"))
- return plans
-
-
-def get_targets_from_vts_xml(xml_file, rel_out_dir, module_info):
- """Parse a vts10 xml for test dependencies we need to build.
-
- We have a separate vts10 parsing function because we make a big assumption
- on the targets (the way they're formatted and what they represent) and we
- also create these build targets in a very special manner as well.
- The 6 options we're looking for are:
- - binary-test-source
- - push-group
- - push
- - test-module-name
- - test-file-name
- - apk
-
- Args:
- module_info: ModuleInfo class used to verify targets are valid modules.
- rel_out_dir: Abs path to the out dir to help create vts10 build targets.
- xml_file: abs path to xml file.
-
- Returns:
- A set of build targets based on the signals found in the xml file.
- """
- xml_root = ET.parse(xml_file).getroot()
- targets = set()
- option_tags = xml_root.findall('.//option')
- for tag in option_tags:
- value = tag.attrib[_XML_VALUE].strip()
- name = tag.attrib[_XML_NAME].strip()
- if name in [_VTS_TEST_MODULE, _VTS_MODULE]:
- if module_info.is_module(value):
- targets.add(value)
- else:
- logging.warning('vts10 test module (%s) not present in module '
- 'info, skipping build', value)
- elif name == _VTS_BINARY_SRC:
- targets.add(_get_vts_binary_src_target(value, rel_out_dir))
- elif name == _VTS_PUSH_GROUP:
- # Look up the push file and parse out build artifacts (as well as
- # other push group files to parse).
- targets |= _get_vts_push_group_targets(value, rel_out_dir)
- elif name == _VTS_PUSH:
- # Parse out the build artifact directly.
- push_target = value.split(_XML_PUSH_DELIM, 1)[0].strip()
- # If the config specified append-bitness, append the bits suffixes
- # to the target.
- if _specified_bitness(xml_root):
- targets.add(os.path.join(rel_out_dir, push_target + _VTS_BITNESS_32))
- targets.add(os.path.join(rel_out_dir, push_target + _VTS_BITNESS_64))
- else:
- targets.add(os.path.join(rel_out_dir, push_target))
- elif name == _VTS_TEST_FILE:
- # The _VTS_TEST_FILE values can be set in 2 possible ways:
- # 1. test_file.apk
- # 2. DATA/app/test_file/test_file.apk
- # We'll assume that test_file.apk (#1) is in an expected path (but
- # that is not true, see b/76158619) and create the full path for it
- # and then append the _VTS_TEST_FILE value to targets to build.
- target = os.path.join(rel_out_dir, value)
- # If value is just an APK, specify the path that we expect it to be in
- # e.g. out/host/linux-x86/vts10/android-vts10/testcases/DATA/app/test_file/test_file.apk
- head, _ = os.path.split(value)
- if not head:
- target = os.path.join(rel_out_dir, _VTS_OUT_DATA_APP_PATH,
- _get_apk_target(value), value)
- targets.add(target)
- elif name == _VTS_APK:
- targets.add(os.path.join(rel_out_dir, value))
- logging.debug('Targets found in config file: %s', targets)
- return targets
-
-
-def get_dir_path_and_filename(path):
- """Return tuple of dir and file name from given path.
-
- Args:
- path: String of path to break up.
-
- Returns:
- Tuple of (dir, file) paths.
- """
- if os.path.isfile(path):
- dir_path, file_path = os.path.split(path)
- else:
- dir_path, file_path = path, None
- return dir_path, file_path
-
-
-def get_cc_filter(class_name, methods):
- """Get the cc filter.
-
- Args:
- class_name: class name of the cc test.
- methods: a list of method names.
-
- Returns:
- A formatted string for cc filter.
- Ex: "class1.method1:class1.method2" or "class1.*"
- """
- if methods:
- return ":".join(["%s.%s" % (class_name, x) for x in methods])
- return "%s.*" % class_name
-
-
-def search_integration_dirs(name, int_dirs):
- """Search integration dirs for name and return full path.
-
- Args:
- name: A string of plan name needed to be found.
- int_dirs: A list of path needed to be searched.
-
- Returns:
- A list of the test path.
- Ask user to select if multiple tests are found.
- None if no matched test found.
- """
- root_dir = os.environ.get(constants.ANDROID_BUILD_TOP)
- test_files = []
- for integration_dir in int_dirs:
- abs_path = os.path.join(root_dir, integration_dir)
- test_paths = run_find_cmd(FIND_REFERENCE_TYPE.INTEGRATION, abs_path,
- name)
- if test_paths:
- test_files.extend(test_paths)
- return extract_test_from_tests(test_files)
-
-
-def get_int_dir_from_path(path, int_dirs):
- """Search integration dirs for the given path and return path of dir.
-
- Args:
- path: A string of path needed to be found.
- int_dirs: A list of path needed to be searched.
-
- Returns:
- A string of the test dir. None if no matched path found.
- """
- root_dir = os.environ.get(constants.ANDROID_BUILD_TOP)
- if not os.path.exists(path):
- return None
- dir_path, file_name = get_dir_path_and_filename(path)
- int_dir = None
- for possible_dir in int_dirs:
- abs_int_dir = os.path.join(root_dir, possible_dir)
- if is_equal_or_sub_dir(dir_path, abs_int_dir):
- int_dir = abs_int_dir
- break
- if not file_name:
- logging.warn('Found dir (%s) matching input (%s).'
- ' Referencing an entire Integration/Suite dir'
- ' is not supported. If you are trying to reference'
- ' a test by its path, please input the path to'
- ' the integration/suite config file itself.',
- int_dir, path)
- return None
- return int_dir
-
-
-def get_install_locations(installed_paths):
- """Get install locations from installed paths.
-
- Args:
- installed_paths: List of installed_paths from module_info.
-
- Returns:
- Set of install locations from module_info installed_paths. e.g.
- set(['host', 'device'])
- """
- install_locations = set()
- for path in installed_paths:
- if _HOST_PATH_RE.match(path):
- install_locations.add(constants.DEVICELESS_TEST)
- elif _DEVICE_PATH_RE.match(path):
- install_locations.add(constants.DEVICE_TEST)
- return install_locations
-
-
-def get_levenshtein_distance(test_name, module_name, dir_costs=constants.COST_TYPO):
- """Return an edit distance between test_name and module_name.
-
- Levenshtein Distance has 3 actions: delete, insert and replace.
- dis_costs makes each action weigh differently.
-
- Args:
- test_name: A keyword from the users.
- module_name: A testable module name.
- dir_costs: A tuple which contains 3 integer, where dir represents
- Deletion, Insertion and Replacement respectively.
- For guessing typos: (1, 1, 1) gives the best result.
- For searching keywords, (8, 1, 5) gives the best result.
-
- Returns:
- An edit distance integer between test_name and module_name.
- """
- rows = len(test_name) + 1
- cols = len(module_name) + 1
- deletion, insertion, replacement = dir_costs
-
- # Creating a Dynamic Programming Matrix and weighting accordingly.
- dp_matrix = [[0 for _ in range(cols)] for _ in range(rows)]
- # Weigh rows/deletion
- for row in range(1, rows):
- dp_matrix[row][0] = row * deletion
- # Weigh cols/insertion
- for col in range(1, cols):
- dp_matrix[0][col] = col * insertion
- # The core logic of LD
- for col in range(1, cols):
- for row in range(1, rows):
- if test_name[row-1] == module_name[col-1]:
- cost = 0
- else:
- cost = replacement
- dp_matrix[row][col] = min(dp_matrix[row-1][col] + deletion,
- dp_matrix[row][col-1] + insertion,
- dp_matrix[row-1][col-1] + cost)
-
- return dp_matrix[row][col]
-
-
-def is_test_from_kernel_xml(xml_file, test_name):
- """Check if test defined in xml_file.
-
- A kernel test can be defined like:
- <option name="test-command-line" key="test_class_1" value="command 1" />
- where key is the name of test class and method of the runner. This method
- returns True if the test_name was defined in the given xml_file.
-
- Args:
- xml_file: Absolute path to xml file.
- test_name: test_name want to find.
-
- Returns:
- True if test_name in xml_file, False otherwise.
- """
- if not os.path.exists(xml_file):
- raise atest_error.XmlNotExistError('%s: The xml file does'
- 'not exist' % xml_file)
- xml_root = ET.parse(xml_file).getroot()
- option_tags = xml_root.findall('.//option')
- for option_tag in option_tags:
- if option_tag.attrib['name'] == 'test-command-line':
- if option_tag.attrib['key'] == test_name:
- return True
- return False
diff --git a/atest-py2/test_finders/test_finder_utils_unittest.py b/atest-py2/test_finders/test_finder_utils_unittest.py
deleted file mode 100755
index d35f25a..0000000
--- a/atest-py2/test_finders/test_finder_utils_unittest.py
+++ /dev/null
@@ -1,584 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittests for test_finder_utils."""
-
-import os
-import unittest
-import mock
-
-# pylint: disable=import-error
-import atest_error
-import constants
-import module_info
-import unittest_constants as uc
-import unittest_utils
-from test_finders import test_finder_utils
-
-CLASS_DIR = 'foo/bar/jank/src/android/jank/cts/ui'
-OTHER_DIR = 'other/dir/'
-OTHER_CLASS_NAME = 'test.java'
-CLASS_NAME3 = 'test2'
-INT_DIR1 = os.path.join(uc.TEST_DATA_DIR, 'integration_dir_testing/int_dir1')
-INT_DIR2 = os.path.join(uc.TEST_DATA_DIR, 'integration_dir_testing/int_dir2')
-INT_FILE_NAME = 'int_dir_testing'
-FIND_TWO = uc.ROOT + 'other/dir/test.java\n' + uc.FIND_ONE
-FIND_THREE = '/a/b/c.java\n/d/e/f.java\n/g/h/i.java'
-FIND_THREE_LIST = ['/a/b/c.java', '/d/e/f.java', '/g/h/i.java']
-VTS_XML = 'VtsAndroidTest.xml'
-VTS_BITNESS_XML = 'VtsBitnessAndroidTest.xml'
-VTS_PUSH_DIR = 'vts_push_files'
-VTS_PLAN_DIR = 'vts_plan_files'
-VTS_XML_TARGETS = {'VtsTestName',
- 'DATA/nativetest/vts_treble_vintf_test/vts_treble_vintf_test',
- 'DATA/nativetest64/vts_treble_vintf_test/vts_treble_vintf_test',
- 'DATA/lib/libhidl-gen-hash.so',
- 'DATA/lib64/libhidl-gen-hash.so',
- 'hal-hidl-hash/frameworks/hardware/interfaces/current.txt',
- 'hal-hidl-hash/hardware/interfaces/current.txt',
- 'hal-hidl-hash/system/hardware/interfaces/current.txt',
- 'hal-hidl-hash/system/libhidl/transport/current.txt',
- 'target_with_delim',
- 'out/dir/target',
- 'push_file1_target1',
- 'push_file1_target2',
- 'push_file2_target1',
- 'push_file2_target2',
- 'CtsDeviceInfo.apk',
- 'DATA/app/sl4a/sl4a.apk'}
-VTS_PLAN_TARGETS = {os.path.join(uc.TEST_DATA_DIR, VTS_PLAN_DIR, 'vts-staging-default.xml'),
- os.path.join(uc.TEST_DATA_DIR, VTS_PLAN_DIR, 'vts-aa.xml'),
- os.path.join(uc.TEST_DATA_DIR, VTS_PLAN_DIR, 'vts-bb.xml'),
- os.path.join(uc.TEST_DATA_DIR, VTS_PLAN_DIR, 'vts-cc.xml'),
- os.path.join(uc.TEST_DATA_DIR, VTS_PLAN_DIR, 'vts-dd.xml')}
-XML_TARGETS = {'CtsJankDeviceTestCases', 'perf-setup.sh', 'cts-tradefed',
- 'GtsEmptyTestApp'}
-PATH_TO_MODULE_INFO_WITH_AUTOGEN = {
- 'foo/bar/jank' : [{'auto_test_config' : True}]}
-PATH_TO_MODULE_INFO_WITH_MULTI_AUTOGEN = {
- 'foo/bar/jank' : [{'auto_test_config' : True},
- {'auto_test_config' : True}]}
-PATH_TO_MODULE_INFO_WITH_MULTI_AUTOGEN_AND_ROBO = {
- 'foo/bar' : [{'auto_test_config' : True},
- {'auto_test_config' : True}],
- 'foo/bar/jank': [{constants.MODULE_CLASS : [constants.MODULE_CLASS_ROBOLECTRIC]}]}
-
-#pylint: disable=protected-access
-class TestFinderUtilsUnittests(unittest.TestCase):
- """Unit tests for test_finder_utils.py"""
-
- def test_split_methods(self):
- """Test _split_methods method."""
- # Class
- unittest_utils.assert_strict_equal(
- self,
- test_finder_utils.split_methods('Class.Name'),
- ('Class.Name', set()))
- unittest_utils.assert_strict_equal(
- self,
- test_finder_utils.split_methods('Class.Name#Method'),
- ('Class.Name', {'Method'}))
- unittest_utils.assert_strict_equal(
- self,
- test_finder_utils.split_methods('Class.Name#Method,Method2'),
- ('Class.Name', {'Method', 'Method2'}))
- unittest_utils.assert_strict_equal(
- self,
- test_finder_utils.split_methods('Class.Name#Method,Method2'),
- ('Class.Name', {'Method', 'Method2'}))
- unittest_utils.assert_strict_equal(
- self,
- test_finder_utils.split_methods('Class.Name#Method,Method2'),
- ('Class.Name', {'Method', 'Method2'}))
- self.assertRaises(
- atest_error.TooManyMethodsError, test_finder_utils.split_methods,
- 'class.name#Method,class.name.2#method')
- # Path
- unittest_utils.assert_strict_equal(
- self,
- test_finder_utils.split_methods('foo/bar/class.java'),
- ('foo/bar/class.java', set()))
- unittest_utils.assert_strict_equal(
- self,
- test_finder_utils.split_methods('foo/bar/class.java#Method'),
- ('foo/bar/class.java', {'Method'}))
-
- @mock.patch.object(test_finder_utils, 'has_method_in_file',
- return_value=False)
- @mock.patch('__builtin__.raw_input', return_value='1')
- def test_extract_test_path(self, _, has_method):
- """Test extract_test_dir method."""
- paths = [os.path.join(uc.ROOT, CLASS_DIR, uc.CLASS_NAME + '.java')]
- unittest_utils.assert_strict_equal(
- self, test_finder_utils.extract_test_path(uc.FIND_ONE), paths)
- paths = [os.path.join(uc.ROOT, CLASS_DIR, uc.CLASS_NAME + '.java')]
- unittest_utils.assert_strict_equal(
- self, test_finder_utils.extract_test_path(FIND_TWO), paths)
- has_method.return_value = True
- paths = [os.path.join(uc.ROOT, CLASS_DIR, uc.CLASS_NAME + '.java')]
- unittest_utils.assert_strict_equal(
- self, test_finder_utils.extract_test_path(uc.FIND_ONE, 'method'), paths)
-
- def test_has_method_in_file(self):
- """Test has_method_in_file method."""
- test_path = os.path.join(uc.TEST_DATA_DIR, 'class_file_path_testing',
- 'hello_world_test.cc')
- self.assertTrue(test_finder_utils.has_method_in_file(
- test_path, frozenset(['PrintHelloWorld'])))
- self.assertFalse(test_finder_utils.has_method_in_file(
- test_path, frozenset(['PrintHelloWorld1'])))
- test_path = os.path.join(uc.TEST_DATA_DIR, 'class_file_path_testing',
- 'hello_world_test.java')
- self.assertTrue(test_finder_utils.has_method_in_file(
- test_path, frozenset(['testMethod1'])))
- test_path = os.path.join(uc.TEST_DATA_DIR, 'class_file_path_testing',
- 'hello_world_test.java')
- self.assertTrue(test_finder_utils.has_method_in_file(
- test_path, frozenset(['testMethod', 'testMethod2'])))
- test_path = os.path.join(uc.TEST_DATA_DIR, 'class_file_path_testing',
- 'hello_world_test.java')
- self.assertFalse(test_finder_utils.has_method_in_file(
- test_path, frozenset(['testMethod'])))
-
- @mock.patch('__builtin__.raw_input', return_value='1')
- def test_extract_test_from_tests(self, mock_input):
- """Test method extract_test_from_tests method."""
- tests = []
- self.assertEquals(test_finder_utils.extract_test_from_tests(tests), None)
- paths = [os.path.join(uc.ROOT, CLASS_DIR, uc.CLASS_NAME + '.java')]
- unittest_utils.assert_strict_equal(
- self, test_finder_utils.extract_test_path(uc.FIND_ONE), paths)
- paths = [os.path.join(uc.ROOT, OTHER_DIR, OTHER_CLASS_NAME)]
- mock_input.return_value = '0'
- unittest_utils.assert_strict_equal(
- self, test_finder_utils.extract_test_path(FIND_TWO), paths)
- # Test inputing out-of-range integer or a string
- mock_input.return_value = '100'
- self.assertEquals(test_finder_utils.extract_test_from_tests(
- uc.CLASS_NAME), [])
- mock_input.return_value = 'lOO'
- self.assertEquals(test_finder_utils.extract_test_from_tests(
- uc.CLASS_NAME), [])
-
- @mock.patch('__builtin__.raw_input', return_value='1')
- def test_extract_test_from_multiselect(self, mock_input):
- """Test method extract_test_from_tests method."""
- # selecting 'All'
- paths = ['/a/b/c.java', '/d/e/f.java', '/g/h/i.java']
- mock_input.return_value = '3'
- unittest_utils.assert_strict_equal(
- self, sorted(test_finder_utils.extract_test_from_tests(
- FIND_THREE_LIST)), sorted(paths))
- # multi-select
- paths = ['/a/b/c.java', '/g/h/i.java']
- mock_input.return_value = '0,2'
- unittest_utils.assert_strict_equal(
- self, sorted(test_finder_utils.extract_test_from_tests(
- FIND_THREE_LIST)), sorted(paths))
- # selecting a range
- paths = ['/d/e/f.java', '/g/h/i.java']
- mock_input.return_value = '1-2'
- unittest_utils.assert_strict_equal(
- self, test_finder_utils.extract_test_from_tests(FIND_THREE_LIST), paths)
- # mixed formats
- paths = ['/a/b/c.java', '/d/e/f.java', '/g/h/i.java']
- mock_input.return_value = '0,1-2'
- unittest_utils.assert_strict_equal(
- self, sorted(test_finder_utils.extract_test_from_tests(
- FIND_THREE_LIST)), sorted(paths))
- # input unsupported formats, return empty
- paths = []
- mock_input.return_value = '?/#'
- unittest_utils.assert_strict_equal(
- self, test_finder_utils.extract_test_path(FIND_THREE), paths)
-
- @mock.patch('os.path.isdir')
- def test_is_equal_or_sub_dir(self, mock_isdir):
- """Test is_equal_or_sub_dir method."""
- self.assertTrue(test_finder_utils.is_equal_or_sub_dir('/a/b/c', '/'))
- self.assertTrue(test_finder_utils.is_equal_or_sub_dir('/a/b/c', '/a'))
- self.assertTrue(test_finder_utils.is_equal_or_sub_dir('/a/b/c',
- '/a/b/c'))
- self.assertFalse(test_finder_utils.is_equal_or_sub_dir('/a/b',
- '/a/b/c'))
- self.assertFalse(test_finder_utils.is_equal_or_sub_dir('/a', '/f'))
- mock_isdir.return_value = False
- self.assertFalse(test_finder_utils.is_equal_or_sub_dir('/a/b', '/a'))
-
- @mock.patch('os.path.isdir', return_value=True)
- @mock.patch('os.path.isfile',
- side_effect=unittest_utils.isfile_side_effect)
- def test_find_parent_module_dir(self, _isfile, _isdir):
- """Test _find_parent_module_dir method."""
- abs_class_dir = '/%s' % CLASS_DIR
- mock_module_info = mock.Mock(spec=module_info.ModuleInfo)
- mock_module_info.path_to_module_info = {}
- unittest_utils.assert_strict_equal(
- self,
- test_finder_utils.find_parent_module_dir(uc.ROOT,
- abs_class_dir,
- mock_module_info),
- uc.MODULE_DIR)
-
- @mock.patch('os.path.isdir', return_value=True)
- @mock.patch('os.path.isfile', return_value=False)
- def test_find_parent_module_dir_with_autogen_config(self, _isfile, _isdir):
- """Test _find_parent_module_dir method."""
- abs_class_dir = '/%s' % CLASS_DIR
- mock_module_info = mock.Mock(spec=module_info.ModuleInfo)
- mock_module_info.path_to_module_info = PATH_TO_MODULE_INFO_WITH_AUTOGEN
- unittest_utils.assert_strict_equal(
- self,
- test_finder_utils.find_parent_module_dir(uc.ROOT,
- abs_class_dir,
- mock_module_info),
- uc.MODULE_DIR)
-
- @mock.patch('os.path.isdir', return_value=True)
- @mock.patch('os.path.isfile', side_effect=[False] * 5 + [True])
- def test_find_parent_module_dir_with_autogen_subconfig(self, _isfile, _isdir):
- """Test _find_parent_module_dir method.
-
- This case is testing when the auto generated config is in a
- sub-directory of a larger test that contains a test config in a parent
- directory.
- """
- abs_class_dir = '/%s' % CLASS_DIR
- mock_module_info = mock.Mock(spec=module_info.ModuleInfo)
- mock_module_info.path_to_module_info = (
- PATH_TO_MODULE_INFO_WITH_MULTI_AUTOGEN)
- unittest_utils.assert_strict_equal(
- self,
- test_finder_utils.find_parent_module_dir(uc.ROOT,
- abs_class_dir,
- mock_module_info),
- uc.MODULE_DIR)
-
- @mock.patch('os.path.isdir', return_value=True)
- @mock.patch('os.path.isfile', return_value=False)
- def test_find_parent_module_dir_with_multi_autogens(self, _isfile, _isdir):
- """Test _find_parent_module_dir method.
-
- This case returns folders with multiple autogenerated configs defined.
- """
- abs_class_dir = '/%s' % CLASS_DIR
- mock_module_info = mock.Mock(spec=module_info.ModuleInfo)
- mock_module_info.path_to_module_info = (
- PATH_TO_MODULE_INFO_WITH_MULTI_AUTOGEN)
- unittest_utils.assert_strict_equal(
- self,
- test_finder_utils.find_parent_module_dir(uc.ROOT,
- abs_class_dir,
- mock_module_info),
- uc.MODULE_DIR)
-
- @mock.patch('os.path.isdir', return_value=True)
- @mock.patch('os.path.isfile', return_value=False)
- def test_find_parent_module_dir_with_robo_and_autogens(self, _isfile,
- _isdir):
- """Test _find_parent_module_dir method.
-
- This case returns folders with multiple autogenerated configs defined
- with a Robo test above them, which is the expected result.
- """
- abs_class_dir = '/%s' % CLASS_DIR
- mock_module_info = mock.Mock(spec=module_info.ModuleInfo)
- mock_module_info.path_to_module_info = (
- PATH_TO_MODULE_INFO_WITH_MULTI_AUTOGEN_AND_ROBO)
- unittest_utils.assert_strict_equal(
- self,
- test_finder_utils.find_parent_module_dir(uc.ROOT,
- abs_class_dir,
- mock_module_info),
- uc.MODULE_DIR)
-
-
- @mock.patch('os.path.isdir', return_value=True)
- @mock.patch('os.path.isfile', return_value=False)
- def test_find_parent_module_dir_robo(self, _isfile, _isdir):
- """Test _find_parent_module_dir method.
-
- Make sure we behave as expected when we encounter a robo module path.
- """
- abs_class_dir = '/%s' % CLASS_DIR
- mock_module_info = mock.Mock(spec=module_info.ModuleInfo)
- mock_module_info.is_robolectric_module.return_value = True
- rel_class_dir_path = os.path.relpath(abs_class_dir, uc.ROOT)
- mock_module_info.path_to_module_info = {rel_class_dir_path: [{}]}
- unittest_utils.assert_strict_equal(
- self,
- test_finder_utils.find_parent_module_dir(uc.ROOT,
- abs_class_dir,
- mock_module_info),
- rel_class_dir_path)
-
- def test_get_targets_from_xml(self):
- """Test get_targets_from_xml method."""
- # Mocking Etree is near impossible, so use a real file, but mocking
- # ModuleInfo is still fine. Just have it return False when it finds a
- # module that states it's not a module.
- mock_module_info = mock.Mock(spec=module_info.ModuleInfo)
- mock_module_info.is_module.side_effect = lambda module: (
- not module == 'is_not_module')
- xml_file = os.path.join(uc.TEST_DATA_DIR, constants.MODULE_CONFIG)
- unittest_utils.assert_strict_equal(
- self,
- test_finder_utils.get_targets_from_xml(xml_file, mock_module_info),
- XML_TARGETS)
-
- @mock.patch.object(test_finder_utils, '_VTS_PUSH_DIR',
- os.path.join(uc.TEST_DATA_DIR, VTS_PUSH_DIR))
- def test_get_targets_from_vts_xml(self):
- """Test get_targets_from_xml method."""
- # Mocking Etree is near impossible, so use a real file, but mock out
- # ModuleInfo,
- mock_module_info = mock.Mock(spec=module_info.ModuleInfo)
- mock_module_info.is_module.return_value = True
- xml_file = os.path.join(uc.TEST_DATA_DIR, VTS_XML)
- unittest_utils.assert_strict_equal(
- self,
- test_finder_utils.get_targets_from_vts_xml(xml_file, '',
- mock_module_info),
- VTS_XML_TARGETS)
-
- @mock.patch('subprocess.check_output')
- def test_get_ignored_dirs(self, _mock_check_output):
- """Test _get_ignored_dirs method."""
-
- # Clean cached value for test.
- test_finder_utils._get_ignored_dirs.cached_ignore_dirs = []
-
- build_top = '/a/b'
- _mock_check_output.return_value = ('/a/b/c/.find-ignore\n'
- '/a/b/out/.out-dir\n'
- '/a/b/d/.out-dir\n\n')
- # Case 1: $OUT_DIR = ''. No customized out dir.
- os_environ_mock = {constants.ANDROID_BUILD_TOP: build_top,
- constants.ANDROID_OUT_DIR: ''}
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- correct_ignore_dirs = ['/a/b/c', '/a/b/out', '/a/b/d']
- ignore_dirs = test_finder_utils._get_ignored_dirs()
- self.assertEqual(ignore_dirs, correct_ignore_dirs)
- # Case 2: $OUT_DIR = 'out2'
- test_finder_utils._get_ignored_dirs.cached_ignore_dirs = []
- os_environ_mock = {constants.ANDROID_BUILD_TOP: build_top,
- constants.ANDROID_OUT_DIR: 'out2'}
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- correct_ignore_dirs = ['/a/b/c', '/a/b/out', '/a/b/d', '/a/b/out2']
- ignore_dirs = test_finder_utils._get_ignored_dirs()
- self.assertEqual(ignore_dirs, correct_ignore_dirs)
- # Case 3: The $OUT_DIR is abs dir but not under $ANDROID_BUILD_TOP
- test_finder_utils._get_ignored_dirs.cached_ignore_dirs = []
- os_environ_mock = {constants.ANDROID_BUILD_TOP: build_top,
- constants.ANDROID_OUT_DIR: '/x/y/e/g'}
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- correct_ignore_dirs = ['/a/b/c', '/a/b/out', '/a/b/d']
- ignore_dirs = test_finder_utils._get_ignored_dirs()
- self.assertEqual(ignore_dirs, correct_ignore_dirs)
- # Case 4: The $OUT_DIR is abs dir and under $ANDROID_BUILD_TOP
- test_finder_utils._get_ignored_dirs.cached_ignore_dirs = []
- os_environ_mock = {constants.ANDROID_BUILD_TOP: build_top,
- constants.ANDROID_OUT_DIR: '/a/b/e/g'}
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- correct_ignore_dirs = ['/a/b/c', '/a/b/out', '/a/b/d', '/a/b/e/g']
- ignore_dirs = test_finder_utils._get_ignored_dirs()
- self.assertEqual(ignore_dirs, correct_ignore_dirs)
- # Case 5: There is a file of '.out-dir' under $OUT_DIR.
- test_finder_utils._get_ignored_dirs.cached_ignore_dirs = []
- os_environ_mock = {constants.ANDROID_BUILD_TOP: build_top,
- constants.ANDROID_OUT_DIR: 'out'}
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- correct_ignore_dirs = ['/a/b/c', '/a/b/out', '/a/b/d']
- ignore_dirs = test_finder_utils._get_ignored_dirs()
- self.assertEqual(ignore_dirs, correct_ignore_dirs)
- # Case 6: Testing cache. All of the changes are useless.
- _mock_check_output.return_value = ('/a/b/X/.find-ignore\n'
- '/a/b/YY/.out-dir\n'
- '/a/b/d/.out-dir\n\n')
- os_environ_mock = {constants.ANDROID_BUILD_TOP: build_top,
- constants.ANDROID_OUT_DIR: 'new'}
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- cached_answer = ['/a/b/c', '/a/b/out', '/a/b/d']
- none_cached_answer = ['/a/b/X', '/a/b/YY', '/a/b/d', 'a/b/new']
- ignore_dirs = test_finder_utils._get_ignored_dirs()
- self.assertEqual(ignore_dirs, cached_answer)
- self.assertNotEqual(ignore_dirs, none_cached_answer)
-
- @mock.patch('__builtin__.raw_input', return_value='0')
- def test_search_integration_dirs(self, mock_input):
- """Test search_integration_dirs."""
- mock_input.return_value = '0'
- paths = [os.path.join(uc.ROOT, INT_DIR1, INT_FILE_NAME+'.xml')]
- int_dirs = [INT_DIR1]
- test_result = test_finder_utils.search_integration_dirs(INT_FILE_NAME, int_dirs)
- unittest_utils.assert_strict_equal(self, test_result, paths)
- int_dirs = [INT_DIR1, INT_DIR2]
- test_result = test_finder_utils.search_integration_dirs(INT_FILE_NAME, int_dirs)
- unittest_utils.assert_strict_equal(self, test_result, paths)
-
- @mock.patch('os.path.isfile', return_value=False)
- @mock.patch('os.environ.get', return_value=uc.TEST_CONFIG_DATA_DIR)
- @mock.patch('__builtin__.raw_input', return_value='0')
- # pylint: disable=too-many-statements
- def test_find_class_file(self, mock_input, _mock_env, _mock_isfile):
- """Test find_class_file."""
- # 1. Java class(find).
- java_tmp_test_result = []
- mock_input.return_value = '0'
- java_class = os.path.join(uc.FIND_PATH, uc.FIND_PATH_TESTCASE_JAVA + '.java')
- java_tmp_test_result.extend(test_finder_utils.find_class_file(uc.FIND_PATH,
- uc.FIND_PATH_TESTCASE_JAVA))
- mock_input.return_value = '1'
- kt_class = os.path.join(uc.FIND_PATH, uc.FIND_PATH_TESTCASE_JAVA + '.kt')
- java_tmp_test_result.extend(test_finder_utils.find_class_file(uc.FIND_PATH,
- uc.FIND_PATH_TESTCASE_JAVA))
- self.assertTrue(java_class in java_tmp_test_result)
- self.assertTrue(kt_class in java_tmp_test_result)
-
- # 2. Java class(read index).
- del java_tmp_test_result[:]
- mock_input.return_value = '0'
- _mock_isfile = True
- test_finder_utils.FIND_INDEXES['CLASS'] = uc.CLASS_INDEX
- java_class = os.path.join(uc.FIND_PATH, uc.FIND_PATH_TESTCASE_JAVA + '.java')
- java_tmp_test_result.extend(test_finder_utils.find_class_file(uc.FIND_PATH,
- uc.FIND_PATH_TESTCASE_JAVA))
- mock_input.return_value = '1'
- kt_class = os.path.join(uc.FIND_PATH, uc.FIND_PATH_TESTCASE_JAVA + '.kt')
- java_tmp_test_result.extend(test_finder_utils.find_class_file(uc.FIND_PATH,
- uc.FIND_PATH_TESTCASE_JAVA))
- self.assertTrue(java_class in java_tmp_test_result)
- self.assertTrue(kt_class in java_tmp_test_result)
-
- # 3. Qualified Java class(find).
- del java_tmp_test_result[:]
- mock_input.return_value = '0'
- _mock_isfile = False
- java_qualified_class = '{0}.{1}'.format(uc.FIND_PATH_FOLDER, uc.FIND_PATH_TESTCASE_JAVA)
- java_tmp_test_result.extend(test_finder_utils.find_class_file(uc.FIND_PATH,
- java_qualified_class))
- mock_input.return_value = '1'
- java_tmp_test_result.extend(test_finder_utils.find_class_file(uc.FIND_PATH,
- java_qualified_class))
- self.assertTrue(java_class in java_tmp_test_result)
- self.assertTrue(kt_class in java_tmp_test_result)
-
- # 4. Qualified Java class(read index).
- del java_tmp_test_result[:]
- mock_input.return_value = '0'
- _mock_isfile = True
- test_finder_utils.FIND_INDEXES['QUALIFIED_CLASS'] = uc.QCLASS_INDEX
- java_qualified_class = '{0}.{1}'.format(uc.FIND_PATH_FOLDER, uc.FIND_PATH_TESTCASE_JAVA)
- java_tmp_test_result.extend(test_finder_utils.find_class_file(uc.FIND_PATH,
- java_qualified_class))
- mock_input.return_value = '1'
- java_tmp_test_result.extend(test_finder_utils.find_class_file(uc.FIND_PATH,
- java_qualified_class))
- self.assertTrue(java_class in java_tmp_test_result)
- self.assertTrue(kt_class in java_tmp_test_result)
-
- # 5. CC class(find).
- cc_tmp_test_result = []
- _mock_isfile = False
- mock_input.return_value = '0'
- cpp_class = os.path.join(uc.FIND_PATH, uc.FIND_PATH_FILENAME_CC + '.cpp')
- cc_tmp_test_result.extend(test_finder_utils.find_class_file(uc.FIND_PATH,
- uc.FIND_PATH_TESTCASE_CC,
- True))
- mock_input.return_value = '1'
- cc_class = os.path.join(uc.FIND_PATH, uc.FIND_PATH_FILENAME_CC + '.cc')
- cc_tmp_test_result.extend(test_finder_utils.find_class_file(uc.FIND_PATH,
- uc.FIND_PATH_TESTCASE_CC,
- True))
- self.assertTrue(cpp_class in cc_tmp_test_result)
- self.assertTrue(cc_class in cc_tmp_test_result)
-
- # 6. CC class(read index).
- del cc_tmp_test_result[:]
- mock_input.return_value = '0'
- _mock_isfile = True
- test_finder_utils.FIND_INDEXES['CC_CLASS'] = uc.CC_CLASS_INDEX
- cpp_class = os.path.join(uc.FIND_PATH, uc.FIND_PATH_FILENAME_CC + '.cpp')
- cc_tmp_test_result.extend(test_finder_utils.find_class_file(uc.FIND_PATH,
- uc.FIND_PATH_TESTCASE_CC,
- True))
- mock_input.return_value = '1'
- cc_class = os.path.join(uc.FIND_PATH, uc.FIND_PATH_FILENAME_CC + '.cc')
- cc_tmp_test_result.extend(test_finder_utils.find_class_file(uc.FIND_PATH,
- uc.FIND_PATH_TESTCASE_CC,
- True))
- self.assertTrue(cpp_class in cc_tmp_test_result)
- self.assertTrue(cc_class in cc_tmp_test_result)
-
- @mock.patch('__builtin__.raw_input', return_value='0')
- @mock.patch.object(test_finder_utils, 'get_dir_path_and_filename')
- @mock.patch('os.path.exists', return_value=True)
- def test_get_int_dir_from_path(self, _exists, _find, mock_input):
- """Test get_int_dir_from_path."""
- mock_input.return_value = '0'
- int_dirs = [INT_DIR1]
- path = os.path.join(uc.ROOT, INT_DIR1, INT_FILE_NAME+'.xml')
- _find.return_value = (INT_DIR1, INT_FILE_NAME+'.xml')
- test_result = test_finder_utils.get_int_dir_from_path(path, int_dirs)
- unittest_utils.assert_strict_equal(self, test_result, INT_DIR1)
- _find.return_value = (INT_DIR1, None)
- test_result = test_finder_utils.get_int_dir_from_path(path, int_dirs)
- unittest_utils.assert_strict_equal(self, test_result, None)
- int_dirs = [INT_DIR1, INT_DIR2]
- _find.return_value = (INT_DIR1, INT_FILE_NAME+'.xml')
- test_result = test_finder_utils.get_int_dir_from_path(path, int_dirs)
- unittest_utils.assert_strict_equal(self, test_result, INT_DIR1)
-
- def test_get_install_locations(self):
- """Test get_install_locations."""
- host_installed_paths = ["out/host/a/b"]
- host_expect = set(['host'])
- self.assertEqual(test_finder_utils.get_install_locations(host_installed_paths),
- host_expect)
- device_installed_paths = ["out/target/c/d"]
- device_expect = set(['device'])
- self.assertEqual(test_finder_utils.get_install_locations(device_installed_paths),
- device_expect)
- both_installed_paths = ["out/host/e", "out/target/f"]
- both_expect = set(['host', 'device'])
- self.assertEqual(test_finder_utils.get_install_locations(both_installed_paths),
- both_expect)
- no_installed_paths = []
- no_expect = set()
- self.assertEqual(test_finder_utils.get_install_locations(no_installed_paths),
- no_expect)
-
- def test_get_plans_from_vts_xml(self):
- """Test get_plans_from_vts_xml method."""
- xml_path = os.path.join(uc.TEST_DATA_DIR, VTS_PLAN_DIR, 'vts-staging-default.xml')
- self.assertEqual(
- test_finder_utils.get_plans_from_vts_xml(xml_path),
- VTS_PLAN_TARGETS)
- xml_path = os.path.join(uc.TEST_DATA_DIR, VTS_PLAN_DIR, 'NotExist.xml')
- self.assertRaises(atest_error.XmlNotExistError,
- test_finder_utils.get_plans_from_vts_xml, xml_path)
-
- def test_get_levenshtein_distance(self):
- """Test get_levenshetine distance module correctly returns distance."""
- self.assertEqual(test_finder_utils.get_levenshtein_distance(uc.MOD1, uc.FUZZY_MOD1), 1)
- self.assertEqual(test_finder_utils.get_levenshtein_distance(uc.MOD2, uc.FUZZY_MOD2,
- dir_costs=(1, 2, 3)), 3)
- self.assertEqual(test_finder_utils.get_levenshtein_distance(uc.MOD3, uc.FUZZY_MOD3,
- dir_costs=(1, 2, 1)), 8)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest-py2/test_finders/test_info.py b/atest-py2/test_finders/test_info.py
deleted file mode 100644
index 707f49a..0000000
--- a/atest-py2/test_finders/test_info.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-TestInfo class.
-"""
-
-from collections import namedtuple
-
-# pylint: disable=import-error
-import constants
-
-
-TestFilterBase = namedtuple('TestFilter', ['class_name', 'methods'])
-
-
-class TestInfo(object):
- """Information needed to identify and run a test."""
-
- # pylint: disable=too-many-arguments
- def __init__(self, test_name, test_runner, build_targets, data=None,
- suite=None, module_class=None, install_locations=None,
- test_finder='', compatibility_suites=None):
- """Init for TestInfo.
-
- Args:
- test_name: String of test name.
- test_runner: String of test runner.
- build_targets: Set of build targets.
- data: Dict of data for test runners to use.
- suite: Suite for test runners to use.
- module_class: A list of test classes. It's a snippet of class
- in module_info. e.g. ["EXECUTABLES", "NATIVE_TESTS"]
- install_locations: Set of install locations.
- e.g. set(['host', 'device'])
- test_finder: String of test finder.
- compatibility_suites: A list of compatibility_suites. It's a
- snippet of compatibility_suites in module_info. e.g.
- ["device-tests", "vts10"]
- """
- self.test_name = test_name
- self.test_runner = test_runner
- self.build_targets = build_targets
- self.data = data if data else {}
- self.suite = suite
- self.module_class = module_class if module_class else []
- self.install_locations = (install_locations if install_locations
- else set())
- # True if the TestInfo is built from a test configured in TEST_MAPPING.
- self.from_test_mapping = False
- # True if the test should run on host and require no device. The
- # attribute is only set through TEST_MAPPING file.
- self.host = False
- self.test_finder = test_finder
- self.compatibility_suites = (map(str, compatibility_suites)
- if compatibility_suites else [])
-
- def __str__(self):
- host_info = (' - runs on host without device required.' if self.host
- else '')
- return ('test_name: %s - test_runner:%s - build_targets:%s - data:%s - '
- 'suite:%s - module_class: %s - install_locations:%s%s - '
- 'test_finder: %s - compatibility_suites:%s' % (
- self.test_name, self.test_runner, self.build_targets,
- self.data, self.suite, self.module_class,
- self.install_locations, host_info, self.test_finder,
- self.compatibility_suites))
-
- def get_supported_exec_mode(self):
- """Get the supported execution mode of the test.
-
- Determine the test supports which execution mode by strategy:
- Robolectric/JAVA_LIBRARIES --> 'both'
- Not native tests or installed only in out/target --> 'device'
- Installed only in out/host --> 'both'
- Installed under host and target --> 'both'
-
- Return:
- String of execution mode.
- """
- install_path = self.install_locations
- if not self.module_class:
- return constants.DEVICE_TEST
- # Let Robolectric test support both.
- if constants.MODULE_CLASS_ROBOLECTRIC in self.module_class:
- return constants.BOTH_TEST
- # Let JAVA_LIBRARIES support both.
- if constants.MODULE_CLASS_JAVA_LIBRARIES in self.module_class:
- return constants.BOTH_TEST
- if not install_path:
- return constants.DEVICE_TEST
- # Non-Native test runs on device-only.
- if constants.MODULE_CLASS_NATIVE_TESTS not in self.module_class:
- return constants.DEVICE_TEST
- # Native test with install path as host should be treated as both.
- # Otherwise, return device test.
- if len(install_path) == 1 and constants.DEVICE_TEST in install_path:
- return constants.DEVICE_TEST
- return constants.BOTH_TEST
-
-
-class TestFilter(TestFilterBase):
- """Information needed to filter a test in Tradefed"""
-
- def to_set_of_tf_strings(self):
- """Return TestFilter as set of strings in TradeFed filter format."""
- if self.methods:
- return {'%s#%s' % (self.class_name, m) for m in self.methods}
- return {self.class_name}
diff --git a/atest-py2/test_finders/tf_integration_finder.py b/atest-py2/test_finders/tf_integration_finder.py
deleted file mode 100644
index ed0a539..0000000
--- a/atest-py2/test_finders/tf_integration_finder.py
+++ /dev/null
@@ -1,269 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Integration Finder class.
-"""
-
-import copy
-import logging
-import os
-import re
-import xml.etree.ElementTree as ElementTree
-
-# pylint: disable=import-error
-import atest_error
-import constants
-from test_finders import test_info
-from test_finders import test_finder_base
-from test_finders import test_finder_utils
-from test_runners import atest_tf_test_runner
-
-# Find integration name based on file path of integration config xml file.
-# Group matches "foo/bar" given "blah/res/config/blah/res/config/foo/bar.xml
-_INT_NAME_RE = re.compile(r'^.*\/res\/config\/(?P<int_name>.*).xml$')
-_TF_TARGETS = frozenset(['tradefed', 'tradefed-contrib'])
-_GTF_TARGETS = frozenset(['google-tradefed', 'google-tradefed-contrib'])
-_CONTRIB_TARGETS = frozenset(['google-tradefed-contrib'])
-_TF_RES_DIR = '../res/config'
-
-
-class TFIntegrationFinder(test_finder_base.TestFinderBase):
- """Integration Finder class."""
- NAME = 'INTEGRATION'
- _TEST_RUNNER = atest_tf_test_runner.AtestTradefedTestRunner.NAME
-
-
- def __init__(self, module_info=None):
- super(TFIntegrationFinder, self).__init__()
- self.root_dir = os.environ.get(constants.ANDROID_BUILD_TOP)
- self.module_info = module_info
- # TODO: Break this up into AOSP/google_tf integration finders.
- self.tf_dirs, self.gtf_dirs = self._get_integration_dirs()
- self.integration_dirs = self.tf_dirs + self.gtf_dirs
-
- def _get_mod_paths(self, module_name):
- """Return the paths of the given module name."""
- if self.module_info:
- # Since aosp/801774 merged, the path of test configs have been
- # changed to ../res/config.
- if module_name in _CONTRIB_TARGETS:
- mod_paths = self.module_info.get_paths(module_name)
- return [os.path.join(path, _TF_RES_DIR) for path in mod_paths]
- return self.module_info.get_paths(module_name)
- return []
-
- def _get_integration_dirs(self):
- """Get integration dirs from MODULE_INFO based on targets.
-
- Returns:
- A tuple of lists of strings of integration dir rel to repo root.
- """
- tf_dirs = filter(None, [d for x in _TF_TARGETS for d in self._get_mod_paths(x)])
- gtf_dirs = filter(None, [d for x in _GTF_TARGETS for d in self._get_mod_paths(x)])
- return tf_dirs, gtf_dirs
-
- def _get_build_targets(self, rel_config):
- config_file = os.path.join(self.root_dir, rel_config)
- xml_root = self._load_xml_file(config_file)
- targets = test_finder_utils.get_targets_from_xml_root(xml_root,
- self.module_info)
- if self.gtf_dirs:
- targets.add(constants.GTF_TARGET)
- return frozenset(targets)
-
- def _load_xml_file(self, path):
- """Load an xml file with option to expand <include> tags
-
- Args:
- path: A string of path to xml file.
-
- Returns:
- An xml.etree.ElementTree.Element instance of the root of the tree.
- """
- tree = ElementTree.parse(path)
- root = tree.getroot()
- self._load_include_tags(root)
- return root
-
- #pylint: disable=invalid-name
- def _load_include_tags(self, root):
- """Recursively expand in-place the <include> tags in a given xml tree.
-
- Python xml libraries don't support our type of <include> tags. Logic used
- below is modified version of the built-in ElementInclude logic found here:
- https://github.com/python/cpython/blob/2.7/Lib/xml/etree/ElementInclude.py
-
- Args:
- root: The root xml.etree.ElementTree.Element.
-
- Returns:
- An xml.etree.ElementTree.Element instance with include tags expanded
- """
- i = 0
- while i < len(root):
- elem = root[i]
- if elem.tag == 'include':
- # expand included xml file
- integration_name = elem.get('name')
- if not integration_name:
- logging.warn('skipping <include> tag with no "name" value')
- continue
- full_paths = self._search_integration_dirs(integration_name)
- node = None
- if full_paths:
- node = self._load_xml_file(full_paths[0])
- if node is None:
- raise atest_error.FatalIncludeError("can't load %r" %
- integration_name)
- node = copy.copy(node)
- if elem.tail:
- node.tail = (node.tail or "") + elem.tail
- root[i] = node
- i = i + 1
-
- def _search_integration_dirs(self, name):
- """Search integration dirs for name and return full path.
- Args:
- name: A string of integration name as seen in tf's list configs.
-
- Returns:
- A list of test path.
- """
- test_files = []
- for integration_dir in self.integration_dirs:
- abs_path = os.path.join(self.root_dir, integration_dir)
- found_test_files = test_finder_utils.run_find_cmd(
- test_finder_utils.FIND_REFERENCE_TYPE.INTEGRATION,
- abs_path, name)
- if found_test_files:
- test_files.extend(found_test_files)
- return test_files
-
- def find_test_by_integration_name(self, name):
- """Find the test info matching the given integration name.
-
- Args:
- name: A string of integration name as seen in tf's list configs.
-
- Returns:
- A populated TestInfo namedtuple if test found, else None
- """
- class_name = None
- if ':' in name:
- name, class_name = name.split(':')
- test_files = self._search_integration_dirs(name)
- if test_files is None:
- return None
- # Don't use names that simply match the path,
- # must be the actual name used by TF to run the test.
- t_infos = []
- for test_file in test_files:
- t_info = self._get_test_info(name, test_file, class_name)
- if t_info:
- t_infos.append(t_info)
- return t_infos
-
- def _get_test_info(self, name, test_file, class_name):
- """Find the test info matching the given test_file and class_name.
-
- Args:
- name: A string of integration name as seen in tf's list configs.
- test_file: A string of test_file full path.
- class_name: A string of user's input.
-
- Returns:
- A populated TestInfo namedtuple if test found, else None.
- """
- match = _INT_NAME_RE.match(test_file)
- if not match:
- logging.error('Integration test outside config dir: %s',
- test_file)
- return None
- int_name = match.group('int_name')
- if int_name != name:
- logging.warn('Input (%s) not valid integration name, '
- 'did you mean: %s?', name, int_name)
- return None
- rel_config = os.path.relpath(test_file, self.root_dir)
- filters = frozenset()
- if class_name:
- class_name, methods = test_finder_utils.split_methods(class_name)
- test_filters = []
- if '.' in class_name:
- test_filters.append(test_info.TestFilter(class_name, methods))
- else:
- logging.warn('Looking up fully qualified class name for: %s.'
- 'Improve speed by using fully qualified names.',
- class_name)
- paths = test_finder_utils.find_class_file(self.root_dir,
- class_name)
- if not paths:
- return None
- for path in paths:
- class_name = (
- test_finder_utils.get_fully_qualified_class_name(
- path))
- test_filters.append(test_info.TestFilter(
- class_name, methods))
- filters = frozenset(test_filters)
- return test_info.TestInfo(
- test_name=name,
- test_runner=self._TEST_RUNNER,
- build_targets=self._get_build_targets(rel_config),
- data={constants.TI_REL_CONFIG: rel_config,
- constants.TI_FILTER: filters})
-
- def find_int_test_by_path(self, path):
- """Find the first test info matching the given path.
-
- Strategy:
- path_to_integration_file --> Resolve to INTEGRATION
- # If the path is a dir, we return nothing.
- path_to_dir_with_integration_files --> Return None
-
- Args:
- path: A string of the test's path.
-
- Returns:
- A list of populated TestInfo namedtuple if test found, else None
- """
- path, _ = test_finder_utils.split_methods(path)
-
- # Make sure we're looking for a config.
- if not path.endswith('.xml'):
- return None
-
- # TODO: See if this can be generalized and shared with methods above
- # create absolute path from cwd and remove symbolic links
- path = os.path.realpath(path)
- if not os.path.exists(path):
- return None
- int_dir = test_finder_utils.get_int_dir_from_path(path,
- self.integration_dirs)
- if int_dir:
- rel_config = os.path.relpath(path, self.root_dir)
- match = _INT_NAME_RE.match(rel_config)
- if not match:
- logging.error('Integration test outside config dir: %s',
- rel_config)
- return None
- int_name = match.group('int_name')
- return [test_info.TestInfo(
- test_name=int_name,
- test_runner=self._TEST_RUNNER,
- build_targets=self._get_build_targets(rel_config),
- data={constants.TI_REL_CONFIG: rel_config,
- constants.TI_FILTER: frozenset()})]
- return None
diff --git a/atest-py2/test_finders/tf_integration_finder_unittest.py b/atest-py2/test_finders/tf_integration_finder_unittest.py
deleted file mode 100755
index 170da0c..0000000
--- a/atest-py2/test_finders/tf_integration_finder_unittest.py
+++ /dev/null
@@ -1,139 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittests for tf_integration_finder."""
-
-import os
-import unittest
-import mock
-
-# pylint: disable=import-error
-import constants
-import unittest_constants as uc
-import unittest_utils
-from test_finders import test_finder_utils
-from test_finders import test_info
-from test_finders import tf_integration_finder
-from test_runners import atest_tf_test_runner as atf_tr
-
-
-INT_NAME_CLASS = uc.INT_NAME + ':' + uc.FULL_CLASS_NAME
-INT_NAME_METHOD = INT_NAME_CLASS + '#' + uc.METHOD_NAME
-GTF_INT_CONFIG = os.path.join(uc.GTF_INT_DIR, uc.GTF_INT_NAME + '.xml')
-INT_CLASS_INFO = test_info.TestInfo(
- uc.INT_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- set(),
- data={constants.TI_FILTER: frozenset([uc.CLASS_FILTER]),
- constants.TI_REL_CONFIG: uc.INT_CONFIG})
-INT_METHOD_INFO = test_info.TestInfo(
- uc.INT_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- set(),
- data={constants.TI_FILTER: frozenset([uc.METHOD_FILTER]),
- constants.TI_REL_CONFIG: uc.INT_CONFIG})
-
-
-class TFIntegrationFinderUnittests(unittest.TestCase):
- """Unit tests for tf_integration_finder.py"""
-
- def setUp(self):
- """Set up for testing."""
- self.tf_finder = tf_integration_finder.TFIntegrationFinder()
- self.tf_finder.integration_dirs = [os.path.join(uc.ROOT, uc.INT_DIR),
- os.path.join(uc.ROOT, uc.GTF_INT_DIR)]
- self.tf_finder.root_dir = uc.ROOT
-
- @mock.patch.object(tf_integration_finder.TFIntegrationFinder,
- '_get_build_targets', return_value=set())
- @mock.patch.object(test_finder_utils, 'get_fully_qualified_class_name',
- return_value=uc.FULL_CLASS_NAME)
- @mock.patch('subprocess.check_output')
- @mock.patch('os.path.exists', return_value=True)
- @mock.patch('os.path.isfile', return_value=False)
- @mock.patch('os.path.isdir', return_value=False)
- #pylint: disable=unused-argument
- def test_find_test_by_integration_name(self, _isdir, _isfile, _path, mock_find,
- _fcqn, _build):
- """Test find_test_by_integration_name.
-
- Note that _isfile is always False since we don't index integration tests.
- """
- mock_find.return_value = os.path.join(uc.ROOT, uc.INT_DIR, uc.INT_NAME + '.xml')
- t_infos = self.tf_finder.find_test_by_integration_name(uc.INT_NAME)
- self.assertEqual(len(t_infos), 0)
- _isdir.return_value = True
- t_infos = self.tf_finder.find_test_by_integration_name(uc.INT_NAME)
- unittest_utils.assert_equal_testinfos(self, t_infos[0], uc.INT_INFO)
- t_infos = self.tf_finder.find_test_by_integration_name(INT_NAME_CLASS)
- unittest_utils.assert_equal_testinfos(self, t_infos[0], INT_CLASS_INFO)
- t_infos = self.tf_finder.find_test_by_integration_name(INT_NAME_METHOD)
- unittest_utils.assert_equal_testinfos(self, t_infos[0], INT_METHOD_INFO)
- not_fully_qual = uc.INT_NAME + ':' + 'someClass'
- t_infos = self.tf_finder.find_test_by_integration_name(not_fully_qual)
- unittest_utils.assert_equal_testinfos(self, t_infos[0], INT_CLASS_INFO)
- mock_find.return_value = os.path.join(uc.ROOT, uc.GTF_INT_DIR,
- uc.GTF_INT_NAME + '.xml')
- t_infos = self.tf_finder.find_test_by_integration_name(uc.GTF_INT_NAME)
- unittest_utils.assert_equal_testinfos(
- self,
- t_infos[0],
- uc.GTF_INT_INFO)
- mock_find.return_value = ''
- self.assertEqual(
- self.tf_finder.find_test_by_integration_name('NotIntName'), [])
-
- @mock.patch.object(tf_integration_finder.TFIntegrationFinder,
- '_get_build_targets', return_value=set())
- @mock.patch('os.path.realpath',
- side_effect=unittest_utils.realpath_side_effect)
- @mock.patch('os.path.isdir', return_value=True)
- @mock.patch('os.path.isfile', return_value=True)
- @mock.patch.object(test_finder_utils, 'find_parent_module_dir')
- @mock.patch('os.path.exists', return_value=True)
- def test_find_int_test_by_path(self, _exists, _find, _isfile, _isdir, _real,
- _build):
- """Test find_int_test_by_path."""
- path = os.path.join(uc.INT_DIR, uc.INT_NAME + '.xml')
- t_infos = self.tf_finder.find_int_test_by_path(path)
- unittest_utils.assert_equal_testinfos(
- self, uc.INT_INFO, t_infos[0])
- path = os.path.join(uc.GTF_INT_DIR, uc.GTF_INT_NAME + '.xml')
- t_infos = self.tf_finder.find_int_test_by_path(path)
- unittest_utils.assert_equal_testinfos(
- self, uc.GTF_INT_INFO, t_infos[0])
-
- #pylint: disable=protected-access
- @mock.patch.object(tf_integration_finder.TFIntegrationFinder,
- '_search_integration_dirs')
- def test_load_xml_file(self, search):
- """Test _load_xml_file and _load_include_tags methods."""
- search.return_value = [os.path.join(uc.TEST_DATA_DIR,
- 'CtsUiDeviceTestCases.xml')]
- xml_file = os.path.join(uc.TEST_DATA_DIR, constants.MODULE_CONFIG)
- xml_root = self.tf_finder._load_xml_file(xml_file)
- include_tags = xml_root.findall('.//include')
- self.assertEqual(0, len(include_tags))
- option_tags = xml_root.findall('.//option')
- included = False
- for tag in option_tags:
- if tag.attrib['value'].strip() == 'CtsUiDeviceTestCases.apk':
- included = True
- self.assertTrue(included)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest-py2/test_mapping.py b/atest-py2/test_mapping.py
deleted file mode 100644
index 02f8f31..0000000
--- a/atest-py2/test_mapping.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Classes for test mapping related objects
-"""
-
-
-import copy
-import fnmatch
-import os
-import re
-
-import atest_utils
-import constants
-
-TEST_MAPPING = 'TEST_MAPPING'
-
-
-class TestDetail(object):
- """Stores the test details set in a TEST_MAPPING file."""
-
- def __init__(self, details):
- """TestDetail constructor
-
- Parse test detail from a dictionary, e.g.,
- {
- "name": "SettingsUnitTests",
- "host": true,
- "options": [
- {
- "instrumentation-arg":
- "annotation=android.platform.test.annotations.Presubmit"
- },
- "file_patterns": ["(/|^)Window[^/]*\\.java",
- "(/|^)Activity[^/]*\\.java"]
- }
-
- Args:
- details: A dictionary of test detail.
- """
- self.name = details['name']
- self.options = []
- # True if the test should run on host and require no device.
- self.host = details.get('host', False)
- assert isinstance(self.host, bool), 'host can only have boolean value.'
- options = details.get('options', [])
- for option in options:
- assert len(option) == 1, 'Each option can only have one key.'
- self.options.append(copy.deepcopy(option).popitem())
- self.options.sort(key=lambda o: o[0])
- self.file_patterns = details.get('file_patterns', [])
-
- def __str__(self):
- """String value of the TestDetail object."""
- host_info = (', runs on host without device required.' if self.host
- else '')
- if not self.options:
- return self.name + host_info
- options = ''
- for option in self.options:
- options += '%s: %s, ' % option
-
- return '%s (%s)%s' % (self.name, options.strip(', '), host_info)
-
- def __hash__(self):
- """Get the hash of TestDetail based on the details"""
- return hash(str(self))
-
- def __eq__(self, other):
- return str(self) == str(other)
-
-
-class Import(object):
- """Store test mapping import details."""
-
- def __init__(self, test_mapping_file, details):
- """Import constructor
-
- Parse import details from a dictionary, e.g.,
- {
- "path": "..\folder1"
- }
- in which, project is the name of the project, by default it's the
- current project of the containing TEST_MAPPING file.
-
- Args:
- test_mapping_file: Path to the TEST_MAPPING file that contains the
- import.
- details: A dictionary of details about importing another
- TEST_MAPPING file.
- """
- self.test_mapping_file = test_mapping_file
- self.path = details['path']
-
- def __str__(self):
- """String value of the Import object."""
- return 'Source: %s, path: %s' % (self.test_mapping_file, self.path)
-
- def get_path(self):
- """Get the path to TEST_MAPPING import directory."""
- path = os.path.realpath(os.path.join(
- os.path.dirname(self.test_mapping_file), self.path))
- if os.path.exists(path):
- return path
- root_dir = os.environ.get(constants.ANDROID_BUILD_TOP, os.sep)
- path = os.path.realpath(os.path.join(root_dir, self.path))
- if os.path.exists(path):
- return path
- # The import path can't be located.
- return None
-
-
-def is_match_file_patterns(test_mapping_file, test_detail):
- """Check if the changed file names match the regex pattern defined in
- file_patterns of TEST_MAPPING files.
-
- Args:
- test_mapping_file: Path to a TEST_MAPPING file.
- test_detail: A TestDetail object.
-
- Returns:
- True if the test's file_patterns setting is not set or contains a
- pattern matches any of the modified files.
- """
- # Only check if the altered files are located in the same or sub directory
- # of the TEST_MAPPING file. Extract the relative path of the modified files
- # which match file patterns.
- file_patterns = test_detail.get('file_patterns', [])
- if not file_patterns:
- return True
- test_mapping_dir = os.path.dirname(test_mapping_file)
- modified_files = atest_utils.get_modified_files(test_mapping_dir)
- if not modified_files:
- return False
- modified_files_in_source_dir = [
- os.path.relpath(filepath, test_mapping_dir)
- for filepath in fnmatch.filter(modified_files,
- os.path.join(test_mapping_dir, '*'))
- ]
- for modified_file in modified_files_in_source_dir:
- # Force to run the test if it's in a TEST_MAPPING file included in the
- # changesets.
- if modified_file == constants.TEST_MAPPING:
- return True
- for pattern in file_patterns:
- if re.search(pattern, modified_file):
- return True
- return False
diff --git a/atest-py2/test_mapping_unittest.py b/atest-py2/test_mapping_unittest.py
deleted file mode 100755
index 557e28d..0000000
--- a/atest-py2/test_mapping_unittest.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the 'License');
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an 'AS IS' BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittests for test_mapping"""
-
-import unittest
-import mock
-
-import test_mapping
-import unittest_constants as uc
-
-
-class TestMappingUnittests(unittest.TestCase):
- """Unit tests for test_mapping.py"""
-
- def test_parsing(self):
- """Test creating TestDetail object"""
- detail = test_mapping.TestDetail(uc.TEST_MAPPING_TEST)
- self.assertEqual(uc.TEST_MAPPING_TEST['name'], detail.name)
- self.assertTrue(detail.host)
- self.assertEqual([], detail.options)
-
- def test_parsing_with_option(self):
- """Test creating TestDetail object with option configured"""
- detail = test_mapping.TestDetail(uc.TEST_MAPPING_TEST_WITH_OPTION)
- self.assertEqual(uc.TEST_MAPPING_TEST_WITH_OPTION['name'], detail.name)
- self.assertEqual(uc.TEST_MAPPING_TEST_WITH_OPTION_STR, str(detail))
-
- def test_parsing_with_bad_option(self):
- """Test creating TestDetail object with bad option configured"""
- with self.assertRaises(Exception) as context:
- test_mapping.TestDetail(uc.TEST_MAPPING_TEST_WITH_BAD_OPTION)
- self.assertEqual(
- 'Each option can only have one key.', str(context.exception))
-
- def test_parsing_with_bad_host_value(self):
- """Test creating TestDetail object with bad host value configured"""
- with self.assertRaises(Exception) as context:
- test_mapping.TestDetail(uc.TEST_MAPPING_TEST_WITH_BAD_HOST_VALUE)
- self.assertEqual(
- 'host can only have boolean value.', str(context.exception))
-
- @mock.patch("atest_utils.get_modified_files")
- def test_is_match_file_patterns(self, mock_modified_files):
- """Test mathod is_match_file_patterns."""
- test_mapping_file = ''
- test_detail = {
- "name": "Test",
- "file_patterns": ["(/|^)test_fp1[^/]*\\.java",
- "(/|^)test_fp2[^/]*\\.java"]
- }
- mock_modified_files.return_value = {'/a/b/test_fp122.java',
- '/a/b/c/d/test_fp222.java'}
- self.assertTrue(test_mapping.is_match_file_patterns(test_mapping_file,
- test_detail))
- mock_modified_files.return_value = {}
- self.assertFalse(test_mapping.is_match_file_patterns(test_mapping_file,
- test_detail))
- mock_modified_files.return_value = {'/a/b/test_fp3.java'}
- self.assertFalse(test_mapping.is_match_file_patterns(test_mapping_file,
- test_detail))
- test_mapping_file = '/a/b/TEST_MAPPING'
- mock_modified_files.return_value = {'/a/b/test_fp3.java',
- '/a/b/TEST_MAPPING'}
- self.assertTrue(test_mapping.is_match_file_patterns(test_mapping_file,
- test_detail))
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest-py2/test_runner_handler.py b/atest-py2/test_runner_handler.py
deleted file mode 100644
index 3c18119..0000000
--- a/atest-py2/test_runner_handler.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# Copyright 2017, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Aggregates test runners, groups tests by test runners and kicks off tests.
-"""
-
-import itertools
-import time
-import traceback
-
-import atest_error
-import constants
-import result_reporter
-
-from metrics import metrics
-from metrics import metrics_utils
-from test_runners import atest_tf_test_runner
-from test_runners import robolectric_test_runner
-from test_runners import suite_plan_test_runner
-from test_runners import vts_tf_test_runner
-
-# pylint: disable=line-too-long
-_TEST_RUNNERS = {
- atest_tf_test_runner.AtestTradefedTestRunner.NAME: atest_tf_test_runner.AtestTradefedTestRunner,
- robolectric_test_runner.RobolectricTestRunner.NAME: robolectric_test_runner.RobolectricTestRunner,
- suite_plan_test_runner.SuitePlanTestRunner.NAME: suite_plan_test_runner.SuitePlanTestRunner,
- vts_tf_test_runner.VtsTradefedTestRunner.NAME: vts_tf_test_runner.VtsTradefedTestRunner,
-}
-
-
-def _get_test_runners():
- """Returns the test runners.
-
- If external test runners are defined outside atest, they can be try-except
- imported into here.
-
- Returns:
- Dict of test runner name to test runner class.
- """
- test_runners_dict = _TEST_RUNNERS
- # Example import of example test runner:
- try:
- # pylint: disable=line-too-long
- from test_runners import example_test_runner
- test_runners_dict[example_test_runner.ExampleTestRunner.NAME] = example_test_runner.ExampleTestRunner
- except ImportError:
- pass
- return test_runners_dict
-
-
-def group_tests_by_test_runners(test_infos):
- """Group the test_infos by test runners
-
- Args:
- test_infos: List of TestInfo.
-
- Returns:
- List of tuples (test runner, tests).
- """
- tests_by_test_runner = []
- test_runner_dict = _get_test_runners()
- key = lambda x: x.test_runner
- sorted_test_infos = sorted(list(test_infos), key=key)
- for test_runner, tests in itertools.groupby(sorted_test_infos, key):
- # groupby returns a grouper object, we want to operate on a list.
- tests = list(tests)
- test_runner_class = test_runner_dict.get(test_runner)
- if test_runner_class is None:
- raise atest_error.UnknownTestRunnerError('Unknown Test Runner %s' %
- test_runner)
- tests_by_test_runner.append((test_runner_class, tests))
- return tests_by_test_runner
-
-
-def get_test_runner_reqs(module_info, test_infos):
- """Returns the requirements for all test runners specified in the tests.
-
- Args:
- module_info: ModuleInfo object.
- test_infos: List of TestInfo.
-
- Returns:
- Set of build targets required by the test runners.
- """
- unused_result_dir = ''
- test_runner_build_req = set()
- for test_runner, _ in group_tests_by_test_runners(test_infos):
- test_runner_build_req |= test_runner(
- unused_result_dir,
- module_info=module_info).get_test_runner_build_reqs()
- return test_runner_build_req
-
-
-def run_all_tests(results_dir, test_infos, extra_args,
- delay_print_summary=False):
- """Run the given tests.
-
- Args:
- results_dir: String directory to store atest results.
- test_infos: List of TestInfo.
- extra_args: Dict of extra args for test runners to use.
-
- Returns:
- 0 if tests succeed, non-zero otherwise.
- """
- reporter = result_reporter.ResultReporter()
- reporter.print_starting_text()
- tests_ret_code = constants.EXIT_CODE_SUCCESS
- for test_runner, tests in group_tests_by_test_runners(test_infos):
- test_name = ' '.join([test.test_name for test in tests])
- test_start = time.time()
- is_success = True
- ret_code = constants.EXIT_CODE_TEST_FAILURE
- stacktrace = ''
- try:
- test_runner = test_runner(results_dir)
- ret_code = test_runner.run_tests(tests, extra_args, reporter)
- tests_ret_code |= ret_code
- # pylint: disable=broad-except
- except Exception:
- stacktrace = traceback.format_exc()
- reporter.runner_failure(test_runner.NAME, stacktrace)
- tests_ret_code = constants.EXIT_CODE_TEST_FAILURE
- is_success = False
- metrics.RunnerFinishEvent(
- duration=metrics_utils.convert_duration(time.time() - test_start),
- success=is_success,
- runner_name=test_runner.NAME,
- test=[{'name': test_name,
- 'result': ret_code,
- 'stacktrace': stacktrace}])
- if delay_print_summary:
- return tests_ret_code, reporter
- return reporter.print_summary() or tests_ret_code, reporter
diff --git a/atest-py2/test_runner_handler_unittest.py b/atest-py2/test_runner_handler_unittest.py
deleted file mode 100755
index b5a430e..0000000
--- a/atest-py2/test_runner_handler_unittest.py
+++ /dev/null
@@ -1,144 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2017, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittests for test_runner_handler."""
-
-# pylint: disable=protected-access
-
-import unittest
-import mock
-
-import atest_error
-import test_runner_handler
-from metrics import metrics
-from test_finders import test_info
-from test_runners import test_runner_base as tr_base
-
-FAKE_TR_NAME_A = 'FakeTestRunnerA'
-FAKE_TR_NAME_B = 'FakeTestRunnerB'
-MISSING_TR_NAME = 'MissingTestRunner'
-FAKE_TR_A_REQS = {'fake_tr_A_req1', 'fake_tr_A_req2'}
-FAKE_TR_B_REQS = {'fake_tr_B_req1', 'fake_tr_B_req2'}
-MODULE_NAME_A = 'ModuleNameA'
-MODULE_NAME_A_AGAIN = 'ModuleNameA_AGAIN'
-MODULE_NAME_B = 'ModuleNameB'
-MODULE_NAME_B_AGAIN = 'ModuleNameB_AGAIN'
-MODULE_INFO_A = test_info.TestInfo(MODULE_NAME_A, FAKE_TR_NAME_A, set())
-MODULE_INFO_A_AGAIN = test_info.TestInfo(MODULE_NAME_A_AGAIN, FAKE_TR_NAME_A,
- set())
-MODULE_INFO_B = test_info.TestInfo(MODULE_NAME_B, FAKE_TR_NAME_B, set())
-MODULE_INFO_B_AGAIN = test_info.TestInfo(MODULE_NAME_B_AGAIN, FAKE_TR_NAME_B,
- set())
-BAD_TESTINFO = test_info.TestInfo('bad_name', MISSING_TR_NAME, set())
-
-class FakeTestRunnerA(tr_base.TestRunnerBase):
- """Fake test runner A."""
-
- NAME = FAKE_TR_NAME_A
- EXECUTABLE = 'echo'
-
- def run_tests(self, test_infos, extra_args, reporter):
- return 0
-
- def host_env_check(self):
- pass
-
- def get_test_runner_build_reqs(self):
- return FAKE_TR_A_REQS
-
- def generate_run_commands(self, test_infos, extra_args, port=None):
- return ['fake command']
-
-
-class FakeTestRunnerB(FakeTestRunnerA):
- """Fake test runner B."""
-
- NAME = FAKE_TR_NAME_B
-
- def run_tests(self, test_infos, extra_args, reporter):
- return 1
-
- def get_test_runner_build_reqs(self):
- return FAKE_TR_B_REQS
-
-
-class TestRunnerHandlerUnittests(unittest.TestCase):
- """Unit tests for test_runner_handler.py"""
-
- _TEST_RUNNERS = {
- FakeTestRunnerA.NAME: FakeTestRunnerA,
- FakeTestRunnerB.NAME: FakeTestRunnerB,
- }
-
- def setUp(self):
- mock.patch('test_runner_handler._get_test_runners',
- return_value=self._TEST_RUNNERS).start()
-
- def tearDown(self):
- mock.patch.stopall()
-
- def test_group_tests_by_test_runners(self):
- """Test that we properly group tests by test runners."""
- # Happy path testing.
- test_infos = [MODULE_INFO_A, MODULE_INFO_A_AGAIN, MODULE_INFO_B,
- MODULE_INFO_B_AGAIN]
- want_list = [(FakeTestRunnerA, [MODULE_INFO_A, MODULE_INFO_A_AGAIN]),
- (FakeTestRunnerB, [MODULE_INFO_B, MODULE_INFO_B_AGAIN])]
- self.assertEqual(
- want_list,
- test_runner_handler.group_tests_by_test_runners(test_infos))
-
- # Let's make sure we fail as expected.
- self.assertRaises(
- atest_error.UnknownTestRunnerError,
- test_runner_handler.group_tests_by_test_runners, [BAD_TESTINFO])
-
- def test_get_test_runner_reqs(self):
- """Test that we get all the reqs from the test runners."""
- test_infos = [MODULE_INFO_A, MODULE_INFO_B]
- want_set = FAKE_TR_A_REQS | FAKE_TR_B_REQS
- empty_module_info = None
- self.assertEqual(
- want_set,
- test_runner_handler.get_test_runner_reqs(empty_module_info,
- test_infos))
-
- @mock.patch.object(metrics, 'RunnerFinishEvent')
- def test_run_all_tests(self, _mock_runner_finish):
- """Test that the return value as we expected."""
- results_dir = ""
- extra_args = []
- # Tests both run_tests return 0
- test_infos = [MODULE_INFO_A, MODULE_INFO_A_AGAIN]
- self.assertEqual(
- 0,
- test_runner_handler.run_all_tests(
- results_dir, test_infos, extra_args)[0])
- # Tests both run_tests return 1
- test_infos = [MODULE_INFO_B, MODULE_INFO_B_AGAIN]
- self.assertEqual(
- 1,
- test_runner_handler.run_all_tests(
- results_dir, test_infos, extra_args)[0])
- # Tests with on run_tests return 0, the other return 1
- test_infos = [MODULE_INFO_A, MODULE_INFO_B]
- self.assertEqual(
- 1,
- test_runner_handler.run_all_tests(
- results_dir, test_infos, extra_args)[0])
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest-py2/test_runners/__init__.py b/atest-py2/test_runners/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/atest-py2/test_runners/__init__.py
+++ /dev/null
diff --git a/atest-py2/test_runners/atest_tf_test_runner.py b/atest-py2/test_runners/atest_tf_test_runner.py
deleted file mode 100644
index a59707c..0000000
--- a/atest-py2/test_runners/atest_tf_test_runner.py
+++ /dev/null
@@ -1,663 +0,0 @@
-# Copyright 2017, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Atest Tradefed test runner class.
-"""
-
-from __future__ import print_function
-import json
-import logging
-import os
-import re
-import select
-import socket
-import subprocess
-
-from functools import partial
-
-# pylint: disable=import-error
-import atest_utils
-import constants
-import result_reporter
-from event_handler import EventHandler
-from test_finders import test_info
-from test_runners import test_runner_base
-
-POLL_FREQ_SECS = 10
-SOCKET_HOST = '127.0.0.1'
-SOCKET_QUEUE_MAX = 1
-SOCKET_BUFFER = 4096
-SELECT_TIMEOUT = 5
-
-# Socket Events of form FIRST_EVENT {JSON_DATA}\nSECOND_EVENT {JSON_DATA}
-# EVENT_RE has groups for the name and the data. "." does not match \n.
-EVENT_RE = re.compile(r'\n*(?P<event_name>[A-Z_]+) (?P<json_data>{.*})(?=\n|.)*')
-
-EXEC_DEPENDENCIES = ('adb', 'aapt')
-
-TRADEFED_EXIT_MSG = 'TradeFed subprocess exited early with exit code=%s.'
-
-LOG_FOLDER_NAME = 'log'
-
-_INTEGRATION_FINDERS = frozenset(['', 'INTEGRATION', 'INTEGRATION_FILE_PATH'])
-
-class TradeFedExitError(Exception):
- """Raised when TradeFed exists before test run has finished."""
-
-
-class AtestTradefedTestRunner(test_runner_base.TestRunnerBase):
- """TradeFed Test Runner class."""
- NAME = 'AtestTradefedTestRunner'
- EXECUTABLE = 'atest_tradefed.sh'
- _TF_TEMPLATE = 'template/atest_local_min'
- # Use --no-enable-granular-attempts to control reporter replay behavior.
- # TODO(b/142630648): Enable option enable-granular-attempts in sharding mode.
- _LOG_ARGS = ('--logcat-on-failure --atest-log-file-path={log_path} '
- '--no-enable-granular-attempts')
- _RUN_CMD = ('{exe} {template} --template:map '
- 'test=atest {tf_customize_template} {log_args} {args}')
- _BUILD_REQ = {'tradefed-core'}
- _RERUN_OPTION_GROUP = [constants.ITERATIONS,
- constants.RERUN_UNTIL_FAILURE,
- constants.RETRY_ANY_FAILURE]
-
- def __init__(self, results_dir, module_info=None, **kwargs):
- """Init stuff for base class."""
- super(AtestTradefedTestRunner, self).__init__(results_dir, **kwargs)
- self.module_info = module_info
- self.log_path = os.path.join(results_dir, LOG_FOLDER_NAME)
- if not os.path.exists(self.log_path):
- os.makedirs(self.log_path)
- log_args = {'log_path': self.log_path}
- self.run_cmd_dict = {'exe': self.EXECUTABLE,
- 'template': self._TF_TEMPLATE,
- 'tf_customize_template': '',
- 'args': '',
- 'log_args': self._LOG_ARGS.format(**log_args)}
- self.is_verbose = logging.getLogger().isEnabledFor(logging.DEBUG)
- self.root_dir = os.environ.get(constants.ANDROID_BUILD_TOP)
-
- def _try_set_gts_authentication_key(self):
- """Set GTS authentication key if it is available or exists.
-
- Strategy:
- Get APE_API_KEY from os.environ:
- - If APE_API_KEY is already set by user -> do nothing.
- Get the APE_API_KEY from constants:
- - If the key file exists -> set to env var.
- If APE_API_KEY isn't set and the key file doesn't exist:
- - Warn user some GTS tests may fail without authentication.
- """
- if os.environ.get('APE_API_KEY'):
- logging.debug('APE_API_KEY is set by developer.')
- return
- ape_api_key = constants.GTS_GOOGLE_SERVICE_ACCOUNT
- key_path = os.path.join(self.root_dir, ape_api_key)
- if ape_api_key and os.path.exists(key_path):
- logging.debug('Set APE_API_KEY: %s', ape_api_key)
- os.environ['APE_API_KEY'] = key_path
- else:
- logging.debug('APE_API_KEY not set, some GTS tests may fail'
- ' without authentication.')
-
- def run_tests(self, test_infos, extra_args, reporter):
- """Run the list of test_infos. See base class for more.
-
- Args:
- test_infos: A list of TestInfos.
- extra_args: Dict of extra args to add to test run.
- reporter: An instance of result_report.ResultReporter.
-
- Returns:
- 0 if tests succeed, non-zero otherwise.
- """
- reporter.log_path = self.log_path
- reporter.rerun_options = self._extract_rerun_options(extra_args)
- # Set google service key if it's available or found before running tests.
- self._try_set_gts_authentication_key()
- if os.getenv(test_runner_base.OLD_OUTPUT_ENV_VAR):
- return self.run_tests_raw(test_infos, extra_args, reporter)
- return self.run_tests_pretty(test_infos, extra_args, reporter)
-
- def run_tests_raw(self, test_infos, extra_args, reporter):
- """Run the list of test_infos. See base class for more.
-
- Args:
- test_infos: A list of TestInfos.
- extra_args: Dict of extra args to add to test run.
- reporter: An instance of result_report.ResultReporter.
-
- Returns:
- 0 if tests succeed, non-zero otherwise.
- """
- iterations = self._generate_iterations(extra_args)
- reporter.register_unsupported_runner(self.NAME)
-
- ret_code = constants.EXIT_CODE_SUCCESS
- for _ in range(iterations):
- run_cmds = self.generate_run_commands(test_infos, extra_args)
- subproc = self.run(run_cmds[0], output_to_stdout=True,
- env_vars=self.generate_env_vars(extra_args))
- ret_code |= self.wait_for_subprocess(subproc)
- return ret_code
-
- def run_tests_pretty(self, test_infos, extra_args, reporter):
- """Run the list of test_infos. See base class for more.
-
- Args:
- test_infos: A list of TestInfos.
- extra_args: Dict of extra args to add to test run.
- reporter: An instance of result_report.ResultReporter.
-
- Returns:
- 0 if tests succeed, non-zero otherwise.
- """
- iterations = self._generate_iterations(extra_args)
- ret_code = constants.EXIT_CODE_SUCCESS
- for _ in range(iterations):
- server = self._start_socket_server()
- run_cmds = self.generate_run_commands(test_infos, extra_args,
- server.getsockname()[1])
- subproc = self.run(run_cmds[0], output_to_stdout=self.is_verbose,
- env_vars=self.generate_env_vars(extra_args))
- self.handle_subprocess(subproc, partial(self._start_monitor,
- server,
- subproc,
- reporter))
- server.close()
- ret_code |= self.wait_for_subprocess(subproc)
- return ret_code
-
- # pylint: disable=too-many-branches
- def _start_monitor(self, server, tf_subproc, reporter):
- """Polling and process event.
-
- Args:
- server: Socket server object.
- tf_subproc: The tradefed subprocess to poll.
- reporter: Result_Reporter object.
- """
- inputs = [server]
- event_handlers = {}
- data_map = {}
- inv_socket = None
- while inputs:
- try:
- readable, _, _ = select.select(inputs, [], [], SELECT_TIMEOUT)
- for socket_object in readable:
- if socket_object is server:
- conn, addr = socket_object.accept()
- logging.debug('Accepted connection from %s', addr)
- conn.setblocking(False)
- inputs.append(conn)
- data_map[conn] = ''
- # The First connection should be invocation level reporter.
- if not inv_socket:
- inv_socket = conn
- else:
- # Count invocation level reporter events
- # without showing real-time information.
- if inv_socket == socket_object:
- reporter.silent = True
- event_handler = event_handlers.setdefault(
- socket_object, EventHandler(reporter, self.NAME))
- else:
- event_handler = event_handlers.setdefault(
- socket_object, EventHandler(
- result_reporter.ResultReporter(), self.NAME))
- recv_data = self._process_connection(data_map,
- socket_object,
- event_handler)
- if not recv_data:
- inputs.remove(socket_object)
- socket_object.close()
- finally:
- # Subprocess ended and all socket client closed.
- if tf_subproc.poll() is not None and len(inputs) == 1:
- inputs.pop().close()
- if not data_map:
- raise TradeFedExitError(TRADEFED_EXIT_MSG
- % tf_subproc.returncode)
-
- def _process_connection(self, data_map, conn, event_handler):
- """Process a socket connection between TF and ATest.
-
- Expect data of form EVENT_NAME {JSON_DATA}. Multiple events will be
- \n deliminated. Need to buffer data in case data exceeds socket
- buffer.
- E.q.
- TEST_RUN_STARTED {runName":"hello_world_test","runAttempt":0}\n
- TEST_STARTED {"start_time":2172917, "testName":"PrintHelloWorld"}\n
- Args:
- data_map: The data map of all connections.
- conn: Socket connection.
- event_handler: EventHandler object.
-
- Returns:
- True if conn.recv() has data , False otherwise.
- """
- # Set connection into blocking mode.
- conn.settimeout(None)
- data = conn.recv(SOCKET_BUFFER)
- logging.debug('received: %s', data)
- if data:
- data_map[conn] += data
- while True:
- match = EVENT_RE.match(data_map[conn])
- if not match:
- break
- try:
- event_data = json.loads(match.group('json_data'))
- except ValueError:
- logging.debug('Json incomplete, wait for more data')
- break
- event_name = match.group('event_name')
- event_handler.process_event(event_name, event_data)
- data_map[conn] = data_map[conn][match.end():]
- return bool(data)
-
- def _start_socket_server(self):
- """Start a TCP server."""
- server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- # Port 0 lets the OS pick an open port between 1024 and 65535.
- server.bind((SOCKET_HOST, 0))
- server.listen(SOCKET_QUEUE_MAX)
- server.settimeout(POLL_FREQ_SECS)
- logging.debug('Socket server started on port %s',
- server.getsockname()[1])
- return server
-
- def generate_env_vars(self, extra_args):
- """Convert extra args into env vars."""
- env_vars = os.environ.copy()
- debug_port = extra_args.get(constants.TF_DEBUG, '')
- if debug_port:
- env_vars['TF_DEBUG'] = 'true'
- env_vars['TF_DEBUG_PORT'] = str(debug_port)
- return env_vars
-
- def host_env_check(self):
- """Check that host env has everything we need.
-
- We actually can assume the host env is fine because we have the same
- requirements that atest has. Update this to check for android env vars
- if that changes.
- """
- pass
-
- @staticmethod
- def _is_missing_exec(executable):
- """Check if system build executable is available.
-
- Args:
- executable: Executable we are checking for.
- Returns:
- True if executable is missing, False otherwise.
- """
- try:
- output = subprocess.check_output(['which', executable])
- except subprocess.CalledProcessError:
- return True
- # TODO: Check if there is a clever way to determine if system adb is
- # good enough.
- root_dir = os.environ.get(constants.ANDROID_BUILD_TOP)
- return os.path.commonprefix([output, root_dir]) != root_dir
-
- def get_test_runner_build_reqs(self):
- """Return the build requirements.
-
- Returns:
- Set of build targets.
- """
- build_req = self._BUILD_REQ
- # Use different base build requirements if google-tf is around.
- if self.module_info.is_module(constants.GTF_MODULE):
- build_req = {constants.GTF_TARGET}
- # Always add ATest's own TF target.
- build_req.add(constants.ATEST_TF_MODULE)
- # Add adb if we can't find it.
- for executable in EXEC_DEPENDENCIES:
- if self._is_missing_exec(executable):
- build_req.add(executable)
- return build_req
-
- # pylint: disable=too-many-branches
- # pylint: disable=too-many-statements
- @staticmethod
- def _parse_extra_args(extra_args):
- """Convert the extra args into something tf can understand.
-
- Args:
- extra_args: Dict of args
-
- Returns:
- Tuple of args to append and args not supported.
- """
- args_to_append = []
- args_not_supported = []
- for arg in extra_args:
- if constants.WAIT_FOR_DEBUGGER == arg:
- args_to_append.append('--wait-for-debugger')
- continue
- if constants.DISABLE_INSTALL == arg:
- args_to_append.append('--disable-target-preparers')
- continue
- if constants.SERIAL == arg:
- args_to_append.append('--serial')
- args_to_append.append(extra_args[arg])
- continue
- if constants.SHARDING == arg:
- args_to_append.append('--shard-count')
- args_to_append.append(str(extra_args[arg]))
- continue
- if constants.DISABLE_TEARDOWN == arg:
- args_to_append.append('--disable-teardown')
- continue
- if constants.HOST == arg:
- args_to_append.append('-n')
- args_to_append.append('--prioritize-host-config')
- args_to_append.append('--skip-host-arch-check')
- continue
- if constants.CUSTOM_ARGS == arg:
- # We might need to sanitize it prior to appending but for now
- # let's just treat it like a simple arg to pass on through.
- args_to_append.extend(extra_args[arg])
- continue
- if constants.ALL_ABI == arg:
- args_to_append.append('--all-abi')
- continue
- if constants.DRY_RUN == arg:
- continue
- if constants.INSTANT == arg:
- args_to_append.append('--enable-parameterized-modules')
- args_to_append.append('--module-parameter')
- args_to_append.append('instant_app')
- continue
- if constants.USER_TYPE == arg:
- args_to_append.append('--enable-parameterized-modules')
- args_to_append.append('--enable-optional-parameterization')
- args_to_append.append('--module-parameter')
- args_to_append.append(extra_args[arg])
- continue
- if constants.ITERATIONS == arg:
- args_to_append.append('--retry-strategy')
- args_to_append.append(constants.ITERATIONS)
- args_to_append.append('--max-testcase-run-count')
- args_to_append.append(str(extra_args[arg]))
- continue
- if constants.RERUN_UNTIL_FAILURE == arg:
- args_to_append.append('--retry-strategy')
- args_to_append.append(constants.RERUN_UNTIL_FAILURE)
- args_to_append.append('--max-testcase-run-count')
- args_to_append.append(str(extra_args[arg]))
- continue
- if constants.RETRY_ANY_FAILURE == arg:
- args_to_append.append('--retry-strategy')
- args_to_append.append(constants.RETRY_ANY_FAILURE)
- args_to_append.append('--max-testcase-run-count')
- args_to_append.append(str(extra_args[arg]))
- continue
- if constants.COLLECT_TESTS_ONLY == arg:
- args_to_append.append('--collect-tests-only')
- continue
- if constants.TF_DEBUG == arg:
- print("Please attach process to your IDE...")
- continue
- args_not_supported.append(arg)
- return args_to_append, args_not_supported
-
- def _generate_metrics_folder(self, extra_args):
- """Generate metrics folder."""
- metrics_folder = ''
- if extra_args.get(constants.PRE_PATCH_ITERATIONS):
- metrics_folder = os.path.join(self.results_dir, 'baseline-metrics')
- elif extra_args.get(constants.POST_PATCH_ITERATIONS):
- metrics_folder = os.path.join(self.results_dir, 'new-metrics')
- return metrics_folder
-
- def _generate_iterations(self, extra_args):
- """Generate iterations."""
- iterations = 1
- if extra_args.get(constants.PRE_PATCH_ITERATIONS):
- iterations = extra_args.pop(constants.PRE_PATCH_ITERATIONS)
- elif extra_args.get(constants.POST_PATCH_ITERATIONS):
- iterations = extra_args.pop(constants.POST_PATCH_ITERATIONS)
- return iterations
-
- def generate_run_commands(self, test_infos, extra_args, port=None):
- """Generate a single run command from TestInfos.
-
- Args:
- test_infos: A set of TestInfo instances.
- extra_args: A Dict of extra args to append.
- port: Optional. An int of the port number to send events to. If
- None, then subprocess reporter in TF won't try to connect.
-
- Returns:
- A list that contains the string of atest tradefed run command.
- Only one command is returned.
- """
- args = self._create_test_args(test_infos)
- metrics_folder = self._generate_metrics_folder(extra_args)
-
- # Create a copy of args as more args could be added to the list.
- test_args = list(args)
- if port:
- test_args.extend(['--subprocess-report-port', str(port)])
- if metrics_folder:
- test_args.extend(['--metrics-folder', metrics_folder])
- logging.info('Saved metrics in: %s', metrics_folder)
- log_level = 'WARN'
- if self.is_verbose:
- log_level = 'VERBOSE'
- test_args.extend(['--log-level-display', log_level])
- test_args.extend(['--log-level', log_level])
-
- args_to_add, args_not_supported = self._parse_extra_args(extra_args)
-
- # TODO(b/122889707) Remove this after finding the root cause.
- env_serial = os.environ.get(constants.ANDROID_SERIAL)
- # Use the env variable ANDROID_SERIAL if it's set by user but only when
- # the target tests are not deviceless tests.
- if env_serial and '--serial' not in args_to_add and '-n' not in args_to_add:
- args_to_add.append("--serial")
- args_to_add.append(env_serial)
-
- test_args.extend(args_to_add)
- if args_not_supported:
- logging.info('%s does not support the following args %s',
- self.EXECUTABLE, args_not_supported)
-
- # Only need to check one TestInfo to determine if the tests are
- # configured in TEST_MAPPING.
- for_test_mapping = test_infos and test_infos[0].from_test_mapping
- test_args.extend(atest_utils.get_result_server_args(for_test_mapping))
- self.run_cmd_dict['args'] = ' '.join(test_args)
- self.run_cmd_dict['tf_customize_template'] = (
- self._extract_customize_tf_templates(extra_args))
- return [self._RUN_CMD.format(**self.run_cmd_dict)]
-
- def _flatten_test_infos(self, test_infos):
- """Sort and group test_infos by module_name and sort and group filters
- by class name.
-
- Example of three test_infos in a set:
- Module1, {(classA, {})}
- Module1, {(classB, {Method1})}
- Module1, {(classB, {Method2}}
- Becomes a set with one element:
- Module1, {(ClassA, {}), (ClassB, {Method1, Method2})}
- Where:
- Each line is a test_info namedtuple
- {} = Frozenset
- () = TestFilter namedtuple
-
- Args:
- test_infos: A set of TestInfo namedtuples.
-
- Returns:
- A set of TestInfos flattened.
- """
- results = set()
- key = lambda x: x.test_name
- for module, group in atest_utils.sort_and_group(test_infos, key):
- # module is a string, group is a generator of grouped TestInfos.
- # Module Test, so flatten test_infos:
- no_filters = False
- filters = set()
- test_runner = None
- test_finder = None
- build_targets = set()
- data = {}
- module_args = []
- for test_info_i in group:
- data.update(test_info_i.data)
- # Extend data with constants.TI_MODULE_ARG instead of overwriting.
- module_args.extend(test_info_i.data.get(constants.TI_MODULE_ARG, []))
- test_runner = test_info_i.test_runner
- test_finder = test_info_i.test_finder
- build_targets |= test_info_i.build_targets
- test_filters = test_info_i.data.get(constants.TI_FILTER)
- if not test_filters or no_filters:
- # test_info wants whole module run, so hardcode no filters.
- no_filters = True
- filters = set()
- continue
- filters |= test_filters
- if module_args:
- data[constants.TI_MODULE_ARG] = module_args
- data[constants.TI_FILTER] = self._flatten_test_filters(filters)
- results.add(
- test_info.TestInfo(test_name=module,
- test_runner=test_runner,
- test_finder=test_finder,
- build_targets=build_targets,
- data=data))
- return results
-
- @staticmethod
- def _flatten_test_filters(filters):
- """Sort and group test_filters by class_name.
-
- Example of three test_filters in a frozenset:
- classA, {}
- classB, {Method1}
- classB, {Method2}
- Becomes a frozenset with these elements:
- classA, {}
- classB, {Method1, Method2}
- Where:
- Each line is a TestFilter namedtuple
- {} = Frozenset
-
- Args:
- filters: A frozenset of test_filters.
-
- Returns:
- A frozenset of test_filters flattened.
- """
- results = set()
- key = lambda x: x.class_name
- for class_name, group in atest_utils.sort_and_group(filters, key):
- # class_name is a string, group is a generator of TestFilters
- assert class_name is not None
- methods = set()
- for test_filter in group:
- if not test_filter.methods:
- # Whole class should be run
- methods = set()
- break
- methods |= test_filter.methods
- results.add(test_info.TestFilter(class_name, frozenset(methods)))
- return frozenset(results)
-
- def _create_test_args(self, test_infos):
- """Compile TF command line args based on the given test infos.
-
- Args:
- test_infos: A set of TestInfo instances.
-
- Returns: A list of TF arguments to run the tests.
- """
- args = []
- if not test_infos:
- return []
-
- test_infos = self._flatten_test_infos(test_infos)
- # In order to do dry-run verification, sort it to make each run has the
- # same result
- test_infos = list(test_infos)
- test_infos.sort()
- has_integration_test = False
- for info in test_infos:
- # Integration test exists in TF's jar, so it must have the option
- # if it's integration finder.
- if info.test_finder in _INTEGRATION_FINDERS:
- has_integration_test = True
- args.extend([constants.TF_INCLUDE_FILTER, info.test_name])
- filters = set()
- for test_filter in info.data.get(constants.TI_FILTER, []):
- filters.update(test_filter.to_set_of_tf_strings())
- for test_filter in filters:
- filter_arg = constants.TF_ATEST_INCLUDE_FILTER_VALUE_FMT.format(
- test_name=info.test_name, test_filter=test_filter)
- args.extend([constants.TF_ATEST_INCLUDE_FILTER, filter_arg])
- for option in info.data.get(constants.TI_MODULE_ARG, []):
- if constants.TF_INCLUDE_FILTER_OPTION == option[0]:
- suite_filter = (
- constants.TF_SUITE_FILTER_ARG_VALUE_FMT.format(
- test_name=info.test_name, option_value=option[1]))
- args.extend([constants.TF_INCLUDE_FILTER, suite_filter])
- elif constants.TF_EXCLUDE_FILTER_OPTION == option[0]:
- suite_filter = (
- constants.TF_SUITE_FILTER_ARG_VALUE_FMT.format(
- test_name=info.test_name, option_value=option[1]))
- args.extend([constants.TF_EXCLUDE_FILTER, suite_filter])
- else:
- module_arg = (
- constants.TF_MODULE_ARG_VALUE_FMT.format(
- test_name=info.test_name, option_name=option[0],
- option_value=option[1]))
- args.extend([constants.TF_MODULE_ARG, module_arg])
- # TODO (b/141090547) Pass the config path to TF to load configs.
- # Compile option in TF if finder is not INTEGRATION or not set.
- if not has_integration_test:
- args.append(constants.TF_SKIP_LOADING_CONFIG_JAR)
- return args
-
- def _extract_rerun_options(self, extra_args):
- """Extract rerun options to a string for output.
-
- Args:
- extra_args: Dict of extra args for test runners to use.
-
- Returns: A string of rerun options.
- """
- extracted_options = ['{} {}'.format(arg, extra_args[arg])
- for arg in extra_args
- if arg in self._RERUN_OPTION_GROUP]
- return ' '.join(extracted_options)
-
- def _extract_customize_tf_templates(self, extra_args):
- """Extract tradefed template options to a string for output.
-
- Args:
- extra_args: Dict of extra args for test runners to use.
-
- Returns: A string of tradefed template options.
- """
- return ''.join(['--template:map %s '
- % x for x in extra_args.get(constants.TF_TEMPLATE, [])])
diff --git a/atest-py2/test_runners/atest_tf_test_runner_unittest.py b/atest-py2/test_runners/atest_tf_test_runner_unittest.py
deleted file mode 100755
index 5344ba0..0000000
--- a/atest-py2/test_runners/atest_tf_test_runner_unittest.py
+++ /dev/null
@@ -1,643 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittests for atest_tf_test_runner."""
-
-import os
-import sys
-import tempfile
-import unittest
-import json
-import mock
-
-# pylint: disable=import-error
-import constants
-import unittest_constants as uc
-import unittest_utils
-import atest_tf_test_runner as atf_tr
-import event_handler
-from test_finders import test_info
-
-if sys.version_info[0] == 2:
- from StringIO import StringIO
-else:
- from io import StringIO
-
-#pylint: disable=protected-access
-#pylint: disable=invalid-name
-TEST_INFO_DIR = '/tmp/atest_run_1510085893_pi_Nbi'
-METRICS_DIR = '%s/baseline-metrics' % TEST_INFO_DIR
-METRICS_DIR_ARG = '--metrics-folder %s ' % METRICS_DIR
-# TODO(147567606): Replace {serial} with {extra_args} for general extra
-# arguments testing.
-RUN_CMD_ARGS = '{metrics}--log-level WARN{serial}'
-LOG_ARGS = atf_tr.AtestTradefedTestRunner._LOG_ARGS.format(
- log_path=os.path.join(TEST_INFO_DIR, atf_tr.LOG_FOLDER_NAME))
-RUN_CMD = atf_tr.AtestTradefedTestRunner._RUN_CMD.format(
- exe=atf_tr.AtestTradefedTestRunner.EXECUTABLE,
- template=atf_tr.AtestTradefedTestRunner._TF_TEMPLATE,
- tf_customize_template='{tf_customize_template}',
- args=RUN_CMD_ARGS,
- log_args=LOG_ARGS)
-FULL_CLASS2_NAME = 'android.jank.cts.ui.SomeOtherClass'
-CLASS2_FILTER = test_info.TestFilter(FULL_CLASS2_NAME, frozenset())
-METHOD2_FILTER = test_info.TestFilter(uc.FULL_CLASS_NAME, frozenset([uc.METHOD2_NAME]))
-MODULE_ARG1 = [(constants.TF_INCLUDE_FILTER_OPTION, "A"),
- (constants.TF_INCLUDE_FILTER_OPTION, "B")]
-MODULE_ARG2 = []
-CLASS2_METHOD_FILTER = test_info.TestFilter(FULL_CLASS2_NAME,
- frozenset([uc.METHOD_NAME, uc.METHOD2_NAME]))
-MODULE2_INFO = test_info.TestInfo(uc.MODULE2_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- set(),
- data={constants.TI_REL_CONFIG: uc.CONFIG2_FILE,
- constants.TI_FILTER: frozenset()})
-CLASS1_BUILD_TARGETS = {'class_1_build_target'}
-CLASS1_INFO = test_info.TestInfo(uc.MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- CLASS1_BUILD_TARGETS,
- data={constants.TI_REL_CONFIG: uc.CONFIG_FILE,
- constants.TI_FILTER: frozenset([uc.CLASS_FILTER])})
-CLASS2_BUILD_TARGETS = {'class_2_build_target'}
-CLASS2_INFO = test_info.TestInfo(uc.MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- CLASS2_BUILD_TARGETS,
- data={constants.TI_REL_CONFIG: uc.CONFIG_FILE,
- constants.TI_FILTER: frozenset([CLASS2_FILTER])})
-CLASS3_BUILD_TARGETS = {'class_3_build_target'}
-CLASS3_INFO = test_info.TestInfo(uc.MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- CLASS3_BUILD_TARGETS,
- data={constants.TI_REL_CONFIG: uc.CONFIG_FILE,
- constants.TI_FILTER: frozenset(),
- constants.TI_MODULE_ARG: MODULE_ARG1})
-CLASS4_BUILD_TARGETS = {'class_4_build_target'}
-CLASS4_INFO = test_info.TestInfo(uc.MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- CLASS4_BUILD_TARGETS,
- data={constants.TI_REL_CONFIG: uc.CONFIG_FILE,
- constants.TI_FILTER: frozenset(),
- constants.TI_MODULE_ARG: MODULE_ARG2})
-CLASS1_CLASS2_MODULE_INFO = test_info.TestInfo(
- uc.MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- uc.MODULE_BUILD_TARGETS | CLASS1_BUILD_TARGETS | CLASS2_BUILD_TARGETS,
- uc.MODULE_DATA)
-FLAT_CLASS_INFO = test_info.TestInfo(
- uc.MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- CLASS1_BUILD_TARGETS | CLASS2_BUILD_TARGETS,
- data={constants.TI_REL_CONFIG: uc.CONFIG_FILE,
- constants.TI_FILTER: frozenset([uc.CLASS_FILTER, CLASS2_FILTER])})
-FLAT2_CLASS_INFO = test_info.TestInfo(
- uc.MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- CLASS3_BUILD_TARGETS | CLASS4_BUILD_TARGETS,
- data={constants.TI_REL_CONFIG: uc.CONFIG_FILE,
- constants.TI_FILTER: frozenset(),
- constants.TI_MODULE_ARG: MODULE_ARG1 + MODULE_ARG2})
-GTF_INT_CONFIG = os.path.join(uc.GTF_INT_DIR, uc.GTF_INT_NAME + '.xml')
-CLASS2_METHOD_INFO = test_info.TestInfo(
- uc.MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- set(),
- data={constants.TI_REL_CONFIG: uc.CONFIG_FILE,
- constants.TI_FILTER:
- frozenset([test_info.TestFilter(
- FULL_CLASS2_NAME, frozenset([uc.METHOD_NAME, uc.METHOD2_NAME]))])})
-METHOD_AND_CLASS2_METHOD = test_info.TestInfo(
- uc.MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- uc.MODULE_BUILD_TARGETS,
- data={constants.TI_REL_CONFIG: uc.CONFIG_FILE,
- constants.TI_FILTER: frozenset([uc.METHOD_FILTER, CLASS2_METHOD_FILTER])})
-METHOD_METHOD2_AND_CLASS2_METHOD = test_info.TestInfo(
- uc.MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- uc.MODULE_BUILD_TARGETS,
- data={constants.TI_REL_CONFIG: uc.CONFIG_FILE,
- constants.TI_FILTER: frozenset([uc.FLAT_METHOD_FILTER, CLASS2_METHOD_FILTER])})
-METHOD2_INFO = test_info.TestInfo(
- uc.MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- set(),
- data={constants.TI_REL_CONFIG: uc.CONFIG_FILE,
- constants.TI_FILTER: frozenset([METHOD2_FILTER])})
-
-INT_INFO = test_info.TestInfo(
- uc.INT_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- set(),
- test_finder='INTEGRATION')
-
-MOD_INFO = test_info.TestInfo(
- uc.MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- set(),
- test_finder='MODULE')
-
-MOD_INFO_NO_TEST_FINDER = test_info.TestInfo(
- uc.MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- set())
-
-EVENTS_NORMAL = [
- ('TEST_MODULE_STARTED', {
- 'moduleContextFileName':'serial-util1146216{974}2772610436.ser',
- 'moduleName':'someTestModule'}),
- ('TEST_RUN_STARTED', {'testCount': 2}),
- ('TEST_STARTED', {'start_time':52, 'className':'someClassName',
- 'testName':'someTestName'}),
- ('TEST_ENDED', {'end_time':1048, 'className':'someClassName',
- 'testName':'someTestName'}),
- ('TEST_STARTED', {'start_time':48, 'className':'someClassName2',
- 'testName':'someTestName2'}),
- ('TEST_FAILED', {'className':'someClassName2', 'testName':'someTestName2',
- 'trace': 'someTrace'}),
- ('TEST_ENDED', {'end_time':9876450, 'className':'someClassName2',
- 'testName':'someTestName2'}),
- ('TEST_RUN_ENDED', {}),
- ('TEST_MODULE_ENDED', {'foo': 'bar'}),
-]
-
-class AtestTradefedTestRunnerUnittests(unittest.TestCase):
- """Unit tests for atest_tf_test_runner.py"""
-
- def setUp(self):
- self.tr = atf_tr.AtestTradefedTestRunner(results_dir=TEST_INFO_DIR)
-
- def tearDown(self):
- mock.patch.stopall()
-
- @mock.patch.object(atf_tr.AtestTradefedTestRunner,
- '_start_socket_server')
- @mock.patch.object(atf_tr.AtestTradefedTestRunner,
- 'run')
- @mock.patch.object(atf_tr.AtestTradefedTestRunner,
- '_create_test_args', return_value=['some_args'])
- @mock.patch.object(atf_tr.AtestTradefedTestRunner,
- 'generate_run_commands', return_value='some_cmd')
- @mock.patch.object(atf_tr.AtestTradefedTestRunner,
- '_process_connection', return_value=None)
- @mock.patch('select.select')
- @mock.patch('os.killpg', return_value=None)
- @mock.patch('os.getpgid', return_value=None)
- @mock.patch('signal.signal', return_value=None)
- def test_run_tests_pretty(self, _signal, _pgid, _killpg, mock_select,
- _process, _run_cmd, _test_args,
- mock_run, mock_start_socket_server):
- """Test _run_tests_pretty method."""
- mock_subproc = mock.Mock()
- mock_run.return_value = mock_subproc
- mock_subproc.returncode = 0
- mock_subproc.poll.side_effect = [True, True, None]
- mock_server = mock.Mock()
- mock_server.getsockname.return_value = ('', '')
- mock_start_socket_server.return_value = mock_server
- mock_reporter = mock.Mock()
-
- # Test no early TF exit
- mock_conn = mock.Mock()
- mock_server.accept.return_value = (mock_conn, 'some_addr')
- mock_server.close.return_value = True
- mock_select.side_effect = [([mock_server], None, None),
- ([mock_conn], None, None)]
- self.tr.run_tests_pretty([MODULE2_INFO], {}, mock_reporter)
-
- # Test early TF exit
- tmp_file = tempfile.NamedTemporaryFile()
- with open(tmp_file.name, 'w') as f:
- f.write("tf msg")
- self.tr.test_log_file = tmp_file
- mock_select.side_effect = [([], None, None)]
- mock_subproc.poll.side_effect = None
- capture_output = StringIO()
- sys.stdout = capture_output
- self.assertRaises(atf_tr.TradeFedExitError, self.tr.run_tests_pretty,
- [MODULE2_INFO], {}, mock_reporter)
- sys.stdout = sys.__stdout__
- self.assertTrue('tf msg' in capture_output.getvalue())
-
- @mock.patch.object(atf_tr.AtestTradefedTestRunner, '_process_connection')
- @mock.patch('select.select')
- def test_start_monitor_2_connection(self, mock_select, mock_process):
- """Test _start_monitor method."""
- mock_server = mock.Mock()
- mock_subproc = mock.Mock()
- mock_reporter = mock.Mock()
- mock_conn1 = mock.Mock()
- mock_conn2 = mock.Mock()
- mock_server.accept.side_effect = [(mock_conn1, 'addr 1'),
- (mock_conn2, 'addr 2')]
- mock_select.side_effect = [([mock_server], None, None),
- ([mock_server], None, None),
- ([mock_conn1], None, None),
- ([mock_conn2], None, None),
- ([mock_conn1], None, None),
- ([mock_conn2], None, None)]
- mock_process.side_effect = ['abc', 'def', False, False]
- mock_subproc.poll.side_effect = [None, None, None, None,
- None, True]
- self.tr._start_monitor(mock_server, mock_subproc, mock_reporter)
- self.assertEqual(mock_process.call_count, 4)
- calls = [mock.call.accept(), mock.call.close()]
- mock_server.assert_has_calls(calls)
- mock_conn1.assert_has_calls([mock.call.close()])
- mock_conn2.assert_has_calls([mock.call.close()])
-
- @mock.patch.object(atf_tr.AtestTradefedTestRunner, '_process_connection')
- @mock.patch('select.select')
- def test_start_monitor_tf_exit_before_2nd_connection(self,
- mock_select,
- mock_process):
- """Test _start_monitor method."""
- mock_server = mock.Mock()
- mock_subproc = mock.Mock()
- mock_reporter = mock.Mock()
- mock_conn1 = mock.Mock()
- mock_conn2 = mock.Mock()
- mock_server.accept.side_effect = [(mock_conn1, 'addr 1'),
- (mock_conn2, 'addr 2')]
- mock_select.side_effect = [([mock_server], None, None),
- ([mock_server], None, None),
- ([mock_conn1], None, None),
- ([mock_conn2], None, None),
- ([mock_conn1], None, None),
- ([mock_conn2], None, None)]
- mock_process.side_effect = ['abc', 'def', False, False]
- # TF exit early but have not processed data in socket buffer.
- mock_subproc.poll.side_effect = [None, None, True, True,
- True, True]
- self.tr._start_monitor(mock_server, mock_subproc, mock_reporter)
- self.assertEqual(mock_process.call_count, 4)
- calls = [mock.call.accept(), mock.call.close()]
- mock_server.assert_has_calls(calls)
- mock_conn1.assert_has_calls([mock.call.close()])
- mock_conn2.assert_has_calls([mock.call.close()])
-
-
- def test_start_socket_server(self):
- """Test start_socket_server method."""
- server = self.tr._start_socket_server()
- host, port = server.getsockname()
- self.assertEqual(host, atf_tr.SOCKET_HOST)
- self.assertLessEqual(port, 65535)
- self.assertGreaterEqual(port, 1024)
- server.close()
-
- @mock.patch('os.path.exists')
- @mock.patch.dict('os.environ', {'APE_API_KEY':'/tmp/123.json'})
- def test_try_set_gts_authentication_key_is_set_by_user(self, mock_exist):
- """Test try_set_authentication_key_is_set_by_user method."""
- # Test key is set by user.
- self.tr._try_set_gts_authentication_key()
- mock_exist.assert_not_called()
-
- @mock.patch('os.path.join', return_value='/tmp/file_not_exist.json')
- def test_try_set_gts_authentication_key_not_set(self, _):
- """Test try_set_authentication_key_not_set method."""
- # Delete the environment variable if it's set. This is fine for this
- # method because it's for validating the APE_API_KEY isn't set.
- if os.environ.get('APE_API_KEY'):
- del os.environ['APE_API_KEY']
- self.tr._try_set_gts_authentication_key()
- self.assertEqual(os.environ.get('APE_API_KEY'), None)
-
- @mock.patch.object(event_handler.EventHandler, 'process_event')
- def test_process_connection(self, mock_pe):
- """Test _process_connection method."""
- mock_socket = mock.Mock()
- for name, data in EVENTS_NORMAL:
- datas = {mock_socket: ''}
- socket_data = '%s %s' % (name, json.dumps(data))
- mock_socket.recv.return_value = socket_data
- self.tr._process_connection(datas, mock_socket, mock_pe)
-
- calls = [mock.call.process_event(name, data) for name, data in EVENTS_NORMAL]
- mock_pe.assert_has_calls(calls)
- mock_socket.recv.return_value = ''
- self.assertFalse(self.tr._process_connection(datas, mock_socket, mock_pe))
-
- @mock.patch.object(event_handler.EventHandler, 'process_event')
- def test_process_connection_multiple_lines_in_single_recv(self, mock_pe):
- """Test _process_connection when recv reads multiple lines in one go."""
- mock_socket = mock.Mock()
- squashed_events = '\n'.join(['%s %s' % (name, json.dumps(data))
- for name, data in EVENTS_NORMAL])
- socket_data = [squashed_events, '']
- mock_socket.recv.side_effect = socket_data
- datas = {mock_socket: ''}
- self.tr._process_connection(datas, mock_socket, mock_pe)
- calls = [mock.call.process_event(name, data) for name, data in EVENTS_NORMAL]
- mock_pe.assert_has_calls(calls)
-
- @mock.patch.object(event_handler.EventHandler, 'process_event')
- def test_process_connection_with_buffering(self, mock_pe):
- """Test _process_connection when events overflow socket buffer size"""
- mock_socket = mock.Mock()
- module_events = [EVENTS_NORMAL[0], EVENTS_NORMAL[-1]]
- socket_events = ['%s %s' % (name, json.dumps(data))
- for name, data in module_events]
- # test try-block code by breaking apart first event after first }
- index = socket_events[0].index('}') + 1
- socket_data = [socket_events[0][:index], socket_events[0][index:]]
- # test non-try block buffering with second event
- socket_data.extend([socket_events[1][:-4], socket_events[1][-4:], ''])
- mock_socket.recv.side_effect = socket_data
- datas = {mock_socket: ''}
- self.tr._process_connection(datas, mock_socket, mock_pe)
- self.tr._process_connection(datas, mock_socket, mock_pe)
- self.tr._process_connection(datas, mock_socket, mock_pe)
- self.tr._process_connection(datas, mock_socket, mock_pe)
- calls = [mock.call.process_event(name, data) for name, data in module_events]
- mock_pe.assert_has_calls(calls)
-
- @mock.patch.object(event_handler.EventHandler, 'process_event')
- def test_process_connection_with_not_completed_event_data(self, mock_pe):
- """Test _process_connection when event have \n prefix."""
- mock_socket = mock.Mock()
- mock_socket.recv.return_value = ('\n%s %s'
- %(EVENTS_NORMAL[0][0],
- json.dumps(EVENTS_NORMAL[0][1])))
- datas = {mock_socket: ''}
- self.tr._process_connection(datas, mock_socket, mock_pe)
- calls = [mock.call.process_event(EVENTS_NORMAL[0][0],
- EVENTS_NORMAL[0][1])]
- mock_pe.assert_has_calls(calls)
-
- @mock.patch('os.environ.get', return_value=None)
- @mock.patch.object(atf_tr.AtestTradefedTestRunner, '_generate_metrics_folder')
- @mock.patch('atest_utils.get_result_server_args')
- def test_generate_run_commands_without_serial_env(self, mock_resultargs, mock_mertrics, _):
- """Test generate_run_command method."""
- # Basic Run Cmd
- mock_resultargs.return_value = []
- mock_mertrics.return_value = ''
- unittest_utils.assert_strict_equal(
- self,
- self.tr.generate_run_commands([], {}),
- [RUN_CMD.format(metrics='',
- serial='',
- tf_customize_template='')])
- mock_mertrics.return_value = METRICS_DIR
- unittest_utils.assert_strict_equal(
- self,
- self.tr.generate_run_commands([], {}),
- [RUN_CMD.format(metrics=METRICS_DIR_ARG,
- serial='',
- tf_customize_template='')])
- # Run cmd with result server args.
- result_arg = '--result_arg'
- mock_resultargs.return_value = [result_arg]
- mock_mertrics.return_value = ''
- unittest_utils.assert_strict_equal(
- self,
- self.tr.generate_run_commands([], {}),
- [RUN_CMD.format(metrics='',
- serial='',
- tf_customize_template='') + ' ' + result_arg])
-
- @mock.patch('os.environ.get')
- @mock.patch.object(atf_tr.AtestTradefedTestRunner, '_generate_metrics_folder')
- @mock.patch('atest_utils.get_result_server_args')
- def test_generate_run_commands_with_serial_env(self, mock_resultargs, mock_mertrics, mock_env):
- """Test generate_run_command method."""
- # Basic Run Cmd
- env_device_serial = 'env-device-0'
- mock_resultargs.return_value = []
- mock_mertrics.return_value = ''
- mock_env.return_value = env_device_serial
- env_serial_arg = ' --serial %s' % env_device_serial
- # Serial env be set and without --serial arg.
- unittest_utils.assert_strict_equal(
- self,
- self.tr.generate_run_commands([], {}),
- [RUN_CMD.format(metrics='',
- serial=env_serial_arg,
- tf_customize_template='')])
- # Serial env be set but with --serial arg.
- arg_device_serial = 'arg-device-0'
- arg_serial_arg = ' --serial %s' % arg_device_serial
- unittest_utils.assert_strict_equal(
- self,
- self.tr.generate_run_commands([], {constants.SERIAL:arg_device_serial}),
- [RUN_CMD.format(metrics='',
- serial=arg_serial_arg,
- tf_customize_template='')])
- # Serial env be set but with -n arg
- unittest_utils.assert_strict_equal(
- self,
- self.tr.generate_run_commands([], {constants.HOST: True}),
- [RUN_CMD.format(metrics='',
- serial='',
- tf_customize_template='') +
- ' -n --prioritize-host-config --skip-host-arch-check'])
-
-
- def test_flatten_test_filters(self):
- """Test _flatten_test_filters method."""
- # No Flattening
- filters = self.tr._flatten_test_filters({uc.CLASS_FILTER})
- unittest_utils.assert_strict_equal(self, frozenset([uc.CLASS_FILTER]),
- filters)
- filters = self.tr._flatten_test_filters({CLASS2_FILTER})
- unittest_utils.assert_strict_equal(
- self, frozenset([CLASS2_FILTER]), filters)
- filters = self.tr._flatten_test_filters({uc.METHOD_FILTER})
- unittest_utils.assert_strict_equal(
- self, frozenset([uc.METHOD_FILTER]), filters)
- filters = self.tr._flatten_test_filters({uc.METHOD_FILTER,
- CLASS2_METHOD_FILTER})
- unittest_utils.assert_strict_equal(
- self, frozenset([uc.METHOD_FILTER, CLASS2_METHOD_FILTER]), filters)
- # Flattening
- filters = self.tr._flatten_test_filters({uc.METHOD_FILTER,
- METHOD2_FILTER})
- unittest_utils.assert_strict_equal(
- self, filters, frozenset([uc.FLAT_METHOD_FILTER]))
- filters = self.tr._flatten_test_filters({uc.METHOD_FILTER,
- METHOD2_FILTER,
- CLASS2_METHOD_FILTER,})
- unittest_utils.assert_strict_equal(
- self, filters, frozenset([uc.FLAT_METHOD_FILTER,
- CLASS2_METHOD_FILTER]))
-
- def test_flatten_test_infos(self):
- """Test _flatten_test_infos method."""
- # No Flattening
- test_infos = self.tr._flatten_test_infos({uc.MODULE_INFO})
- unittest_utils.assert_equal_testinfo_sets(self, test_infos,
- {uc.MODULE_INFO})
-
- test_infos = self.tr._flatten_test_infos([uc.MODULE_INFO, MODULE2_INFO])
- unittest_utils.assert_equal_testinfo_sets(
- self, test_infos, {uc.MODULE_INFO, MODULE2_INFO})
-
- test_infos = self.tr._flatten_test_infos({CLASS1_INFO})
- unittest_utils.assert_equal_testinfo_sets(self, test_infos,
- {CLASS1_INFO})
-
- test_infos = self.tr._flatten_test_infos({uc.INT_INFO})
- unittest_utils.assert_equal_testinfo_sets(self, test_infos,
- {uc.INT_INFO})
-
- test_infos = self.tr._flatten_test_infos({uc.METHOD_INFO})
- unittest_utils.assert_equal_testinfo_sets(self, test_infos,
- {uc.METHOD_INFO})
-
- # Flattening
- test_infos = self.tr._flatten_test_infos({CLASS1_INFO, CLASS2_INFO})
- unittest_utils.assert_equal_testinfo_sets(self, test_infos,
- {FLAT_CLASS_INFO})
-
- test_infos = self.tr._flatten_test_infos({CLASS1_INFO, uc.INT_INFO,
- CLASS2_INFO})
- unittest_utils.assert_equal_testinfo_sets(self, test_infos,
- {uc.INT_INFO,
- FLAT_CLASS_INFO})
-
- test_infos = self.tr._flatten_test_infos({CLASS1_INFO, uc.MODULE_INFO,
- CLASS2_INFO})
- unittest_utils.assert_equal_testinfo_sets(self, test_infos,
- {CLASS1_CLASS2_MODULE_INFO})
-
- test_infos = self.tr._flatten_test_infos({MODULE2_INFO, uc.INT_INFO,
- CLASS1_INFO, CLASS2_INFO,
- uc.GTF_INT_INFO})
- unittest_utils.assert_equal_testinfo_sets(self, test_infos,
- {uc.INT_INFO, uc.GTF_INT_INFO,
- FLAT_CLASS_INFO,
- MODULE2_INFO})
-
- test_infos = self.tr._flatten_test_infos({uc.METHOD_INFO,
- CLASS2_METHOD_INFO})
- unittest_utils.assert_equal_testinfo_sets(self, test_infos,
- {METHOD_AND_CLASS2_METHOD})
-
- test_infos = self.tr._flatten_test_infos({uc.METHOD_INFO, METHOD2_INFO,
- CLASS2_METHOD_INFO})
- unittest_utils.assert_equal_testinfo_sets(
- self, test_infos, {METHOD_METHOD2_AND_CLASS2_METHOD})
- test_infos = self.tr._flatten_test_infos({uc.METHOD_INFO, METHOD2_INFO,
- CLASS2_METHOD_INFO,
- MODULE2_INFO,
- uc.INT_INFO})
- unittest_utils.assert_equal_testinfo_sets(
- self, test_infos, {uc.INT_INFO, MODULE2_INFO,
- METHOD_METHOD2_AND_CLASS2_METHOD})
-
- test_infos = self.tr._flatten_test_infos({CLASS3_INFO, CLASS4_INFO})
- unittest_utils.assert_equal_testinfo_sets(self, test_infos,
- {FLAT2_CLASS_INFO})
-
- def test_create_test_args(self):
- """Test _create_test_args method."""
- # Only compile '--skip-loading-config-jar' in TF if it's not
- # INTEGRATION finder or the finder property isn't set.
- args = self.tr._create_test_args([MOD_INFO])
- self.assertTrue(constants.TF_SKIP_LOADING_CONFIG_JAR in args)
-
- args = self.tr._create_test_args([INT_INFO])
- self.assertFalse(constants.TF_SKIP_LOADING_CONFIG_JAR in args)
-
- args = self.tr._create_test_args([MOD_INFO_NO_TEST_FINDER])
- self.assertFalse(constants.TF_SKIP_LOADING_CONFIG_JAR in args)
-
- args = self.tr._create_test_args([MOD_INFO_NO_TEST_FINDER, INT_INFO])
- self.assertFalse(constants.TF_SKIP_LOADING_CONFIG_JAR in args)
-
- args = self.tr._create_test_args([MOD_INFO_NO_TEST_FINDER])
- self.assertFalse(constants.TF_SKIP_LOADING_CONFIG_JAR in args)
-
- args = self.tr._create_test_args([MOD_INFO_NO_TEST_FINDER, INT_INFO, MOD_INFO])
- self.assertFalse(constants.TF_SKIP_LOADING_CONFIG_JAR in args)
-
-
- @mock.patch('os.environ.get', return_value=None)
- @mock.patch.object(atf_tr.AtestTradefedTestRunner, '_generate_metrics_folder')
- @mock.patch('atest_utils.get_result_server_args')
- def test_generate_run_commands_with_tf_template(self, mock_resultargs, mock_mertrics, _):
- """Test generate_run_command method."""
- tf_tmplate_key1 = 'tf_tmplate_key1'
- tf_tmplate_val1 = 'tf_tmplate_val1'
- tf_tmplate_key2 = 'tf_tmplate_key2'
- tf_tmplate_val2 = 'tf_tmplate_val2'
- # Testing with only one tradefed template command
- mock_resultargs.return_value = []
- mock_mertrics.return_value = ''
- extra_args = {constants.TF_TEMPLATE:
- ['{}={}'.format(tf_tmplate_key1,
- tf_tmplate_val1)]}
- unittest_utils.assert_strict_equal(
- self,
- self.tr.generate_run_commands([], extra_args),
- [RUN_CMD.format(
- metrics='',
- serial='',
- tf_customize_template=
- '--template:map {}={} ').format(tf_tmplate_key1,
- tf_tmplate_val1)])
- # Testing with two tradefed template commands
- extra_args = {constants.TF_TEMPLATE:
- ['{}={}'.format(tf_tmplate_key1,
- tf_tmplate_val1),
- '{}={}'.format(tf_tmplate_key2,
- tf_tmplate_val2)]}
- unittest_utils.assert_strict_equal(
- self,
- self.tr.generate_run_commands([], extra_args),
- [RUN_CMD.format(
- metrics='',
- serial='',
- tf_customize_template=
- '--template:map {}={} --template:map {}={} ').format(
- tf_tmplate_key1,
- tf_tmplate_val1,
- tf_tmplate_key2,
- tf_tmplate_val2)])
-
- @mock.patch('os.environ.get', return_value=None)
- @mock.patch.object(atf_tr.AtestTradefedTestRunner, '_generate_metrics_folder')
- @mock.patch('atest_utils.get_result_server_args')
- def test_generate_run_commands_collect_tests_only(self,
- mock_resultargs,
- mock_mertrics, _):
- """Test generate_run_command method."""
- # Testing without collect-tests-only
- mock_resultargs.return_value = []
- mock_mertrics.return_value = ''
- extra_args = {}
- unittest_utils.assert_strict_equal(
- self,
- self.tr.generate_run_commands([], extra_args),
- [RUN_CMD.format(
- metrics='',
- serial='',
- tf_customize_template='')])
- # Testing with collect-tests-only
- mock_resultargs.return_value = []
- mock_mertrics.return_value = ''
- extra_args = {constants.COLLECT_TESTS_ONLY: True}
- unittest_utils.assert_strict_equal(
- self,
- self.tr.generate_run_commands([], extra_args),
- [RUN_CMD.format(
- metrics='',
- serial=' --collect-tests-only',
- tf_customize_template='')])
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest-py2/test_runners/event_handler.py b/atest-py2/test_runners/event_handler.py
deleted file mode 100644
index efe0236..0000000
--- a/atest-py2/test_runners/event_handler.py
+++ /dev/null
@@ -1,287 +0,0 @@
-# Copyright 2019, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Atest test event handler class.
-"""
-
-from __future__ import print_function
-from collections import deque
-from datetime import timedelta
-import time
-import logging
-
-import atest_execution_info
-
-from test_runners import test_runner_base
-
-
-EVENT_NAMES = {'module_started': 'TEST_MODULE_STARTED',
- 'module_ended': 'TEST_MODULE_ENDED',
- 'run_started': 'TEST_RUN_STARTED',
- 'run_ended': 'TEST_RUN_ENDED',
- # Next three are test-level events
- 'test_started': 'TEST_STARTED',
- 'test_failed': 'TEST_FAILED',
- 'test_ended': 'TEST_ENDED',
- # Last two failures are runner-level, not test-level.
- # Invocation failure is broader than run failure.
- 'run_failed': 'TEST_RUN_FAILED',
- 'invocation_failed': 'INVOCATION_FAILED',
- 'test_ignored': 'TEST_IGNORED',
- 'test_assumption_failure': 'TEST_ASSUMPTION_FAILURE',
- 'log_association': 'LOG_ASSOCIATION'}
-
-EVENT_PAIRS = {EVENT_NAMES['module_started']: EVENT_NAMES['module_ended'],
- EVENT_NAMES['run_started']: EVENT_NAMES['run_ended'],
- EVENT_NAMES['test_started']: EVENT_NAMES['test_ended']}
-START_EVENTS = list(EVENT_PAIRS.keys())
-END_EVENTS = list(EVENT_PAIRS.values())
-TEST_NAME_TEMPLATE = '%s#%s'
-EVENTS_NOT_BALANCED = ('Error: Saw %s Start event and %s End event. These '
- 'should be equal!')
-
-# time in millisecond.
-ONE_SECOND = 1000
-ONE_MINUTE = 60000
-ONE_HOUR = 3600000
-
-CONNECTION_STATE = {
- 'current_test': None,
- 'test_run_name': None,
- 'last_failed': None,
- 'last_ignored': None,
- 'last_assumption_failed': None,
- 'current_group': None,
- 'current_group_total': None,
- 'test_count': 0,
- 'test_start_time': None}
-
-class EventHandleError(Exception):
- """Raised when handle event error."""
-
-class EventHandler(object):
- """Test Event handle class."""
-
- def __init__(self, reporter, name):
- self.reporter = reporter
- self.runner_name = name
- self.state = CONNECTION_STATE.copy()
- self.event_stack = deque()
-
- def _module_started(self, event_data):
- if atest_execution_info.PREPARE_END_TIME is None:
- atest_execution_info.PREPARE_END_TIME = time.time()
- self.state['current_group'] = event_data['moduleName']
- self.state['last_failed'] = None
- self.state['current_test'] = None
-
- def _run_started(self, event_data):
- # Technically there can be more than one run per module.
- self.state['test_run_name'] = event_data.setdefault('runName', '')
- self.state['current_group_total'] = event_data['testCount']
- self.state['test_count'] = 0
- self.state['last_failed'] = None
- self.state['current_test'] = None
-
- def _test_started(self, event_data):
- name = TEST_NAME_TEMPLATE % (event_data['className'],
- event_data['testName'])
- self.state['current_test'] = name
- self.state['test_count'] += 1
- self.state['test_start_time'] = event_data['start_time']
-
- def _test_failed(self, event_data):
- self.state['last_failed'] = {'name': TEST_NAME_TEMPLATE % (
- event_data['className'],
- event_data['testName']),
- 'trace': event_data['trace']}
-
- def _test_ignored(self, event_data):
- name = TEST_NAME_TEMPLATE % (event_data['className'],
- event_data['testName'])
- self.state['last_ignored'] = name
-
- def _test_assumption_failure(self, event_data):
- name = TEST_NAME_TEMPLATE % (event_data['className'],
- event_data['testName'])
- self.state['last_assumption_failed'] = name
-
- def _run_failed(self, event_data):
- # Module and Test Run probably started, but failure occurred.
- self.reporter.process_test_result(test_runner_base.TestResult(
- runner_name=self.runner_name,
- group_name=self.state['current_group'],
- test_name=self.state['current_test'],
- status=test_runner_base.ERROR_STATUS,
- details=event_data['reason'],
- test_count=self.state['test_count'],
- test_time='',
- runner_total=None,
- group_total=self.state['current_group_total'],
- additional_info={},
- test_run_name=self.state['test_run_name']))
-
- def _invocation_failed(self, event_data):
- # Broadest possible failure. May not even start the module/test run.
- self.reporter.process_test_result(test_runner_base.TestResult(
- runner_name=self.runner_name,
- group_name=self.state['current_group'],
- test_name=self.state['current_test'],
- status=test_runner_base.ERROR_STATUS,
- details=event_data['cause'],
- test_count=self.state['test_count'],
- test_time='',
- runner_total=None,
- group_total=self.state['current_group_total'],
- additional_info={},
- test_run_name=self.state['test_run_name']))
-
- def _run_ended(self, event_data):
- pass
-
- def _module_ended(self, event_data):
- pass
-
- def _test_ended(self, event_data):
- name = TEST_NAME_TEMPLATE % (event_data['className'],
- event_data['testName'])
- test_time = ''
- if self.state['test_start_time']:
- test_time = self._calc_duration(event_data['end_time'] -
- self.state['test_start_time'])
- if self.state['last_failed'] and name == self.state['last_failed']['name']:
- status = test_runner_base.FAILED_STATUS
- trace = self.state['last_failed']['trace']
- self.state['last_failed'] = None
- elif (self.state['last_assumption_failed'] and
- name == self.state['last_assumption_failed']):
- status = test_runner_base.ASSUMPTION_FAILED
- self.state['last_assumption_failed'] = None
- trace = None
- elif self.state['last_ignored'] and name == self.state['last_ignored']:
- status = test_runner_base.IGNORED_STATUS
- self.state['last_ignored'] = None
- trace = None
- else:
- status = test_runner_base.PASSED_STATUS
- trace = None
-
- default_event_keys = ['className', 'end_time', 'testName']
- additional_info = {}
- for event_key in event_data.keys():
- if event_key not in default_event_keys:
- additional_info[event_key] = event_data.get(event_key, None)
-
- self.reporter.process_test_result(test_runner_base.TestResult(
- runner_name=self.runner_name,
- group_name=self.state['current_group'],
- test_name=name,
- status=status,
- details=trace,
- test_count=self.state['test_count'],
- test_time=test_time,
- runner_total=None,
- additional_info=additional_info,
- group_total=self.state['current_group_total'],
- test_run_name=self.state['test_run_name']))
-
- def _log_association(self, event_data):
- pass
-
- switch_handler = {EVENT_NAMES['module_started']: _module_started,
- EVENT_NAMES['run_started']: _run_started,
- EVENT_NAMES['test_started']: _test_started,
- EVENT_NAMES['test_failed']: _test_failed,
- EVENT_NAMES['test_ignored']: _test_ignored,
- EVENT_NAMES['test_assumption_failure']: _test_assumption_failure,
- EVENT_NAMES['run_failed']: _run_failed,
- EVENT_NAMES['invocation_failed']: _invocation_failed,
- EVENT_NAMES['test_ended']: _test_ended,
- EVENT_NAMES['run_ended']: _run_ended,
- EVENT_NAMES['module_ended']: _module_ended,
- EVENT_NAMES['log_association']: _log_association}
-
- def process_event(self, event_name, event_data):
- """Process the events of the test run and call reporter with results.
-
- Args:
- event_name: A string of the event name.
- event_data: A dict of event data.
- """
- logging.debug('Processing %s %s', event_name, event_data)
- if event_name in START_EVENTS:
- self.event_stack.append(event_name)
- elif event_name in END_EVENTS:
- self._check_events_are_balanced(event_name, self.reporter)
- if self.switch_handler.has_key(event_name):
- self.switch_handler[event_name](self, event_data)
- else:
- # TODO(b/128875503): Implement the mechanism to inform not handled TF event.
- logging.debug('Event[%s] is not processable.', event_name)
-
- def _check_events_are_balanced(self, event_name, reporter):
- """Check Start events and End events. They should be balanced.
-
- If they are not balanced, print the error message in
- state['last_failed'], then raise TradeFedExitError.
-
- Args:
- event_name: A string of the event name.
- reporter: A ResultReporter instance.
- Raises:
- TradeFedExitError if we doesn't have a balance of START/END events.
- """
- start_event = self.event_stack.pop() if self.event_stack else None
- if not start_event or EVENT_PAIRS[start_event] != event_name:
- # Here bubble up the failed trace in the situation having
- # TEST_FAILED but never receiving TEST_ENDED.
- if self.state['last_failed'] and (start_event ==
- EVENT_NAMES['test_started']):
- reporter.process_test_result(test_runner_base.TestResult(
- runner_name=self.runner_name,
- group_name=self.state['current_group'],
- test_name=self.state['last_failed']['name'],
- status=test_runner_base.FAILED_STATUS,
- details=self.state['last_failed']['trace'],
- test_count=self.state['test_count'],
- test_time='',
- runner_total=None,
- group_total=self.state['current_group_total'],
- additional_info={},
- test_run_name=self.state['test_run_name']))
- raise EventHandleError(EVENTS_NOT_BALANCED % (start_event,
- event_name))
-
- @staticmethod
- def _calc_duration(duration):
- """Convert duration from ms to 3h2m43.034s.
-
- Args:
- duration: millisecond
-
- Returns:
- string in h:m:s, m:s, s or millis, depends on the duration.
- """
- delta = timedelta(milliseconds=duration)
- timestamp = str(delta).split(':') # hh:mm:microsec
-
- if duration < ONE_SECOND:
- return "({}ms)".format(duration)
- elif duration < ONE_MINUTE:
- return "({:.3f}s)".format(float(timestamp[2]))
- elif duration < ONE_HOUR:
- return "({0}m{1:.3f}s)".format(timestamp[1], float(timestamp[2]))
- return "({0}h{1}m{2:.3f}s)".format(timestamp[0],
- timestamp[1], float(timestamp[2]))
diff --git a/atest-py2/test_runners/event_handler_unittest.py b/atest-py2/test_runners/event_handler_unittest.py
deleted file mode 100755
index 09069b2..0000000
--- a/atest-py2/test_runners/event_handler_unittest.py
+++ /dev/null
@@ -1,348 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2019, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittests for event_handler."""
-
-import unittest
-import mock
-
-import atest_tf_test_runner as atf_tr
-import event_handler as e_h
-from test_runners import test_runner_base
-
-
-EVENTS_NORMAL = [
- ('TEST_MODULE_STARTED', {
- 'moduleContextFileName':'serial-util1146216{974}2772610436.ser',
- 'moduleName':'someTestModule'}),
- ('TEST_RUN_STARTED', {'testCount': 2, 'runName': 'com.android.UnitTests'}),
- ('TEST_STARTED', {'start_time':52, 'className':'someClassName',
- 'testName':'someTestName'}),
- ('TEST_ENDED', {'end_time':1048, 'className':'someClassName',
- 'testName':'someTestName'}),
- ('TEST_STARTED', {'start_time':48, 'className':'someClassName2',
- 'testName':'someTestName2'}),
- ('TEST_FAILED', {'className':'someClassName2', 'testName':'someTestName2',
- 'trace': 'someTrace'}),
- ('TEST_ENDED', {'end_time':9876450, 'className':'someClassName2',
- 'testName':'someTestName2'}),
- ('TEST_RUN_ENDED', {}),
- ('TEST_MODULE_ENDED', {'foo': 'bar'}),
-]
-
-EVENTS_RUN_FAILURE = [
- ('TEST_MODULE_STARTED', {
- 'moduleContextFileName': 'serial-util11462169742772610436.ser',
- 'moduleName': 'someTestModule'}),
- ('TEST_RUN_STARTED', {'testCount': 2, 'runName': 'com.android.UnitTests'}),
- ('TEST_STARTED', {'start_time':10, 'className': 'someClassName',
- 'testName':'someTestName'}),
- ('TEST_RUN_FAILED', {'reason': 'someRunFailureReason'})
-]
-
-
-EVENTS_INVOCATION_FAILURE = [
- ('TEST_RUN_STARTED', {'testCount': None, 'runName': 'com.android.UnitTests'}),
- ('INVOCATION_FAILED', {'cause': 'someInvocationFailureReason'})
-]
-
-EVENTS_MISSING_TEST_RUN_STARTED_EVENT = [
- ('TEST_STARTED', {'start_time':52, 'className':'someClassName',
- 'testName':'someTestName'}),
- ('TEST_ENDED', {'end_time':1048, 'className':'someClassName',
- 'testName':'someTestName'}),
-]
-
-EVENTS_NOT_BALANCED_BEFORE_RAISE = [
- ('TEST_MODULE_STARTED', {
- 'moduleContextFileName':'serial-util1146216{974}2772610436.ser',
- 'moduleName':'someTestModule'}),
- ('TEST_RUN_STARTED', {'testCount': 2, 'runName': 'com.android.UnitTests'}),
- ('TEST_STARTED', {'start_time':10, 'className':'someClassName',
- 'testName':'someTestName'}),
- ('TEST_ENDED', {'end_time':18, 'className':'someClassName',
- 'testName':'someTestName'}),
- ('TEST_STARTED', {'start_time':19, 'className':'someClassName',
- 'testName':'someTestName'}),
- ('TEST_FAILED', {'className':'someClassName2', 'testName':'someTestName2',
- 'trace': 'someTrace'}),
-]
-
-EVENTS_IGNORE = [
- ('TEST_MODULE_STARTED', {
- 'moduleContextFileName':'serial-util1146216{974}2772610436.ser',
- 'moduleName':'someTestModule'}),
- ('TEST_RUN_STARTED', {'testCount': 2, 'runName': 'com.android.UnitTests'}),
- ('TEST_STARTED', {'start_time':8, 'className':'someClassName',
- 'testName':'someTestName'}),
- ('TEST_ENDED', {'end_time':18, 'className':'someClassName',
- 'testName':'someTestName'}),
- ('TEST_STARTED', {'start_time':28, 'className':'someClassName2',
- 'testName':'someTestName2'}),
- ('TEST_IGNORED', {'className':'someClassName2', 'testName':'someTestName2',
- 'trace': 'someTrace'}),
- ('TEST_ENDED', {'end_time':90, 'className':'someClassName2',
- 'testName':'someTestName2'}),
- ('TEST_RUN_ENDED', {}),
- ('TEST_MODULE_ENDED', {'foo': 'bar'}),
-]
-
-EVENTS_WITH_PERF_INFO = [
- ('TEST_MODULE_STARTED', {
- 'moduleContextFileName':'serial-util1146216{974}2772610436.ser',
- 'moduleName':'someTestModule'}),
- ('TEST_RUN_STARTED', {'testCount': 2, 'runName': 'com.android.UnitTests'}),
- ('TEST_STARTED', {'start_time':52, 'className':'someClassName',
- 'testName':'someTestName'}),
- ('TEST_ENDED', {'end_time':1048, 'className':'someClassName',
- 'testName':'someTestName'}),
- ('TEST_STARTED', {'start_time':48, 'className':'someClassName2',
- 'testName':'someTestName2'}),
- ('TEST_FAILED', {'className':'someClassName2', 'testName':'someTestName2',
- 'trace': 'someTrace'}),
- ('TEST_ENDED', {'end_time':9876450, 'className':'someClassName2',
- 'testName':'someTestName2', 'cpu_time':'1234.1234(ns)',
- 'real_time':'5678.5678(ns)', 'iterations':'6666'}),
- ('TEST_STARTED', {'start_time':10, 'className':'someClassName3',
- 'testName':'someTestName3'}),
- ('TEST_ENDED', {'end_time':70, 'className':'someClassName3',
- 'testName':'someTestName3', 'additional_info_min':'102773',
- 'additional_info_mean':'105973', 'additional_info_median':'103778'}),
- ('TEST_RUN_ENDED', {}),
- ('TEST_MODULE_ENDED', {'foo': 'bar'}),
-]
-
-class EventHandlerUnittests(unittest.TestCase):
- """Unit tests for event_handler.py"""
-
- def setUp(self):
- reload(e_h)
- self.mock_reporter = mock.Mock()
- self.fake_eh = e_h.EventHandler(self.mock_reporter,
- atf_tr.AtestTradefedTestRunner.NAME)
-
- def tearDown(self):
- mock.patch.stopall()
-
- def test_process_event_normal_results(self):
- """Test process_event method for normal test results."""
- for name, data in EVENTS_NORMAL:
- self.fake_eh.process_event(name, data)
- call1 = mock.call(test_runner_base.TestResult(
- runner_name=atf_tr.AtestTradefedTestRunner.NAME,
- group_name='someTestModule',
- test_name='someClassName#someTestName',
- status=test_runner_base.PASSED_STATUS,
- details=None,
- test_count=1,
- test_time='(996ms)',
- runner_total=None,
- group_total=2,
- additional_info={},
- test_run_name='com.android.UnitTests'
- ))
- call2 = mock.call(test_runner_base.TestResult(
- runner_name=atf_tr.AtestTradefedTestRunner.NAME,
- group_name='someTestModule',
- test_name='someClassName2#someTestName2',
- status=test_runner_base.FAILED_STATUS,
- details='someTrace',
- test_count=2,
- test_time='(2h44m36.402s)',
- runner_total=None,
- group_total=2,
- additional_info={},
- test_run_name='com.android.UnitTests'
- ))
- self.mock_reporter.process_test_result.assert_has_calls([call1, call2])
-
- def test_process_event_run_failure(self):
- """Test process_event method run failure."""
- for name, data in EVENTS_RUN_FAILURE:
- self.fake_eh.process_event(name, data)
- call = mock.call(test_runner_base.TestResult(
- runner_name=atf_tr.AtestTradefedTestRunner.NAME,
- group_name='someTestModule',
- test_name='someClassName#someTestName',
- status=test_runner_base.ERROR_STATUS,
- details='someRunFailureReason',
- test_count=1,
- test_time='',
- runner_total=None,
- group_total=2,
- additional_info={},
- test_run_name='com.android.UnitTests'
- ))
- self.mock_reporter.process_test_result.assert_has_calls([call])
-
- def test_process_event_invocation_failure(self):
- """Test process_event method with invocation failure."""
- for name, data in EVENTS_INVOCATION_FAILURE:
- self.fake_eh.process_event(name, data)
- call = mock.call(test_runner_base.TestResult(
- runner_name=atf_tr.AtestTradefedTestRunner.NAME,
- group_name=None,
- test_name=None,
- status=test_runner_base.ERROR_STATUS,
- details='someInvocationFailureReason',
- test_count=0,
- test_time='',
- runner_total=None,
- group_total=None,
- additional_info={},
- test_run_name='com.android.UnitTests'
- ))
- self.mock_reporter.process_test_result.assert_has_calls([call])
-
- def test_process_event_missing_test_run_started_event(self):
- """Test process_event method for normal test results."""
- for name, data in EVENTS_MISSING_TEST_RUN_STARTED_EVENT:
- self.fake_eh.process_event(name, data)
- call = mock.call(test_runner_base.TestResult(
- runner_name=atf_tr.AtestTradefedTestRunner.NAME,
- group_name=None,
- test_name='someClassName#someTestName',
- status=test_runner_base.PASSED_STATUS,
- details=None,
- test_count=1,
- test_time='(996ms)',
- runner_total=None,
- group_total=None,
- additional_info={},
- test_run_name=None
- ))
- self.mock_reporter.process_test_result.assert_has_calls([call])
-
- # pylint: disable=protected-access
- def test_process_event_not_balanced(self):
- """Test process_event method with start/end event name not balanced."""
- for name, data in EVENTS_NOT_BALANCED_BEFORE_RAISE:
- self.fake_eh.process_event(name, data)
- call = mock.call(test_runner_base.TestResult(
- runner_name=atf_tr.AtestTradefedTestRunner.NAME,
- group_name='someTestModule',
- test_name='someClassName#someTestName',
- status=test_runner_base.PASSED_STATUS,
- details=None,
- test_count=1,
- test_time='(8ms)',
- runner_total=None,
- group_total=2,
- additional_info={},
- test_run_name='com.android.UnitTests'
- ))
- self.mock_reporter.process_test_result.assert_has_calls([call])
- # Event pair: TEST_STARTED -> TEST_RUN_ENDED
- # It should raise TradeFedExitError in _check_events_are_balanced()
- name = 'TEST_RUN_ENDED'
- data = {}
- self.assertRaises(e_h.EventHandleError,
- self.fake_eh._check_events_are_balanced,
- name, self.mock_reporter)
- # Event pair: TEST_RUN_STARTED -> TEST_MODULE_ENDED
- # It should raise TradeFedExitError in _check_events_are_balanced()
- name = 'TEST_MODULE_ENDED'
- data = {'foo': 'bar'}
- self.assertRaises(e_h.EventHandleError,
- self.fake_eh._check_events_are_balanced,
- name, self.mock_reporter)
-
- def test_process_event_ignore(self):
- """Test _process_event method for normal test results."""
- for name, data in EVENTS_IGNORE:
- self.fake_eh.process_event(name, data)
- call1 = mock.call(test_runner_base.TestResult(
- runner_name=atf_tr.AtestTradefedTestRunner.NAME,
- group_name='someTestModule',
- test_name='someClassName#someTestName',
- status=test_runner_base.PASSED_STATUS,
- details=None,
- test_count=1,
- test_time='(10ms)',
- runner_total=None,
- group_total=2,
- additional_info={},
- test_run_name='com.android.UnitTests'
- ))
- call2 = mock.call(test_runner_base.TestResult(
- runner_name=atf_tr.AtestTradefedTestRunner.NAME,
- group_name='someTestModule',
- test_name='someClassName2#someTestName2',
- status=test_runner_base.IGNORED_STATUS,
- details=None,
- test_count=2,
- test_time='(62ms)',
- runner_total=None,
- group_total=2,
- additional_info={},
- test_run_name='com.android.UnitTests'
- ))
- self.mock_reporter.process_test_result.assert_has_calls([call1, call2])
-
- def test_process_event_with_additional_info(self):
- """Test process_event method with perf information."""
- for name, data in EVENTS_WITH_PERF_INFO:
- self.fake_eh.process_event(name, data)
- call1 = mock.call(test_runner_base.TestResult(
- runner_name=atf_tr.AtestTradefedTestRunner.NAME,
- group_name='someTestModule',
- test_name='someClassName#someTestName',
- status=test_runner_base.PASSED_STATUS,
- details=None,
- test_count=1,
- test_time='(996ms)',
- runner_total=None,
- group_total=2,
- additional_info={},
- test_run_name='com.android.UnitTests'
- ))
-
- test_additional_info = {'cpu_time':'1234.1234(ns)', 'real_time':'5678.5678(ns)',
- 'iterations':'6666'}
- call2 = mock.call(test_runner_base.TestResult(
- runner_name=atf_tr.AtestTradefedTestRunner.NAME,
- group_name='someTestModule',
- test_name='someClassName2#someTestName2',
- status=test_runner_base.FAILED_STATUS,
- details='someTrace',
- test_count=2,
- test_time='(2h44m36.402s)',
- runner_total=None,
- group_total=2,
- additional_info=test_additional_info,
- test_run_name='com.android.UnitTests'
- ))
-
- test_additional_info2 = {'additional_info_min':'102773',
- 'additional_info_mean':'105973',
- 'additional_info_median':'103778'}
- call3 = mock.call(test_runner_base.TestResult(
- runner_name=atf_tr.AtestTradefedTestRunner.NAME,
- group_name='someTestModule',
- test_name='someClassName3#someTestName3',
- status=test_runner_base.PASSED_STATUS,
- details=None,
- test_count=3,
- test_time='(60ms)',
- runner_total=None,
- group_total=2,
- additional_info=test_additional_info2,
- test_run_name='com.android.UnitTests'
- ))
- self.mock_reporter.process_test_result.assert_has_calls([call1, call2, call3])
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest-py2/test_runners/example_test_runner.py b/atest-py2/test_runners/example_test_runner.py
deleted file mode 100644
index dc18112..0000000
--- a/atest-py2/test_runners/example_test_runner.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Example test runner class.
-"""
-
-# pylint: disable=import-error
-import test_runner_base
-
-
-class ExampleTestRunner(test_runner_base.TestRunnerBase):
- """Base Test Runner class."""
- NAME = 'ExampleTestRunner'
- EXECUTABLE = 'echo'
- _RUN_CMD = '{exe} ExampleTestRunner - test:{test}'
- _BUILD_REQ = set()
-
- def run_tests(self, test_infos, extra_args, reporter):
- """Run the list of test_infos.
-
- Args:
- test_infos: List of TestInfo.
- extra_args: Dict of extra args to add to test run.
- reporter: An instance of result_report.ResultReporter
- """
- run_cmds = self.generate_run_commands(test_infos, extra_args)
- for run_cmd in run_cmds:
- super(ExampleTestRunner, self).run(run_cmd)
-
- def host_env_check(self):
- """Check that host env has everything we need.
-
- We actually can assume the host env is fine because we have the same
- requirements that atest has. Update this to check for android env vars
- if that changes.
- """
- pass
-
- def get_test_runner_build_reqs(self):
- """Return the build requirements.
-
- Returns:
- Set of build targets.
- """
- return set()
-
- # pylint: disable=unused-argument
- def generate_run_commands(self, test_infos, extra_args, port=None):
- """Generate a list of run commands from TestInfos.
-
- Args:
- test_infos: A set of TestInfo instances.
- extra_args: A Dict of extra args to append.
- port: Optional. An int of the port number to send events to.
- Subprocess reporter in TF won't try to connect if it's None.
-
- Returns:
- A list of run commands to run the tests.
- """
- run_cmds = []
- for test_info in test_infos:
- run_cmd_dict = {'exe': self.EXECUTABLE,
- 'test': test_info.test_name}
- run_cmds.extend(self._RUN_CMD.format(**run_cmd_dict))
- return run_cmds
diff --git a/atest-py2/test_runners/regression_test_runner.py b/atest-py2/test_runners/regression_test_runner.py
deleted file mode 100644
index 078040a..0000000
--- a/atest-py2/test_runners/regression_test_runner.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Regression Detection test runner class.
-"""
-
-# pylint: disable=import-error
-import constants
-from test_runners import test_runner_base
-
-
-class RegressionTestRunner(test_runner_base.TestRunnerBase):
- """Regression Test Runner class."""
- NAME = 'RegressionTestRunner'
- EXECUTABLE = 'tradefed.sh'
- _RUN_CMD = '{exe} run commandAndExit regression -n {args}'
- _BUILD_REQ = {'tradefed-core', constants.ATEST_TF_MODULE}
-
- def __init__(self, results_dir):
- """Init stuff for base class."""
- super(RegressionTestRunner, self).__init__(results_dir)
- self.run_cmd_dict = {'exe': self.EXECUTABLE,
- 'args': ''}
-
- # pylint: disable=unused-argument
- def run_tests(self, test_infos, extra_args, reporter):
- """Run the list of test_infos.
-
- Args:
- test_infos: List of TestInfo.
- extra_args: Dict of args to add to regression detection test run.
- reporter: A ResultReporter instance.
-
- Returns:
- Return code of the process for running tests.
- """
- run_cmds = self.generate_run_commands(test_infos, extra_args)
- proc = super(RegressionTestRunner, self).run(run_cmds[0],
- output_to_stdout=True)
- proc.wait()
- return proc.returncode
-
- def host_env_check(self):
- """Check that host env has everything we need.
-
- We actually can assume the host env is fine because we have the same
- requirements that atest has. Update this to check for android env vars
- if that changes.
- """
- pass
-
- def get_test_runner_build_reqs(self):
- """Return the build requirements.
-
- Returns:
- Set of build targets.
- """
- return self._BUILD_REQ
-
- # pylint: disable=unused-argument
- def generate_run_commands(self, test_infos, extra_args, port=None):
- """Generate a list of run commands from TestInfos.
-
- Args:
- test_infos: A set of TestInfo instances.
- extra_args: A Dict of extra args to append.
- port: Optional. An int of the port number to send events to.
- Subprocess reporter in TF won't try to connect if it's None.
-
- Returns:
- A list that contains the string of atest tradefed run command.
- Only one command is returned.
- """
- pre = extra_args.pop(constants.PRE_PATCH_FOLDER)
- post = extra_args.pop(constants.POST_PATCH_FOLDER)
- args = ['--pre-patch-metrics', pre, '--post-patch-metrics', post]
- self.run_cmd_dict['args'] = ' '.join(args)
- run_cmd = self._RUN_CMD.format(**self.run_cmd_dict)
- return [run_cmd]
diff --git a/atest-py2/test_runners/robolectric_test_runner.py b/atest-py2/test_runners/robolectric_test_runner.py
deleted file mode 100644
index fa34149..0000000
--- a/atest-py2/test_runners/robolectric_test_runner.py
+++ /dev/null
@@ -1,256 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Robolectric test runner class.
-
-This test runner will be short lived, once robolectric support v2 is in, then
-robolectric tests will be invoked through AtestTFTestRunner.
-"""
-
-import json
-import logging
-import os
-import re
-import tempfile
-import time
-
-from functools import partial
-
-# pylint: disable=import-error
-import atest_utils
-import constants
-
-from event_handler import EventHandler
-from test_runners import test_runner_base
-
-POLL_FREQ_SECS = 0.1
-# A pattern to match event like below
-#TEST_FAILED {'className':'SomeClass', 'testName':'SomeTestName',
-# 'trace':'{"trace":"AssertionError: <true> is equal to <false>\n
-# at FailureStrategy.fail(FailureStrategy.java:24)\n
-# at FailureStrategy.fail(FailureStrategy.java:20)\n"}\n\n
-EVENT_RE = re.compile(r'^(?P<event_name>[A-Z_]+) (?P<json_data>{(.\r*|\n)*})(?:\n|$)')
-
-
-class RobolectricTestRunner(test_runner_base.TestRunnerBase):
- """Robolectric Test Runner class."""
- NAME = 'RobolectricTestRunner'
- # We don't actually use EXECUTABLE because we're going to use
- # atest_utils.build to kick off the test but if we don't set it, the base
- # class will raise an exception.
- EXECUTABLE = 'make'
-
- # pylint: disable=useless-super-delegation
- def __init__(self, results_dir, **kwargs):
- """Init stuff for robolectric runner class."""
- super(RobolectricTestRunner, self).__init__(results_dir, **kwargs)
- self.is_verbose = logging.getLogger().isEnabledFor(logging.DEBUG)
-
- def run_tests(self, test_infos, extra_args, reporter):
- """Run the list of test_infos. See base class for more.
-
- Args:
- test_infos: A list of TestInfos.
- extra_args: Dict of extra args to add to test run.
- reporter: An instance of result_report.ResultReporter.
-
- Returns:
- 0 if tests succeed, non-zero otherwise.
- """
- if os.getenv(test_runner_base.OLD_OUTPUT_ENV_VAR):
- return self.run_tests_raw(test_infos, extra_args, reporter)
- return self.run_tests_pretty(test_infos, extra_args, reporter)
-
- def run_tests_raw(self, test_infos, extra_args, reporter):
- """Run the list of test_infos with raw output.
-
- Args:
- test_infos: List of TestInfo.
- extra_args: Dict of extra args to add to test run.
- reporter: A ResultReporter Instance.
-
- Returns:
- 0 if tests succeed, non-zero otherwise.
- """
- reporter.register_unsupported_runner(self.NAME)
- ret_code = constants.EXIT_CODE_SUCCESS
- for test_info in test_infos:
- full_env_vars = self._get_full_build_environ(test_info,
- extra_args)
- run_cmd = self.generate_run_commands([test_info], extra_args)[0]
- subproc = self.run(run_cmd,
- output_to_stdout=self.is_verbose,
- env_vars=full_env_vars)
- ret_code |= self.wait_for_subprocess(subproc)
- return ret_code
-
- def run_tests_pretty(self, test_infos, extra_args, reporter):
- """Run the list of test_infos with pretty output mode.
-
- Args:
- test_infos: List of TestInfo.
- extra_args: Dict of extra args to add to test run.
- reporter: A ResultReporter Instance.
-
- Returns:
- 0 if tests succeed, non-zero otherwise.
- """
- ret_code = constants.EXIT_CODE_SUCCESS
- for test_info in test_infos:
- # Create a temp communication file.
- with tempfile.NamedTemporaryFile(mode='w+r',
- dir=self.results_dir) as event_file:
- # Prepare build environment parameter.
- full_env_vars = self._get_full_build_environ(test_info,
- extra_args,
- event_file)
- run_cmd = self.generate_run_commands([test_info], extra_args)[0]
- subproc = self.run(run_cmd,
- output_to_stdout=self.is_verbose,
- env_vars=full_env_vars)
- event_handler = EventHandler(reporter, self.NAME)
- # Start polling.
- self.handle_subprocess(subproc, partial(self._exec_with_robo_polling,
- event_file,
- subproc,
- event_handler))
- ret_code |= self.wait_for_subprocess(subproc)
- return ret_code
-
- def _get_full_build_environ(self, test_info=None, extra_args=None, event_file=None):
- """Helper to get full build environment.
-
- Args:
- test_info: TestInfo object.
- extra_args: Dict of extra args to add to test run.
- event_file: A file-like object that can be used as a temporary storage area.
- """
- full_env_vars = os.environ.copy()
- env_vars = self.generate_env_vars(test_info,
- extra_args,
- event_file)
- full_env_vars.update(env_vars)
- return full_env_vars
-
- def _exec_with_robo_polling(self, communication_file, robo_proc, event_handler):
- """Polling data from communication file
-
- Polling data from communication file. Exit when communication file
- is empty and subprocess ended.
-
- Args:
- communication_file: A monitored communication file.
- robo_proc: The build process.
- event_handler: A file-like object storing the events of robolectric tests.
- """
- buf = ''
- while True:
- # Make sure that ATest gets content from current position.
- communication_file.seek(0, 1)
- data = communication_file.read()
- buf += data
- reg = re.compile(r'(.|\n)*}\n\n')
- if not reg.match(buf) or data == '':
- if robo_proc.poll() is not None:
- logging.debug('Build process exited early')
- return
- time.sleep(POLL_FREQ_SECS)
- else:
- # Read all new data and handle it at one time.
- for event in re.split(r'\n\n', buf):
- match = EVENT_RE.match(event)
- if match:
- try:
- event_data = json.loads(match.group('json_data'),
- strict=False)
- except ValueError:
- # Parse event fail, continue to parse next one.
- logging.debug('"%s" is not valid json format.',
- match.group('json_data'))
- continue
- event_name = match.group('event_name')
- event_handler.process_event(event_name, event_data)
- buf = ''
-
- @staticmethod
- def generate_env_vars(test_info, extra_args, event_file=None):
- """Turn the args into env vars.
-
- Robolectric tests specify args through env vars, so look for class
- filters and debug args to apply to the env.
-
- Args:
- test_info: TestInfo class that holds the class filter info.
- extra_args: Dict of extra args to apply for test run.
- event_file: A file-like object storing the events of robolectric tests.
-
- Returns:
- Dict of env vars to pass into invocation.
- """
- env_var = {}
- for arg in extra_args:
- if constants.WAIT_FOR_DEBUGGER == arg:
- env_var['DEBUG_ROBOLECTRIC'] = 'true'
- continue
- filters = test_info.data.get(constants.TI_FILTER)
- if filters:
- robo_filter = next(iter(filters))
- env_var['ROBOTEST_FILTER'] = robo_filter.class_name
- if robo_filter.methods:
- logging.debug('method filtering not supported for robolectric '
- 'tests yet.')
- if event_file:
- env_var['EVENT_FILE_ROBOLECTRIC'] = event_file.name
- return env_var
-
- def host_env_check(self):
- """Check that host env has everything we need.
-
- We actually can assume the host env is fine because we have the same
- requirements that atest has. Update this to check for android env vars
- if that changes.
- """
- pass
-
- def get_test_runner_build_reqs(self):
- """Return the build requirements.
-
- Returns:
- Set of build targets.
- """
- return set()
-
- # pylint: disable=unused-argument
- def generate_run_commands(self, test_infos, extra_args, port=None):
- """Generate a list of run commands from TestInfos.
-
- Args:
- test_infos: A set of TestInfo instances.
- extra_args: A Dict of extra args to append.
- port: Optional. An int of the port number to send events to.
- Subprocess reporter in TF won't try to connect if it's None.
-
- Returns:
- A list of run commands to run the tests.
- """
- run_cmds = []
- for test_info in test_infos:
- robo_command = atest_utils.get_build_cmd() + [str(test_info.test_name)]
- run_cmd = ' '.join(x for x in robo_command)
- if constants.DRY_RUN in extra_args:
- run_cmd = run_cmd.replace(
- os.environ.get(constants.ANDROID_BUILD_TOP) + os.sep, '')
- run_cmds.append(run_cmd)
- return run_cmds
diff --git a/atest-py2/test_runners/robolectric_test_runner_unittest.py b/atest-py2/test_runners/robolectric_test_runner_unittest.py
deleted file mode 100755
index 46164f0..0000000
--- a/atest-py2/test_runners/robolectric_test_runner_unittest.py
+++ /dev/null
@@ -1,144 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Unittests for robolectric_test_runner."""
-
-import json
-import unittest
-import subprocess
-import tempfile
-import mock
-
-import event_handler
-# pylint: disable=import-error
-from test_finders import test_info
-from test_runners import robolectric_test_runner
-
-# pylint: disable=protected-access
-class RobolectricTestRunnerUnittests(unittest.TestCase):
- """Unit tests for robolectric_test_runner.py"""
-
- def setUp(self):
- self.polling_time = robolectric_test_runner.POLL_FREQ_SECS
- self.suite_tr = robolectric_test_runner.RobolectricTestRunner(results_dir='')
-
- def tearDown(self):
- mock.patch.stopall()
-
- @mock.patch.object(robolectric_test_runner.RobolectricTestRunner, 'run')
- def test_run_tests_raw(self, mock_run):
- """Test run_tests_raw method."""
- test_infos = [test_info.TestInfo("Robo1",
- "RobolectricTestRunner",
- ["RoboTest"])]
- extra_args = []
- mock_subproc = mock.Mock()
- mock_run.return_value = mock_subproc
- mock_subproc.returncode = 0
- mock_reporter = mock.Mock()
- # Test Build Pass
- self.assertEqual(
- 0,
- self.suite_tr.run_tests_raw(test_infos, extra_args, mock_reporter))
- # Test Build Fail
- mock_subproc.returncode = 1
- self.assertNotEqual(
- 0,
- self.suite_tr.run_tests_raw(test_infos, extra_args, mock_reporter))
-
- @mock.patch.object(event_handler.EventHandler, 'process_event')
- def test_exec_with_robo_polling_complete_information(self, mock_pe):
- """Test _exec_with_robo_polling method."""
- event_name = 'TEST_STARTED'
- event_data = {'className':'SomeClass', 'testName':'SomeTestName'}
-
- json_event_data = json.dumps(event_data)
- data = '%s %s\n\n' %(event_name, json_event_data)
- event_file = tempfile.NamedTemporaryFile(mode='w+r', delete=True)
- subprocess.call("echo '%s' -n >> %s" %(data, event_file.name), shell=True)
- robo_proc = subprocess.Popen("sleep %s" %str(self.polling_time * 2), shell=True)
- self.suite_tr. _exec_with_robo_polling(event_file, robo_proc, mock_pe)
- calls = [mock.call.process_event(event_name, event_data)]
- mock_pe.assert_has_calls(calls)
-
- @mock.patch.object(event_handler.EventHandler, 'process_event')
- def test_exec_with_robo_polling_with_partial_info(self, mock_pe):
- """Test _exec_with_robo_polling method."""
- event_name = 'TEST_STARTED'
- event1 = '{"className":"SomeClass","test'
- event2 = 'Name":"SomeTestName"}\n\n'
- data1 = '%s %s'%(event_name, event1)
- data2 = event2
- event_file = tempfile.NamedTemporaryFile(mode='w+r', delete=True)
- subprocess.Popen("echo -n '%s' >> %s" %(data1, event_file.name), shell=True)
- robo_proc = subprocess.Popen("echo '%s' >> %s && sleep %s"
- %(data2,
- event_file.name,
- str(self.polling_time*5)),
- shell=True)
- self.suite_tr. _exec_with_robo_polling(event_file, robo_proc, mock_pe)
- calls = [mock.call.process_event(event_name,
- json.loads(event1 + event2))]
- mock_pe.assert_has_calls(calls)
-
- @mock.patch.object(event_handler.EventHandler, 'process_event')
- def test_exec_with_robo_polling_with_fail_stacktrace(self, mock_pe):
- """Test _exec_with_robo_polling method."""
- event_name = 'TEST_FAILED'
- event_data = {'className':'SomeClass', 'testName':'SomeTestName',
- 'trace':'{"trace":"AssertionError: <true> is equal to <false>\n'
- 'at FailureStrategy.fail(FailureStrategy.java:24)\n'
- 'at FailureStrategy.fail(FailureStrategy.java:20)\n'}
- data = '%s %s\n\n'%(event_name, json.dumps(event_data))
- event_file = tempfile.NamedTemporaryFile(mode='w+r', delete=True)
- subprocess.call("echo '%s' -n >> %s" %(data, event_file.name), shell=True)
- robo_proc = subprocess.Popen("sleep %s" %str(self.polling_time * 2), shell=True)
- self.suite_tr. _exec_with_robo_polling(event_file, robo_proc, mock_pe)
- calls = [mock.call.process_event(event_name, event_data)]
- mock_pe.assert_has_calls(calls)
-
- @mock.patch.object(event_handler.EventHandler, 'process_event')
- def test_exec_with_robo_polling_with_multi_event(self, mock_pe):
- """Test _exec_with_robo_polling method."""
- event_file = tempfile.NamedTemporaryFile(mode='w+r', delete=True)
- events = [
- ('TEST_MODULE_STARTED', {
- 'moduleContextFileName':'serial-util1146216{974}2772610436.ser',
- 'moduleName':'someTestModule'}),
- ('TEST_RUN_STARTED', {'testCount': 2}),
- ('TEST_STARTED', {'start_time':52, 'className':'someClassName',
- 'testName':'someTestName'}),
- ('TEST_ENDED', {'end_time':1048, 'className':'someClassName',
- 'testName':'someTestName'}),
- ('TEST_STARTED', {'start_time':48, 'className':'someClassName2',
- 'testName':'someTestName2'}),
- ('TEST_FAILED', {'className':'someClassName2', 'testName':'someTestName2',
- 'trace': 'someTrace'}),
- ('TEST_ENDED', {'end_time':9876450, 'className':'someClassName2',
- 'testName':'someTestName2'}),
- ('TEST_RUN_ENDED', {}),
- ('TEST_MODULE_ENDED', {'foo': 'bar'}),]
- data = ''
- for event in events:
- data += '%s %s\n\n'%(event[0], json.dumps(event[1]))
-
- subprocess.call("echo '%s' -n >> %s" %(data, event_file.name), shell=True)
- robo_proc = subprocess.Popen("sleep %s" %str(self.polling_time * 2), shell=True)
- self.suite_tr. _exec_with_robo_polling(event_file, robo_proc, mock_pe)
- calls = [mock.call.process_event(name, data) for name, data in events]
- mock_pe.assert_has_calls(calls)
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest-py2/test_runners/suite_plan_test_runner.py b/atest-py2/test_runners/suite_plan_test_runner.py
deleted file mode 100644
index 9ba8233..0000000
--- a/atest-py2/test_runners/suite_plan_test_runner.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-SUITE Tradefed test runner class.
-"""
-
-import copy
-import logging
-
-# pylint: disable=import-error
-from test_runners import atest_tf_test_runner
-import atest_utils
-import constants
-
-
-class SuitePlanTestRunner(atest_tf_test_runner.AtestTradefedTestRunner):
- """Suite Plan Test Runner class."""
- NAME = 'SuitePlanTestRunner'
- EXECUTABLE = '%s-tradefed'
- _RUN_CMD = ('{exe} run commandAndExit {test} {args}')
-
- def __init__(self, results_dir, **kwargs):
- """Init stuff for suite tradefed runner class."""
- super(SuitePlanTestRunner, self).__init__(results_dir, **kwargs)
- self.run_cmd_dict = {'exe': '',
- 'test': '',
- 'args': ''}
-
- def get_test_runner_build_reqs(self):
- """Return the build requirements.
-
- Returns:
- Set of build targets.
- """
- build_req = set()
- build_req |= super(SuitePlanTestRunner,
- self).get_test_runner_build_reqs()
- return build_req
-
- def run_tests(self, test_infos, extra_args, reporter):
- """Run the list of test_infos.
- Args:
- test_infos: List of TestInfo.
- extra_args: Dict of extra args to add to test run.
- reporter: An instance of result_report.ResultReporter.
-
- Returns:
- Return code of the process for running tests.
- """
- reporter.register_unsupported_runner(self.NAME)
- run_cmds = self.generate_run_commands(test_infos, extra_args)
- ret_code = constants.EXIT_CODE_SUCCESS
- for run_cmd in run_cmds:
- proc = super(SuitePlanTestRunner, self).run(run_cmd,
- output_to_stdout=True)
- ret_code |= self.wait_for_subprocess(proc)
- return ret_code
-
- def _parse_extra_args(self, extra_args):
- """Convert the extra args into something *ts-tf can understand.
-
- We want to transform the top-level args from atest into specific args
- that *ts-tradefed supports. The only arg we take as is
- EXTRA_ARG since that is what the user intentionally wants to pass to
- the test runner.
-
- Args:
- extra_args: Dict of args
-
- Returns:
- List of args to append.
- """
- args_to_append = []
- args_not_supported = []
- for arg in extra_args:
- if constants.SERIAL == arg:
- args_to_append.append('--serial')
- args_to_append.append(extra_args[arg])
- continue
- if constants.CUSTOM_ARGS == arg:
- args_to_append.extend(extra_args[arg])
- continue
- if constants.DRY_RUN == arg:
- continue
- args_not_supported.append(arg)
- if args_not_supported:
- logging.info('%s does not support the following args: %s',
- self.EXECUTABLE, args_not_supported)
- return args_to_append
-
- # pylint: disable=arguments-differ
- def generate_run_commands(self, test_infos, extra_args):
- """Generate a list of run commands from TestInfos.
-
- Args:
- test_infos: List of TestInfo tests to run.
- extra_args: Dict of extra args to add to test run.
-
- Returns:
- A List of strings that contains the run command
- which *ts-tradefed supports.
- """
- cmds = []
- args = []
- args.extend(self._parse_extra_args(extra_args))
- args.extend(atest_utils.get_result_server_args())
- for test_info in test_infos:
- cmd_dict = copy.deepcopy(self.run_cmd_dict)
- cmd_dict['test'] = test_info.test_name
- cmd_dict['args'] = ' '.join(args)
- cmd_dict['exe'] = self.EXECUTABLE % test_info.suite
- cmds.append(self._RUN_CMD.format(**cmd_dict))
- return cmds
diff --git a/atest-py2/test_runners/suite_plan_test_runner_unittest.py b/atest-py2/test_runners/suite_plan_test_runner_unittest.py
deleted file mode 100755
index 857452e..0000000
--- a/atest-py2/test_runners/suite_plan_test_runner_unittest.py
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Unittests for test_suite_test_runner."""
-
-import unittest
-import mock
-
-# pylint: disable=import-error
-import suite_plan_test_runner
-import unittest_utils
-from test_finders import test_info
-
-
-# pylint: disable=protected-access
-class SuitePlanTestRunnerUnittests(unittest.TestCase):
- """Unit tests for test_suite_test_runner.py"""
-
- def setUp(self):
- self.suite_tr = suite_plan_test_runner.SuitePlanTestRunner(results_dir='')
-
- def tearDown(self):
- mock.patch.stopall()
-
- @mock.patch('atest_utils.get_result_server_args')
- def test_generate_run_commands(self, mock_resultargs):
- """Test _generate_run_command method.
- Strategy:
- suite_name: cts --> run_cmd: cts-tradefed run commandAndExit cts
- suite_name: cts-common --> run_cmd:
- cts-tradefed run commandAndExit cts-common
- """
- test_infos = set()
- suite_name = 'cts'
- t_info = test_info.TestInfo(suite_name,
- suite_plan_test_runner.SuitePlanTestRunner.NAME,
- {suite_name},
- suite=suite_name)
- test_infos.add(t_info)
-
- # Basic Run Cmd
- run_cmd = []
- exe_cmd = suite_plan_test_runner.SuitePlanTestRunner.EXECUTABLE % suite_name
- run_cmd.append(suite_plan_test_runner.SuitePlanTestRunner._RUN_CMD.format(
- exe=exe_cmd,
- test=suite_name,
- args=''))
- mock_resultargs.return_value = []
- unittest_utils.assert_strict_equal(
- self,
- self.suite_tr.generate_run_commands(test_infos, ''),
- run_cmd)
-
- # Run cmd with --serial LG123456789.
- run_cmd = []
- run_cmd.append(suite_plan_test_runner.SuitePlanTestRunner._RUN_CMD.format(
- exe=exe_cmd,
- test=suite_name,
- args='--serial LG123456789'))
- unittest_utils.assert_strict_equal(
- self,
- self.suite_tr.generate_run_commands(test_infos, {'SERIAL':'LG123456789'}),
- run_cmd)
-
- test_infos = set()
- suite_name = 'cts-common'
- suite = 'cts'
- t_info = test_info.TestInfo(suite_name,
- suite_plan_test_runner.SuitePlanTestRunner.NAME,
- {suite_name},
- suite=suite)
- test_infos.add(t_info)
-
- # Basic Run Cmd
- run_cmd = []
- exe_cmd = suite_plan_test_runner.SuitePlanTestRunner.EXECUTABLE % suite
- run_cmd.append(suite_plan_test_runner.SuitePlanTestRunner._RUN_CMD.format(
- exe=exe_cmd,
- test=suite_name,
- args=''))
- mock_resultargs.return_value = []
- unittest_utils.assert_strict_equal(
- self,
- self.suite_tr.generate_run_commands(test_infos, ''),
- run_cmd)
-
- # Run cmd with --serial LG123456789.
- run_cmd = []
- run_cmd.append(suite_plan_test_runner.SuitePlanTestRunner._RUN_CMD.format(
- exe=exe_cmd,
- test=suite_name,
- args='--serial LG123456789'))
- unittest_utils.assert_strict_equal(
- self,
- self.suite_tr.generate_run_commands(test_infos, {'SERIAL':'LG123456789'}),
- run_cmd)
-
- @mock.patch('subprocess.Popen')
- @mock.patch.object(suite_plan_test_runner.SuitePlanTestRunner, 'run')
- @mock.patch.object(suite_plan_test_runner.SuitePlanTestRunner,
- 'generate_run_commands')
- def test_run_tests(self, _mock_gen_cmd, _mock_run, _mock_popen):
- """Test run_tests method."""
- test_infos = []
- extra_args = []
- mock_reporter = mock.Mock()
- _mock_gen_cmd.return_value = ["cmd1", "cmd2"]
- # Test Build Pass
- _mock_popen.return_value.returncode = 0
- self.assertEqual(
- 0,
- self.suite_tr.run_tests(test_infos, extra_args, mock_reporter))
-
- # Test Build Pass
- _mock_popen.return_value.returncode = 1
- self.assertNotEqual(
- 0,
- self.suite_tr.run_tests(test_infos, extra_args, mock_reporter))
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest-py2/test_runners/test_runner_base.py b/atest-py2/test_runners/test_runner_base.py
deleted file mode 100644
index 22994e3..0000000
--- a/atest-py2/test_runners/test_runner_base.py
+++ /dev/null
@@ -1,205 +0,0 @@
-# Copyright 2017, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Base test runner class.
-
-Class that other test runners will instantiate for test runners.
-"""
-
-from __future__ import print_function
-import errno
-import logging
-import signal
-import subprocess
-import tempfile
-import os
-import sys
-
-from collections import namedtuple
-
-# pylint: disable=import-error
-import atest_error
-import atest_utils
-import constants
-
-OLD_OUTPUT_ENV_VAR = 'ATEST_OLD_OUTPUT'
-
-# TestResult contains information of individual tests during a test run.
-TestResult = namedtuple('TestResult', ['runner_name', 'group_name',
- 'test_name', 'status', 'details',
- 'test_count', 'test_time',
- 'runner_total', 'group_total',
- 'additional_info', 'test_run_name'])
-ASSUMPTION_FAILED = 'ASSUMPTION_FAILED'
-FAILED_STATUS = 'FAILED'
-PASSED_STATUS = 'PASSED'
-IGNORED_STATUS = 'IGNORED'
-ERROR_STATUS = 'ERROR'
-
-class TestRunnerBase(object):
- """Base Test Runner class."""
- NAME = ''
- EXECUTABLE = ''
-
- def __init__(self, results_dir, **kwargs):
- """Init stuff for base class."""
- self.results_dir = results_dir
- self.test_log_file = None
- if not self.NAME:
- raise atest_error.NoTestRunnerName('Class var NAME is not defined.')
- if not self.EXECUTABLE:
- raise atest_error.NoTestRunnerExecutable('Class var EXECUTABLE is '
- 'not defined.')
- if kwargs:
- logging.debug('ignoring the following args: %s', kwargs)
-
- def run(self, cmd, output_to_stdout=False, env_vars=None):
- """Shell out and execute command.
-
- Args:
- cmd: A string of the command to execute.
- output_to_stdout: A boolean. If False, the raw output of the run
- command will not be seen in the terminal. This
- is the default behavior, since the test_runner's
- run_tests() method should use atest's
- result reporter to print the test results.
-
- Set to True to see the output of the cmd. This
- would be appropriate for verbose runs.
- env_vars: Environment variables passed to the subprocess.
- """
- if not output_to_stdout:
- self.test_log_file = tempfile.NamedTemporaryFile(mode='w',
- dir=self.results_dir,
- delete=True)
- logging.debug('Executing command: %s', cmd)
- return subprocess.Popen(cmd, preexec_fn=os.setsid, shell=True,
- stderr=subprocess.STDOUT, stdout=self.test_log_file,
- env=env_vars)
-
- # pylint: disable=broad-except
- def handle_subprocess(self, subproc, func):
- """Execute the function. Interrupt the subproc when exception occurs.
-
- Args:
- subproc: A subprocess to be terminated.
- func: A function to be run.
- """
- try:
- signal.signal(signal.SIGINT, self._signal_passer(subproc))
- func()
- except Exception as error:
- # exc_info=1 tells logging to log the stacktrace
- logging.debug('Caught exception:', exc_info=1)
- # Remember our current exception scope, before new try block
- # Python3 will make this easier, the error itself stores
- # the scope via error.__traceback__ and it provides a
- # "raise from error" pattern.
- # https://docs.python.org/3.5/reference/simple_stmts.html#raise
- exc_type, exc_msg, traceback_obj = sys.exc_info()
- # If atest crashes, try to kill subproc group as well.
- try:
- logging.debug('Killing subproc: %s', subproc.pid)
- os.killpg(os.getpgid(subproc.pid), signal.SIGINT)
- except OSError:
- # this wipes our previous stack context, which is why
- # we have to save it above.
- logging.debug('Subproc already terminated, skipping')
- finally:
- if self.test_log_file:
- with open(self.test_log_file.name, 'r') as f:
- intro_msg = "Unexpected Issue. Raw Output:"
- print(atest_utils.colorize(intro_msg, constants.RED))
- print(f.read())
- # Ignore socket.recv() raising due to ctrl-c
- if not error.args or error.args[0] != errno.EINTR:
- raise exc_type, exc_msg, traceback_obj
-
- def wait_for_subprocess(self, proc):
- """Check the process status. Interrupt the TF subporcess if user
- hits Ctrl-C.
-
- Args:
- proc: The tradefed subprocess.
-
- Returns:
- Return code of the subprocess for running tests.
- """
- try:
- logging.debug('Runner Name: %s, Process ID: %s', self.NAME, proc.pid)
- signal.signal(signal.SIGINT, self._signal_passer(proc))
- proc.wait()
- return proc.returncode
- except:
- # If atest crashes, kill TF subproc group as well.
- os.killpg(os.getpgid(proc.pid), signal.SIGINT)
- raise
-
- def _signal_passer(self, proc):
- """Return the signal_handler func bound to proc.
-
- Args:
- proc: The tradefed subprocess.
-
- Returns:
- signal_handler function.
- """
- def signal_handler(_signal_number, _frame):
- """Pass SIGINT to proc.
-
- If user hits ctrl-c during atest run, the TradeFed subprocess
- won't stop unless we also send it a SIGINT. The TradeFed process
- is started in a process group, so this SIGINT is sufficient to
- kill all the child processes TradeFed spawns as well.
- """
- logging.info('Ctrl-C received. Killing subprocess group')
- os.killpg(os.getpgid(proc.pid), signal.SIGINT)
- return signal_handler
-
- def run_tests(self, test_infos, extra_args, reporter):
- """Run the list of test_infos.
-
- Should contain code for kicking off the test runs using
- test_runner_base.run(). Results should be processed and printed
- via the reporter passed in.
-
- Args:
- test_infos: List of TestInfo.
- extra_args: Dict of extra args to add to test run.
- reporter: An instance of result_report.ResultReporter.
- """
- raise NotImplementedError
-
- def host_env_check(self):
- """Checks that host env has met requirements."""
- raise NotImplementedError
-
- def get_test_runner_build_reqs(self):
- """Returns a list of build targets required by the test runner."""
- raise NotImplementedError
-
- def generate_run_commands(self, test_infos, extra_args, port=None):
- """Generate a list of run commands from TestInfos.
-
- Args:
- test_infos: A set of TestInfo instances.
- extra_args: A Dict of extra args to append.
- port: Optional. An int of the port number to send events to.
- Subprocess reporter in TF won't try to connect if it's None.
-
- Returns:
- A list of run commands to run the tests.
- """
- raise NotImplementedError
diff --git a/atest-py2/test_runners/vts_tf_test_runner.py b/atest-py2/test_runners/vts_tf_test_runner.py
deleted file mode 100644
index c1f53e0..0000000
--- a/atest-py2/test_runners/vts_tf_test_runner.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-VTS Tradefed test runner class.
-"""
-
-import copy
-import logging
-
-# pylint: disable=import-error
-from test_runners import atest_tf_test_runner
-import atest_utils
-import constants
-
-
-class VtsTradefedTestRunner(atest_tf_test_runner.AtestTradefedTestRunner):
- """TradeFed Test Runner class."""
- NAME = 'VtsTradefedTestRunner'
- EXECUTABLE = 'vts10-tradefed'
- _RUN_CMD = ('{exe} run commandAndExit {plan} -m {test} {args}')
- _BUILD_REQ = {'vts10-tradefed-standalone'}
- _DEFAULT_ARGS = ['--skip-all-system-status-check',
- '--skip-preconditions',
- '--primary-abi-only']
-
- def __init__(self, results_dir, **kwargs):
- """Init stuff for vts10 tradefed runner class."""
- super(VtsTradefedTestRunner, self).__init__(results_dir, **kwargs)
- self.run_cmd_dict = {'exe': self.EXECUTABLE,
- 'test': '',
- 'args': ''}
-
- def get_test_runner_build_reqs(self):
- """Return the build requirements.
-
- Returns:
- Set of build targets.
- """
- build_req = self._BUILD_REQ
- build_req |= super(VtsTradefedTestRunner,
- self).get_test_runner_build_reqs()
- return build_req
-
- def run_tests(self, test_infos, extra_args, reporter):
- """Run the list of test_infos.
-
- Args:
- test_infos: List of TestInfo.
- extra_args: Dict of extra args to add to test run.
- reporter: An instance of result_report.ResultReporter.
-
- Returns:
- Return code of the process for running tests.
- """
- ret_code = constants.EXIT_CODE_SUCCESS
- reporter.register_unsupported_runner(self.NAME)
- run_cmds = self.generate_run_commands(test_infos, extra_args)
- for run_cmd in run_cmds:
- proc = super(VtsTradefedTestRunner, self).run(run_cmd,
- output_to_stdout=True)
- ret_code |= self.wait_for_subprocess(proc)
- return ret_code
-
- def _parse_extra_args(self, extra_args):
- """Convert the extra args into something vts10-tf can understand.
-
- We want to transform the top-level args from atest into specific args
- that vts10-tradefed supports. The only arg we take as is is EXTRA_ARG
- since that is what the user intentionally wants to pass to the test
- runner.
-
- Args:
- extra_args: Dict of args
-
- Returns:
- List of args to append.
- """
- args_to_append = []
- args_not_supported = []
- for arg in extra_args:
- if constants.SERIAL == arg:
- args_to_append.append('--serial')
- args_to_append.append(extra_args[arg])
- continue
- if constants.CUSTOM_ARGS == arg:
- args_to_append.extend(extra_args[arg])
- continue
- if constants.DRY_RUN == arg:
- continue
- args_not_supported.append(arg)
- if args_not_supported:
- logging.info('%s does not support the following args: %s',
- self.EXECUTABLE, args_not_supported)
- return args_to_append
-
- # pylint: disable=arguments-differ
- def generate_run_commands(self, test_infos, extra_args):
- """Generate a list of run commands from TestInfos.
-
- Args:
- test_infos: List of TestInfo tests to run.
- extra_args: Dict of extra args to add to test run.
-
- Returns:
- A List of strings that contains the vts10-tradefed run command.
- """
- cmds = []
- args = self._DEFAULT_ARGS
- args.extend(self._parse_extra_args(extra_args))
- args.extend(atest_utils.get_result_server_args())
- for test_info in test_infos:
- cmd_dict = copy.deepcopy(self.run_cmd_dict)
- cmd_dict['plan'] = constants.VTS_STAGING_PLAN
- cmd_dict['test'] = test_info.test_name
- cmd_dict['args'] = ' '.join(args)
- cmds.append(self._RUN_CMD.format(**cmd_dict))
- return cmds
diff --git a/atest-py2/test_runners/vts_tf_test_runner_unittest.py b/atest-py2/test_runners/vts_tf_test_runner_unittest.py
deleted file mode 100755
index 7e8b408..0000000
--- a/atest-py2/test_runners/vts_tf_test_runner_unittest.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Unittests for vts_tf_test_runner."""
-
-import unittest
-import mock
-
-# pylint: disable=import-error
-from test_runners import vts_tf_test_runner
-
-# pylint: disable=protected-access
-class VtsTradefedTestRunnerUnittests(unittest.TestCase):
- """Unit tests for vts_tf_test_runner.py"""
-
- def setUp(self):
- self.vts_tr = vts_tf_test_runner.VtsTradefedTestRunner(results_dir='')
-
- def tearDown(self):
- mock.patch.stopall()
-
- @mock.patch('subprocess.Popen')
- @mock.patch.object(vts_tf_test_runner.VtsTradefedTestRunner, 'run')
- @mock.patch.object(vts_tf_test_runner.VtsTradefedTestRunner,
- 'generate_run_commands')
- def test_run_tests(self, _mock_gen_cmd, _mock_run, _mock_popen):
- """Test run_tests method."""
- test_infos = []
- extra_args = []
- mock_reporter = mock.Mock()
- _mock_gen_cmd.return_value = ["cmd1", "cmd2"]
- # Test Build Pass
- _mock_popen.return_value.returncode = 0
- self.assertEqual(
- 0,
- self.vts_tr.run_tests(test_infos, extra_args, mock_reporter))
-
- # Test Build Pass
- _mock_popen.return_value.returncode = 1
- self.assertNotEqual(
- 0,
- self.vts_tr.run_tests(test_infos, extra_args, mock_reporter))
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest-py2/tools/__init__.py b/atest-py2/tools/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/atest-py2/tools/__init__.py
+++ /dev/null
diff --git a/atest-py2/tools/atest_tools.py b/atest-py2/tools/atest_tools.py
deleted file mode 100755
index 3cf189e..0000000
--- a/atest-py2/tools/atest_tools.py
+++ /dev/null
@@ -1,354 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2019, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Atest tool functions.
-"""
-
-from __future__ import print_function
-
-import logging
-import os
-import pickle
-import shutil
-import subprocess
-import sys
-
-import constants
-import module_info
-
-from metrics import metrics_utils
-
-MAC_UPDB_SRC = os.path.join(os.path.dirname(__file__), 'updatedb_darwin.sh')
-MAC_UPDB_DST = os.path.join(os.getenv(constants.ANDROID_HOST_OUT, ''), 'bin')
-UPDATEDB = 'updatedb'
-LOCATE = 'locate'
-SEARCH_TOP = os.getenv(constants.ANDROID_BUILD_TOP, '')
-MACOSX = 'Darwin'
-OSNAME = os.uname()[0]
-# When adding new index, remember to append constants to below tuple.
-INDEXES = (constants.CC_CLASS_INDEX,
- constants.CLASS_INDEX,
- constants.LOCATE_CACHE,
- constants.MODULE_INDEX,
- constants.PACKAGE_INDEX,
- constants.QCLASS_INDEX)
-
-# The list was generated by command:
-# find `gettop` -type d -wholename `gettop`/out -prune -o -type d -name '.*'
-# -print | awk -F/ '{{print $NF}}'| sort -u
-PRUNENAMES = ['.abc', '.appveyor', '.azure-pipelines',
- '.bazelci', '.buildscript',
- '.ci', '.circleci', '.conan', '.config',
- '.externalToolBuilders',
- '.git', '.github', '.github-ci', '.google', '.gradle',
- '.idea', '.intermediates',
- '.jenkins',
- '.kokoro',
- '.libs_cffi_backend',
- '.mvn',
- '.prebuilt_info', '.private', '__pycache__',
- '.repo',
- '.semaphore', '.settings', '.static', '.svn',
- '.test', '.travis', '.tx',
- '.vscode']
-
-def _mkdir_when_inexists(dirname):
- if not os.path.isdir(dirname):
- os.makedirs(dirname)
-
-def _install_updatedb():
- """Install a customized updatedb for MacOS and ensure it is executable."""
- _mkdir_when_inexists(MAC_UPDB_DST)
- _mkdir_when_inexists(constants.INDEX_DIR)
- if OSNAME == MACOSX:
- shutil.copy2(MAC_UPDB_SRC, os.path.join(MAC_UPDB_DST, UPDATEDB))
- os.chmod(os.path.join(MAC_UPDB_DST, UPDATEDB), 0755)
-
-def _delete_indexes():
- """Delete all available index files."""
- for index in INDEXES:
- if os.path.isfile(index):
- os.remove(index)
-
-def has_command(cmd):
- """Detect if the command is available in PATH.
-
- shutil.which('cmd') is only valid in Py3 so we need to customise it.
-
- Args:
- cmd: A string of the tested command.
-
- Returns:
- True if found, False otherwise."""
- paths = os.getenv('PATH', '').split(':')
- for path in paths:
- if os.path.isfile(os.path.join(path, cmd)):
- return True
- return False
-
-def run_updatedb(search_root=SEARCH_TOP, output_cache=constants.LOCATE_CACHE,
- **kwargs):
- """Run updatedb and generate cache in $ANDROID_HOST_OUT/indexes/mlocate.db
-
- Args:
- search_root: The path of the search root(-U).
- output_cache: The filename of the updatedb cache(-o).
- kwargs: (optional)
- prunepaths: A list of paths unwanted to be searched(-e).
- prunenames: A list of dirname that won't be cached(-n).
- """
- prunenames = kwargs.pop('prunenames', ' '.join(PRUNENAMES))
- prunepaths = kwargs.pop('prunepaths', os.path.join(search_root, 'out'))
- if kwargs:
- raise TypeError('Unexpected **kwargs: %r' % kwargs)
- updatedb_cmd = [UPDATEDB, '-l0']
- updatedb_cmd.append('-U%s' % search_root)
- updatedb_cmd.append('-e%s' % prunepaths)
- updatedb_cmd.append('-n%s' % prunenames)
- updatedb_cmd.append('-o%s' % output_cache)
- try:
- _install_updatedb()
- except IOError as e:
- logging.error('Error installing updatedb: %s', e)
-
- if not has_command(UPDATEDB):
- return
- logging.debug('Running updatedb... ')
- try:
- full_env_vars = os.environ.copy()
- logging.debug('Executing: %s', updatedb_cmd)
- subprocess.check_call(updatedb_cmd, env=full_env_vars)
- except (KeyboardInterrupt, SystemExit):
- logging.error('Process interrupted or failure.')
-
-def _dump_index(dump_file, output, output_re, key, value):
- """Dump indexed data with pickle.
-
- Args:
- dump_file: A string of absolute path of the index file.
- output: A string generated by locate and grep.
- output_re: An regex which is used for grouping patterns.
- key: A string for dictionary key, e.g. classname, package, cc_class, etc.
- value: A set of path.
-
- The data structure will be like:
- {
- 'Foo': {'/path/to/Foo.java', '/path2/to/Foo.kt'},
- 'Boo': {'/path3/to/Boo.java'}
- }
- """
- _dict = {}
- with open(dump_file, 'wb') as cache_file:
- for entry in output.splitlines():
- match = output_re.match(entry)
- if match:
- _dict.setdefault(match.group(key), set()).add(match.group(value))
- try:
- pickle.dump(_dict, cache_file, protocol=2)
- except IOError:
- os.remove(dump_file)
- logging.error('Failed in dumping %s', dump_file)
-
-def _get_cc_result(locatedb=None):
- """Search all testable cc/cpp and grep TEST(), TEST_F() or TEST_P().
-
- Returns:
- A string object generated by subprocess.
- """
- if not locatedb:
- locatedb = constants.LOCATE_CACHE
- cc_grep_re = r'^\s*TEST(_P|_F)?\s*\(\w+,'
- if OSNAME == MACOSX:
- find_cmd = (r"locate -d {0} '*.cpp' '*.cc' | grep -i test "
- "| xargs egrep -sH '{1}' || true")
- else:
- find_cmd = (r"locate -d {0} / | egrep -i '/*.test.*\.(cc|cpp)$' "
- "| xargs egrep -sH '{1}' || true")
- find_cc_cmd = find_cmd.format(locatedb, cc_grep_re)
- logging.debug('Probing CC classes:\n %s', find_cc_cmd)
- return subprocess.check_output(find_cc_cmd, shell=True)
-
-def _get_java_result(locatedb=None):
- """Search all testable java/kt and grep package.
-
- Returns:
- A string object generated by subprocess.
- """
- if not locatedb:
- locatedb = constants.LOCATE_CACHE
- package_grep_re = r'^\s*package\s+[a-z][[:alnum:]]+[^{]'
- if OSNAME == MACOSX:
- find_cmd = r"locate -d%s '*.java' '*.kt'|grep -i test" % locatedb
- else:
- find_cmd = r"locate -d%s / | egrep -i '/*.test.*\.(java|kt)$'" % locatedb
- find_java_cmd = find_cmd + '| xargs egrep -sH \'%s\' || true' % package_grep_re
- logging.debug('Probing Java classes:\n %s', find_java_cmd)
- return subprocess.check_output(find_java_cmd, shell=True)
-
-def _index_testable_modules(index):
- """Dump testable modules read by tab completion.
-
- Args:
- index: A string path of the index file.
- """
- logging.debug('indexing testable modules.')
- testable_modules = module_info.ModuleInfo().get_testable_modules()
- with open(index, 'wb') as cache:
- try:
- pickle.dump(testable_modules, cache, protocol=2)
- except IOError:
- os.remove(cache)
- logging.error('Failed in dumping %s', cache)
-
-def _index_cc_classes(output, index):
- """Index CC classes.
-
- The data structure is like:
- {
- 'FooTestCase': {'/path1/to/the/FooTestCase.cpp',
- '/path2/to/the/FooTestCase.cc'}
- }
-
- Args:
- output: A string object generated by _get_cc_result().
- index: A string path of the index file.
- """
- logging.debug('indexing CC classes.')
- _dump_index(dump_file=index, output=output,
- output_re=constants.CC_OUTPUT_RE,
- key='test_name', value='file_path')
-
-def _index_java_classes(output, index):
- """Index Java classes.
- The data structure is like:
- {
- 'FooTestCase': {'/path1/to/the/FooTestCase.java',
- '/path2/to/the/FooTestCase.kt'}
- }
-
- Args:
- output: A string object generated by _get_java_result().
- index: A string path of the index file.
- """
- logging.debug('indexing Java classes.')
- _dump_index(dump_file=index, output=output,
- output_re=constants.CLASS_OUTPUT_RE,
- key='class', value='java_path')
-
-def _index_packages(output, index):
- """Index Java packages.
- The data structure is like:
- {
- 'a.b.c.d': {'/path1/to/a/b/c/d/',
- '/path2/to/a/b/c/d/'
- }
-
- Args:
- output: A string object generated by _get_java_result().
- index: A string path of the index file.
- """
- logging.debug('indexing packages.')
- _dump_index(dump_file=index,
- output=output, output_re=constants.PACKAGE_OUTPUT_RE,
- key='package', value='java_dir')
-
-def _index_qualified_classes(output, index):
- """Index Fully Qualified Java Classes(FQCN).
- The data structure is like:
- {
- 'a.b.c.d.FooTestCase': {'/path1/to/a/b/c/d/FooTestCase.java',
- '/path2/to/a/b/c/d/FooTestCase.kt'}
- }
-
- Args:
- output: A string object generated by _get_java_result().
- index: A string path of the index file.
- """
- logging.debug('indexing qualified classes.')
- _dict = {}
- with open(index, 'wb') as cache_file:
- for entry in output.split('\n'):
- match = constants.QCLASS_OUTPUT_RE.match(entry)
- if match:
- fqcn = match.group('package') + '.' + match.group('class')
- _dict.setdefault(fqcn, set()).add(match.group('java_path'))
- try:
- pickle.dump(_dict, cache_file, protocol=2)
- except (KeyboardInterrupt, SystemExit):
- logging.error('Process interrupted or failure.')
- os.remove(index)
- except IOError:
- logging.error('Failed in dumping %s', index)
-
-def index_targets(output_cache=constants.LOCATE_CACHE, **kwargs):
- """The entrypoint of indexing targets.
-
- Utilise mlocate database to index reference types of CLASS, CC_CLASS,
- PACKAGE and QUALIFIED_CLASS. Testable module for tab completion is also
- generated in this method.
-
- Args:
- output_cache: A file path of the updatedb cache(e.g. /path/to/mlocate.db).
- kwargs: (optional)
- class_index: A path string of the Java class index.
- qclass_index: A path string of the qualified class index.
- package_index: A path string of the package index.
- cc_class_index: A path string of the CC class index.
- module_index: A path string of the testable module index.
- integration_index: A path string of the integration index.
- """
- class_index = kwargs.pop('class_index', constants.CLASS_INDEX)
- qclass_index = kwargs.pop('qclass_index', constants.QCLASS_INDEX)
- package_index = kwargs.pop('package_index', constants.PACKAGE_INDEX)
- cc_class_index = kwargs.pop('cc_class_index', constants.CC_CLASS_INDEX)
- module_index = kwargs.pop('module_index', constants.MODULE_INDEX)
- # Uncomment below if we decide to support INTEGRATION.
- #integration_index = kwargs.pop('integration_index', constants.INT_INDEX)
- if kwargs:
- raise TypeError('Unexpected **kwargs: %r' % kwargs)
-
- try:
- # Step 0: generate mlocate database prior to indexing targets.
- run_updatedb(SEARCH_TOP, constants.LOCATE_CACHE)
- if not has_command(LOCATE):
- return
- # Step 1: generate output string for indexing targets.
- logging.debug('Indexing targets... ')
- cc_result = _get_cc_result(output_cache)
- java_result = _get_java_result(output_cache)
- # Step 2: index Java and CC classes.
- _index_cc_classes(cc_result, cc_class_index)
- _index_java_classes(java_result, class_index)
- _index_qualified_classes(java_result, qclass_index)
- _index_packages(java_result, package_index)
- # Step 3: index testable mods and TEST_MAPPING files.
- _index_testable_modules(module_index)
-
- # Delete indexes when mlocate.db is locked() or other CalledProcessError.
- # (b/141588997)
- except subprocess.CalledProcessError as err:
- logging.error('Executing %s error.', UPDATEDB)
- metrics_utils.handle_exc_and_send_exit_event(
- constants.MLOCATEDB_LOCKED)
- if err.output:
- logging.error(err.output)
- _delete_indexes()
-
-
-if __name__ == '__main__':
- if not os.getenv(constants.ANDROID_HOST_OUT, ''):
- sys.exit()
- index_targets()
diff --git a/atest-py2/tools/atest_tools_unittest.py b/atest-py2/tools/atest_tools_unittest.py
deleted file mode 100755
index 34bdfb2..0000000
--- a/atest-py2/tools/atest_tools_unittest.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2019, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittest for atest_tools."""
-
-# pylint: disable=line-too-long
-
-import os
-import pickle
-import platform
-import subprocess
-import unittest
-import mock
-
-from tools import atest_tools
-
-import unittest_constants as uc
-
-SEARCH_ROOT = uc.TEST_DATA_DIR
-PRUNEPATH = uc.TEST_CONFIG_DATA_DIR
-LOCATE = atest_tools.LOCATE
-UPDATEDB = atest_tools.UPDATEDB
-
-class AtestToolsUnittests(unittest.TestCase):
- """"Unittest Class for atest_tools.py."""
-
- @mock.patch('constants.LOCATE_CACHE', uc.LOCATE_CACHE)
- @mock.patch('tools.atest_tools.SEARCH_TOP', uc.TEST_DATA_DIR)
- @mock.patch('module_info.ModuleInfo.get_testable_modules')
- @mock.patch('module_info.ModuleInfo.__init__')
- def test_index_targets(self, mock_mod_info, mock_testable_mod):
- """Test method index_targets."""
- mock_mod_info.return_value = None
- mock_testable_mod.return_value = {uc.MODULE_NAME, uc.MODULE2_NAME}
- if atest_tools.has_command(UPDATEDB) and atest_tools.has_command(LOCATE):
- # 1. Test run_updatedb() is functional.
- atest_tools.run_updatedb(SEARCH_ROOT, uc.LOCATE_CACHE,
- prunepaths=PRUNEPATH)
- # test_config/ is excluded so that a.xml won't be found.
- locate_cmd1 = [LOCATE, '-d', uc.LOCATE_CACHE, '/a.xml']
- # locate always return 0 when not found in Darwin, therefore,
- # check null return in Darwin and return value in Linux.
- if platform.system() == 'Darwin':
- self.assertEqual(subprocess.check_output(locate_cmd1), "")
- else:
- self.assertEqual(subprocess.call(locate_cmd1), 1)
- # module-info.json can be found in the search_root.
- locate_cmd2 = [LOCATE, '-d', uc.LOCATE_CACHE, 'module-info.json']
- self.assertEqual(subprocess.call(locate_cmd2), 0)
-
- # 2. Test index_targets() is functional.
- atest_tools.index_targets(uc.LOCATE_CACHE,
- class_index=uc.CLASS_INDEX,
- cc_class_index=uc.CC_CLASS_INDEX,
- module_index=uc.MODULE_INDEX,
- package_index=uc.PACKAGE_INDEX,
- qclass_index=uc.QCLASS_INDEX)
- _cache = {}
- # Test finding a Java class.
- with open(uc.CLASS_INDEX, 'rb') as cache:
- _cache = pickle.load(cache)
- self.assertIsNotNone(_cache.get('PathTesting'))
- # Test finding a CC class.
- with open(uc.CC_CLASS_INDEX, 'rb') as cache:
- _cache = pickle.load(cache)
- self.assertIsNotNone(_cache.get('HelloWorldTest'))
- # Test finding a package.
- with open(uc.PACKAGE_INDEX, 'rb') as cache:
- _cache = pickle.load(cache)
- self.assertIsNotNone(_cache.get(uc.PACKAGE))
- # Test finding a fully qualified class name.
- with open(uc.QCLASS_INDEX, 'rb') as cache:
- _cache = pickle.load(cache)
- self.assertIsNotNone(_cache.get('android.jank.cts.ui.PathTesting'))
- _cache = set()
- # Test finding a module name.
- with open(uc.MODULE_INDEX, 'rb') as cache:
- _cache = pickle.load(cache)
- self.assertTrue(uc.MODULE_NAME in _cache)
- self.assertFalse(uc.CLASS_NAME in _cache)
- # Clean up.
- targets_to_delete = (uc.CC_CLASS_INDEX,
- uc.CLASS_INDEX,
- uc.LOCATE_CACHE,
- uc.MODULE_INDEX,
- uc.PACKAGE_INDEX,
- uc.QCLASS_INDEX)
- for idx in targets_to_delete:
- os.remove(idx)
- else:
- self.assertEqual(atest_tools.has_command(UPDATEDB), False)
- self.assertEqual(atest_tools.has_command(LOCATE), False)
-
-
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/atest-py2/tools/updatedb_darwin.sh b/atest-py2/tools/updatedb_darwin.sh
deleted file mode 100755
index d0b2339..0000000
--- a/atest-py2/tools/updatedb_darwin.sh
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env bash
-#
-# Copyright 2019, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Warning and exit when failed to meet the requirements.
-[ "$(uname -s)" != "Darwin" ] && { echo "This program runs on Darwin only."; exit 0; }
-[ "$UID" -eq 0 ] && { echo "Running with root user is not supported."; exit 0; }
-
-function usage() {
- echo "###########################################"
- echo "Usage: $prog [-U|-e|-n|-o||-l|-f|-h]"
- echo " -U: The PATH of the search root."
- echo " -e: The PATH that unwanted to be searched."
- echo " -n: The name of directories that won't be cached."
- echo " -o: The PATH of the generated database."
- echo " -l: No effect. For compatible with Linux mlocate."
- echo " -f: Filesystems which should not search for."
- echo " -h: This usage helper."
- echo
- echo "################ [EXAMPLE] ################"
- echo "$prog -U \$ANDROID_BUILD_TOP -n .git -l 0 \\"
- echo " -e \"\$ANDROID_BUILD_TOP/out \$ANDROID_BUILD_TOP/.repo\" \\"
- echo " -o \"\$ANDROID_HOST_OUT/locate.database\""
- echo
- echo "locate -d \$ANDROID_HOST_OUT/locate.database atest.py"
- echo "locate -d \$ANDROID_HOST_OUT/locate.database contrib/res/config"
-}
-
-function mktempdir() {
- TMPDIR=/tmp
- if ! TMPDIR=`mktemp -d $TMPDIR/locateXXXXXXXXXX`; then
- exit 1
- fi
- temp=$TMPDIR/_updatedb$$
-}
-
-function _updatedb_main() {
- # 0. Disable default features of bash.
- set -o noglob # Disable * expansion before passing arguments to find.
- set -o errtrace # Sub-shells inherit error trap.
-
- # 1. Get positional arguments and set variables.
- prog=$(basename $0)
- while getopts 'U:n:e:o:l:f:h' option; do
- case $option in
- U) SEARCHROOT="$OPTARG";; # Search root.
- e) PRUNEPATHS="$OPTARG";; # Paths to be excluded.
- n) PRUNENAMES="$OPTARG";; # Dirnames to be pruned.
- o) DATABASE="$OPTARG";; # the output of the DB.
- l) ;; # No effect.
- f) PRUNEFS="$OPTARG";; # Disallow network filesystems.
- *) usage; exit 0;;
- esac
- done
-
- : ${SEARCHROOT:="$ANDROID_BUILD_TOP"}
- if [ -z "$SEARCHROOT" ]; then
- echo 'Either $SEARCHROOT or $ANDROID_BUILD_TOP is required.'
- exit 0
- fi
-
- if [ -n "$ANDROID_BUILD_TOP" ]; then
- PRUNEPATHS="$PRUNEPATHS $ANDROID_BUILD_TOP/out"
- fi
-
- PRUNENAMES="$PRUNENAMES *.class *.pyc .gitignore"
- : ${DATABASE:=/tmp/locate.database}
- : ${PRUNEFS:="nfs afp smb"}
-
- # 2. Assemble excludes strings.
- excludes=""
- or=""
- sortarg="-presort"
- for fs in $PRUNEFS; do
- excludes="$excludes $or -fstype $fs -prune"
- or="-o"
- done
- for path in $PRUNEPATHS; do
- excludes="$excludes $or -path $path -prune"
- done
- for file in $PRUNENAMES; do
- excludes="$excludes $or -name $file -prune"
- done
-
- # 3. Find and create locate database.
- # Delete $temp when trapping specified return values.
- mktempdir
- trap 'rm -rf $temp $TMPDIR; exit' 0 1 2 3 5 10 15
- if find -s $SEARCHROOT $excludes $or -print 2>/dev/null -true |
- /usr/libexec/locate.mklocatedb $sortarg > $temp 2>/dev/null; then
- case x"`find $temp -size 257c -print`" in
- x) cat $temp > $DATABASE;;
- *) echo "$prog: database $temp is found empty."
- exit 1;;
- esac
- fi
-}
-
-_updatedb_main "$@"
diff --git a/atest-py2/unittest_constants.py b/atest-py2/unittest_constants.py
deleted file mode 100644
index c757936..0000000
--- a/atest-py2/unittest_constants.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Unittest constants.
-
-Unittest constants get their own file since they're used purely for testing and
-should not be combined with constants_defaults as part of normal atest
-operation. These constants are used commonly as test data so when updating a
-constant, do so with care and run all unittests to make sure nothing breaks.
-"""
-
-import os
-
-import constants
-from test_finders import test_info
-from test_runners import atest_tf_test_runner as atf_tr
-
-ROOT = '/'
-MODULE_DIR = 'foo/bar/jank'
-MODULE2_DIR = 'foo/bar/hello'
-MODULE_NAME = 'CtsJankDeviceTestCases'
-TYPO_MODULE_NAME = 'CtsJankDeviceTestCase'
-MODULE2_NAME = 'HelloWorldTests'
-CLASS_NAME = 'CtsDeviceJankUi'
-FULL_CLASS_NAME = 'android.jank.cts.ui.CtsDeviceJankUi'
-PACKAGE = 'android.jank.cts.ui'
-FIND_ONE = ROOT + 'foo/bar/jank/src/android/jank/cts/ui/CtsDeviceJankUi.java\n'
-FIND_TWO = ROOT + 'other/dir/test.java\n' + FIND_ONE
-FIND_PKG = ROOT + 'foo/bar/jank/src/android/jank/cts/ui\n'
-INT_NAME = 'example/reboot'
-GTF_INT_NAME = 'some/gtf_int_test'
-TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'unittest_data')
-TEST_CONFIG_DATA_DIR = os.path.join(TEST_DATA_DIR, 'test_config')
-
-INT_DIR = 'tf/contrib/res/config'
-GTF_INT_DIR = 'gtf/core/res/config'
-
-CONFIG_FILE = os.path.join(MODULE_DIR, constants.MODULE_CONFIG)
-CONFIG2_FILE = os.path.join(MODULE2_DIR, constants.MODULE_CONFIG)
-JSON_FILE = 'module-info.json'
-MODULE_INFO_TARGET = '/out/%s' % JSON_FILE
-MODULE_BUILD_TARGETS = {'tradefed-core', MODULE_INFO_TARGET,
- 'MODULES-IN-%s' % MODULE_DIR.replace('/', '-'),
- 'module-specific-target'}
-MODULE_BUILD_TARGETS2 = {'build-target2'}
-MODULE_DATA = {constants.TI_REL_CONFIG: CONFIG_FILE,
- constants.TI_FILTER: frozenset()}
-MODULE_DATA2 = {constants.TI_REL_CONFIG: CONFIG_FILE,
- constants.TI_FILTER: frozenset()}
-MODULE_INFO = test_info.TestInfo(MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- MODULE_BUILD_TARGETS,
- MODULE_DATA)
-MODULE_INFO2 = test_info.TestInfo(MODULE2_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- MODULE_BUILD_TARGETS2,
- MODULE_DATA2)
-MODULE_INFOS = [MODULE_INFO]
-MODULE_INFOS2 = [MODULE_INFO, MODULE_INFO2]
-CLASS_FILTER = test_info.TestFilter(FULL_CLASS_NAME, frozenset())
-CLASS_DATA = {constants.TI_REL_CONFIG: CONFIG_FILE,
- constants.TI_FILTER: frozenset([CLASS_FILTER])}
-PACKAGE_FILTER = test_info.TestFilter(PACKAGE, frozenset())
-PACKAGE_DATA = {constants.TI_REL_CONFIG: CONFIG_FILE,
- constants.TI_FILTER: frozenset([PACKAGE_FILTER])}
-TEST_DATA_CONFIG = os.path.relpath(os.path.join(TEST_DATA_DIR,
- constants.MODULE_CONFIG), ROOT)
-PATH_DATA = {
- constants.TI_REL_CONFIG: TEST_DATA_CONFIG,
- constants.TI_FILTER: frozenset([PACKAGE_FILTER])}
-EMPTY_PATH_DATA = {
- constants.TI_REL_CONFIG: TEST_DATA_CONFIG,
- constants.TI_FILTER: frozenset()}
-
-CLASS_BUILD_TARGETS = {'class-specific-target'}
-CLASS_INFO = test_info.TestInfo(MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- CLASS_BUILD_TARGETS,
- CLASS_DATA)
-CLASS_INFOS = [CLASS_INFO]
-
-CLASS_BUILD_TARGETS2 = {'class-specific-target2'}
-CLASS_DATA2 = {constants.TI_REL_CONFIG: CONFIG_FILE,
- constants.TI_FILTER: frozenset([CLASS_FILTER])}
-CLASS_INFO2 = test_info.TestInfo(MODULE2_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- CLASS_BUILD_TARGETS2,
- CLASS_DATA2)
-CLASS_INFOS = [CLASS_INFO]
-CLASS_INFOS2 = [CLASS_INFO, CLASS_INFO2]
-PACKAGE_INFO = test_info.TestInfo(MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- CLASS_BUILD_TARGETS,
- PACKAGE_DATA)
-PATH_INFO = test_info.TestInfo(MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- MODULE_BUILD_TARGETS,
- PATH_DATA)
-EMPTY_PATH_INFO = test_info.TestInfo(MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- MODULE_BUILD_TARGETS,
- EMPTY_PATH_DATA)
-MODULE_CLASS_COMBINED_BUILD_TARGETS = MODULE_BUILD_TARGETS | CLASS_BUILD_TARGETS
-INT_CONFIG = os.path.join(INT_DIR, INT_NAME + '.xml')
-GTF_INT_CONFIG = os.path.join(GTF_INT_DIR, GTF_INT_NAME + '.xml')
-METHOD_NAME = 'method1'
-METHOD_FILTER = test_info.TestFilter(FULL_CLASS_NAME, frozenset([METHOD_NAME]))
-METHOD_INFO = test_info.TestInfo(
- MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- MODULE_BUILD_TARGETS,
- data={constants.TI_FILTER: frozenset([METHOD_FILTER]),
- constants.TI_REL_CONFIG: CONFIG_FILE})
-METHOD2_NAME = 'method2'
-FLAT_METHOD_FILTER = test_info.TestFilter(
- FULL_CLASS_NAME, frozenset([METHOD_NAME, METHOD2_NAME]))
-INT_INFO = test_info.TestInfo(INT_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- set(),
- data={constants.TI_REL_CONFIG: INT_CONFIG,
- constants.TI_FILTER: frozenset()})
-GTF_INT_INFO = test_info.TestInfo(
- GTF_INT_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- set(),
- data={constants.TI_FILTER: frozenset(),
- constants.TI_REL_CONFIG: GTF_INT_CONFIG})
-
-# Sample test configurations in TEST_MAPPING file.
-TEST_MAPPING_TEST = {'name': MODULE_NAME, 'host': True}
-TEST_MAPPING_TEST_WITH_OPTION = {
- 'name': CLASS_NAME,
- 'options': [
- {
- 'arg1': 'val1'
- },
- {
- 'arg2': ''
- }
- ]
-}
-TEST_MAPPING_TEST_WITH_OPTION_STR = '%s (arg1: val1, arg2:)' % CLASS_NAME
-TEST_MAPPING_TEST_WITH_BAD_OPTION = {
- 'name': CLASS_NAME,
- 'options': [
- {
- 'arg1': 'val1',
- 'arg2': ''
- }
- ]
-}
-TEST_MAPPING_TEST_WITH_BAD_HOST_VALUE = {
- 'name': CLASS_NAME,
- 'host': 'true'
-}
-# Constrants of cc test unittest
-FIND_CC_ONE = ROOT + 'foo/bt/hci/test/pf_test.cc\n'
-CC_MODULE_NAME = 'net_test_hci'
-CC_CLASS_NAME = 'PFTest'
-CC_MODULE_DIR = 'system/bt/hci'
-CC_CLASS_FILTER = test_info.TestFilter(CC_CLASS_NAME+".*", frozenset())
-CC_CONFIG_FILE = os.path.join(CC_MODULE_DIR, constants.MODULE_CONFIG)
-CC_MODULE_CLASS_DATA = {constants.TI_REL_CONFIG: CC_CONFIG_FILE,
- constants.TI_FILTER: frozenset([CC_CLASS_FILTER])}
-CC_MODULE_CLASS_INFO = test_info.TestInfo(CC_MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- CLASS_BUILD_TARGETS, CC_MODULE_CLASS_DATA)
-CC_MODULE2_DIR = 'foo/bar/hello'
-CC_MODULE2_NAME = 'hello_world_test'
-CC_PATH = 'pf_test.cc'
-CC_FIND_ONE = ROOT + 'system/bt/hci/test/pf_test.cc:TEST_F(PFTest, test1) {\n' + \
- ROOT + 'system/bt/hci/test/pf_test.cc:TEST_F(PFTest, test2) {\n'
-CC_FIND_TWO = ROOT + 'other/dir/test.cpp:TEST(PFTest, test_f) {\n' + \
- ROOT + 'other/dir/test.cpp:TEST(PFTest, test_p) {\n'
-CC_CONFIG2_FILE = os.path.join(CC_MODULE2_DIR, constants.MODULE_CONFIG)
-CC_CLASS_FILTER = test_info.TestFilter(CC_CLASS_NAME+".*", frozenset())
-CC_CLASS_DATA = {constants.TI_REL_CONFIG: CC_CONFIG_FILE,
- constants.TI_FILTER: frozenset([CC_CLASS_FILTER])}
-CC_CLASS_INFO = test_info.TestInfo(CC_MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- CLASS_BUILD_TARGETS, CC_CLASS_DATA)
-CC_METHOD_NAME = 'test1'
-CC_METHOD2_NAME = 'test2'
-CC_METHOD_FILTER = test_info.TestFilter(CC_CLASS_NAME+"."+CC_METHOD_NAME,
- frozenset())
-CC_METHOD2_FILTER = test_info.TestFilter(CC_CLASS_NAME+"."+CC_METHOD_NAME+ \
- ":"+CC_CLASS_NAME+"."+CC_METHOD2_NAME,
- frozenset())
-CC_METHOD_INFO = test_info.TestInfo(
- CC_MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- MODULE_BUILD_TARGETS,
- data={constants.TI_REL_CONFIG: CC_CONFIG_FILE,
- constants.TI_FILTER: frozenset([CC_METHOD_FILTER])})
-CC_METHOD2_INFO = test_info.TestInfo(
- CC_MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- MODULE_BUILD_TARGETS,
- data={constants.TI_REL_CONFIG: CC_CONFIG_FILE,
- constants.TI_FILTER: frozenset([CC_METHOD2_FILTER])})
-CC_PATH_DATA = {
- constants.TI_REL_CONFIG: TEST_DATA_CONFIG,
- constants.TI_FILTER: frozenset()}
-CC_PATH_INFO = test_info.TestInfo(CC_MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- MODULE_BUILD_TARGETS,
- CC_PATH_DATA)
-CC_PATH_DATA2 = {constants.TI_REL_CONFIG: CC_CONFIG_FILE,
- constants.TI_FILTER: frozenset()}
-CC_PATH_INFO2 = test_info.TestInfo(CC_MODULE_NAME,
- atf_tr.AtestTradefedTestRunner.NAME,
- CLASS_BUILD_TARGETS, CC_PATH_DATA2)
-CTS_INT_DIR = 'test/suite_harness/tools/cts-tradefed/res/config'
-# Constrants of java, kt, cc, cpp test_find_class_file() unittest
-FIND_PATH_TESTCASE_JAVA = 'hello_world_test'
-FIND_PATH_FILENAME_CC = 'hello_world_test'
-FIND_PATH_TESTCASE_CC = 'HelloWorldTest'
-FIND_PATH_FOLDER = 'class_file_path_testing'
-FIND_PATH = os.path.join(TEST_DATA_DIR, FIND_PATH_FOLDER)
-
-DEFAULT_INSTALL_PATH = ['/path/to/install']
-# Module names
-MOD1 = 'mod1'
-MOD2 = 'mod2'
-MOD3 = 'mod3'
-FUZZY_MOD1 = 'Mod1'
-FUZZY_MOD2 = 'nod2'
-FUZZY_MOD3 = 'mod3mod3'
-
-LOCATE_CACHE = '/tmp/mcloate.db'
-CLASS_INDEX = '/tmp/classes.idx'
-QCLASS_INDEX = '/tmp/fqcn.idx'
-CC_CLASS_INDEX = '/tmp/cc_classes.idx'
-PACKAGE_INDEX = '/tmp/packages.idx'
-MODULE_INDEX = '/tmp/modules.idx'
diff --git a/atest-py2/unittest_data/AndroidTest.xml b/atest-py2/unittest_data/AndroidTest.xml
deleted file mode 100644
index 431eafc..0000000
--- a/atest-py2/unittest_data/AndroidTest.xml
+++ /dev/null
@@ -1,18 +0,0 @@
-<configuration description="Config for CTS Jank test cases">
- <option name="test-suite-tag" value="cts" />
- <option name="not-shardable" value="true" />
- <option name="config-descriptor:metadata" key="component" value="graphics" />
- <target_preparer class="com.android.tradefed.targetprep.suite.SuiteApkInstaller">
- <option name="cleanup-apks" value="true" />
- <option name="test-file-name" value="CtsJankDeviceTestCases.apk" />
- <option name="test-file-name" value="is_not_module.apk" />
- <option name="push" value="GtsEmptyTestApp.apk->/data/local/tmp/gts/packageinstaller/GtsEmptyTestApp.apk" />
- </target_preparer>
- <include name="CtsUiDeviceTestCases"/>
- <test class="com.android.tradefed.testtype.AndroidJUnitTest" >
- <option name="package" value="android.jank.cts" />
- <option name="runtime-hint" value="11m20s" />
- </test>
- <option name="perf_arg" value="perf-setup.sh" />
- <test class="com.android.compatibility.class.for.test" />
-</configuration>
diff --git a/atest-py2/unittest_data/CtsUiDeviceTestCases.xml b/atest-py2/unittest_data/CtsUiDeviceTestCases.xml
deleted file mode 100644
index 2dd30f9..0000000
--- a/atest-py2/unittest_data/CtsUiDeviceTestCases.xml
+++ /dev/null
@@ -1,3 +0,0 @@
-<target_preparer class="com.android.tradefed.targetprep.suite.SuiteApkInstaller">
- <option name="test-file-name" value="CtsUiDeviceTestCases.apk" />
-</target_preparer>
diff --git a/atest-py2/unittest_data/KernelTest.xml b/atest-py2/unittest_data/KernelTest.xml
deleted file mode 100644
index a2a110f..0000000
--- a/atest-py2/unittest_data/KernelTest.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!-- Copyright (C) 2020 The Android Open Source Project
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration description="Runs kernel_test.">
- <test class="com.android.tradefed.testtype.binary.KernelTargetTest" >
- <option name="ignore-binary-check" value="true" />
- <option name="per-binary-timeout" value="360000" />
- <option name="test-command-line" key="test_class_1" value="command 1" />
- <option name="test-command-line" key="test_class_2" value="command 2" />
- <option name="test-command-line" key="test_class_3" value="command 3" />
- </test>
-</configuration>
diff --git a/atest-py2/unittest_data/VtsAndroidTest.xml b/atest-py2/unittest_data/VtsAndroidTest.xml
deleted file mode 100644
index fc858f3..0000000
--- a/atest-py2/unittest_data/VtsAndroidTest.xml
+++ /dev/null
@@ -1,29 +0,0 @@
-<configuration description="Config for VTS target parsing">
- <option name="config-descriptor:metadata" key="plan" value="vts-treble" />
- <target_preparer class="com.android.compatibility.common.tradefed.targetprep.VtsFilePusher">
- <option name="abort-on-push-failure" value="false"/>
- <option name="push-group" value="push_file1.push"/>
- <option name="push" value="DATA/lib/libhidl-gen-hash.so->/data/local/tmp/32/libhidl-gen-hash.so"/>
- <option name="push" value="DATA/lib64/libhidl-gen-hash.so->/data/local/tmp/64/libhidl-gen-hash.so"/>
- <option name="push" value="hal-hidl-hash/frameworks/hardware/interfaces/current.txt->/data/local/tmp/frameworks/hardware/interfaces/current.txt"/>
- <option name="push" value="hal-hidl-hash/hardware/interfaces/current.txt->/data/local/tmp/hardware/interfaces/current.txt"/>
- <option name="push" value="hal-hidl-hash/system/hardware/interfaces/current.txt->/data/local/tmp/system/hardware/interfaces/current.txt"/>
- <option name="push" value="hal-hidl-hash/system/libhidl/transport/current.txt->/data/local/tmp/system/libhidl/transport/current.txt"/>
- </target_preparer>
- <multi_target_preparer class="com.android.tradefed.targetprep.VtsPythonVirtualenvPreparer" />
- <test class="com.android.tradefed.testtype.VtsMultiDeviceTest">
- <option name="test-module-name" value="VtsTestName"/>
- <option name="binary-test-working-directory" value="_32bit::/data/nativetest/" />
- <option name="binary-test-working-directory" value="_64bit::/data/nativetest64/" />
- <option name="binary-test-source" value="_32bit::DATA/nativetest/vts_treble_vintf_test/vts_treble_vintf_test" />
- <option name="binary-test-source" value="_64bit::DATA/nativetest64/vts_treble_vintf_test/vts_treble_vintf_test" />
- <option name="binary-test-source" value="target_with_delim->/path/to/target_with_delim" />
- <option name="binary-test-source" value="out/dir/target" />
- <option name="binary-test-type" value="gtest"/>
- <option name="test-timeout" value="5m"/>
- </test>
- <target_preparer class="com.android.compatibility.common.tradefed.targetprep.DeviceInfoCollector">
- <option name="apk" value="CtsDeviceInfo.apk"/>
- <option name="test-file-name" value="DATA/app/sl4a/sl4a.apk" />
- </target_preparer>
-</configuration>
diff --git a/atest-py2/unittest_data/cache_root/78ea54ef315f5613f7c11dd1a87f10c7.cache b/atest-py2/unittest_data/cache_root/78ea54ef315f5613f7c11dd1a87f10c7.cache
deleted file mode 100644
index 3b384c7..0000000
--- a/atest-py2/unittest_data/cache_root/78ea54ef315f5613f7c11dd1a87f10c7.cache
+++ /dev/null
@@ -1,81 +0,0 @@
-c__builtin__
-set
-p0
-((lp1
-ccopy_reg
-_reconstructor
-p2
-(ctest_finders.test_info
-TestInfo
-p3
-c__builtin__
-object
-p4
-Ntp5
-Rp6
-(dp7
-S'compatibility_suites'
-p8
-(lp9
-S'device-tests'
-p10
-asS'install_locations'
-p11
-g0
-((lp12
-S'device'
-p13
-aS'host'
-p14
-atp15
-Rp16
-sS'test_runner'
-p17
-S'AtestTradefedTestRunner'
-p18
-sS'test_finder'
-p19
-S'MODULE'
-p20
-sS'module_class'
-p21
-(lp22
-VNATIVE_TESTS
-p23
-asS'from_test_mapping'
-p24
-I00
-sS'build_targets'
-p25
-g0
-((lp26
-VMODULES-IN-platform_testing-tests-example-native
-p27
-atp28
-Rp29
-sg14
-I00
-sS'test_name'
-p30
-S'hello_world_test'
-p31
-sS'suite'
-p32
-NsS'data'
-p33
-(dp34
-S'rel_config'
-p35
-Vplatform_testing/tests/example/native/AndroidTest.xml
-p36
-sS'filter'
-p37
-c__builtin__
-frozenset
-p38
-((lp39
-tp40
-Rp41
-ssbatp42
-Rp43
-.
\ No newline at end of file
diff --git a/atest-py2/unittest_data/cache_root/cd66f9f5ad63b42d0d77a9334de6bb73.cache b/atest-py2/unittest_data/cache_root/cd66f9f5ad63b42d0d77a9334de6bb73.cache
deleted file mode 100644
index 451a51e..0000000
--- a/atest-py2/unittest_data/cache_root/cd66f9f5ad63b42d0d77a9334de6bb73.cache
+++ /dev/null
@@ -1,72 +0,0 @@
-c__builtin__
-set
-p0
-((lp1
-ccopy_reg
-_reconstructor
-p2
-(ctest_finders.test_info
-TestInfo
-p3
-c__builtin__
-object
-p4
-Ntp5
-Rp6
-(dp7
-S'install_locations'
-p8
-g0
-((lp9
-S'device'
-p10
-aS'host'
-p11
-atp12
-Rp13
-sS'test_runner'
-p14
-S'AtestTradefedTestRunner'
-p15
-sS'module_class'
-p16
-(lp17
-VNATIVE_TESTS
-p18
-asS'from_test_mapping'
-p19
-I00
-sS'build_targets'
-p20
-g0
-((lp21
-VMODULES-IN-platform_testing-tests-example-native
-p22
-atp23
-Rp24
-sg11
-I00
-sS'test_name'
-p25
-S'hello_world_test'
-p26
-sS'suite'
-p27
-NsS'data'
-p28
-(dp29
-S'rel_config'
-p30
-Vplatform_testing/tests/example/native/AndroidTest.xml
-p31
-sS'filter'
-p32
-c__builtin__
-frozenset
-p33
-((lp34
-tp35
-Rp36
-ssbatp37
-Rp38
-.
\ No newline at end of file
diff --git a/atest-py2/unittest_data/class_file_path_testing/hello_world_test.cc b/atest-py2/unittest_data/class_file_path_testing/hello_world_test.cc
deleted file mode 100644
index 8062618..0000000
--- a/atest-py2/unittest_data/class_file_path_testing/hello_world_test.cc
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include <stdio.h>
-
-TEST_F(HelloWorldTest, PrintHelloWorld) {
- printf("Hello, World!");
-}
\ No newline at end of file
diff --git a/atest-py2/unittest_data/class_file_path_testing/hello_world_test.cpp b/atest-py2/unittest_data/class_file_path_testing/hello_world_test.cpp
deleted file mode 100644
index 8062618..0000000
--- a/atest-py2/unittest_data/class_file_path_testing/hello_world_test.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include <stdio.h>
-
-TEST_F(HelloWorldTest, PrintHelloWorld) {
- printf("Hello, World!");
-}
\ No newline at end of file
diff --git a/atest-py2/unittest_data/class_file_path_testing/hello_world_test.java b/atest-py2/unittest_data/class_file_path_testing/hello_world_test.java
deleted file mode 100644
index 8e0a999..0000000
--- a/atest-py2/unittest_data/class_file_path_testing/hello_world_test.java
+++ /dev/null
@@ -1,9 +0,0 @@
-package com.test.hello_world_test;
-
-public class HelloWorldTest {
- @Test
- public void testMethod1() throws Exception {}
-
- @Test
- public void testMethod2() throws Exception {}
-}
diff --git a/atest-py2/unittest_data/class_file_path_testing/hello_world_test.kt b/atest-py2/unittest_data/class_file_path_testing/hello_world_test.kt
deleted file mode 100644
index 623b4a2..0000000
--- a/atest-py2/unittest_data/class_file_path_testing/hello_world_test.kt
+++ /dev/null
@@ -1 +0,0 @@
-package com.test.hello_world_test
\ No newline at end of file
diff --git a/atest-py2/unittest_data/class_file_path_testing/hello_world_test.other b/atest-py2/unittest_data/class_file_path_testing/hello_world_test.other
deleted file mode 100644
index e69de29..0000000
--- a/atest-py2/unittest_data/class_file_path_testing/hello_world_test.other
+++ /dev/null
diff --git a/atest-py2/unittest_data/gts_auth_key.json b/atest-py2/unittest_data/gts_auth_key.json
deleted file mode 100644
index 0e48d55..0000000
--- a/atest-py2/unittest_data/gts_auth_key.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
- "type": "service_account",
- "project_id": "test",
- "private_key_id": "test",
- "private_key": "test",
- "client_email": "test",
- "client_id": "test"
-}
diff --git a/atest-py2/unittest_data/integration_dir_testing/int_dir1/int_dir_testing.xml b/atest-py2/unittest_data/integration_dir_testing/int_dir1/int_dir_testing.xml
deleted file mode 100644
index 2dd30f9..0000000
--- a/atest-py2/unittest_data/integration_dir_testing/int_dir1/int_dir_testing.xml
+++ /dev/null
@@ -1,3 +0,0 @@
-<target_preparer class="com.android.tradefed.targetprep.suite.SuiteApkInstaller">
- <option name="test-file-name" value="CtsUiDeviceTestCases.apk" />
-</target_preparer>
diff --git a/atest-py2/unittest_data/integration_dir_testing/int_dir2/int_dir_testing.xml b/atest-py2/unittest_data/integration_dir_testing/int_dir2/int_dir_testing.xml
deleted file mode 100644
index 2dd30f9..0000000
--- a/atest-py2/unittest_data/integration_dir_testing/int_dir2/int_dir_testing.xml
+++ /dev/null
@@ -1,3 +0,0 @@
-<target_preparer class="com.android.tradefed.targetprep.suite.SuiteApkInstaller">
- <option name="test-file-name" value="CtsUiDeviceTestCases.apk" />
-</target_preparer>
diff --git a/atest-py2/unittest_data/module-info.json b/atest-py2/unittest_data/module-info.json
deleted file mode 100644
index 0959fad..0000000
--- a/atest-py2/unittest_data/module-info.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "AmSlam": { "class": ["APPS"], "path": ["foo/bar/AmSlam"], "tags": ["tests"], "installed": ["out/target/product/generic/data/app/AmSlam/AmSlam.apk"], "module_name": "AmSlam" },
- "CtsJankDeviceTestCases": { "class": ["APPS"], "path": ["foo/bar/jank"], "tags": ["optional"], "installed": ["out/target/product/generic/data/app/CtsJankDeviceTestCases/CtsJankDeviceTestCases.apk"], "module_name": "CtsJankDeviceTestCases" },
- "CtsUiDeviceTestCases": { "class": ["APPS"], "path": ["tf/core/CtsUiDeviceTestCases"], "tags": ["optional"], "installed": ["out/target/product/generic/data/app/CtsUiDeviceTestCases/CtsUiDeviceTestCases.apk"], "module_name": "CtsJankDeviceTestCases" },
- "VtsTarget": { "class": ["FAKE"], "path": ["foo/bar/jank"], "tags": ["optional"], "installed": ["out/target/product/generic/VtsTarget"], "module_name": "VtsTarget" },
- "google-tradefed": { "class": ["JAVA_LIBRARIES"], "path": ["gtf/core"], "tags": ["optional"], "installed": ["out/host/linux-x86/framework/google-tradefed.jar"], "module_name": "google-tradefed" },
- "google-tradefed-contrib": { "class": ["JAVA_LIBRARIES"], "path": ["gtf/contrib"], "tags": ["optional"], "installed": ["out/host/linux-x86/framework/google-tradefed-contrib.jar"], "module_name": "google-tradefed-contrib" },
- "tradefed": { "class": ["EXECUTABLES", "JAVA_LIBRARIES"], "path": ["tf/core"], "tags": ["optional"], "installed": ["out/host/linux-x86/bin/tradefed.sh", "out/host/linux-x86/framework/tradefed.jar"], "module_name": "tradefed" },
- "tradefed-contrib": { "class": ["JAVA_LIBRARIES"], "path": ["tf/contrib"], "tags": ["optional"], "installed": ["out/host/linux-x86/framework/tradefed-contrib.jar"], "module_name": "tradefed-contrib" },
- "module-no-path": { "class": ["JAVA_LIBRARIES"], "tags": ["optional"], "installed": ["out/host/linux-x86/framework/tradefed-contrib.jar"], "module_name": ["module-no-path"] },
- "module1": { "class": ["JAVA_LIBRARIES"], "path": ["shared/path/to/be/used"], "tags": ["optional"], "installed": ["out/host/linux-x86/framework/tradefed-contrib.jar"], "module_name": "module1" },
- "module2": { "class": ["JAVA_LIBRARIES"], "path": ["shared/path/to/be/used"], "tags": ["optional"], "installed": ["out/host/linux-x86/framework/tradefed-contrib.jar"], "module_name": "module2" },
- "multiarch1": { "class": ["JAVA_LIBRARIES"], "path": ["shared/path/to/be/used2"], "tags": ["optional"], "installed": ["out/host/linux-x86/framework/tradefed-contrib.jar"], "module_name": "multiarch1" },
- "multiarch1_32": { "class": ["JAVA_LIBRARIES"], "path": ["shared/path/to/be/used2"], "tags": ["optional"], "installed": ["out/host/linux-x86/framework/tradefed-contrib.jar"], "module_name": "multiarch1" },
- "multiarch2": { "class": ["JAVA_LIBRARIES"], "path": ["shared/path/to/be/used2"], "tags": ["optional"], "installed": ["out/host/linux-x86/framework/tradefed-contrib.jar"], "module_name": "multiarch2" },
- "multiarch2_32": { "class": ["JAVA_LIBRARIES"], "path": ["shared/path/to/be/used2"], "tags": ["optional"], "installed": ["out/host/linux-x86/framework/tradefed-contrib.jar"], "module_name": "multiarch2" },
- "multiarch3": { "class": ["JAVA_LIBRARIES"], "path": ["shared/path/to/be/used2"], "tags": ["optional"], "installed": ["out/host/linux-x86/framework/tradefed-contrib.jar"], "module_name": "multiarch3" },
- "multiarch3_32": { "class": ["JAVA_LIBRARIES"], "path": ["shared/path/to/be/used2"], "tags": ["optional"], "installed": ["out/host/linux-x86/framework/tradefed-contrib.jar"], "module_name": "multiarch3_32" }
-}
diff --git a/atest-py2/unittest_data/path_testing/PathTesting.java b/atest-py2/unittest_data/path_testing/PathTesting.java
deleted file mode 100644
index 2245c67..0000000
--- a/atest-py2/unittest_data/path_testing/PathTesting.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.jank.cts.ui;
-
-/** UNUSED Class file for unit tests. */
-public class SomeClassForTesting {
- private static final String SOME_UNUSED_VAR = "For testing purposes";
-}
diff --git a/atest-py2/unittest_data/path_testing_empty/.empty_file b/atest-py2/unittest_data/path_testing_empty/.empty_file
deleted file mode 100644
index e69de29..0000000
--- a/atest-py2/unittest_data/path_testing_empty/.empty_file
+++ /dev/null
diff --git a/atest-py2/unittest_data/test_config/a.xml b/atest-py2/unittest_data/test_config/a.xml
deleted file mode 100644
index e69de29..0000000
--- a/atest-py2/unittest_data/test_config/a.xml
+++ /dev/null
diff --git a/atest-py2/unittest_data/test_mapping/folder1/test_mapping_sample b/atest-py2/unittest_data/test_mapping/folder1/test_mapping_sample
deleted file mode 100644
index 05cea61..0000000
--- a/atest-py2/unittest_data/test_mapping/folder1/test_mapping_sample
+++ /dev/null
@@ -1,22 +0,0 @@
-{
- "presubmit": [
- {
- "name": "test2"
- }
- ],
- "postsubmit": [
- {
- "name": "test3"
- }
- ],
- "other_group": [
- {
- "name": "test4"
- }
- ],
- "imports": [
- {
- "path": "../folder2"
- }
- ]
-}
diff --git a/atest-py2/unittest_data/test_mapping/folder2/test_mapping_sample b/atest-py2/unittest_data/test_mapping/folder2/test_mapping_sample
deleted file mode 100644
index 7517cd5..0000000
--- a/atest-py2/unittest_data/test_mapping/folder2/test_mapping_sample
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "presubmit": [
- {
- "name": "test5"
- }
- ],
- "postsubmit": [
- {
- "name": "test6"
- }
- ],
- "imports": [
- {
- "path": "../folder1"
- },
- {
- "path": "../folder3/folder4"
- },
- {
- "path": "../folder3/non-existing"
- }
- ]
-}
diff --git a/atest-py2/unittest_data/test_mapping/folder3/folder4/test_mapping_sample b/atest-py2/unittest_data/test_mapping/folder3/folder4/test_mapping_sample
deleted file mode 100644
index 6310055..0000000
--- a/atest-py2/unittest_data/test_mapping/folder3/folder4/test_mapping_sample
+++ /dev/null
@@ -1,7 +0,0 @@
-{
- "imports": [
- {
- "path": "../../folder5"
- }
- ]
-}
diff --git a/atest-py2/unittest_data/test_mapping/folder3/test_mapping_sample b/atest-py2/unittest_data/test_mapping/folder3/test_mapping_sample
deleted file mode 100644
index ecd5b7d..0000000
--- a/atest-py2/unittest_data/test_mapping/folder3/test_mapping_sample
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "presubmit": [
- {
- "name": "test7"
- }
- ],
- "postsubmit": [
- {
- "name": "test8"
- }
- ],
- "imports": [
- {
- "path": "../folder1"
- }
- ]
-}
diff --git a/atest-py2/unittest_data/test_mapping/folder5/test_mapping_sample b/atest-py2/unittest_data/test_mapping/folder5/test_mapping_sample
deleted file mode 100644
index c449a0a..0000000
--- a/atest-py2/unittest_data/test_mapping/folder5/test_mapping_sample
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "presubmit": [
- {
- "name": "test9"
- }
- ],
- "postsubmit": [
- {
- "name": "test10"
- }
- ]
-}
diff --git a/atest-py2/unittest_data/test_mapping/folder6/test_mapping_sample_golden b/atest-py2/unittest_data/test_mapping/folder6/test_mapping_sample_golden
deleted file mode 100644
index db3998d..0000000
--- a/atest-py2/unittest_data/test_mapping/folder6/test_mapping_sample_golden
+++ /dev/null
@@ -1,14 +0,0 @@
-{
- "presubmit": [
- {
- "name": "test1",
- "host": true,
- "include-filter": "testClass#testMethod"
- }
- ],
- "imports": [
- {
- "path": "path1//path2//path3"
- }
- ]
-}
diff --git a/atest-py2/unittest_data/test_mapping/folder6/test_mapping_sample_with_comments b/atest-py2/unittest_data/test_mapping/folder6/test_mapping_sample_with_comments
deleted file mode 100644
index 3f4083f..0000000
--- a/atest-py2/unittest_data/test_mapping/folder6/test_mapping_sample_with_comments
+++ /dev/null
@@ -1,16 +0,0 @@
-{#comments1
- "presubmit": [//comments2 // comments3 # comment4
- #comments3
- { #comments4
- "name": "test1",#comments5
-//comments6
- "host": true,//comments7
- "include-filter": "testClass#testMethod" #comment11 // another comments
- }#comments8
- ],#comments9 // another comments
- "imports": [
- {
- "path": "path1//path2//path3"#comment12
- }
- ]
-}#comments10
diff --git a/atest-py2/unittest_data/test_mapping/test_mapping_sample b/atest-py2/unittest_data/test_mapping/test_mapping_sample
deleted file mode 100644
index a4edd9c..0000000
--- a/atest-py2/unittest_data/test_mapping/test_mapping_sample
+++ /dev/null
@@ -1,8 +0,0 @@
-{
- "presubmit": [
- {
- "name": "test1",
- "host": true
- }
- ]
-}
diff --git a/atest-py2/unittest_data/vts_plan_files/vts-aa.xml b/atest-py2/unittest_data/vts_plan_files/vts-aa.xml
deleted file mode 100644
index 629005c..0000000
--- a/atest-py2/unittest_data/vts_plan_files/vts-aa.xml
+++ /dev/null
@@ -1,4 +0,0 @@
-<configuration description="VTS Serving Plan for Staging(new) tests">
- <include name="vts-bb" />
- <include name="vts-dd" />
-</configuration>
diff --git a/atest-py2/unittest_data/vts_plan_files/vts-bb.xml b/atest-py2/unittest_data/vts_plan_files/vts-bb.xml
deleted file mode 100644
index 87c7588..0000000
--- a/atest-py2/unittest_data/vts_plan_files/vts-bb.xml
+++ /dev/null
@@ -1,3 +0,0 @@
-<configuration description="VTS Serving Plan for Staging(new) tests">
- <include name="vts-cc" />
-</configuration>
diff --git a/atest-py2/unittest_data/vts_plan_files/vts-cc.xml b/atest-py2/unittest_data/vts_plan_files/vts-cc.xml
deleted file mode 100644
index 14125c0..0000000
--- a/atest-py2/unittest_data/vts_plan_files/vts-cc.xml
+++ /dev/null
@@ -1,2 +0,0 @@
-<configuration description="Common preparer">
-</configuration>
diff --git a/atest-py2/unittest_data/vts_plan_files/vts-dd.xml b/atest-py2/unittest_data/vts_plan_files/vts-dd.xml
deleted file mode 100644
index a56597b..0000000
--- a/atest-py2/unittest_data/vts_plan_files/vts-dd.xml
+++ /dev/null
@@ -1,2 +0,0 @@
-<configuration description="VTS Serving Plan for Staging(new) tests">
-</configuration>
diff --git a/atest-py2/unittest_data/vts_plan_files/vts-staging-default.xml b/atest-py2/unittest_data/vts_plan_files/vts-staging-default.xml
deleted file mode 100644
index 34cccce..0000000
--- a/atest-py2/unittest_data/vts_plan_files/vts-staging-default.xml
+++ /dev/null
@@ -1,4 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<configuration description="VTS Serving Plan for Staging(new) tests">
- <include name="vts-aa" />
-</configuration>
diff --git a/atest-py2/unittest_data/vts_push_files/push_file1.push b/atest-py2/unittest_data/vts_push_files/push_file1.push
deleted file mode 100644
index b55f453..0000000
--- a/atest-py2/unittest_data/vts_push_files/push_file1.push
+++ /dev/null
@@ -1,4 +0,0 @@
-push_file1_target1->/path/to/push/push_file1_target1
-push_file1_target2->/path/to/push/push_file1_target2
-
-push_file2.push
diff --git a/atest-py2/unittest_data/vts_push_files/push_file2.push b/atest-py2/unittest_data/vts_push_files/push_file2.push
deleted file mode 100644
index 3c5ae78..0000000
--- a/atest-py2/unittest_data/vts_push_files/push_file2.push
+++ /dev/null
@@ -1,2 +0,0 @@
-push_file2_target1->/path/to/push_file2_target1
-push_file2_target2->/path/to/push_file2_target2
diff --git a/atest-py2/unittest_utils.py b/atest-py2/unittest_utils.py
deleted file mode 100644
index a57afac..0000000
--- a/atest-py2/unittest_utils.py
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2017, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Utility functions for unit tests."""
-
-import os
-
-import constants
-import unittest_constants as uc
-
-def assert_strict_equal(test_class, first, second):
- """Check for strict equality and strict equality of nametuple elements.
-
- assertEqual considers types equal to their subtypes, but we want to
- not consider set() and frozenset() equal for testing.
- """
- test_class.assertEqual(first, second)
- # allow byte and unicode string equality.
- if not (isinstance(first, basestring) and
- isinstance(second, basestring)):
- test_class.assertIsInstance(first, type(second))
- test_class.assertIsInstance(second, type(first))
- # Recursively check elements of namedtuples for strict equals.
- if isinstance(first, tuple) and hasattr(first, '_fields'):
- # pylint: disable=invalid-name
- for f in first._fields:
- assert_strict_equal(test_class, getattr(first, f),
- getattr(second, f))
-
-def assert_equal_testinfos(test_class, test_info_a, test_info_b):
- """Check that the passed in TestInfos are equal."""
- # Use unittest.assertEqual to do checks when None is involved.
- if test_info_a is None or test_info_b is None:
- test_class.assertEqual(test_info_a, test_info_b)
- return
-
- for attr in test_info_a.__dict__:
- test_info_a_attr = getattr(test_info_a, attr)
- test_info_b_attr = getattr(test_info_b, attr)
- test_class.assertEqual(test_info_a_attr, test_info_b_attr,
- msg=('TestInfo.%s mismatch: %s != %s' %
- (attr, test_info_a_attr, test_info_b_attr)))
-
-def assert_equal_testinfo_sets(test_class, test_info_set_a, test_info_set_b):
- """Check that the sets of TestInfos are equal."""
- test_class.assertEqual(len(test_info_set_a), len(test_info_set_b),
- msg=('mismatch # of TestInfos: %d != %d' %
- (len(test_info_set_a), len(test_info_set_b))))
- # Iterate over a set and pop them out as you compare them.
- while test_info_set_a:
- test_info_a = test_info_set_a.pop()
- test_info_b_to_remove = None
- for test_info_b in test_info_set_b:
- try:
- assert_equal_testinfos(test_class, test_info_a, test_info_b)
- test_info_b_to_remove = test_info_b
- break
- except AssertionError:
- pass
- if test_info_b_to_remove:
- test_info_set_b.remove(test_info_b_to_remove)
- else:
- # We haven't found a match, raise an assertion error.
- raise AssertionError('No matching TestInfo (%s) in [%s]' %
- (test_info_a, ';'.join([str(t) for t in test_info_set_b])))
-
-
-def isfile_side_effect(value):
- """Mock return values for os.path.isfile."""
- if value == '/%s/%s' % (uc.CC_MODULE_DIR, constants.MODULE_CONFIG):
- return True
- if value == '/%s/%s' % (uc.MODULE_DIR, constants.MODULE_CONFIG):
- return True
- if value.endswith('.cc'):
- return True
- if value.endswith('.cpp'):
- return True
- if value.endswith('.java'):
- return True
- if value.endswith('.kt'):
- return True
- if value.endswith(uc.INT_NAME + '.xml'):
- return True
- if value.endswith(uc.GTF_INT_NAME + '.xml'):
- return True
- return False
-
-
-def realpath_side_effect(path):
- """Mock return values for os.path.realpath."""
- return os.path.join(uc.ROOT, path)
diff --git a/atest/Android.bp b/atest/Android.bp
index e3cd98c..24af31f 100644
--- a/atest/Android.bp
+++ b/atest/Android.bp
@@ -23,18 +23,6 @@
java_resource_dirs: ["res"],
}
-java_library_host {
- name: "asuite_proto_java",
- srcs: [
- "proto/*.proto",
- ],
- proto: {
- type: "full",
- canonical_path_from_root: false,
- include_dirs: ["external/protobuf/src"],
- },
-}
-
tradefed_binary_host {
name: "atest-tradefed",
short_name: "ATEST",
@@ -43,54 +31,10 @@
required: ["atest_tradefed.sh", "atest_script_help.sh"],
}
+// Attributes common to both atest and atest_flag_list_generator.
python_defaults {
- name: "atest_default",
+ name: "atest_defaults",
pkg_path: "atest",
- version: {
- py2: {
- enabled: false,
- embedded_launcher: false,
- },
- py3: {
- enabled: true,
- embedded_launcher: false,
- },
- },
-}
-
-python_defaults {
- name: "atest_lib_default",
- pkg_path: "atest",
- version: {
- py2: {
- enabled: false,
- embedded_launcher: false,
- },
- py3: {
- enabled: true,
- embedded_launcher: false,
- },
- },
-}
-
-python_defaults {
- name: "asuite_default",
- pkg_path: "asuite",
- version: {
- py2: {
- enabled: true,
- embedded_launcher: false,
- },
- py3: {
- enabled: true,
- embedded_launcher: false,
- },
- },
-}
-
-python_binary_host {
- name: "atest",
- main: "atest.py",
srcs: [
"**/*.py",
],
@@ -98,43 +42,55 @@
"*_unittest.py",
"*/*_unittest.py",
"asuite_lib_test/*.py",
- "proto/*_pb2.py",
- "proto/__init__.py",
- "tf_proto/__init__.py",
- "tf_proto/*_pb2.py",
+ "proto/*.py",
+ "tf_proto/*.py",
],
libs: [
- "atest_proto",
+ "asuite_proto",
"tradefed-protos-py",
"py-google-api-python-client",
"py-oauth2client",
- "py-six",
],
data: [
- "tools/updatedb_darwin.sh",
+ "bazel/resources/**/*",
],
- // Make atest's built name to atest-dev
+}
+
+python_binary_host {
+ name: "atest",
+ defaults: ["atest_defaults"],
+ main: "atest_main.py",
+ data: [
+ ":atest_flag_list_for_completion",
+ ],
+ // Make atest's built name be atest-dev
stem: "atest-dev",
- defaults: ["atest_default"],
dist: {
targets: ["droidcore"],
},
}
-python_library_host {
- name: "atest_proto",
- defaults: ["atest_default"],
- srcs: [
- "proto/*.proto",
- ],
- proto: {
- canonical_path_from_root: false,
- },
+// A helper binary used to generate the atest_flag_list_for_completion.txt
+// file, it should never be run outside of the build. It's the same
+// as atest except it has atest_flag_list_generator.py as it's main python
+// file. The atest_flag_list_for_completion.txt file is extracted from the
+// atest binary during autocompletion.
+python_binary_host {
+ name: "atest_flag_list_generator",
+ defaults: ["atest_defaults"],
+ main: "atest_flag_list_generator.py",
+}
+
+genrule {
+ name: "atest_flag_list_for_completion",
+ out: ["atest_flag_list_for_completion.txt"],
+ tools: ["atest_flag_list_generator"],
+ cmd: "$(location atest_flag_list_generator) > $(out)",
}
python_library_host {
name: "atest_module_info",
- defaults: ["atest_lib_default"],
+ pkg_path: "atest",
srcs: [
"atest_error.py",
"atest_decorator.py",
@@ -144,44 +100,22 @@
"module_info.py",
],
libs: [
- "metrics-protos",
- ]
-}
-
-python_library_host {
- name: "asuite_proto",
- defaults: ["asuite_default"],
- srcs: [
- "proto/*.proto",
- ],
- proto: {
- canonical_path_from_root: false,
- },
-}
-
-python_library_host {
- name: "asuite_metrics",
- defaults: ["asuite_default"],
- srcs: [
- "asuite_metrics.py",
+ "tradefed-protos-py",
],
}
python_library_host {
name: "asuite_cc_client",
- defaults: ["asuite_default"],
+ pkg_path: "atest",
srcs: [
- "atest_error.py",
- "atest_decorator.py",
"atest_enum.py",
- "atest_utils.py",
- "constants.py",
- "constants_default.py",
+ "asuite_metrics.py",
"metrics/*.py",
+ "coverage/*.py",
],
libs: [
"asuite_proto",
- "asuite_metrics",
+ "atest_module_info",
],
}
@@ -196,26 +130,22 @@
unit_test: true,
},
data: [
- "tools/updatedb_darwin.sh",
"unittest_data/**/*",
"unittest_data/**/.*",
],
exclude_srcs: [
"asuite_lib_test/*.py",
- "proto/*_pb2.py",
- "proto/__init__.py",
"tools/atest_updatedb_unittest.py",
- "tf_proto/__init__.py",
- "tf_proto/*_pb2.py",
+ "proto/*.py",
+ "tf_proto/*.py",
],
libs: [
- "atest_proto",
+ "asuite_proto",
"pyfakefs",
"tradefed-protos-py",
],
test_config: "atest_unittests.xml",
test_suites: ["general-tests"],
- defaults: ["atest_default"],
}
python_test_host {
@@ -230,7 +160,6 @@
],
test_config: "atest_integration_tests.xml",
test_suites: ["null-suite"],
- defaults: ["atest_default"],
test_options: {
unit_test: false,
},
diff --git a/atest/BUILD.bazel b/atest/BUILD.bazel
new file mode 100644
index 0000000..25ac092
--- /dev/null
+++ b/atest/BUILD.bazel
@@ -0,0 +1,15 @@
+package(default_visibility = ["//visibility:public"])
+
+filegroup(
+ name = "atest-tradefed-shell",
+ srcs = glob(["res/**/*"]),
+)
+
+java_library(
+ name = "atest-tradefed",
+ resource_strip_prefix = "tools/asuite/atest/res",
+ resources = [
+ ":atest-tradefed-shell",
+ ],
+ target_compatible_with = ["//build/bazel/platforms/os:linux"],
+)
diff --git a/atest/OWNERS b/atest/OWNERS
index 4d3541a..738e735 100644
--- a/atest/OWNERS
+++ b/atest/OWNERS
@@ -2,3 +2,6 @@
easoncylee@google.com
kevcheng@google.com
yangbill@google.com
+kellyhung@google.com
+yikezh@google.com
+nelsonli@google.com
diff --git a/atest/__init__.py b/atest/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/atest/__init__.py
+++ /dev/null
diff --git a/atest/asuite_lib_test/Android.bp b/atest/asuite_lib_test/Android.bp
index 99e0a5f..9669ada 100644
--- a/atest/asuite_lib_test/Android.bp
+++ b/atest/asuite_lib_test/Android.bp
@@ -16,29 +16,10 @@
// also include asuite_metrics and other needed python files, in order to make sure asuite_metrics
// tests result is accurate, separate them to two different test modules.
-// For testing asuite_metrics python2 libs
package {
default_applicable_licenses: ["Android-Apache-2.0"],
}
-// For testing asuite_metrics python3 libs
-python_test_host {
- name: "asuite_metrics_lib_tests",
- main: "asuite_lib_run_tests.py",
- pkg_path: "asuite_test",
- srcs: [
- "asuite_lib_run_tests.py",
- "asuite_metrics_test.py",
- ],
- libs: [
- "asuite_metrics",
- ],
- test_options: {
- unit_test: true,
- },
- defaults: ["atest_lib_default"],
-}
-
// For testing asuite_cc_client python3 libs
python_test_host {
name: "asuite_cc_lib_tests",
@@ -47,6 +28,7 @@
srcs: [
"asuite_lib_run_tests.py",
"asuite_cc_client_test.py",
+ "asuite_metrics_test.py",
],
libs: [
"asuite_cc_client",
@@ -54,5 +36,4 @@
test_options: {
unit_test: true,
},
- defaults: ["atest_lib_default"],
}
diff --git a/atest/asuite_lib_test/asuite_cc_client_test.py b/atest/asuite_lib_test/asuite_cc_client_test.py
index 9ed222e..1408e1d 100644
--- a/atest/asuite_lib_test/asuite_cc_client_test.py
+++ b/atest/asuite_lib_test/asuite_cc_client_test.py
@@ -30,10 +30,10 @@
# pylint: disable=unused-variable
# pylint: disable=import-outside-toplevel
# pylint: disable=unused-import
- from asuite.metrics import metrics
- from asuite.metrics import metrics_base
- from asuite.metrics import metrics_utils
- from asuite import atest_utils
+ from atest.metrics import metrics
+ from atest.metrics import metrics_base
+ from atest.metrics import metrics_utils
+
# TODO (b/132602907): Add the real usage for checking if metrics pass or
# fail.
diff --git a/atest/asuite_lib_test/asuite_lib_run_tests.py b/atest/asuite_lib_test/asuite_lib_run_tests.py
index baa34cf..4e22311 100755
--- a/atest/asuite_lib_test/asuite_lib_run_tests.py
+++ b/atest/asuite_lib_test/asuite_lib_run_tests.py
@@ -32,9 +32,10 @@
List of strings (the testable module import path).
"""
testable_modules = []
- base_path = os.path.dirname(os.path.realpath(__file__))
+ package = os.path.dirname(os.path.realpath(__file__))
+ base_path = os.path.dirname(package)
- for dirpath, _, files in os.walk(base_path):
+ for dirpath, _, files in os.walk(package):
for f in files:
if f.endswith("_test.py"):
# Now transform it into a relative import path.
@@ -56,12 +57,6 @@
Returns:
0 if success. None-zero if fails.
"""
- # Force remove syspath related to atest to make sure the env is clean.
- # These tests need to run in isolation (to find bugs like b/132086641)
- # so we scrub out all atest modules.
- for path in sys.path:
- if path.endswith('/atest'):
- sys.path.remove(path)
test_modules = get_test_modules()
for mod in test_modules:
import_module(mod)
diff --git a/atest/asuite_lib_test/asuite_metrics_test.py b/atest/asuite_lib_test/asuite_metrics_test.py
index 26c1eaf..73dd673 100644
--- a/atest/asuite_lib_test/asuite_metrics_test.py
+++ b/atest/asuite_lib_test/asuite_metrics_test.py
@@ -30,7 +30,7 @@
# pylint: disable=unused-variable
# pylint: disable=import-outside-toplevel
# pylint: disable=unused-import
- from asuite import asuite_metrics
+ from atest import asuite_metrics
# TODO (b/132602907): Add the real usage for checking if metrics pass or fail.
diff --git a/atest/asuite_metrics.py b/atest/asuite_metrics.py
index 0828152..49d8b85 100644
--- a/atest/asuite_metrics.py
+++ b/atest/asuite_metrics.py
@@ -27,6 +27,7 @@
from urllib2 import Request
from urllib2 import urlopen
+
_JSON_HEADERS = {'Content-Type': 'application/json'}
_METRICS_RESPONSE = 'done'
_METRICS_TIMEOUT = 2 #seconds
@@ -70,17 +71,15 @@
def _get_grouping_key():
- """Get grouping key. Returns UUID.uuid4."""
+ """Get grouping key. Returns UUID.uuid5."""
meta_file = os.path.join(os.path.expanduser('~'),
'.config', 'asuite', '.metadata')
- if os.path.isfile(meta_file):
- with open(meta_file) as f:
- try:
- return uuid.UUID(f.read(), version=4)
- except ValueError:
- logging.debug('malformed group_key in file, rewriting')
- # Cache uuid to file. Raise exception if any file access error.
- key = uuid.uuid4()
+ # (b/278503654) Treat non-human invocation as the same user when the email
+ # is null.
+ # Prevent circular import.
+ #pylint: disable=import-outside-toplevel
+ from atest.metrics import metrics_base
+ key = uuid.uuid5(uuid.NAMESPACE_DNS, metrics_base.get_user_email())
dir_path = os.path.dirname(meta_file)
if os.path.isfile(dir_path):
os.remove(dir_path)
diff --git a/atest/atest_arg_parser.py b/atest/atest_arg_parser.py
index a285653..49b054a 100644
--- a/atest/atest_arg_parser.py
+++ b/atest/atest_arg_parser.py
@@ -18,13 +18,32 @@
Atest Argument Parser class for atest.
"""
-# pylint: disable=line-too-long
+# TODO: (@jimtang) Unsuppress too-many-lines Pylint warning.
+# pylint: disable=line-too-long, too-many-lines
import argparse
import pydoc
-import bazel_mode
-import constants
+from atest import bazel_mode
+from atest import constants
+
+from atest.atest_utils import BuildOutputMode
+from atest.test_runners.roboleaf_test_runner import BazelBuildMode
+
+def output_mode_msg() -> str:
+ """Generate helper strings for BuildOutputMode."""
+ msg = []
+ for _, value in BuildOutputMode.__members__.items():
+ if value == BuildOutputMode.STREAMED:
+ msg.append(f'\t\t{BuildOutputMode.STREAMED.value}: '
+ 'full output like what "m" does. (default)')
+ elif value == BuildOutputMode.LOGGED:
+ msg.append(f'\t\t{BuildOutputMode.LOGGED.value}: '
+ 'print build output to a log file.')
+ else:
+ raise RuntimeError('Found unknown attribute!')
+ return '\n'.join(msg)
+
# Constants used for AtestArgParser and EPILOG_TEMPLATE
HELP_DESC = ('A command line tool that allows users to build, install, and run '
@@ -39,19 +58,29 @@
ALL_ABI = 'Set to run tests for all abis.'
ANNOTATION_FILTER = ('Accept keyword that will be translated to fully qualified'
'annotation class name.')
+AUTO_SHARDING = ('Trigger N AVDs/shards for long duration tests. (N is 2 by '
+ 'default).')
BUILD = 'Run a build.'
BAZEL_MODE = 'Run tests using Bazel.'
BAZEL_ARG = ('Forward a flag to Bazel for tests executed with Bazel; '
'see --bazel-mode.')
+BUILD_OUTPUT = (r'Specifies the desired build output mode. '
+ f'Valid values are:\n{output_mode_msg()}')
CLEAR_CACHE = 'Wipe out the test_infos cache of the test and start a new search.'
COLLECT_TESTS_ONLY = ('Collect a list test cases of the instrumentation tests '
'without testing them in real.')
+COVERAGE = ('Instrument tests with code coverage and generate a code coverage '
+ 'report.')
+DEVICE_ONLY = ('Only run tests that require a device. (Note: only workable with'
+ ' --test-mapping.)')
DISABLE_TEARDOWN = 'Disable test teardown and cleanup.'
DRY_RUN = 'Dry run atest without building, installing and running tests in real.'
ENABLE_DEVICE_PREPARER = ('Enable template/preparers/device-preparer as the '
'default preparer.')
ENABLE_FILE_PATTERNS = 'Enable FILE_PATTERNS in TEST_MAPPING.'
FLAKES_INFO = 'Test result with flakes info.'
+FUZZY_SEARCH = 'Running fuzzy search when test not found. (implicit True)'
+GENERATE_RUNNER_CMD = 'Generate the runner command(s) of given tests.'
HISTORY = ('Show test results in chronological order(with specified number or '
'all by default).')
HOST = ('Run the test completely on the host without a device. '
@@ -66,21 +95,24 @@
'"--instant" is passed.')
ITERATION = 'Loop-run tests until the max iteration is reached. (default: 10)'
LATEST_RESULT = 'Print latest test result.'
+LD_LIB_PATH = ('Insert $ANDROID_HOST_OUT/{lib,lib64} to LD_LIBRARY_PATH when '
+ 'running tests with Tradefed.')
LIST_MODULES = 'List testable modules of the given suite.'
+NO_CHECKING_DEVICE = 'Do NOT check device availability. (even it is a device test)'
NO_ENABLE_ROOT = ('Do NOT restart adbd with root permission even the test config '
'has RootTargetPreparer.')
NO_METRICS = 'Do not send metrics.'
+ROBOLEAF_MODE = ('Check if module has been listed in the ["prod", "staging", or'
+ ' "dev"] roboleaf allowlists and invoke with b test.')
REBUILD_MODULE_INFO = ('Forces a rebuild of the module-info.json file. '
'This may be necessary following a repo sync or '
'when writing a new test.')
-
REQUEST_UPLOAD_RESULT = ('Request permission to upload test result. This option '
'only needs to set once and takes effect until '
'--disable-upload-result is set.')
DISABLE_UPLOAD_RESULT = ('Turn off the upload of test result. This option '
'only needs to set once and takes effect until '
'--request-upload-result is set')
-
RERUN_UNTIL_FAILURE = ('Rerun all tests until a failure occurs or the max '
'iteration is reached. (default: forever!)')
# For Integer.MAX_VALUE == (2**31 - 1) and not possible to give a larger integer
@@ -90,6 +122,8 @@
'is reached. (default: 10)')
SERIAL = 'The device to run the test on.'
SHARDING = 'Option to specify sharding count. (default: 2)'
+SMART_TESTING_LOCAL = ('Automatically detect untracked/unstaged files in current'
+ ' git run associated tests.')
START_AVD = 'Automatically create an AVD and run tests on the virtual device.'
TEST = ('Run the tests. WARNING: Many test configs force cleanup of device '
'after test run. In this case, "-d" must be used in previous test run '
@@ -151,8 +185,12 @@
def add_atest_args(self):
"""A function that does ArgumentParser.add_argument()"""
self.add_argument('tests', nargs='*', help='Tests to build and/or run.')
+
# Options that to do with testing.
self.add_argument('-a', '--all-abi', action='store_true', help=ALL_ABI)
+
+ self.add_argument('--auto-sharding', action='store_true', help=AUTO_SHARDING)
+
self.add_argument('-b', '--build', action='append_const', dest='steps',
const=constants.BUILD_STEP, help=BUILD)
self.add_argument('--bazel-mode', default=True, action='store_true',
@@ -164,8 +202,16 @@
self.add_argument('-d', '--disable-teardown', action='store_true',
help=DISABLE_TEARDOWN)
- self.add_argument('--enable-device-preparer', action='store_true', help=HOST)
- self.add_argument('--host', action='store_true', help=HOST)
+ self.add_argument('--enable-device-preparer', action='store_true',
+ help=ENABLE_DEVICE_PREPARER)
+ self.add_argument('--experimental-coverage', action='store_true', help=COVERAGE)
+ # Options for host and device-only:
+ # A group of options for testing mapping tests. They are mutually
+ # exclusive in a command line.
+ hgroup = self.add_mutually_exclusive_group()
+ hgroup.add_argument('--host', action='store_true', help=HOST)
+ hgroup.add_argument('--device-only', action='store_true',
+ help=DEVICE_ONLY)
self.add_argument('-i', '--install', action='append_const',
dest='steps', const=constants.INSTALL_STEP,
help=INSTALL)
@@ -173,6 +219,13 @@
action='store_true', help=REBUILD_MODULE_INFO)
self.add_argument('--no-enable-root', help=NO_ENABLE_ROOT,
action='store_true')
+ self.add_argument('--roboleaf-mode',
+ nargs='?',
+ default=BazelBuildMode.OFF,
+ const=BazelBuildMode.PROD,
+ choices=BazelBuildMode,
+ type=BazelBuildMode,
+ help=ROBOLEAF_MODE)
self.add_argument('--sharding', nargs='?', const=2,
type=_positive_int, default=0,
help=SHARDING)
@@ -182,6 +235,9 @@
action='store_true')
self.add_argument('-w', '--wait-for-debugger', action='store_true',
help=WAIT_FOR_DEBUGGER)
+ self.add_argument('--auto-ld-library-path', action='store_true',
+ help=LD_LIB_PATH)
+
# Options for request/disable upload results. They are mutually
# exclusive in a command line.
ugroup = self.add_mutually_exclusive_group()
@@ -190,8 +246,11 @@
ugroup.add_argument('--disable-upload-result', action='store_true',
help=DISABLE_UPLOAD_RESULT)
+ mgroup = self.add_mutually_exclusive_group()
+ mgroup.add_argument('--smart-testing-local', action='store_true',
+ help=SMART_TESTING_LOCAL)
# Options related to Test Mapping
- self.add_argument('-p', '--test-mapping', action='store_true',
+ mgroup.add_argument('-p', '--test-mapping', action='store_true',
help=TEST_MAPPING)
self.add_argument('--include-subdirs', action='store_true',
help=INCLUDE_SUBDIRS)
@@ -201,7 +260,7 @@
help=ENABLE_FILE_PATTERNS)
# Options related to Host Unit Test.
- self.add_argument('--host-unit-test-only', action='store_true',
+ mgroup.add_argument('--host-unit-test-only', action='store_true',
help=HOST_UNIT_TEST_ONLY)
# Options for information queries and dry-runs:
@@ -217,6 +276,18 @@
self.add_argument('-L', '--list-modules', help=LIST_MODULES)
self.add_argument('-v', '--verbose', action='store_true', help=VERBOSE)
self.add_argument('-V', '--version', action='store_true', help=VERSION)
+ self.add_argument('--build-output',
+ default=BuildOutputMode.STREAMED,
+ choices=BuildOutputMode,
+ type=BuildOutputMode,
+ help=BUILD_OUTPUT)
+
+ # Options that switch on/off fuzzy searching.
+ fgroup = self.add_mutually_exclusive_group()
+ fgroup.add_argument('--no-fuzzy-search', action='store_false',
+ default=True, dest='fuzzy_search', help=FUZZY_SEARCH)
+ fgroup.add_argument('--fuzzy-search', action='store_true',
+ help=FUZZY_SEARCH)
# Options that to do with acloud/AVDs.
agroup = self.add_mutually_exclusive_group()
@@ -268,6 +339,8 @@
help=VERIFY_CMD_MAPPING)
self.add_argument('-e', '--verify-env-variable', action='store_true',
help=VERIFY_ENV_VARIABLE)
+ self.add_argument('-g', '--generate-runner-cmd', action='store_true',
+ help=GENERATE_RUNNER_CMD)
# Options for Tradefed debug mode.
self.add_argument('-D', '--tf-debug', nargs='?', const=10888,
type=_positive_int, default=0,
@@ -311,7 +384,10 @@
# Option to filter the output of aggregate metrics content.
self.add_argument('--aggregate-metric-filter', action='append',
help=AGGREGATE_METRIC_FILTER)
-
+ # Option that allows building and running without regarding device
+ # availability even the given test is a device/host-driven test.
+ self.add_argument('--no-checking-device', action='store_true',
+ help=NO_CHECKING_DEVICE)
# This arg actually doesn't consume anything, it's primarily used for
# the help description and creating custom_args in the NameSpace object.
self.add_argument('--', dest='custom_args', nargs='*',
@@ -344,17 +420,21 @@
AGGREGATE_METRIC_FILTER=AGGREGATE_METRIC_FILTER,
ALL_ABI=ALL_ABI,
ANNOTATION_FILTER=ANNOTATION_FILTER,
+ AUTO_SHARDING=AUTO_SHARDING,
BUILD=BUILD,
BAZEL_MODE=BAZEL_MODE,
BAZEL_ARG=BAZEL_ARG,
CLEAR_CACHE=CLEAR_CACHE,
COLLECT_TESTS_ONLY=COLLECT_TESTS_ONLY,
+ COVERAGE=COVERAGE,
+ DEVICE_ONLY=DEVICE_ONLY,
DISABLE_TEARDOWN=DISABLE_TEARDOWN,
DISABLE_UPLOAD_RESULT=DISABLE_UPLOAD_RESULT,
DRY_RUN=DRY_RUN,
ENABLE_DEVICE_PREPARER=ENABLE_DEVICE_PREPARER,
ENABLE_FILE_PATTERNS=ENABLE_FILE_PATTERNS,
FLAKES_INFO=FLAKES_INFO,
+ GENERATE_RUNNER_CMD=GENERATE_RUNNER_CMD,
HELP_DESC=HELP_DESC,
HISTORY=HISTORY,
HOST=HOST,
@@ -365,15 +445,21 @@
INSTANT=INSTANT,
ITERATION=ITERATION,
LATEST_RESULT=LATEST_RESULT,
+ LD_LIB_PATH=LD_LIB_PATH,
LIST_MODULES=LIST_MODULES,
NO_ENABLE_ROOT=NO_ENABLE_ROOT,
NO_METRICS=NO_METRICS,
+ NO_CHECKING_DEVICE=NO_CHECKING_DEVICE,
+ FUZZY_SEARCH=FUZZY_SEARCH,
REBUILD_MODULE_INFO=REBUILD_MODULE_INFO,
+ ROBOLEAF_MODE=ROBOLEAF_MODE,
REQUEST_UPLOAD_RESULT=REQUEST_UPLOAD_RESULT,
RERUN_UNTIL_FAILURE=RERUN_UNTIL_FAILURE,
RETRY_ANY_FAILURE=RETRY_ANY_FAILURE,
SERIAL=SERIAL,
SHARDING=SHARDING,
+ BUILD_OUTPUT=BUILD_OUTPUT,
+ SMART_TESTING_LOCAL=SMART_TESTING_LOCAL,
START_AVD=START_AVD,
TEST=TEST,
TEST_CONFIG_SELECTION=TEST_CONFIG_SELECTION,
@@ -427,6 +513,12 @@
atest <test> -- --abi arm64-v8a # ARM 64-bit
atest <test> -- --abi armeabi-v7a # ARM 32-bit
+ --auto-ld-library-path
+ {LD_LIB_PATH}
+
+ --auto-sharding
+ {AUTO_SHARDING}
+
-b, --build
{BUILD} (implicit default)
@@ -436,6 +528,9 @@
--bazel-arg
{BAZEL_ARG}
+ --device-only
+ {DEVICE_ONLY}
+
-d, --disable-teardown
{DISABLE_TEARDOWN}
@@ -445,6 +540,9 @@
--enable-device-preparer
{ENABLE_DEVICE_PREPARER}
+ --experimental-coverage
+ {COVERAGE}
+
--host
{HOST}
@@ -457,15 +555,29 @@
-m, --rebuild-module-info
{REBUILD_MODULE_INFO}
+ --roboleaf-mode
+ {ROBOLEAF_MODE}
+
--no-enable-root
{NO_ENABLE_ROOT}
+ --no-checking-device
+ {NO_CHECKING_DEVICE}
+
-s, --serial [SERIAL]
{SERIAL}
--sharding [SHARD_NUMBER]
{SHARDING}
+ --smart-testing-local
+ {SMART_TESTING_LOCAL} e.g. Have modified code in packages/apps/Settings/tests/unit/src.
+ croot packages/apps/Settings/tests/unit/src
+ atest --smart-testing-local
+
+ will be equivalent to (from <android root>):
+ atest --smart-testing-local packages/apps/Settings/tests/unit/src
+
-t, --test [TEST1, TEST2, ...]
{TEST} (implicit default)
@@ -489,11 +601,15 @@
-w, --wait-for-debugger
{WAIT_FOR_DEBUGGER}
+ --use-modules-in
+ {USE_MODULES_IN}
+
+ [ Upload Test Result ]
--request-upload-result
{REQUEST_UPLOAD_RESULT}
- --use-modules-in
- {USE_MODULES_IN}
+ --disable-upload-result
+ {DISABLE_UPLOAD_RESULT}
[ Test Mapping ]
-p, --test-mapping
@@ -519,6 +635,9 @@
-L, --list-modules
{LIST_MODULES}
+ --[no-]fuzzy-search
+ {FUZZY_SEARCH}
+
--latest-result
{LATEST_RESULT}
@@ -528,6 +647,8 @@
-V, --version
{VERSION}
+ --build-output
+ {BUILD_OUTPUT}
[ Dry-Run and Caching ]
--dry-run
@@ -859,7 +980,7 @@
directories. You can also specify a target directory.
Example:
- atest (run presubmit tests in TEST_MAPPING files in current and parent directories)
+ atest (run presubmit tests in TEST_MAPPING files and host unit tests in current and parent directories)
atest --test-mapping </path/to/project>
(run presubmit tests in TEST_MAPPING files in </path/to/project> and its parent directories)
diff --git a/atest/atest_arg_parser_unittest.py b/atest/atest_arg_parser_unittest.py
index c56237e..b45ce86 100755
--- a/atest/atest_arg_parser_unittest.py
+++ b/atest/atest_arg_parser_unittest.py
@@ -19,7 +19,7 @@
import unittest
-import atest_arg_parser
+from atest import atest_arg_parser
class AtestArgParserUnittests(unittest.TestCase):
diff --git a/atest/atest_completion.sh b/atest/atest_completion.sh
index 37086e7..dc29503 100644
--- a/atest/atest_completion.sh
+++ b/atest/atest_completion.sh
@@ -14,54 +14,6 @@
ATEST_REL_DIR="tools/asuite/atest"
-_fetch_testable_modules() {
- [[ -z $ANDROID_BUILD_TOP ]] && return 0
- export ATEST_DIR="$ANDROID_BUILD_TOP/$ATEST_REL_DIR"
- /usr/bin/env python3 - << END
-import os
-import pickle
-import sys
-
-from pathlib import Path
-
-sys.path.append(os.getenv('ATEST_DIR'))
-import constants
-
-index_dir = Path(os.getenv(constants.ANDROID_HOST_OUT)).joinpath('indexes')
-module_index = index_dir.joinpath(constants.MODULE_INDEX)
-if os.path.isfile(module_index):
- with open(module_index, 'rb') as cache:
- try:
- print("\n".join(pickle.load(cache, encoding="utf-8")))
- except:
- print("\n".join(pickle.load(cache)))
-else:
- print("")
-END
- unset ATEST_DIR
-}
-
-# This function invoke get_args() and return each item
-# of the list for tab completion candidates.
-_fetch_atest_args() {
- [[ -z $ANDROID_BUILD_TOP ]] && return 0
- export ATEST_DIR="$ANDROID_BUILD_TOP/$ATEST_REL_DIR"
- /usr/bin/env python3 - << END
-import os
-import sys
-
-atest_dir = os.path.join(os.getenv('ATEST_DIR'))
-sys.path.append(atest_dir)
-
-import atest_arg_parser
-
-parser = atest_arg_parser.AtestArgParser()
-parser.add_atest_args()
-print("\n".join(parser.get_args()))
-END
- unset ATEST_DIR
-}
-
# This function returns devices recognised by adb.
_fetch_adb_devices() {
while read dev; do echo $dev | awk '{print $1}'; done < <(adb devices | egrep -v "^List|^$"||true)
@@ -73,23 +25,39 @@
find -maxdepth 5 -type f -name TEST_MAPPING |sed 's/^.\///g'| xargs dirname 2>/dev/null
}
+function _pip_install() {
+ if ! which $1 >/dev/null; then
+ install_cmd="pip3 install --user $1"
+ echo "${FUNCNAME[1]} requires $1 but not found. Installing..."
+ eval $install_cmd >/dev/null
+ fi
+}
+
# The main tab completion function.
_atest() {
- local cur prev
COMPREPLY=()
- cur="${COMP_WORDS[COMP_CWORD]}"
- prev="${COMP_WORDS[COMP_CWORD-1]}"
+ local cmd=$(which $1)
+ local cur="${COMP_WORDS[COMP_CWORD]}"
+ local prev="${COMP_WORDS[COMP_CWORD-1]}"
_get_comp_words_by_ref -n : cur prev || true
+ if [[ "$cmd" == *prebuilts/asuite/atest/linux-x86/atest ]]; then
+ # prebuilts/asuite/atest/linux-x86/atest is shell script wrapper around
+ # atest-py3, which is what we should actually use.
+ cmd=$ANDROID_BUILD_TOP/prebuilts/asuite/atest/linux-x86/atest-py3
+ fi
+
case "$cur" in
-*)
- COMPREPLY=($(compgen -W "$(_fetch_atest_args)" -- $cur))
+ COMPREPLY=($(compgen -W "$(unzip -p $cmd atest/atest_flag_list_for_completion.txt)" -- $cur))
;;
*/*)
;;
*)
- local candidate_args=$(ls; _fetch_testable_modules)
- COMPREPLY=($(compgen -W "$candidate_args" -- $cur))
+ # Use grep instead of compgen -W because compgen -W is very slow. It takes
+ # ~0.7 seconds for compgen to read the all_modules.txt file.
+ # TODO(b/256228056) This fails if $cur has special characters in it
+ COMPREPLY=($(ls | grep "^$cur"; grep "^$cur" $ANDROID_PRODUCT_OUT/all_modules.txt 2>/dev/null))
;;
esac
@@ -136,19 +104,20 @@
# BASH version <= 4.3 doesn't have nosort option.
# Note that nosort has no effect for zsh.
local _atest_comp_options="-o default -o nosort"
- local _atest_executables=(atest atest-dev atest-src atest-py3)
+ local _atest_executables=(atest atest-dev atest-py3)
for exec in "${_atest_executables[*]}"; do
complete -F _atest $_atest_comp_options $exec 2>/dev/null || \
complete -F _atest -o default $exec
done
- # Install atest-src for the convenience of debugging.
- local atest_src="$T/$ATEST_REL_DIR/atest.py"
- [[ -f "$atest_src" ]] && alias atest-src="$atest_src"
+ function atest-src() {
+ echo "atest-src is deprecated, use m atest && atest-dev instead" >&2
+ return 1
+ }
# Use prebuilt python3 for atest-dev
function atest-dev() {
- atest_dev="$ANDROID_BUILD_TOP/out/host/$(uname -s | tr '[:upper:]' '[:lower:]')-x86/bin/atest-dev"
+ atest_dev="$ANDROID_SOONG_HOST_OUT/bin/atest-dev"
if [ ! -f $atest_dev ]; then
echo "Cannot find atest-dev. Run 'm atest' to generate one."
return 1
@@ -156,6 +125,39 @@
PREBUILT_TOOLS_DIR="$ANDROID_BUILD_TOP/prebuilts/build-tools/path/linux-x86"
PATH=$PREBUILT_TOOLS_DIR:$PATH $atest_dev "$@"
}
+
+ # pyinstrument profiler
+ function _atest_profile_cli() {
+ local T="$(gettop)"
+ profile="$HOME/.atest/$(date +'%FT%H-%M-%S').pyisession"
+ _pip_install pyinstrument
+ if [ "$?" -eq 0 ]; then
+ m atest && \
+ python3 $T/tools/asuite/atest/profiler.py pyinstrument $profile $ANDROID_SOONG_HOST_OUT/bin/atest-dev "$@" && \
+ python3 -m pyinstrument -t --show-all --load $profile && \
+ echo "$(tput setaf 3)$profile$(tput sgr0) saved."
+ fi
+ }
+
+ # cProfile profiler + snakeviz visualization
+ function _atest_profile_web() {
+ local T="$(gettop)"
+ profile="$HOME/.atest/$(date +'%F_%H-%M-%S').pstats"
+ m atest && \
+ python3 $T/tools/asuite/atest/profiler.py cProfile $profile $ANDROID_SOONG_HOST_OUT/bin/atest-dev "$@" && \
+ echo "$profile saved." || return 1
+
+ _pip_install snakeviz
+ if [ "$?" -eq 0 ]; then
+ run_cmd="snakeviz -H $HOSTNAME $profile >/dev/null 2>&1"
+ echo "$(tput bold)Use Ctrl-C to stop.$(tput sgr0)"
+ eval $run_cmd
+ echo
+ echo "To permanently start a web server, please run:"
+ echo $(tput setaf 3)"nohup $run_cmd &"$(tput sgr0)
+ echo "and share $(tput setaf 3)http://$HOSTNAME:8080/snakeviz/$profile$(tput sgr0)."
+ fi
+ }
}
_atest_main
diff --git a/atest/atest_enum.py b/atest/atest_enum.py
index 825f786..f60a17d 100644
--- a/atest/atest_enum.py
+++ b/atest/atest_enum.py
@@ -21,7 +21,7 @@
@unique
class DetectType(IntEnum):
"""An Enum class for local_detect_event."""
- # Detect type for local_detect_event; next expansion: 22
+ # Detect type for local_detect_event; next expansion: 43
BUG_DETECTED = 0
ACLOUD_CREATE = 1
FIND_BUILD = 2
@@ -46,10 +46,40 @@
ATEST_CONFIG = 15
TEST_WITH_ARGS = 16
TEST_NULL_ARGS = 17
- MODULE_MERGE = 18
- MODULE_INFO_INIT_TIME = 19
+ MODULE_MERGE = 18 # Deprecated. Use MODULE_MERGE_MS instead.
+ MODULE_INFO_INIT_TIME = 19 # Deprecated. Use MODULE_INFO_INIT_MS instead.
MODULE_MERGE_MS = 20
NATIVE_TEST_NOT_FOUND = 21
+ BAZEL_WORKSPACE_GENERATE_TIME = 22
+ MODULE_LOAD_MS = 23
+ MODULE_INFO_INIT_MS = 24
+ INIT_AND_FIND_MS = 25
+ FOUND_INSTRUMENTATION_TEST = 26
+ FOUND_TARGET_ARTIFACTS = 27
+ FIND_TEST_IN_DEPS=28
+ FULL_GENERATE_BAZEL_WORKSPACE_TIME = 29
+ # Below detect types are used for determine build conditions:
+ # 1. *_CLEAN_OUT: when out/ dir is empty or does not exist.
+ # 2. *_BPMK_CHANGE: when any Android.bp/Android.mk has changed.
+ # 3. *_ENV_CHANGE: when build-related variable has changed.
+ # 4. *_SRC_CHANGE: when source code has changed.
+ # 5. *_OTHER: none of above reasons that triggers renewal of ninja file.
+ # 6. *_INCREMENTAL: the build doesn't need to renew ninja file.
+ MODULE_INFO_CLEAN_OUT = 30
+ MODULE_INFO_BPMK_CHANGE = 31
+ MODULE_INFO_ENV_CHANGE = 32
+ MODULE_INFO_SRC_CHANGE = 33
+ MODULE_INFO_OTHER = 34
+ MODULE_INFO_INCREMENTAL = 35
+ BUILD_CLEAN_OUT = 36
+ BUILD_BPMK_CHANGE = 37
+ BUILD_ENV_CHANGE = 38
+ BUILD_SRC_CHANGE = 39
+ BUILD_OTHER = 40
+ BUILD_INCREMENTAL = 41
+ BUILD_TIME_PER_TARGET = 42
+ MODULE_INFO_GEN_NINJA = 43
+ BUILD_GEN_NINJA = 44
@unique
class ExitCode(IntEnum):
@@ -67,14 +97,15 @@
EXIT_BEFORE_MAIN = 10
DEVICE_NOT_FOUND = 11
MIXED_TYPE_FILTER = 12
+ INPUT_TEST_REFERENCE_ERROR = 13
+ CONFIG_INVALID_FORMAT = 14
+ INVALID_SMART_TESTING_PATH = 15
+    # Codes > 100 are reserved for collecting data only; the run
+    # doesn't actually finish at that point.
+ COLLECT_ONLY_FILE_NOT_FOUND = 101
@unique
class FilterType(Enum):
"""An Enum class for filter types"""
WILDCARD_FILTER = 'wildcard class_method'
REGULAR_FILTER = 'regular class_method'
-
-# TODO: (b/218441706) Convert AtestEnum to a real Enum class.
-class AtestEnum(tuple):
- """enum library isn't a Python 2.7 built-in, so roll our own."""
- __getattr__ = tuple.index
diff --git a/atest/atest_error.py b/atest/atest_error.py
index 7ab8b5f..30afad3 100644
--- a/atest/atest_error.py
+++ b/atest/atest_error.py
@@ -35,6 +35,9 @@
class TooManyMethodsError(TestDiscoveryException):
"""Raised when input string contains more than one # character."""
+class MoreThanOneClassError(TestDiscoveryException):
+ """Raised when multiple classes given in 'classA,classB' pattern."""
+
class MethodWithoutClassError(TestDiscoveryException):
"""Raised when method is appended via # but no class file specified."""
diff --git a/atest/atest_execution_info.py b/atest/atest_execution_info.py
index 987b9d0..fbdc315 100644
--- a/atest/atest_execution_info.py
+++ b/atest/atest_execution_info.py
@@ -24,11 +24,11 @@
import os
import sys
-import atest_utils as au
-import constants
+import atest.atest_utils as au
+from atest import constants
-from atest_enum import ExitCode
-from metrics import metrics_utils
+from atest.atest_enum import ExitCode
+from atest.metrics import metrics_utils
_ARGS_KEY = 'args'
_STATUS_PASSED_KEY = 'PASSED'
@@ -280,30 +280,37 @@
"""
self.args = args
self.work_dir = work_dir
- self.result_file = None
+ self.result_file_obj = None
self.args_ns = args_ns
+ self.test_result = os.path.join(self.work_dir, _TEST_RESULT_NAME)
def __enter__(self):
"""Create and return information file object."""
- full_file_name = os.path.join(self.work_dir, _TEST_RESULT_NAME)
try:
- self.result_file = open(full_file_name, 'w')
+ self.result_file_obj = open(self.test_result, 'w')
except IOError:
- logging.error('Cannot open file %s', full_file_name)
- return self.result_file
+ logging.error('Cannot open file %s', self.test_result)
+ return self.result_file_obj
def __exit__(self, exit_type, value, traceback):
"""Write execution information and close information file."""
- if self.result_file and not has_non_test_options(self.args_ns):
- self.result_file.write(AtestExecutionInfo.
+ if self.result_file_obj and not has_non_test_options(self.args_ns):
+ self.result_file_obj.write(AtestExecutionInfo.
_generate_execution_detail(self.args))
- self.result_file.close()
+ self.result_file_obj.close()
+ au.prompt_suggestions(self.test_result)
+ au.generate_print_result_html(self.test_result)
symlink_latest_result(self.work_dir)
main_module = sys.modules.get(_MAIN_MODULE_KEY)
- main_exit_code = getattr(main_module, _EXIT_CODE_ATTR, ExitCode.ERROR)
- if main_exit_code == ExitCode.SUCCESS:
+ main_exit_code = value.code if isinstance(value, SystemExit) else (
+ getattr(main_module, _EXIT_CODE_ATTR, ExitCode.ERROR))
+ # Do not send stacktrace with send_exit_event when exit code is not
+ # ERROR.
+ if main_exit_code != ExitCode.ERROR:
+ logging.debug('send_exit_event:%s', main_exit_code)
metrics_utils.send_exit_event(main_exit_code)
else:
+ logging.debug('handle_exc_and_send_exit_event:%s', main_exit_code)
metrics_utils.handle_exc_and_send_exit_event(main_exit_code)
@staticmethod
diff --git a/atest/atest_execution_info_unittest.py b/atest/atest_execution_info_unittest.py
index 1694b35..c6749e7 100755
--- a/atest/atest_execution_info_unittest.py
+++ b/atest/atest_execution_info_unittest.py
@@ -21,10 +21,10 @@
import time
import unittest
-import atest_execution_info as aei
-import result_reporter
+from atest import atest_execution_info as aei
+from atest import result_reporter
-from test_runners import test_runner_base
+from atest.test_runners import test_runner_base
RESULT_TEST_TEMPLATE = test_runner_base.TestResult(
runner_name='someRunner',
diff --git a/atest-py2/atest_metrics.py b/atest/atest_flag_list_generator.py
old mode 100755
new mode 100644
similarity index 65%
rename from atest-py2/atest_metrics.py
rename to atest/atest_flag_list_generator.py
index d2ac3ad..23911e7
--- a/atest-py2/atest_metrics.py
+++ b/atest/atest_flag_list_generator.py
@@ -1,6 +1,4 @@
-#!/usr/bin/env python
-#
-# Copyright 2018, The Android Open Source Project
+# Copyright 2022, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,13 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Simple Metrics Functions"""
+"""Script that generates arguments for autocompletion."""
+from atest import atest_arg_parser
-import constants
-import asuite_metrics
-
-
-#pylint: disable=broad-except
-def log_start_event():
- """Log that atest started."""
- asuite_metrics.log_event(constants.METRICS_URL)
+if __name__ == "__main__":
+ parser = atest_arg_parser.AtestArgParser()
+ parser.add_atest_args()
+ print("\n".join(parser.get_args()))
diff --git a/atest/atest_integration_tests.py b/atest/atest_integration_tests.py
index 2230e82..d59a8d2 100755
--- a/atest/atest_integration_tests.py
+++ b/atest/atest_integration_tests.py
@@ -36,13 +36,15 @@
import time
import unittest
-import constants
_TEST_RUN_DIR_PREFIX = 'atest_integration_tests_%s_'
_LOG_FILE = 'integration_tests.log'
_FAILED_LINE_LIMIT = 50
_EXIT_TEST_FAILED = 1
-_ALTERNATIVES = ('-dev', '-py2')
+_ALTERNATIVES = {'-dev'}
+_INTEGRATION_TESTS = [os.path.join(
+ os.environ.get('ANDROID_BUILD_TOP', os.getcwd()),
+ 'tools/asuite/atest/test_plans/INTEGRATION_TESTS')]
class ATestIntegrationTest(unittest.TestCase):
"""ATest Integration Test Class."""
@@ -140,7 +142,7 @@
print('Running tests with {}\n'.format(ATestIntegrationTest.EXECUTABLE))
try:
LOG_PATH = os.path.join(create_test_run_dir(), _LOG_FILE)
- for TEST_PLANS in constants.INTEGRATION_TESTS:
+ for TEST_PLANS in _INTEGRATION_TESTS:
with open(TEST_PLANS) as test_plans:
for test in test_plans:
# Skip test when the line startswith #.
diff --git a/atest/atest.py b/atest/atest_main.py
similarity index 73%
rename from atest/atest.py
rename to atest/atest_main.py
index 794be87..277b9ac 100755
--- a/atest/atest.py
+++ b/atest/atest_main.py
@@ -24,7 +24,9 @@
"""
# pylint: disable=line-too-long
+# pylint: disable=no-member
# pylint: disable=too-many-lines
+# pylint: disable=wrong-import-position
from __future__ import print_function
@@ -35,30 +37,35 @@
import tempfile
import time
import platform
-import re
+from typing import Dict, List
+
+from dataclasses import dataclass
from pathlib import Path
-import atest_arg_parser
-import atest_configs
-import atest_error
-import atest_execution_info
-import atest_utils
-import bazel_mode
-import bug_detector
-import cli_translator
-import constants
-import module_info
-import result_reporter
-import test_runner_handler
+from atest import atest_arg_parser
+from atest import atest_configs
+from atest import atest_error
+from atest import atest_execution_info
+from atest import atest_utils
+from atest import bazel_mode
+from atest import bug_detector
+from atest import cli_translator
+from atest import constants
+from atest import module_info
+from atest import result_reporter
+from atest import test_runner_handler
-from atest_enum import DetectType, ExitCode
-from metrics import metrics
-from metrics import metrics_base
-from metrics import metrics_utils
-from test_finders import test_finder_utils
-from test_runners import regression_test_runner
-from tools import atest_tools as at
+from atest.atest_enum import DetectType, ExitCode
+from atest.coverage import coverage
+from atest.metrics import metrics
+from atest.metrics import metrics_base
+from atest.metrics import metrics_utils
+from atest.test_finders import test_finder_utils
+from atest.test_runners import regression_test_runner
+from atest.test_runners import roboleaf_test_runner
+from atest.test_finders.test_info import TestInfo
+from atest.tools import atest_tools as at
EXPECTED_VARS = frozenset([
constants.ANDROID_BUILD_TOP,
@@ -75,11 +82,6 @@
RUN_HEADER_FMT = '\nRunning %(test_count)d %(test_type)s.'
TEST_COUNT = 'test_count'
TEST_TYPE = 'test_type'
-MAINLINE_MODULES_EXT_RE = re.compile(r'(.apex|.apks|.apk)$')
-# Tasks that must run in the build time but unable to build by soong.
-# (e.g subprocesses that invoke host commands.)
-ACLOUD_CREATE = at.acloud_create
-INDEX_TARGETS = at.index_targets
END_OF_OPTION = '--'
HAS_IGNORED_ARGS = False
# Conditions that atest should exit without sending result to metrics.
@@ -89,6 +91,56 @@
ExitCode.AVD_CREATE_FAILURE,
ExitCode.AVD_INVALID_ARGS]
+@dataclass
+class Steps:
+ """A Dataclass that stores steps and shows step assignments."""
+ _build: bool
+ _install: bool
+ _test: bool
+
+ def has_build(self):
+ """Return whether build is in steps."""
+ return self._build
+
+ def is_build_only(self):
+ """Return whether build is the only one in steps."""
+ return self._build and not any((self._test, self._install))
+
+ def has_install(self):
+ """Return whether install is in steps."""
+ return self._install
+
+ def has_test(self):
+        """Return whether test is in steps."""
+ return self._test
+
+ def is_test_only(self):
+        """Return whether test is the only step (no build or install)."""
+ return self._test and not any((self._build, self._install))
+
+
+def parse_steps(args: atest_arg_parser.AtestArgParser) -> Steps:
+ """Return Steps object.
+
+ Args:
+ args: an AtestArgParser object.
+
+ Returns:
+ Step object that stores the boolean of build, install and test.
+ """
+ # Implicitly running 'build', 'install' and 'test' when args.steps is None.
+ if not args.steps:
+ return Steps(True, True, True)
+ build = constants.BUILD_STEP in args.steps
+ test = constants.TEST_STEP in args.steps
+ install = constants.INSTALL_STEP in args.steps
+ if install and not test:
+ logging.warning('Installing without test step is currently not '
+ 'supported; Atest will proceed testing!')
+ test = True
+ return Steps(build, install, test)
+
+
def _get_args_from_config():
"""Get customized atest arguments in the config file.
@@ -144,7 +196,7 @@
argv: A list of arguments.
Returns:
- An argspace.Namespace class instance holding parsed args.
+ An argparse.Namespace class instance holding parsed args.
"""
# Store everything after '--' in custom_args.
pruned_argv = argv
@@ -221,8 +273,7 @@
extra_args = {}
if args.wait_for_debugger:
extra_args[constants.WAIT_FOR_DEBUGGER] = None
- steps = args.steps or constants.ALL_STEPS
- if constants.INSTALL_STEP not in steps:
+ if not parse_steps(args).has_install():
extra_args[constants.DISABLE_INSTALL] = None
# The key and its value of the dict can be called via:
# if args.aaaa:
@@ -231,7 +282,9 @@
'annotation_filter': constants.ANNOTATION_FILTER,
'bazel_arg': constants.BAZEL_ARG,
'collect_tests_only': constants.COLLECT_TESTS_ONLY,
+ 'experimental_coverage': constants.COVERAGE,
'custom_args': constants.CUSTOM_ARGS,
+ 'device_only': constants.DEVICE_ONLY,
'disable_teardown': constants.DISABLE_TEARDOWN,
'disable_upload_result': constants.DISABLE_UPLOAD_RESULT,
'dry_run': constants.DRY_RUN,
@@ -248,6 +301,7 @@
'rerun_until_failure': constants.RERUN_UNTIL_FAILURE,
'retry_any_failure': constants.RETRY_ANY_FAILURE,
'serial': constants.SERIAL,
+ 'auto_ld_library_path': constants.LD_LIBRARY_PATH,
'sharding': constants.SHARDING,
'test_filter': constants.TEST_FILTER,
'test_timeout': constants.TEST_TIMEOUT,
@@ -255,6 +309,7 @@
'tf_debug': constants.TF_DEBUG,
'tf_template': constants.TF_TEMPLATE,
'user_type': constants.USER_TYPE,
+ 'verbose': constants.VERBOSE,
'verify_env_variable': constants.VERIFY_ENV_VARIABLE}
not_match = [k for k in arg_maps if k not in vars(args)]
if not_match:
@@ -338,6 +393,11 @@
args: parsed args object.
test_info: TestInfo object.
"""
+ # No need to check device availability if the user does not acquire to test.
+ if not parse_steps(args).has_test():
+ return
+ if args.no_checking_device:
+ return
all_device_modes = {x.get_supported_exec_mode() for x in test_infos}
device_tests = [x.test_name for x in test_infos
if x.get_supported_exec_mode() != constants.DEVICELESS_TEST]
@@ -384,7 +444,7 @@
regression detection.
Args:
- args: parsed args object.
+ args: An argparse.Namespace object.
Returns:
True if there are tests to run, false otherwise.
@@ -584,6 +644,11 @@
atest_utils.colorful_print(
'Option `--host` specified. Skip running device tests.',
constants.MAGENTA)
+ elif extra_args.get(constants.DEVICE_ONLY):
+ test_runs = [(device_test_infos, extra_args, DEVICE_TESTS)]
+ atest_utils.colorful_print(
+ 'Option `--device-only` specified. Skip running deviceless tests.',
+ constants.MAGENTA)
else:
test_runs.append((device_test_infos, extra_args, DEVICE_TESTS))
@@ -634,8 +699,10 @@
A list of test commands.
"""
all_run_cmds = []
- for test_runner, tests in test_runner_handler.group_tests_by_test_runners(test_infos):
- runner = test_runner(results_dir, mod_info=mod_info)
+ for test_runner, tests in test_runner_handler.group_tests_by_test_runners(
+ test_infos):
+ runner = test_runner(results_dir, mod_info=mod_info,
+ extra_args=extra_args)
run_cmds = runner.generate_run_commands(tests, extra_args)
for run_cmd in run_cmds:
all_run_cmds.append(run_cmd)
@@ -671,7 +738,7 @@
--latest_result, etc.
Args:
- args: An argspace.Namespace class instance holding parsed args.
+ args: An argparse.Namespace object.
"""
if not _is_inside_android_root():
atest_utils.colorful_print(
@@ -679,9 +746,7 @@
constants.ANDROID_BUILD_TOP), constants.RED)
sys.exit(ExitCode.OUTSIDE_ROOT)
if args.version:
- if os.path.isfile(constants.VERSION_FILE):
- with open(constants.VERSION_FILE, encoding='utf8') as version_file:
- print(version_file.read())
+ print(atest_utils.get_atest_version())
sys.exit(ExitCode.SUCCESS)
if args.help:
atest_arg_parser.print_epilog_text()
@@ -714,7 +779,7 @@
"""Method which process --dry-run argument.
Args:
- args: An argspace.Namespace class instance holding parsed args.
+ args: An argparse.Namespace class instance holding parsed args.
result_dir: A string path of the results dir.
extra_args: A dict of extra args for test runners to utilize.
test_infos: A list of test_info.
@@ -722,8 +787,18 @@
Returns:
Exit code.
"""
- test_commands = atest_utils.get_verify_key(args.tests, extra_args)
dry_run_cmds = _dry_run(results_dir, extra_args, test_infos, mod_info)
+ if args.generate_runner_cmd:
+ dry_run_cmd_str = ' '.join(dry_run_cmds)
+ tests_str = ' '.join(args.tests)
+ test_commands = atest_utils.gen_runner_cmd_to_file(tests_str,
+ dry_run_cmd_str)
+ print("add command %s to file %s" % (
+ atest_utils.colorize(test_commands, constants.GREEN),
+ atest_utils.colorize(constants.RUNNER_COMMAND_PATH,
+ constants.GREEN)))
+ else:
+ test_commands = atest_utils.get_verify_key(args.tests, extra_args)
if args.verify_cmd_mapping:
try:
atest_utils.handle_test_runner_cmd(test_commands,
@@ -756,63 +831,93 @@
return shrank_build_targets
# pylint: disable=protected-access
-def need_rebuild_module_info(force_build):
+def need_rebuild_module_info(args: atest_arg_parser.AtestArgParser) -> bool:
"""Method that tells whether we need to rebuild module-info.json or not.
Args:
- force_build: A boolean flag that determine everything.
+ args: an AtestArgParser object.
+
+ +-----------------+
+ | Explicitly pass | yes
+ | '--test' +-------> False (won't rebuild)
+ +--------+--------+
+ | no
+ V
+ +-------------------------+
+ | Explicitly pass | yes
+    | '--rebuild-module-info' +-------> True (forcibly rebuild)
+ +--------+----------------+
+ | no
+ V
+ +-------------------+
+ | Build files | no
+ | integrity is good +-------> True (smartly rebuild)
+ +--------+----------+
+ | yes
+ V
+ False (won't rebuild)
Returns:
- - When force_build is True, return True (will rebuild module-info).
- - When force_build is False, then check the consistency of build files.
- If the checksum file of build files is missing, considered check False
- (need to rebuild module-info.json)
+        True for a forced/smart rebuild, otherwise False (no rebuilding).
"""
- logging.debug('Examinating the consistency of build files...')
- if force_build:
+ if not parse_steps(args).has_build():
+ logging.debug('\"--test\" mode detected, will not rebuild module-info.')
+ return False
+ if args.rebuild_module_info:
msg = (f'`{constants.REBUILD_MODULE_INFO_FLAG}` is no longer needed '
f'since Atest can smartly rebuild {module_info._MODULE_INFO} '
r'only when needed.')
atest_utils.colorful_print(msg, constants.YELLOW)
return True
- if atest_utils.check_md5(constants.BUILDFILES_MD5, missing_ok=False):
- logging.debug('All build files stay untouched.')
- return False
- logging.debug('Found build files were changed.')
- return True
+ logging.debug('Examinating the consistency of build files...')
+ if not atest_utils.build_files_integrity_is_ok():
+ logging.debug('Found build files were changed.')
+ return True
+ return False
-def acloud_create_validator(results_dir, args):
- """Check lunch'd target before running 'acloud create'.
+def need_run_index_targets(args, extra_args):
+    """Method that determines whether Atest needs to run index_targets or not.
+
+
+    There are 3 conditions under which Atest does not run index_targets():
+ 1. dry-run flags were found.
+ 2. VERIFY_ENV_VARIABLE was found in extra_args.
+ 3. --test flag was found.
Args:
- results_dir: A string of the results directory.
- args: A list of arguments.
+        args: An argparse.Namespace object holding parsed args.
+        extra_args: A dict of extra args for test runners.
Returns:
- If the target is valid:
- A tuple of (multiprocessing.Process,
- string of report file path)
- else:
- None, None
+ True when none of the above conditions were found.
"""
- if not any((args.acloud_create, args.start_avd)):
- return None, None
- if args.start_avd:
- args.acloud_create = ['--num=1']
- acloud_args = ' '.join(args.acloud_create)
- target = os.getenv('TARGET_PRODUCT', "")
- if 'cf_x86' in target:
- report_file = at.get_report_file(results_dir, acloud_args)
- acloud_proc = atest_utils.run_multi_proc(
- func=ACLOUD_CREATE,
- args=[report_file],
- kwargs={'args':acloud_args,
- 'no_metrics_notice':args.no_metrics})
- return acloud_proc, report_file
- atest_utils.colorful_print(
- '{} is not cf_x86 family; will not create any AVD.'.format(target),
- constants.RED)
- return None, None
+ ignore_args = (args.update_cmd_mapping, args.verify_cmd_mapping, args.dry_run)
+ if any(ignore_args):
+ return False
+ if extra_args.get(constants.VERIFY_ENV_VARIABLE, False):
+ return False
+ if not parse_steps(args).has_build():
+ return False
+ return True
+
+def _all_tests_are_bazel_buildable(
+ roboleaf_tests: Dict[str, TestInfo],
+ tests: List[str]) -> bool:
+ """Method that determines whether all tests have been fully converted to
+ bazel mode (roboleaf).
+
+ If all tests are fully converted, then indexing, generating mod-info, and
+ generating atest bazel workspace can be skipped since dependencies are
+ mapped already with `b`.
+
+ Args:
+ roboleaf_tests: A dictionary keyed by testname of roboleaf tests.
+ tests: A list of testnames.
+
+ Returns:
+        True if every given test is present in roboleaf_tests.
+ """
+ return roboleaf_tests and set(tests) == set(roboleaf_tests)
def perm_consistency_metrics(test_infos, mod_info, args):
"""collect inconsistency between preparer and device root permission.
@@ -820,7 +925,7 @@
Args:
test_infos: TestInfo obj.
mod_info: ModuleInfo obj.
- args: An argspace.Namespace class instance holding parsed args.
+ args: An argparse.Namespace class instance holding parsed args.
"""
try:
# whether device has root permission
@@ -835,6 +940,17 @@
logging.debug('perm_consistency_metrics raised exception: %s', err)
return
+
+def set_build_output_mode(mode: atest_utils.BuildOutputMode):
+ """Update environment variable dict accordingly to args.build_output."""
+ # Changing this variable does not retrigger builds.
+ atest_utils.update_build_env(
+ {'ANDROID_QUIET_BUILD': 'true',
+         # (b/271654778) Show the reasons why the ninja file was regenerated.
+ 'SOONG_UI_NINJA_ARGS': '-d explain',
+ 'BUILD_OUTPUT_MODE': mode.value})
+
+
def get_device_count_config(test_infos, mod_info):
"""Get the amount of desired devices from the test config.
@@ -855,6 +971,23 @@
max_count = max(len(devices), max_count)
return max_count
+
+def _is_auto_shard_test(test_infos):
+ """Determine whether the given tests are in shardable test list.
+
+ Args:
+ test_infos: TestInfo objects.
+
+ Returns:
+ True if test in auto shardable list.
+ """
+ shardable_tests = atest_utils.get_local_auto_shardable_tests()
+ for test_info in test_infos:
+ if test_info.test_name in shardable_tests:
+ return True
+ return False
+
+
# pylint: disable=too-many-statements
# pylint: disable=too-many-branches
# pylint: disable=too-many-return-statements
@@ -864,41 +997,81 @@
Args:
argv: A list of arguments.
results_dir: A directory which stores the ATest execution information.
- args: An argspace.Namespace class instance holding parsed args.
+ args: An argparse.Namespace class instance holding parsed args.
Returns:
Exit code.
"""
+ _begin_time = time.time()
+
+ # Sets coverage environment variables.
+ if args.experimental_coverage:
+ atest_utils.update_build_env(coverage.build_env_vars())
+ set_build_output_mode(args.build_output)
+
_configure_logging(args.verbose)
_validate_args(args)
metrics_utils.get_start_time()
- os_pyver = '{}:{}'.format(platform.platform(), platform.python_version())
+ os_pyver = (f'{platform.platform()}:{platform.python_version()}/'
+ f'{atest_utils.get_manifest_branch(True)}:'
+ f'{atest_utils.get_atest_version()}')
metrics.AtestStartEvent(
command_line=' '.join(argv),
test_references=args.tests,
cwd=os.getcwd(),
os=os_pyver)
_non_action_validator(args)
- proc_acloud, report_file = acloud_create_validator(results_dir, args)
+
+ proc_acloud, report_file = None, None
+ if any((args.acloud_create, args.start_avd)):
+ proc_acloud, report_file = at.acloud_create_validator(results_dir, args)
is_clean = not os.path.exists(
os.environ.get(constants.ANDROID_PRODUCT_OUT, ''))
- # Do not index targets while the users intend to dry-run tests.
- dry_run_args = (args.update_cmd_mapping, args.verify_cmd_mapping, args.dry_run)
extra_args = get_extra_args(args)
verify_env_variables = extra_args.get(constants.VERIFY_ENV_VARIABLE, False)
+
+ # Gather roboleaf tests now to see if we can skip mod info generation.
+ mod_info = module_info.ModuleInfo(no_generate=True)
+ if args.roboleaf_mode != roboleaf_test_runner.BazelBuildMode.OFF:
+ mod_info.roboleaf_tests = roboleaf_test_runner.RoboleafTestRunner(
+ results_dir).roboleaf_eligible_tests(
+ args.roboleaf_mode,
+ args.tests)
+ all_tests_are_bazel_buildable = _all_tests_are_bazel_buildable(
+ mod_info.roboleaf_tests,
+ args.tests)
+
+ # Run Test Mapping or coverage by no-bazel-mode.
+ if atest_utils.is_test_mapping(args) or args.experimental_coverage:
+ atest_utils.colorful_print('Not running using bazel-mode.', constants.YELLOW)
+ args.bazel_mode = False
+
proc_idx = None
- if not (any(dry_run_args) or verify_env_variables):
- proc_idx = atest_utils.run_multi_proc(INDEX_TARGETS)
- smart_rebuild = need_rebuild_module_info(args.rebuild_module_info)
- mod_start = time.time()
- mod_info = module_info.ModuleInfo(force_build=smart_rebuild)
- metrics.LocalDetectEvent(detect_type=DetectType.MODULE_INFO_INIT_TIME,
- result=int(time.time() - mod_start))
- atest_utils.generate_buildfiles_checksum()
- if args.bazel_mode:
- bazel_mode.generate_bazel_workspace(
- mod_info,
- enabled_features=set(args.bazel_mode_features or []))
+ if not all_tests_are_bazel_buildable:
+ # Do not index targets while the users intend to dry-run tests.
+ if need_run_index_targets(args, extra_args):
+ proc_idx = atest_utils.run_multi_proc(at.index_targets)
+ smart_rebuild = need_rebuild_module_info(args)
+
+ mod_start = time.time()
+ mod_info = module_info.ModuleInfo(force_build=smart_rebuild)
+ mod_stop = time.time() - mod_start
+ metrics.LocalDetectEvent(detect_type=DetectType.MODULE_INFO_INIT_MS,
+ result=int(mod_stop * 1000))
+ atest_utils.run_multi_proc(func=mod_info._save_module_info_checksum)
+ atest_utils.run_multi_proc(
+ func=atest_utils.generate_buildfiles_checksum,
+ args=[mod_info.module_index.parent])
+
+ if args.bazel_mode:
+ start = time.time()
+ bazel_mode.generate_bazel_workspace(
+ mod_info,
+ enabled_features=set(args.bazel_mode_features or []))
+ metrics.LocalDetectEvent(
+ detect_type=DetectType.BAZEL_WORKSPACE_GENERATE_TIME,
+ result=int(time.time() - start))
+
translator = cli_translator.CLITranslator(
mod_info=mod_info,
print_cache_msg=not args.clear_cache,
@@ -908,14 +1081,17 @@
if args.list_modules:
_print_testable_modules(mod_info, args.list_modules)
return ExitCode.SUCCESS
- build_targets = set()
- mm_build_targets = set()
test_infos = set()
+ dry_run_args = (args.update_cmd_mapping, args.verify_cmd_mapping,
+ args.dry_run, args.generate_runner_cmd)
if _will_run_tests(args):
- if proc_idx:
+ # (b/242567487) index_targets may finish after cli_translator; to
+ # mitigate the overhead, the main process waits until it finishes when
+ # no index files are available (e.g. fresh repo sync).
+ if proc_idx and not atest_utils.has_index_files():
proc_idx.join()
find_start = time.time()
- build_targets, test_infos = translator.translate(args)
+ test_infos = translator.translate(args)
given_amount = len(args.serial) if args.serial else 0
required_amount = get_device_count_config(test_infos, mod_info)
args.device_count_config = required_amount
@@ -929,10 +1105,7 @@
f'but {given_amount} were given.',
constants.RED)
return 0
- # Remove MODULE-IN-* from build targets if not bazel mode and user not
- # force set --use-modules-in.
- if not args.bazel_mode and not args.use_modules_in:
- build_targets = _exclude_modules_in_targets(build_targets)
+
find_duration = time.time() - find_start
if not test_infos:
return ExitCode.TEST_NOT_FOUND
@@ -944,14 +1117,24 @@
extra_args = get_extra_args(args)
else:
_validate_tm_tests_exec_mode(args, test_infos)
- for test_info in test_infos:
- if test_info.mainline_modules:
- for module in test_info.mainline_modules.split('+'):
- mm_build_targets.add(re.sub(
- MAINLINE_MODULES_EXT_RE, '', module))
+ # Detect auto sharding and trigger creating AVDs
+ if args.auto_sharding and _is_auto_shard_test(test_infos):
+ extra_args.update({constants.SHARDING: constants.SHARD_NUM})
+ if not (any(dry_run_args) or verify_env_variables):
+ # TODO: check existing devices.
+ args.acloud_create = [f'--num-instances={constants.SHARD_NUM}']
+ proc_acloud, report_file = at.acloud_create_validator(
+ results_dir, args)
+ # TODO: change to another approach that put constants.CUSTOM_ARGS in the
+ # end of command to make sure that customized args can override default
+ # options.
# For TEST_MAPPING, set timeout to 600000ms.
- if args.test_timeout is None:
+ custom_timeout = False
+ for custom_args in args.custom_args:
+ if '-timeout' in custom_args:
+ custom_timeout = True
+ if args.test_timeout is None and not custom_timeout:
if is_from_test_mapping(test_infos):
extra_args.update({constants.TEST_TIMEOUT: 600000})
logging.debug(
@@ -960,8 +1143,13 @@
if args.info:
return _print_test_info(mod_info, test_infos)
- build_targets |= test_runner_handler.get_test_runner_reqs(
+
+ build_targets = test_runner_handler.get_test_runner_reqs(
mod_info, test_infos, extra_args=extra_args)
+ # Remove MODULE-IN-* from build targets by default.
+ if not args.use_modules_in:
+ build_targets = _exclude_modules_in_targets(build_targets)
+
if any(dry_run_args):
if not verify_env_variables:
return _dry_run_validator(args, results_dir, extra_args, test_infos,
@@ -974,22 +1162,28 @@
return 0
if args.detect_regression:
build_targets |= (regression_test_runner.RegressionTestRunner('')
- .get_test_runner_build_reqs())
- # args.steps will be None if none of -bit set, else list of params set.
- steps = args.steps if args.steps else constants.ALL_STEPS
- if build_targets and constants.BUILD_STEP in steps:
+ .get_test_runner_build_reqs([]))
+
+ steps = parse_steps(args)
+ if build_targets and steps.has_build():
+ if args.experimental_coverage:
+ build_targets.add('jacoco_to_lcov_converter')
+
# Add module-info.json target to the list of build targets to keep the
# file up to date.
build_targets.add(mod_info.module_info_target)
+
build_start = time.time()
- success = atest_utils.build(build_targets, verbose=args.verbose,
- mm_build_targets=mm_build_targets)
+ success = atest_utils.build(build_targets)
build_duration = time.time() - build_start
- build_targets.update(mm_build_targets)
metrics.BuildFinishEvent(
duration=metrics_utils.convert_duration(build_duration),
success=success,
targets=build_targets)
+ metrics.LocalDetectEvent(
+ detect_type=DetectType.BUILD_TIME_PER_TARGET,
+ result=int(build_duration/len(build_targets))
+ )
rebuild_module_info = DetectType.NOT_REBUILD_MODULE_INFO
if is_clean:
rebuild_module_info = DetectType.CLEAN_BUILD
@@ -1004,35 +1198,24 @@
return ExitCode.BUILD_FAILURE
if proc_acloud:
proc_acloud.join()
- status = at.probe_acloud_status(report_file)
+ status = at.probe_acloud_status(
+ report_file, find_duration + build_duration)
if status != 0:
return status
- acloud_duration = at.get_acloud_duration(report_file)
- find_build_duration = find_duration + build_duration
- if find_build_duration - acloud_duration >= 0:
- # find+build took longer, saved acloud create time.
- logging.debug('Saved acloud create time: %ss.',
- acloud_duration)
- metrics.LocalDetectEvent(
- detect_type=DetectType.ACLOUD_CREATE,
- result=round(acloud_duration))
- else:
- # acloud create took longer, saved find+build time.
- logging.debug('Saved Find and Build time: %ss.',
- find_build_duration)
- metrics.LocalDetectEvent(
- detect_type=DetectType.FIND_BUILD,
- result=round(find_build_duration))
# After build step 'adb' command will be available, and stop forward to
# Tradefed if the tests require a device.
_validate_adb_devices(args, test_infos)
- elif constants.TEST_STEP not in steps:
- logging.warning('Install step without test step currently not '
- 'supported, installing AND testing instead.')
- steps.append(constants.TEST_STEP)
+
tests_exit_code = ExitCode.SUCCESS
test_start = time.time()
- if constants.TEST_STEP in steps:
+ if steps.has_test():
+ # Only send duration to metrics when no --build.
+ if not steps.has_build():
+ _init_and_find = time.time() - _begin_time
+ logging.debug('Initiation and finding tests took %ss', _init_and_find)
+ metrics.LocalDetectEvent(
+ detect_type=DetectType.INIT_AND_FIND_MS,
+ result=int(_init_and_find*1000))
perm_consistency_metrics(test_infos, mod_info, args)
if not is_from_test_mapping(test_infos):
tests_exit_code, reporter = test_runner_handler.run_all_tests(
@@ -1041,6 +1224,8 @@
else:
tests_exit_code = _run_test_mapping_tests(
results_dir, test_infos, extra_args, mod_info)
+ if args.experimental_coverage:
+ coverage.generate_coverage_report(results_dir, test_infos, mod_info)
if args.detect_regression:
regression_args = _get_regression_detection_args(args, results_dir)
# TODO(b/110485713): Should not call run_tests here.
@@ -1062,16 +1247,18 @@
test=[])
if tests_exit_code != ExitCode.SUCCESS:
tests_exit_code = ExitCode.TEST_FAILURE
+
return tests_exit_code
if __name__ == '__main__':
RESULTS_DIR = make_test_run_dir()
- final_args = [*sys.argv[1:], *_get_args_from_config()]
if END_OF_OPTION in sys.argv:
end_position = sys.argv.index(END_OF_OPTION)
final_args = [*sys.argv[1:end_position],
*_get_args_from_config(),
*sys.argv[end_position:]]
+ else:
+ final_args = [*sys.argv[1:], *_get_args_from_config()]
if final_args != sys.argv[1:]:
print('The actual cmd will be: \n\t{}\n'.format(
atest_utils.colorize("atest " + " ".join(final_args),
@@ -1090,7 +1277,7 @@
final_args, RESULTS_DIR,
atest_configs.GLOBAL_ARGS) as result_file:
if not atest_configs.GLOBAL_ARGS.no_metrics:
- atest_utils.print_data_collection_notice()
+ metrics_utils.print_data_collection_notice()
USER_FROM_TOOL = os.getenv(constants.USER_FROM_TOOL, '')
if USER_FROM_TOOL == '':
metrics_base.MetricsBase.tool_name = constants.TOOL_NAME
@@ -1110,4 +1297,23 @@
result=DETECTOR.caught_result)
if result_file:
print("Run 'atest --history' to review test result history.")
+
+ # Only ask internal Google users to take this survey.
+ if metrics_base.get_user_type() == metrics_base.INTERNAL_USER:
+ # The bazel_mode value will only be false if the user applies --no-bazel-mode.
+ if not atest_configs.GLOBAL_ARGS.bazel_mode:
+ MESSAGE = ('\nDear `--no-bazel-mode` users,\n'
+ 'We are conducting a survey to understand why you are '
+ 'still using `--no-bazel-mode`. The survey should '
+ 'take less than 3 minutes and your responses will be '
+ 'kept confidential and will only be used to improve '
+ 'our understanding of the situation. Please click on '
+ 'the link below to begin the survey:\n\n'
+ 'http://go/atest-no-bazel-survey\n\n'
+ 'Thanks for your time and feedback.\n\n'
+ 'Sincerely,\n'
+ 'The ATest Team')
+
+ print(atest_utils.colorize(MESSAGE, constants.BLACK, bp_color=constants.CYAN))
+
sys.exit(EXIT_CODE)
diff --git a/atest/atest_main_unittest.py b/atest/atest_main_unittest.py
new file mode 100755
index 0000000..49c1223
--- /dev/null
+++ b/atest/atest_main_unittest.py
@@ -0,0 +1,350 @@
+#!/usr/bin/env python3
+#
+# Copyright 2017, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittests for atest."""
+
+# pylint: disable=line-too-long
+
+import datetime
+import os
+import sys
+import tempfile
+import unittest
+
+from importlib import reload
+from io import StringIO
+from unittest import mock
+from pyfakefs import fake_filesystem_unittest
+
+from atest import atest_main
+from atest import atest_utils
+from atest import constants
+from atest import module_info
+
+from atest.metrics import metrics_utils
+from atest.test_finders import test_info
+
+GREEN= '\x1b[1;32m'
+CYAN = '\x1b[1;36m'
+MAGENTA = '\x1b[1;35m'
+END = '\x1b[0m'
+
+
+#pylint: disable=protected-access
+class AtestUnittests(unittest.TestCase):
+ """Unit tests for atest_main.py"""
+
+ @mock.patch('os.environ.get', return_value=None)
+ def test_missing_environment_variables_uninitialized(self, _):
+ """Test _has_environment_variables when no env vars."""
+ self.assertTrue(atest_main._missing_environment_variables())
+
+ @mock.patch('os.environ.get', return_value='out/testcases/')
+ def test_missing_environment_variables_initialized(self, _):
+ """Test _has_environment_variables when env vars."""
+ self.assertFalse(atest_main._missing_environment_variables())
+
+ def test_parse_args(self):
+ """Test _parse_args parses command line args."""
+ test_one = 'test_name_one'
+ test_two = 'test_name_two'
+ custom_arg = '--custom_arg'
+ custom_arg_val = 'custom_arg_val'
+ pos_custom_arg = 'pos_custom_arg'
+
+ # Test out test and custom args are properly retrieved.
+ args = [test_one, test_two, '--', custom_arg, custom_arg_val]
+ parsed_args = atest_main._parse_args(args)
+ self.assertEqual(parsed_args.tests, [test_one, test_two])
+ self.assertEqual(parsed_args.custom_args, [custom_arg, custom_arg_val])
+
+ # Test out custom positional args with no test args.
+ args = ['--', pos_custom_arg, custom_arg_val]
+ parsed_args = atest_main._parse_args(args)
+ self.assertEqual(parsed_args.tests, [])
+ self.assertEqual(parsed_args.custom_args, [pos_custom_arg,
+ custom_arg_val])
+
+ def test_has_valid_test_mapping_args(self):
+ """Test _has_valid_test_mapping_args method."""
+ # Test test mapping related args are not mixed with incompatible args.
+ options_no_tm_support = [
+ ('--generate-baseline', '5'),
+ ('--detect-regression', 'path'),
+ ('--generate-new-metrics', '5')
+ ]
+ tm_options = [
+ '--test-mapping',
+ '--include-subdirs'
+ ]
+
+ for tm_option in tm_options:
+ for no_tm_option, no_tm_option_value in options_no_tm_support:
+ args = [tm_option, no_tm_option]
+ if no_tm_option_value is not None:
+ args.append(no_tm_option_value)
+ parsed_args = atest_main._parse_args(args)
+ self.assertFalse(
+ atest_main._has_valid_test_mapping_args(parsed_args),
+ 'Failed to validate: %s' % args)
+
+ @mock.patch.object(atest_utils, 'get_adb_devices')
+ @mock.patch.object(metrics_utils, 'send_exit_event')
+ def test_validate_exec_mode(self, _send_exit, _devs):
+ """Test _validate_exec_mode."""
+ _devs.return_value = ['127.0.0.1:34556']
+ args = []
+ no_install_test_info = test_info.TestInfo(
+ 'mod', '', set(), data={}, module_class=["JAVA_LIBRARIES"],
+ install_locations=set(['device']))
+ host_test_info = test_info.TestInfo(
+ 'mod', '', set(), data={}, module_class=["NATIVE_TESTS"],
+ install_locations=set(['host']))
+ device_test_info = test_info.TestInfo(
+ 'mod', '', set(), data={}, module_class=["NATIVE_TESTS"],
+ install_locations=set(['device']))
+ both_test_info = test_info.TestInfo(
+ 'mod', '', set(), data={}, module_class=["NATIVE_TESTS"],
+ install_locations=set(['host', 'device']))
+
+ # $atest <Both-support>
+ parsed_args = atest_main._parse_args(args)
+ test_infos = [host_test_info]
+ atest_main._validate_exec_mode(parsed_args, test_infos)
+ self.assertFalse(parsed_args.host)
+
+ # $atest <Both-support> with host_tests set to True
+ parsed_args = atest_main._parse_args([])
+ test_infos = [host_test_info]
+ atest_main._validate_exec_mode(parsed_args, test_infos, host_tests=True)
+ # Make sure the host option is not set.
+ self.assertFalse(parsed_args.host)
+
+ # $atest <Both-support> with host_tests set to False
+ parsed_args = atest_main._parse_args([])
+ test_infos = [host_test_info]
+ atest_main._validate_exec_mode(parsed_args, test_infos, host_tests=False)
+ self.assertFalse(parsed_args.host)
+
+ # $atest <device-only> with host_tests set to False
+ parsed_args = atest_main._parse_args([])
+ test_infos = [device_test_info]
+ atest_main._validate_exec_mode(parsed_args, test_infos, host_tests=False)
+ # Make sure the host option is not set.
+ self.assertFalse(parsed_args.host)
+
+ # $atest <device-only> with host_tests set to True
+ parsed_args = atest_main._parse_args([])
+ test_infos = [device_test_info]
+ self.assertRaises(SystemExit, atest_main._validate_exec_mode,
+ parsed_args, test_infos, host_tests=True)
+
+ # $atest <Both-support>
+ parsed_args = atest_main._parse_args([])
+ test_infos = [both_test_info]
+ atest_main._validate_exec_mode(parsed_args, test_infos)
+ self.assertFalse(parsed_args.host)
+
+ # $atest <no_install_test_info>
+ parsed_args = atest_main._parse_args([])
+ test_infos = [no_install_test_info]
+ atest_main._validate_exec_mode(parsed_args, test_infos)
+ self.assertFalse(parsed_args.host)
+
+ def test_make_test_run_dir(self):
+ """Test make_test_run_dir."""
+ tmp_dir = tempfile.mkdtemp()
+ constants.ATEST_RESULT_ROOT = tmp_dir
+ date_time = None
+
+ work_dir = atest_main.make_test_run_dir()
+ folder_name = os.path.basename(work_dir)
+ date_time = datetime.datetime.strptime('_'.join(folder_name.split('_')[0:2]),
+ atest_main.TEST_RUN_DIR_PREFIX)
+ reload(constants)
+ self.assertTrue(date_time)
+
+
+# pylint: disable=missing-function-docstring
+class AtestUnittestFixture(fake_filesystem_unittest.TestCase):
+ """Fixture for ModuleInfo tests."""
+
+ def setUp(self):
+ self.setUpPyfakefs()
+
+ # pylint: disable=protected-access
+ def create_empty_module_info(self):
+ fake_temp_file_name = next(tempfile._get_candidate_names())
+ self.fs.create_file(fake_temp_file_name, contents='{}')
+ return module_info.ModuleInfo(module_file=fake_temp_file_name)
+
+ def create_module_info(self, modules=None):
+ mod_info = self.create_empty_module_info()
+ modules = modules or []
+
+ for m in modules:
+ mod_info.name_to_module_info[m['module_name']] = m
+
+ return mod_info
+
+ def create_test_info(
+ self,
+ test_name='hello_world_test',
+ test_runner='AtestTradefedRunner',
+ build_targets=None):
+ """Create a test_info.TestInfo object."""
+ if not build_targets:
+ build_targets = set()
+ return test_info.TestInfo(test_name, test_runner, build_targets)
+
+
+class PrintModuleInfoTest(AtestUnittestFixture):
+ """Test conditions for _print_module_info."""
+
+ def tearDown(self):
+ sys.stdout = sys.__stdout__
+
+ @mock.patch('atest.atest_utils._has_colors', return_value=True)
+ def test_print_module_info_from_module_name(self, _):
+ """Test _print_module_info_from_module_name method."""
+ mod_info = self.create_module_info(
+ [module(
+ name='mod1',
+ path=['src/path/mod1'],
+ installed=['installed/path/mod1'],
+ compatibility_suites=['device_test_mod1', 'native_test_mod1']
+ )]
+ )
+ correct_output = (f'{GREEN}mod1{END}\n'
+ f'{CYAN}\tCompatibility suite{END}\n'
+ '\t\tdevice_test_mod1\n'
+ '\t\tnative_test_mod1\n'
+ f'{CYAN}\tSource code path{END}\n'
+ '\t\t[\'src/path/mod1\']\n'
+ f'{CYAN}\tInstalled path{END}\n'
+ '\t\tinstalled/path/mod1\n')
+ capture_output = StringIO()
+ sys.stdout = capture_output
+
+ atest_main._print_module_info_from_module_name(mod_info, 'mod1')
+
+ # Check the function correctly printed module_info in color to stdout
+ self.assertEqual(correct_output, capture_output.getvalue())
+
+ @mock.patch('atest.atest_utils._has_colors', return_value=True)
+ def test_print_test_info(self, _):
+ """Test _print_test_info method."""
+ modules = []
+ for index in {1, 2, 3}:
+ modules.append(
+ module(
+ name=f'mod{index}',
+ path=[f'path/mod{index}'],
+ installed=[f'installed/mod{index}'],
+ compatibility_suites=[f'suite_mod{index}']
+ )
+ )
+ mod_info = self.create_module_info(modules)
+ test_infos = {
+ self.create_test_info(
+ test_name='mod1',
+ test_runner='mock_runner',
+ build_targets={'mod1', 'mod2', 'mod3'},
+ ),
+ }
+ correct_output = (f'{GREEN}mod1{END}\n'
+ f'{CYAN}\tCompatibility suite{END}\n'
+ '\t\tsuite_mod1\n'
+ f'{CYAN}\tSource code path{END}\n'
+ '\t\t[\'path/mod1\']\n'
+ f'{CYAN}\tInstalled path{END}\n'
+ '\t\tinstalled/mod1\n'
+ f'{MAGENTA}\tRelated build targets{END}\n'
+ '\t\tmod1, mod2, mod3\n'
+ f'{GREEN}mod2{END}\n'
+ f'{CYAN}\tCompatibility suite{END}\n'
+ '\t\tsuite_mod2\n'
+ f'{CYAN}\tSource code path{END}\n'
+ '\t\t[\'path/mod2\']\n'
+ f'{CYAN}\tInstalled path{END}\n'
+ '\t\tinstalled/mod2\n'
+ f'{GREEN}mod3{END}\n'
+ f'{CYAN}\tCompatibility suite{END}\n'
+ '\t\tsuite_mod3\n'
+ f'{CYAN}\tSource code path{END}\n'
+ '\t\t[\'path/mod3\']\n'
+ f'{CYAN}\tInstalled path{END}\n'
+ '\t\tinstalled/mod3\n'
+ f'\x1b[1;37m{END}\n')
+ capture_output = StringIO()
+ sys.stdout = capture_output
+
+ # The _print_test_info() will print the module_info of the test_info's
+ # test_name first. Then, print its related build targets. If the build
+ # target was printed before (e.g. build_target == test_info's test_name),
+ # it will skip it and print the next build_target.
+ # Since the build_targets of test_info are mod1, mod2, and
+ # mod3, it will print mod1 first, then mod2, and mod3.
+ #
+ # _print_test_info() calls _print_module_info_from_module_name() to
+ # print the module_info. And _print_module_info_from_module_name()
+ # calls get_module_info() to get the module_info. So we can mock
+ # get_module_info() to achieve that.
+ atest_main._print_test_info(mod_info, test_infos)
+
+ self.assertEqual(correct_output, capture_output.getvalue())
+
+
+# pylint: disable=too-many-arguments
+def module(
+ name=None,
+ path=None,
+ installed=None,
+ classes=None,
+ auto_test_config=None,
+ test_config=None,
+ shared_libs=None,
+ dependencies=None,
+ runtime_dependencies=None,
+ data=None,
+ data_dependencies=None,
+ compatibility_suites=None,
+ host_dependencies=None,
+ srcs=None,
+):
+ name = name or 'libhello'
+
+ m = {}
+
+ m['module_name'] = name
+ m['class'] = classes
+ m['path'] = [path or '']
+ m['installed'] = installed or []
+ m['is_unit_test'] = 'false'
+ m['auto_test_config'] = auto_test_config or []
+ m['test_config'] = test_config or []
+ m['shared_libs'] = shared_libs or []
+ m['runtime_dependencies'] = runtime_dependencies or []
+ m['dependencies'] = dependencies or []
+ m['data'] = data or []
+ m['data_dependencies'] = data_dependencies or []
+ m['compatibility_suites'] = compatibility_suites or []
+ m['host_dependencies'] = host_dependencies or []
+ m['srcs'] = srcs or []
+ return m
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/atest/atest_metrics.py b/atest/atest_metrics.py
index c3ef764..fb425ba 100755
--- a/atest/atest_metrics.py
+++ b/atest/atest_metrics.py
@@ -17,8 +17,8 @@
"""Simple Metrics Functions"""
-import constants
-import asuite_metrics
+from atest import constants
+from atest import asuite_metrics
#pylint: disable=broad-except
diff --git a/atest/atest_run_unittests.py b/atest/atest_run_unittests.py
index dfc8269..45a46c0 100755
--- a/atest/atest_run_unittests.py
+++ b/atest/atest_run_unittests.py
@@ -22,24 +22,21 @@
import unittest
from importlib import import_module
+from unittest import mock
-import atest_utils
-
-COVERAGE = 'coverage'
-RUN_COVERAGE = COVERAGE in sys.argv
-SHOW_MISSING = '--show-missing' in sys.argv
-BUILD_TOP = os.path.join(
- os.path.dirname(os.path.realpath(__file__)),
- '../../..')
-# list of 3rd party libraries
-_PYFAKEFS = os.path.join(BUILD_TOP, 'external', 'python', 'pyfakefs')
-EXTERNAL_PYTHONPATHS = [_PYFAKEFS]
-for lib in EXTERNAL_PYTHONPATHS:
- if os.path.exists(lib):
- sys.path.insert(0, lib)
# Setup logging to be silent so unittests can pass through TF.
logging.disable(logging.ERROR)
+ENV = {
+ 'ANDROID_BUILD_TOP': '/',
+ 'ANDROID_PRODUCT_OUT': '/out/prod',
+ 'ANDROID_TARGET_OUT_TESTCASES': '/out/prod/tcases',
+ 'ANDROID_HOST_OUT': '/out/host',
+ 'ANDROID_HOST_OUT_TESTCASES': '/out/host/tcases',
+ 'TARGET_PRODUCT': 'aosp_cf_x86_64',
+ 'TARGET_BUILD_VARIANT': 'userdebug',
+}
+
def get_test_modules():
"""Returns a list of testable modules.
@@ -51,9 +48,10 @@
List of strings (the testable module import path).
"""
testable_modules = []
- base_path = os.path.dirname(os.path.realpath(__file__))
+ package = os.path.dirname(os.path.realpath(__file__))
+ base_path = os.path.dirname(package)
- for dirpath, _, files in os.walk(base_path):
+ for dirpath, _, files in os.walk(package):
for f in files:
if f.endswith("_unittest.py"):
# Now transform it into a no-absolute import path.
@@ -82,45 +80,11 @@
runner = unittest.TextTestRunner(verbosity=2)
return runner.run(test_suite)
-# pylint: disable=import-outside-toplevel
-def main(run_coverage=False, show_missing=False):
- """Main unittest entry.
- Args:
- cov_args: A list of coverage arguments.
-
- Returns:
- 0 if success. None-zero if fails.
- """
- if not all((run_coverage, atest_utils.has_python_module(COVERAGE))):
+if __name__ == '__main__':
+ print(sys.version_info)
+ with mock.patch.dict('os.environ', ENV):
result = run_test_modules(get_test_modules())
if not result.wasSuccessful():
sys.exit(not result.wasSuccessful())
sys.exit(0)
-
- # pylint: disable=import-error
- from coverage import coverage
- # The cover_pylib=False ignores only std libs; therefore, these 3rd-party
- # libs must be omitted before creating coverage class.
- ignore_libs = ['*/__init__.py',
- '*dist-packages/*.py',
- '*site-packages/*.py']
- cov = coverage(omit=ignore_libs)
- cov.erase()
- cov.start()
- result = run_test_modules(get_test_modules())
- if not result.wasSuccessful():
- cov.erase()
- sys.exit(not result.wasSuccessful())
- cov.stop()
- cov.save()
- cov.report(show_missing=show_missing)
- cov.html_report()
-
-
-if __name__ == '__main__':
- print(sys.version_info)
- if len(sys.argv) > 1:
- main(RUN_COVERAGE, SHOW_MISSING)
- else:
- main()
diff --git a/atest/atest_script_help.sh b/atest/atest_script_help.sh
index e3ca59d..0783024 100755
--- a/atest/atest_script_help.sh
+++ b/atest/atest_script_help.sh
@@ -42,16 +42,16 @@
# check java version
java_version_string=$(${TF_JAVA} -version 2>&1)
-JAVA_VERSION=$(echo "$java_version_string" | grep 'version [ "]\(1\.8\|9\|11\).*[ "]')
+JAVA_VERSION=$(echo "$java_version_string" | grep 'version [ "]\(1\.8\|9\|11\|17\).*[ "]')
if [ "${JAVA_VERSION}" == "" ]; then
- >&2 echo "Wrong java version. 1.8, 9 or 11 is required. Found $java_version_string"
+ >&2 echo "Wrong java version. 1.8, 9, 11 or 17 is required. Found $java_version_string"
>&2 echo "PATH value:"
>&2 echo "$PATH"
exit 8
fi
# check if java is above 9 and supports add-opens
-JAVA_VERSION=$(echo "$java_version_string" | grep 'version [ "]\(9\|11\).*[ "]')
+JAVA_VERSION=$(echo "$java_version_string" | grep 'version [ "]\(9\|11\|17\).*[ "]')
if [ "${JAVA_VERSION}" != "" ]; then
ADD_OPENS_FLAG="--add-opens=java.base/java.nio=ALL-UNNAMED --add-opens=java.base/sun.reflect.annotation=ALL-UNNAMED"
fi
diff --git a/atest/atest_tradefed.sh b/atest/atest_tradefed.sh
index c86379c..ba61a99 100755
--- a/atest/atest_tradefed.sh
+++ b/atest/atest_tradefed.sh
@@ -42,7 +42,8 @@
csuite-harness.jar
tradefed-isolation.jar
host-libprotobuf-java-full.jar
- cts-dalvik-host-test-runner.jar"
+ cts-dalvik-host-test-runner.jar
+ compatibility-tradefed.jar"
for dep in $deps; do
if [ -f "$ANDROID_HOST_OUT/framework/$dep" ]; then
TF_PATH+=":$ANDROID_HOST_OUT/framework/$dep"
@@ -71,7 +72,7 @@
fi
# Note: must leave $RDBG_FLAG and $TRADEFED_OPTS unquoted so that they go away when unset
-${TF_JAVA} $RDBG_FLAG \
+LOCAL_MODE=1 START_FEATURE_SERVER=1 ${TF_JAVA} $RDBG_FLAG \
-XX:+HeapDumpOnOutOfMemoryError \
-XX:-OmitStackTraceInFastThrow \
$TRADEFED_OPTS \
diff --git a/atest/atest_unittest.py b/atest/atest_unittest.py
deleted file mode 100755
index 17d79d9..0000000
--- a/atest/atest_unittest.py
+++ /dev/null
@@ -1,305 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright 2017, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittests for atest."""
-
-# pylint: disable=line-too-long
-
-import datetime
-import os
-import sys
-import tempfile
-import unittest
-
-from importlib import reload
-from io import StringIO
-from unittest import mock
-
-# pylint: disable=wrong-import-order
-import atest
-import atest_utils
-import constants
-import module_info
-
-from metrics import metrics_utils
-from test_finders import test_info
-
-#pylint: disable=protected-access
-class AtestUnittests(unittest.TestCase):
- """Unit tests for atest.py"""
-
- @mock.patch('os.environ.get', return_value=None)
- def test_missing_environment_variables_uninitialized(self, _):
- """Test _has_environment_variables when no env vars."""
- self.assertTrue(atest._missing_environment_variables())
-
- @mock.patch('os.environ.get', return_value='out/testcases/')
- def test_missing_environment_variables_initialized(self, _):
- """Test _has_environment_variables when env vars."""
- self.assertFalse(atest._missing_environment_variables())
-
- def test_parse_args(self):
- """Test _parse_args parses command line args."""
- test_one = 'test_name_one'
- test_two = 'test_name_two'
- custom_arg = '--custom_arg'
- custom_arg_val = 'custom_arg_val'
- pos_custom_arg = 'pos_custom_arg'
-
- # Test out test and custom args are properly retrieved.
- args = [test_one, test_two, '--', custom_arg, custom_arg_val]
- parsed_args = atest._parse_args(args)
- self.assertEqual(parsed_args.tests, [test_one, test_two])
- self.assertEqual(parsed_args.custom_args, [custom_arg, custom_arg_val])
-
- # Test out custom positional args with no test args.
- args = ['--', pos_custom_arg, custom_arg_val]
- parsed_args = atest._parse_args(args)
- self.assertEqual(parsed_args.tests, [])
- self.assertEqual(parsed_args.custom_args, [pos_custom_arg,
- custom_arg_val])
-
- def test_has_valid_test_mapping_args(self):
- """Test _has_valid_test_mapping_args method."""
- # Test test mapping related args are not mixed with incompatible args.
- options_no_tm_support = [
- ('--generate-baseline', '5'),
- ('--detect-regression', 'path'),
- ('--generate-new-metrics', '5')
- ]
- tm_options = [
- '--test-mapping',
- '--include-subdirs'
- ]
-
- for tm_option in tm_options:
- for no_tm_option, no_tm_option_value in options_no_tm_support:
- args = [tm_option, no_tm_option]
- if no_tm_option_value is not None:
- args.append(no_tm_option_value)
- parsed_args = atest._parse_args(args)
- self.assertFalse(
- atest._has_valid_test_mapping_args(parsed_args),
- 'Failed to validate: %s' % args)
-
- @mock.patch.object(module_info.ModuleInfo, '_merge_build_system_infos')
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/'})
- @mock.patch('json.load', return_value={})
- @mock.patch('builtins.open', new_callable=mock.mock_open)
- @mock.patch('os.path.isfile', return_value=True)
- @mock.patch('atest_utils._has_colors', return_value=True)
- @mock.patch.object(module_info.ModuleInfo, 'get_module_info',)
- def test_print_module_info_from_module_name(self, mock_get_module_info,
- _mock_has_colors, _isfile,
- _open, _json, _merge):
- """Test _print_module_info_from_module_name method."""
- mod_one_name = 'mod1'
- mod_one_path = ['src/path/mod1']
- mod_one_installed = ['installed/path/mod1']
- mod_one_suites = ['device_test_mod1', 'native_test_mod1']
- mod_one = {constants.MODULE_NAME: mod_one_name,
- constants.MODULE_PATH: mod_one_path,
- constants.MODULE_INSTALLED: mod_one_installed,
- constants.MODULE_COMPATIBILITY_SUITES: mod_one_suites}
-
- # Case 1: The testing_module('mod_one') can be found in module_info.
- mock_get_module_info.return_value = mod_one
- capture_output = StringIO()
- sys.stdout = capture_output
- mod_info = module_info.ModuleInfo()
- # Check return value = True, since 'mod_one' can be found.
- self.assertTrue(
- atest._print_module_info_from_module_name(mod_info, mod_one_name))
- # Assign sys.stdout back to default.
- sys.stdout = sys.__stdout__
- correct_output = ('\x1b[1;32mmod1\x1b[0m\n'
- '\x1b[1;36m\tCompatibility suite\x1b[0m\n'
- '\t\tdevice_test_mod1\n'
- '\t\tnative_test_mod1\n'
- '\x1b[1;36m\tSource code path\x1b[0m\n'
- '\t\tsrc/path/mod1\n'
- '\x1b[1;36m\tInstalled path\x1b[0m\n'
- '\t\tinstalled/path/mod1\n')
- # Check the function correctly printed module_info in color to stdout
- self.assertEqual(capture_output.getvalue(), correct_output)
-
- # Case 2: The testing_module('mod_one') can NOT be found in module_info.
- mock_get_module_info.return_value = None
- capture_output = StringIO()
- sys.stdout = capture_output
- # Check return value = False, since 'mod_one' can NOT be found.
- self.assertFalse(
- atest._print_module_info_from_module_name(mod_info, mod_one_name))
- # Assign sys.stdout back to default.
- sys.stdout = sys.__stdout__
- null_output = ''
- # Check if no module_info, then nothing printed to screen.
- self.assertEqual(capture_output.getvalue(), null_output)
-
- @mock.patch.object(module_info.ModuleInfo, '_merge_build_system_infos')
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/'})
- @mock.patch('json.load', return_value={})
- @mock.patch('builtins.open', new_callable=mock.mock_open)
- @mock.patch('os.path.isfile', return_value=True)
- @mock.patch('atest_utils._has_colors', return_value=True)
- @mock.patch.object(module_info.ModuleInfo, 'get_module_info',)
- def test_print_test_info(self, mock_get_module_info, _mock_has_colors,
- _isfile, _open, _json, _merge):
- """Test _print_test_info method."""
- mod_one_name = 'mod1'
- mod_one = {constants.MODULE_NAME: mod_one_name,
- constants.MODULE_PATH: ['path/mod1'],
- constants.MODULE_INSTALLED: ['installed/mod1'],
- constants.MODULE_COMPATIBILITY_SUITES: ['suite_mod1']}
- mod_two_name = 'mod2'
- mod_two = {constants.MODULE_NAME: mod_two_name,
- constants.MODULE_PATH: ['path/mod2'],
- constants.MODULE_INSTALLED: ['installed/mod2'],
- constants.MODULE_COMPATIBILITY_SUITES: ['suite_mod2']}
- mod_three_name = 'mod3'
- mod_three = {constants.MODULE_NAME: mod_two_name,
- constants.MODULE_PATH: ['path/mod3'],
- constants.MODULE_INSTALLED: ['installed/mod3'],
- constants.MODULE_COMPATIBILITY_SUITES: ['suite_mod3']}
- test_name = mod_one_name
- build_targets = set([mod_one_name, mod_two_name, mod_three_name])
- t_info = test_info.TestInfo(test_name, 'mock_runner', build_targets)
- test_infos = set([t_info])
-
- # The _print_test_info() will print the module_info of the test_info's
- # test_name first. Then, print its related build targets. If the build
- # target be printed before(e.g. build_target == test_info's test_name),
- # it will skip it and print the next build_target.
- # Since the build_targets of test_info are mod_one, mod_two, and
- # mod_three, it will print mod_one first, then mod_two, and mod_three.
- #
- # _print_test_info() calls _print_module_info_from_module_name() to
- # print the module_info. And _print_module_info_from_module_name()
- # calls get_module_info() to get the module_info. So we can mock
- # get_module_info() to achieve that.
- mock_get_module_info.side_effect = [mod_one, mod_two, mod_three]
-
- capture_output = StringIO()
- sys.stdout = capture_output
- mod_info = module_info.ModuleInfo()
- atest._print_test_info(mod_info, test_infos)
- # Assign sys.stdout back to default.
- sys.stdout = sys.__stdout__
- correct_output = ('\x1b[1;32mmod1\x1b[0m\n'
- '\x1b[1;36m\tCompatibility suite\x1b[0m\n'
- '\t\tsuite_mod1\n'
- '\x1b[1;36m\tSource code path\x1b[0m\n'
- '\t\tpath/mod1\n'
- '\x1b[1;36m\tInstalled path\x1b[0m\n'
- '\t\tinstalled/mod1\n'
- '\x1b[1;35m\tRelated build targets\x1b[0m\n'
- '\t\tmod1, mod2, mod3\n'
- '\x1b[1;32mmod2\x1b[0m\n'
- '\x1b[1;36m\tCompatibility suite\x1b[0m\n'
- '\t\tsuite_mod2\n'
- '\x1b[1;36m\tSource code path\x1b[0m\n'
- '\t\tpath/mod2\n'
- '\x1b[1;36m\tInstalled path\x1b[0m\n'
- '\t\tinstalled/mod2\n'
- '\x1b[1;32mmod3\x1b[0m\n'
- '\x1b[1;36m\tCompatibility suite\x1b[0m\n'
- '\t\tsuite_mod3\n'
- '\x1b[1;36m\tSource code path\x1b[0m\n'
- '\t\tpath/mod3\n'
- '\x1b[1;36m\tInstalled path\x1b[0m\n'
- '\t\tinstalled/mod3\n'
- '\x1b[1;37m\x1b[0m\n')
- self.assertEqual(capture_output.getvalue(), correct_output)
-
- @mock.patch.object(atest_utils, 'get_adb_devices')
- @mock.patch.object(metrics_utils, 'send_exit_event')
- def test_validate_exec_mode(self, _send_exit, _devs):
- """Test _validate_exec_mode."""
- _devs.return_value = ['127.0.0.1:34556']
- args = []
- no_install_test_info = test_info.TestInfo(
- 'mod', '', set(), data={}, module_class=["JAVA_LIBRARIES"],
- install_locations=set(['device']))
- host_test_info = test_info.TestInfo(
- 'mod', '', set(), data={}, module_class=["NATIVE_TESTS"],
- install_locations=set(['host']))
- device_test_info = test_info.TestInfo(
- 'mod', '', set(), data={}, module_class=["NATIVE_TESTS"],
- install_locations=set(['device']))
- both_test_info = test_info.TestInfo(
- 'mod', '', set(), data={}, module_class=["NATIVE_TESTS"],
- install_locations=set(['host', 'device']))
-
- # $atest <Both-support>
- parsed_args = atest._parse_args(args)
- test_infos = [host_test_info]
- atest._validate_exec_mode(parsed_args, test_infos)
- self.assertFalse(parsed_args.host)
-
- # $atest <Both-support> with host_tests set to True
- parsed_args = atest._parse_args([])
- test_infos = [host_test_info]
- atest._validate_exec_mode(parsed_args, test_infos, host_tests=True)
- # Make sure the host option is not set.
- self.assertFalse(parsed_args.host)
-
- # $atest <Both-support> with host_tests set to False
- parsed_args = atest._parse_args([])
- test_infos = [host_test_info]
- atest._validate_exec_mode(parsed_args, test_infos, host_tests=False)
- self.assertFalse(parsed_args.host)
-
- # $atest <device-only> with host_tests set to False
- parsed_args = atest._parse_args([])
- test_infos = [device_test_info]
- atest._validate_exec_mode(parsed_args, test_infos, host_tests=False)
- # Make sure the host option is not set.
- self.assertFalse(parsed_args.host)
-
- # $atest <device-only> with host_tests set to True
- parsed_args = atest._parse_args([])
- test_infos = [device_test_info]
- self.assertRaises(SystemExit, atest._validate_exec_mode,
- parsed_args, test_infos, host_tests=True)
-
- # $atest <Both-support>
- parsed_args = atest._parse_args([])
- test_infos = [both_test_info]
- atest._validate_exec_mode(parsed_args, test_infos)
- self.assertFalse(parsed_args.host)
-
- # $atest <no_install_test_info>
- parsed_args = atest._parse_args([])
- test_infos = [no_install_test_info]
- atest._validate_exec_mode(parsed_args, test_infos)
- self.assertFalse(parsed_args.host)
-
- def test_make_test_run_dir(self):
- """Test make_test_run_dir."""
- tmp_dir = tempfile.mkdtemp()
- constants.ATEST_RESULT_ROOT = tmp_dir
- date_time = None
-
- work_dir = atest.make_test_run_dir()
- folder_name = os.path.basename(work_dir)
- date_time = datetime.datetime.strptime('_'.join(folder_name.split('_')[0:2]),
- atest.TEST_RUN_DIR_PREFIX)
- reload(constants)
- self.assertTrue(date_time)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/atest/atest_utils.py b/atest/atest_utils.py
index 036b291..914d8f5 100644
--- a/atest/atest_utils.py
+++ b/atest/atest_utils.py
@@ -22,8 +22,11 @@
from __future__ import print_function
+import enum
+import datetime
import fnmatch
import hashlib
+import html
import importlib
import itertools
import json
@@ -35,69 +38,33 @@
import shutil
import subprocess
import sys
-import sysconfig
import time
+import urllib
import zipfile
+from dataclasses import dataclass
from multiprocessing import Process
from pathlib import Path
+from typing import Any, Dict, List, Set
import xml.etree.ElementTree as ET
-from atest_enum import DetectType, FilterType
+from atest.atest_enum import DetectType, ExitCode, FilterType
-# This is a workaround of b/144743252, where the http.client failed to loaded
-# because the googleapiclient was found before the built-in libs; enabling
-# embedded launcher(b/135639220) has not been reliable and other issue will
-# raise.
-# The workaround is repositioning the built-in libs before other 3rd libs in
-# PYTHONPATH(sys.path) to eliminate the symptom of failed loading http.client.
-for lib in (sysconfig.get_paths()['stdlib'], sysconfig.get_paths()['purelib']):
- if lib in sys.path:
- sys.path.remove(lib)
- sys.path.insert(0, lib)
-# (b/219847353) Move googleapiclient to the last position of sys.path when
-# existed.
-for lib in sys.path:
- if 'googleapiclient' in lib:
- sys.path.remove(lib)
- sys.path.append(lib)
- break
#pylint: disable=wrong-import-position
-import atest_decorator
-import atest_error
-import constants
+from atest import atest_decorator
+from atest import atest_error
+from atest import constants
-# This proto related module will be auto generated in build time.
-# pylint: disable=no-name-in-module
-# pylint: disable=import-error
-try:
- from tools.asuite.atest.tf_proto import test_record_pb2
-except ImportError as err:
- pass
-# b/147562331 only occurs when running atest in source code. We don't encourge
-# the users to manually "pip3 install protobuf", therefore when the exception
-# occurs, we don't collect data and the tab completion is for args is silence.
-try:
- from metrics import metrics
- from metrics import metrics_base
- from metrics import metrics_utils
-except ImportError as err:
- # TODO(b/182854938): remove this ImportError after refactor metrics dir.
- try:
- from asuite.metrics import metrics
- from asuite.metrics import metrics_base
- from asuite.metrics import metrics_utils
- except ImportError as err:
- # This exception occurs only when invoking atest in source code.
- print("You shouldn't see this message unless you ran 'atest-src'. "
- "To resolve the issue, please run:\n\t{}\n"
- "and try again.".format('pip3 install protobuf'))
- print('Import error: ', err)
- print('sys.path:\n', '\n'.join(sys.path))
- sys.exit(constants.IMPORT_FAILURE)
+from atest.metrics import metrics
+from atest.metrics import metrics_utils
+from atest.tf_proto import test_record_pb2
_BASH_RESET_CODE = '\033[0m\n'
+DIST_OUT_DIR = Path(os.environ.get(constants.ANDROID_BUILD_TOP, os.getcwd())
+ + '/out/dist/')
+MAINLINE_MODULES_EXT_RE = re.compile(r'(.apex|.apks|.apk)$')
+
# Arbitrary number to limit stdout for failed runs in _run_limited_output.
# Reason for its use is that the make command itself has its own carriage
# return output mechanism that when collected line by line causes the streaming
@@ -131,21 +98,46 @@
_REGEX_CHARS = {'[', '(', '{', '|', '\\', '*', '?', '+', '^'}
_WILDCARD_CHARS = {'?', '*'}
-# TODO: (b/180394948) remove this after the universal build script lands.
-# Variables for building mainline modules:
-_VARS_FOR_MAINLINE = {
- "TARGET_BUILD_DENSITY": "alldpi",
- "TARGET_BUILD_TYPE": "release",
- "OVERRIDE_PRODUCT_COMPRESSED_APEX": "false",
- "UNBUNDLED_BUILD_SDKS_FROM_SOURCE": "true",
- "ALWAYS_EMBED_NOTICES": "true",
-}
-
_ROOT_PREPARER = "com.android.tradefed.targetprep.RootTargetPreparer"
_WILDCARD_FILTER_RE = re.compile(r'.*[?|*]$')
_REGULAR_FILTER_RE = re.compile(r'.*\w$')
+SUGGESTIONS = {
+ # (b/198581508) Do not run "adb sync" for the users.
+ 'CANNOT LINK EXECUTABLE': 'Please run "adb sync" or reflash the device(s).',
+ # (b/177626045) If Atest does not install target application properly.
+ 'Runner reported an invalid method': 'Please reflash the device(s).'
+}
+
+_BUILD_ENV = {}
+
+
+@dataclass
+class BuildEnvProfiler:
+ """Represents the condition before and after trigging build."""
+ ninja_file: Path
+ ninja_file_mtime: float
+ variable_file: Path
+ variable_file_md5: str
+ clean_out: bool
+ build_files_integrity: bool
+
+
+@enum.unique
+class BuildOutputMode(enum.Enum):
+ "Represents the different ways to display build output."
+ STREAMED = 'streamed'
+ LOGGED = 'logged'
+
+ def __init__(self, arg_name: str):
+ self._description = arg_name
+
+ # pylint: disable=missing-function-docstring
+ def description(self):
+ return self._description
+
+
def get_build_cmd(dump=False):
"""Compose build command with no-absolute path and flag "--make-mode".
@@ -259,7 +251,7 @@
raise subprocess.CalledProcessError(proc.returncode, cmd, output)
-def get_build_out_dir():
+def get_build_out_dir() -> str:
"""Get android build out directory.
The order of the rules are:
@@ -298,57 +290,18 @@
return user_out_dir
return os.path.join(build_top, "out")
-
-def get_mainline_build_cmd(build_targets):
- """Method that assembles cmd for building mainline modules.
-
- Args:
- build_targets: A set of strings of build targets to make.
-
- Returns:
- A list of build command.
- """
- print('%s\n%s' % (
- colorize("Building Mainline Modules...", constants.CYAN),
- ', '.join(build_targets)))
- logging.debug('Building Mainline Modules: %s', ' '.join(build_targets))
- # TODO: (b/180394948) use the consolidated build script when it lands.
- config = get_android_config()
- branch = config.get('BUILD_ID')
- arch = config.get('TARGET_ARCH')
- # 2. Assemble TARGET_BUILD_APPS and TARGET_PRODUCT.
- target_build_apps = 'TARGET_BUILD_APPS={}'.format(
- ' '.join(build_targets))
- target_product = 'TARGET_PRODUCT=mainline_modules_{}'.format(arch)
- if 'AOSP' in branch:
- target_product = 'TARGET_PRODUCT=module_{}'.format(arch)
- # 3. Assemble DIST_DIR and the rest of static targets.
- dist_dir = 'DIST_DIR={}'.format(
- os.path.join('out', 'dist', 'mainline_modules_{}'.format(arch)))
- static_targets = [
- 'dist',
- 'apps_only',
- 'merge_zips',
- 'aapt2'
- ]
- cmd = get_build_cmd()
- cmd.append(target_build_apps)
- cmd.append(target_product)
- cmd.append(dist_dir)
- cmd.extend(static_targets)
- return cmd
+def update_build_env(env: Dict[str, str]):
+ """Method that updates build environment variables."""
+ # pylint: disable=global-statement
+ global _BUILD_ENV
+ _BUILD_ENV.update(env)
-def build(build_targets, verbose=False, env_vars=None, mm_build_targets=None):
+def build(build_targets: Set[str]):
"""Shell out and invoke run_build_cmd to make build_targets.
Args:
build_targets: A set of strings of build targets to make.
- verbose: Optional arg. If True output is streamed to the console.
- If False, only the last line of the build output is outputted.
- env_vars: Optional arg. Dict of env vars to set during build.
- mm_build_targets: A set of string like build_targets, but will build
- in unbundled(mainline) module mode.
Returns:
Boolean of whether build command was successful, True if nothing to
@@ -357,51 +310,47 @@
if not build_targets:
logging.debug('No build targets, skipping build.')
return True
+
+ # pylint: disable=global-statement
+ global _BUILD_ENV
full_env_vars = os.environ.copy()
- if env_vars:
- full_env_vars.update(env_vars)
- if mm_build_targets:
- # Set up necessary variables for building mainline modules.
- full_env_vars.update(_VARS_FOR_MAINLINE)
- if not os.getenv('TARGET_BUILD_VARIANT'):
- full_env_vars.update({'TARGET_BUILD_VARIANT': 'user'})
- # Inject APEX_BUILD_FOR_PRE_S_DEVICES=true for all products.
- # TODO: support _bundled(S+) artifacts that link shared libs.
- colorful_print(
- '\nWARNING: Only support building pre-S products for now.',
- constants.YELLOW)
- full_env_vars.update({'APEX_BUILD_FOR_PRE_S_DEVICES': 'true'})
- mm_build_cmd = get_mainline_build_cmd(mm_build_targets)
- status = run_build_cmd(mm_build_cmd, verbose, full_env_vars)
- if not status:
- return status
+ update_build_env(full_env_vars)
print('\n%s\n%s' % (
colorize("Building Dependencies...", constants.CYAN),
', '.join(build_targets)))
logging.debug('Building Dependencies: %s', ' '.join(build_targets))
cmd = get_build_cmd() + list(build_targets)
- return run_build_cmd(cmd, verbose, full_env_vars)
+ return _run_build_cmd(cmd, _BUILD_ENV)
-def run_build_cmd(cmd, verbose=False, env_vars=None):
+
+def _run_build_cmd(cmd: List[str], env_vars: Dict[str, str]):
"""The main process of building targets.
Args:
cmd: A list of soong command.
- verbose: Optional arg. If True output is streamed to the console.
- If False, only the last line of the build output is outputted.
- env_vars: Optional arg. Dict of env vars to set during build.
-
+ env_vars: Dict of environment variables used for build.
Returns:
Boolean of whether build command was successful, True if nothing to
build.
"""
logging.debug('Executing command: %s', cmd)
+ build_profiler = _build_env_profiling()
try:
- if verbose:
+ if env_vars.get('BUILD_OUTPUT_MODE') == BuildOutputMode.STREAMED.value:
+ print()
subprocess.check_call(cmd, stderr=subprocess.STDOUT, env=env_vars)
else:
- # TODO: Save output to a log file.
+ # Note that piping stdout forces Soong to switch to 'dumb terminal
+ # mode' which only prints completed actions. This gives users the
+ # impression that actions are taking longer than they really are.
+ # See b/233044822 for more details.
+ log_path = Path(get_build_out_dir()).joinpath('verbose.log.gz')
+ print('\n(Build log may not reflect actual status in simple output'
+ 'mode; check {} for detail after build finishes.)'.format(
+ colorize(f'{log_path}', constants.CYAN)
+ ), end='')
_run_limited_output(cmd, env_vars=env_vars)
+ _send_build_condition_metrics(build_profiler, cmd)
logging.info('Build successful')
return True
except subprocess.CalledProcessError as err:
@@ -411,21 +360,6 @@
return False
-def _can_upload_to_result_server():
- """Return True if we can talk to result server."""
- # TODO: Also check if we have a slow connection to result server.
- if constants.RESULT_SERVER:
- try:
- from urllib.request import urlopen
- urlopen(constants.RESULT_SERVER,
- timeout=constants.RESULT_SERVER_TIMEOUT).close()
- return True
- # pylint: disable=broad-except
- except Exception as err:
- logging.debug('Talking to result server raised exception: %s', err)
- return False
-
-
# pylint: disable=unused-argument
def get_result_server_args(for_test_mapping=False):
"""Return list of args for communication with result server.
@@ -450,7 +384,8 @@
which means the test value is a test group name in TEST_MAPPING file, e.g.,
`:postsubmit`.
- If --host-unit-test-only be applied, it's not test mapping.
+ If --host-unit-test-only or --smart-testing-local was applied, it doesn't
+ intend to be a test_mapping test.
If any test mapping options is specified, the atest command must also be
set to run tests in test mapping files.
@@ -461,12 +396,12 @@
True if the args indicates atest shall run tests in test mapping. False
otherwise.
"""
- return (
- not args.host_unit_test_only and
- (args.test_mapping or
- args.include_subdirs or
- not args.tests or
- (len(args.tests) == 1 and args.tests[0][0] == ':')))
+ if any((args.host_unit_test_only, args.smart_testing_local)):
+ return False
+ if any((args.test_mapping, args.include_subdirs, not args.tests)):
+ return True
+ # ':postsubmit' implicitly indicates running in test-mapping mode.
+ return all((len(args.tests) == 1, args.tests[0][0] == ':'))
@atest_decorator.static_var("cached_has_colors", {})
@@ -496,14 +431,15 @@
return cached_has_colors[stream]
-def colorize(text, color, highlight=False):
+def colorize(text, color, bp_color=None):
""" Convert to colorful string with ANSI escape code.
Args:
text: A string to print.
- color: ANSI code shift for colorful print. They are defined
- in constants_default.py.
- highlight: True to print with highlight.
+ color: Forground(Text) color which is an ANSI code shift for colorful
+ print. They are defined in constants_default.py.
+ bp_color: Backgroud color which is an ANSI code shift for colorful
+ print.
Returns:
Colorful string with ANSI escape code.
@@ -512,27 +448,33 @@
clr_suff = '\033[0m'
has_colors = _has_colors(sys.stdout)
if has_colors:
- if highlight:
- ansi_shift = 40 + color
+ background_color = ''
+ if bp_color:
+ # Foreground(Text) ranges from 30-37
+ text_color = 30 + color
+ # Background ranges from 40-47
+ background_color = ';%d' % (40 + bp_color)
else:
- ansi_shift = 30 + color
- clr_str = "%s%dm%s%s" % (clr_pref, ansi_shift, text, clr_suff)
+ text_color = 30 + color
+ clr_str = "%s%d%sm%s%s" % (clr_pref, text_color, background_color,
+ text, clr_suff)
else:
clr_str = text
return clr_str
-def colorful_print(text, color, highlight=False, auto_wrap=True):
+def colorful_print(text, color, bp_color=None, auto_wrap=True):
"""Print out the text with color.
Args:
text: A string to print.
- color: ANSI code shift for colorful print. They are defined
- in constants_default.py.
- highlight: True to print with highlight.
+ color: Forground(Text) color which is an ANSI code shift for colorful
+ print. They are defined in constants_default.py.
+ bp_color: Backgroud color which is an ANSI code shift for colorful
+ print.
auto_wrap: If True, Text wraps while print.
"""
- output = colorize(text, color, highlight)
+ output = colorize(text, color, bp_color)
if auto_wrap:
print(output)
else:
@@ -553,44 +495,6 @@
return columns, rows
-def is_external_run():
- # TODO(b/133905312): remove this function after aidegen calling
- # metrics_base.get_user_type directly.
- """Check is external run or not.
-
- Determine the internal user by passing at least one check:
- - whose git mail domain is from google
- - whose hostname is from google
- Otherwise is external user.
-
- Returns:
- True if this is an external run, False otherwise.
- """
- return metrics_base.get_user_type() == metrics_base.EXTERNAL_USER
-
-
-def print_data_collection_notice():
- """Print the data collection notice."""
- anonymous = ''
- user_type = 'INTERNAL'
- if metrics_base.get_user_type() == metrics_base.EXTERNAL_USER:
- anonymous = ' anonymous'
- user_type = 'EXTERNAL'
- notice = (' We collect%s usage statistics in accordance with our Content '
- 'Licenses (%s), Contributor License Agreement (%s), Privacy '
- 'Policy (%s) and Terms of Service (%s).'
- ) % (anonymous,
- constants.CONTENT_LICENSES_URL,
- constants.CONTRIBUTOR_AGREEMENT_URL[user_type],
- constants.PRIVACY_POLICY_URL,
- constants.TERMS_SERVICE_URL
- )
- print(delimiter('=', 18, prenl=1))
- colorful_print("Notice:", constants.RED)
- colorful_print("%s" % notice, constants.GREEN)
- print(delimiter('=', 18, postnl=1))
-
-
def handle_test_runner_cmd(input_test, test_cmds, do_verification=False,
result_path=constants.VERIFY_DATA_PATH):
"""Handle the runner command of input tests.
@@ -650,6 +554,9 @@
"""
_cmd = ' '.join(cmd_list).split()
for cmd in _cmd:
+ if cmd.startswith('--skip-all-system-status-check'):
+ _cmd.remove(cmd)
+ continue
if cmd.startswith('--atest-log-file-path'):
_cmd.remove(cmd)
continue
@@ -737,22 +644,19 @@
- True if the checksum is consistent with the actual MD5.
- False otherwise.
"""
- if not os.path.isfile(check_file):
+ if not Path(check_file).is_file():
if not missing_ok:
logging.debug(
'Unable to verify: %s not found.', check_file)
return missing_ok
- if not is_valid_json_file(check_file):
- logging.debug(
- 'Unable to verify: %s invalid JSON format.', check_file)
- return missing_ok
- with open(check_file, 'r+') as _file:
- content = json.load(_file)
+ content = load_json_safely(check_file)
+ if content:
for filename, md5 in content.items():
if md5sum(filename) != md5:
logging.debug('%s has altered.', filename)
return False
- return True
+ return True
+ return False
def save_md5(filenames, save_file):
"""Method equivalent to 'md5sum file1 file2 > /file/to/check'
@@ -856,7 +760,8 @@
ValueError,
TypeError,
EOFError,
- IOError) as err:
+ IOError,
+ ImportError) as err:
# Won't break anything, just remove the old cache, log this error,
# and collect the exception by metrics.
logging.debug('Exception raised: %s', err)
@@ -952,8 +857,18 @@
"""
match_files = []
for root, _, filenames in os.walk(path):
- for filename in fnmatch.filter(filenames, file_name):
- match_files.append(os.path.join(root, filename))
+ try:
+ for filename in fnmatch.filter(filenames, file_name):
+ match_files.append(os.path.join(root, filename))
+ except re.error as e:
+ msg = "Unable to locate %s among %s" % (file_name, filenames)
+ logging.debug(msg)
+ logging.debug("Exception: %s", e)
+ metrics.AtestExitEvent(
+ duration=metrics_utils.convert_duration(0),
+ exit_code=ExitCode.COLLECT_ONLY_FILE_NOT_FOUND,
+ stacktrace=msg,
+ logs=str(e))
return match_files
def extract_zip_text(zip_path):
@@ -1114,68 +1029,144 @@
"""
return bool(importlib.util.find_spec(module_name))
-def is_valid_json_file(path):
- """Detect if input path exist and content is valid.
+def load_json_safely(jsonfile):
+ """Load the given json file as an object.
Args:
- path: The json file path.
+ jsonfile: The json file path.
Returns:
- True if file exist and content is valid, False otherwise.
+ The content of the give json file. Null dict when:
+ 1. the given path doesn't exist.
+ 2. the given path is not a json or invalid format.
"""
- if isinstance(path, bytes):
- path = path.decode('utf-8')
+ if isinstance(jsonfile, bytes):
+ jsonfile = jsonfile.decode('utf-8')
+ if Path(jsonfile).is_file():
+ try:
+ with open(jsonfile, 'r') as cache:
+ return json.load(cache)
+ except json.JSONDecodeError:
+ logging.debug('Exception happened while loading %s.', jsonfile)
+ else:
+ logging.debug('%s: File not found.', jsonfile)
+ return {}
+
+def get_atest_version():
+ """Get atest version.
+
+ Returns:
+ Version string from the VERSION file, e.g. prebuilt
+ 2022-11-24_9314547 (<release_date>_<build_id>)
+
+ If VERSION does not exist (src or local built):
+ 2022-11-24_5d448c50 (<commit_date>_<commit_id>)
+
+ If the git command fails for unexpected reason:
+ 2022-11-24_unknown (<today_date>_unknown)
+ """
+ atest_dir = Path(__file__).resolve().parent
+ version_file = atest_dir.joinpath('VERSION')
+ if Path(version_file).is_file():
+ return open(version_file).read()
+
+ # Try fetching commit date (%ci) and commit hash (%h).
+ git_cmd = 'git log -1 --pretty=format:"%ci;%h"'
try:
- if os.path.isfile(path):
- with open(path) as json_file:
- json.load(json_file)
- return True
- logging.debug('%s: File not found.', path)
- except json.JSONDecodeError:
- logging.debug('Exception happened while loading %s.', path)
- return False
+ # commit date/hash are only available when running from the source
+ # and the local built.
+ result = subprocess.run(
+ git_cmd, shell=True, check=False, capture_output=True,
+ cwd=Path(
+ os.getenv(constants.ANDROID_BUILD_TOP), '').joinpath(
+ 'tools/asuite/atest'))
+ if result.stderr:
+ raise subprocess.CalledProcessError(
+ returncode=0, cmd=git_cmd)
+ raw_date, commit = result.stdout.decode().split(';')
+ date = datetime.datetime.strptime(raw_date,
+ '%Y-%m-%d %H:%M:%S %z').date()
+ # atest_dir doesn't exist will throw FileNotFoundError.
+ except (subprocess.CalledProcessError, FileNotFoundError):
+ # Use today as the commit date for unexpected conditions.
+ date = datetime.datetime.today().date()
+ commit = 'unknown'
+ return f'{date}_{commit}'
-def get_manifest_branch():
- """Get the manifest branch via repo info command.
+def get_manifest_branch(show_aosp=False):
+ """Get the manifest branch.
+
+ (portal xml) (default xml)
+ +--------------------+ _get_include() +-----------------------------+
+ | .repo/manifest.xml |--------------->| .repo/manifests/default.xml |
+ +--------------------+ +---------------+-------------+
+ <default revision="master" |
+ remote="aosp" | _get_revision()
+ sync-j="4"/> V
+ +--------+
+ | master |
+ +--------+
+
+ Args:
+ show_aosp: A boolean that shows 'aosp' prefix by checking the 'remote'
+ attribute.
Returns:
- None if no system environment parameter ANDROID_BUILD_TOP or
- running 'repo info' command error, otherwise the manifest branch
+ The value of 'revision' of the included xml or default.xml.
+
+ None when no ANDROID_BUILD_TOP or unable to access default.xml.
"""
- build_top = os.getenv(constants.ANDROID_BUILD_TOP, None)
+ build_top = os.getenv(constants.ANDROID_BUILD_TOP)
if not build_top:
return None
- splitter = ':'
- env_vars = os.environ.copy()
- orig_pythonpath = env_vars['PYTHONPATH'].split(splitter)
- # Command repo imports stdlib "http.client", so adding non-default lib
- # e.g. googleapiclient, may cause repo command execution error.
- # The temporary dir is not presumably always /tmp, especially in MacOS.
- # b/169936306, b/190647636 are the cases we should never ignore.
- soong_path_re = re.compile(r'.*/Soong.python_.*/')
- default_python_path = [p for p in orig_pythonpath
- if not soong_path_re.match(p)]
- env_vars['PYTHONPATH'] = splitter.join(default_python_path)
- proc = subprocess.Popen(f'repo info '
- f'-o {constants.ASUITE_REPO_PROJECT_NAME}',
- shell=True,
- env=env_vars,
- cwd=build_top,
- universal_newlines=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- try:
- cmd_out, err_out = proc.communicate()
- branch_re = re.compile(r'Manifest branch:\s*(?P<branch>.*)')
- match = branch_re.match(cmd_out)
- if match:
- return match.group('branch')
- logging.warning('Unable to detect branch name through:\n %s, %s',
- cmd_out, err_out)
- except subprocess.TimeoutExpired:
- logging.warning('Exception happened while getting branch')
- proc.kill()
- return None
+ portal_xml = Path(build_top).joinpath('.repo', 'manifest.xml')
+ default_xml = Path(build_top).joinpath('.repo/manifests', 'default.xml')
+ def _get_revision(xml):
+ try:
+ xml_root = ET.parse(xml).getroot()
+ except (IOError, OSError, ET.ParseError):
+ # TODO(b/274989179) Change back to warning once warning if not going
+ # to be treat as test failure. Or test_get_manifest_branch unit test
+ # could be fix if return None if portal_xml or default_xml not
+ # exist.
+ logging.info('%s could not be read.', xml)
+ return ''
+ default_tags = xml_root.findall('./default')
+ if default_tags:
+ prefix = ''
+ for tag in default_tags:
+ branch = tag.attrib.get('revision')
+ if show_aosp and tag.attrib.get('remote') == 'aosp':
+ prefix = 'aosp-'
+ return f'{prefix}{branch}'
+ return ''
+ def _get_include(xml):
+ try:
+ xml_root = ET.parse(xml).getroot()
+ except (IOError, OSError, ET.ParseError):
+ # TODO(b/274989179) Change back to warning once warning if not going
+ # to be treat as test failure. Or test_get_manifest_branch unit test
+ # could be fix if return None if portal_xml or default_xml not
+ # exist.
+ logging.info('%s could not be read.', xml)
+ return Path()
+ include_tags = xml_root.findall('./include')
+ if include_tags:
+ for tag in include_tags:
+ name = tag.attrib.get('name')
+ if name:
+ return Path(build_top).joinpath('.repo/manifests', name)
+ return default_xml
+
+ # 1. Try getting revision from .repo/manifests/default.xml
+ if default_xml.is_file():
+ return _get_revision(default_xml)
+ # 2. Try getting revision from the included xml of .repo/manifest.xml
+ include_xml = _get_include(portal_xml)
+ if include_xml.is_file():
+ return _get_revision(include_xml)
+ # 3. Try getting revision directly from manifest.xml (unlikely to happen)
+ return _get_revision(portal_xml)
def get_build_target():
"""Get the build target form system environment TARGET_PRODUCT."""
@@ -1184,22 +1175,26 @@
os.getenv(constants.TARGET_BUILD_VARIANT, None))
return build_target
-def parse_mainline_modules(test):
- """Parse test reference into test and mainline modules.
+def build_module_info_target(module_info_target):
+ """Build module-info.json after deleting the original one.
Args:
- test: An String of test reference.
-
- Returns:
- A string of test without mainline modules,
- A string of mainline modules.
+ module_info_target: the target name that soong is going to build.
"""
- result = constants.TEST_WITH_MAINLINE_MODULES_RE.match(test)
- if not result:
- return test, ""
- test_wo_mainline_modules = result.group('test')
- mainline_modules = result.group('mainline_modules')
- return test_wo_mainline_modules, mainline_modules
+ module_file = 'module-info.json'
+ logging.debug('Generating %s - this is required for '
+ 'initial runs or forced rebuilds.', module_file)
+ build_start = time.time()
+ product_out = os.getenv(constants.ANDROID_PRODUCT_OUT, None)
+ module_info_path = Path(product_out).joinpath('module-info.json')
+ if module_info_path.is_file():
+ os.remove(module_info_path)
+ if not build([module_info_target]):
+ sys.exit(ExitCode.BUILD_FAILURE)
+ build_duration = time.time() - build_start
+ metrics.LocalDetectEvent(
+ detect_type=DetectType.ONLY_BUILD_MODULE_INFO,
+ result=int(build_duration))
def has_wildcard(test_name):
""" Tell whether the test_name(either a list or string) contains wildcard
@@ -1341,11 +1336,16 @@
A set include all the device name of the input config.
"""
devices = set()
- xml_root = ET.parse(test_config).getroot()
- device_tags = xml_root.findall('.//device')
- for tag in device_tags:
- name = tag.attrib['name'].strip()
- devices.add(name)
+ try:
+ xml_root = ET.parse(test_config).getroot()
+ device_tags = xml_root.findall('.//device')
+ for tag in device_tags:
+ name = tag.attrib['name'].strip()
+ devices.add(name)
+ except ET.ParseError as e:
+ colorful_print('Config has invalid format.', constants.RED)
+ colorful_print('File %s : %s' % (test_config, str(e)), constants.YELLOW)
+ sys.exit(ExitCode.CONFIG_INVALID_FORMAT)
return devices
def get_mainline_param(test_config):
@@ -1582,6 +1582,35 @@
test_commands.sort()
return ' '.join(test_commands)
+def gen_runner_cmd_to_file(tests, dry_run_cmd,
+ result_path=constants.RUNNER_COMMAND_PATH):
+ """Generate test command and save to file.
+
+ Args:
+ tests: A String of input tests.
+ dry_run_cmd: A String of dry run command.
+ result_path: A file path for saving result.
+ Returns:
+ A composed run commands.
+ """
+ normalized_cmd = dry_run_cmd
+ root_path = os.environ.get(constants.ANDROID_BUILD_TOP)
+ if root_path in dry_run_cmd:
+ normalized_cmd = dry_run_cmd.replace(root_path,
+ f"${constants.ANDROID_BUILD_TOP}")
+ results = {}
+ if not os.path.isfile(result_path):
+ results[tests] = normalized_cmd
+ else:
+ with open(result_path) as json_file:
+ results = json.load(json_file)
+ if results.get(tests) != normalized_cmd:
+ results[tests] = normalized_cmd
+ with open(result_path, 'w+') as _file:
+ json.dump(results, _file, indent=0)
+ return results.get(tests, '')
+
+
def handle_test_env_var(input_test, result_path=constants.VERIFY_ENV_PATH,
pre_verify=False):
"""Handle the environment variable of input tests.
@@ -1622,21 +1651,22 @@
raise atest_error.DryRunVerificationError('\n'.join(verify_error))
return 1
-def generate_buildfiles_checksum():
+def generate_buildfiles_checksum(target_dir: Path):
""" Method that generate md5 checksum of Android.{bp,mk} files.
The checksum of build files are stores in
$ANDROID_HOST_OUT/indexes/buildfiles.md5
"""
- if os.path.isfile(constants.LOCATE_CACHE):
- cmd = (f'locate -d{constants.LOCATE_CACHE} --existing '
- r'--regex "/Android.(bp|mk)$"')
+ plocate_db = Path(target_dir).joinpath(constants.LOCATE_CACHE)
+ checksum_file = Path(target_dir).joinpath(constants.BUILDFILES_MD5)
+ if plocate_db.is_file():
+ cmd = (f'locate -d{plocate_db} --existing '
+ r'--regex "/Android\.(bp|mk)$"')
try:
result = subprocess.check_output(cmd, shell=True).decode('utf-8')
- save_md5(result.split(), constants.BUILDFILES_MD5)
+ save_md5(result.split(), checksum_file)
except subprocess.CalledProcessError:
- logging.error('Failed to generate %s',
- constants.BUILDFILES_MD5)
+ logging.error('Failed to generate %s', checksum_file)
def run_multi_proc(func, *args, **kwargs):
"""Start a process with multiprocessing and return Process object.
@@ -1703,7 +1733,7 @@
keyword_re = re.compile(
r'import\s+(?P<fqcn>.*\.{})(|;)$'.format(class_name), re.I)
build_top = Path(os.environ.get(constants.ANDROID_BUILD_TOP, ''))
- for f in module_info.get('srcs'):
+ for f in module_info.get(constants.MODULE_SRCS, []):
full_path = build_top.joinpath(f)
with open(full_path, 'r') as cache:
for line in cache.readlines():
@@ -1767,3 +1797,312 @@
tf_filter, FilterType.REGULAR_FILTER.value)
type_set.add(FilterType.REGULAR_FILTER.value)
return type_set
+
+def has_index_files():
+ """Determine whether the essential index files are done.
+
+ (b/206886222) checksum may be different even the src is not changed; so
+ the main process needs to wait when the essential index files do not exist.
+
+ Returns:
+ False if one of the index file does not exist; True otherwise.
+ """
+ return all(Path(f).is_file() for f in [
+ constants.CLASS_INDEX,
+ constants.CC_CLASS_INDEX,
+ constants.QCLASS_INDEX,
+ constants.PACKAGE_INDEX])
+
+# pylint: disable=anomalous-backslash-in-string,too-many-branches
+def get_bp_content(filename: Path, module_type: str) -> Dict:
+ """Get essential content info from an Android.bp.
+ By specifying module_type (e.g. 'android_test', 'android_app'), this method
+ can parse the given starting point and grab 'name', 'instrumentation_for'
+ and 'manifest'.
+
+ Returns:
+ A dict of mapping test module and target module; e.g.
+ {
+ 'FooUnitTests':
+ {'manifest': 'AndroidManifest.xml', 'target_module': 'Foo'},
+ 'Foo':
+ {'manifest': 'AndroidManifest-common.xml', 'target_module': ''}
+ }
+ Null dict if there is no content of the given module_type.
+ """
+ build_file = Path(filename)
+ if not any((build_file.suffix == '.bp', build_file.is_file())):
+ return {}
+ start_from = re.compile(f'^{module_type}\s*\{{')
+ end_with = re.compile(r'^\}$')
+ context_re = re.compile(
+ r'\s*(?P<key>(name|manifest|instrumentation_for))\s*:'
+ r'\s*\"(?P<value>.*)\"\s*,', re.M)
+ with open(build_file, 'r') as cache:
+ data = cache.readlines()
+ content_dict = {}
+ start_recording = False
+ for _line in data:
+ line = _line.strip()
+ if re.match(start_from, line):
+ start_recording = True
+ _dict = {}
+ continue
+ if start_recording:
+ if not re.match(end_with, line):
+ match = re.match(context_re, line)
+ if match:
+ _dict.update(
+ {match.group('key'): match.group('value')})
+ else:
+ start_recording = False
+ module_name = _dict.get('name')
+ if module_name:
+ content_dict.update(
+ {module_name: {
+ 'manifest': _dict.get(
+ 'manifest', 'AndroidManifest.xml'),
+ 'target_module': _dict.get(
+ 'instrumentation_for', '')}
+ })
+ return content_dict
+
+def get_manifest_info(manifest: Path) -> Dict[str, Any]:
+ """Get the essential info from the given manifest file.
+ This method cares only three attributes:
+ * package
+ * targetPackage
+ * persistent
+ For an instrumentation test, the result will be like:
+ {
+ 'package': 'com.android.foo.tests.unit',
+ 'targetPackage': 'com.android.foo',
+ 'persistent': False
+ }
+ For a target module of the instrumentation test:
+ {
+ 'package': 'com.android.foo',
+ 'targetPackage': '',
+ 'persistent': True
+ }
+ """
+ mdict = {'package': '', 'target_package': '', 'persistent': False}
+ try:
+ xml_root = ET.parse(manifest).getroot()
+ except (ET.ParseError, FileNotFoundError):
+ return mdict
+ manifest_package_re = re.compile(r'[a-z][\w]+(\.[\w]+)*')
+ # 1. Must probe 'package' name from the top.
+ for item in xml_root.findall('.'):
+ if 'package' in item.attrib.keys():
+ pkg = item.attrib.get('package')
+ match = manifest_package_re.match(pkg)
+ if match:
+ mdict['package'] = pkg
+ break
+ for item in xml_root.findall('*'):
+ # 2. Probe 'targetPackage' in 'instrumentation' tag.
+ if item.tag == 'instrumentation':
+ for key, value in item.attrib.items():
+ if 'targetPackage' in key:
+ mdict['target_package'] = value
+ break
+ # 3. Probe 'persistent' in any tags.
+ for key, value in item.attrib.items():
+ if 'persistent' in key:
+ mdict['persistent'] = value.lower() == 'true'
+ break
+ return mdict
+
+# pylint: disable=broad-except
+def generate_print_result_html(result_file: Path):
+ """Generate a html that collects all log files."""
+ result_file = Path(result_file)
+ search_dir = Path(result_file).parent.joinpath('log')
+ result_html = Path(search_dir, 'test_logs.html')
+ try:
+ logs = sorted(find_files(str(search_dir), file_name='*'))
+ with open(result_html, 'w') as cache:
+ cache.write('<!DOCTYPE html><html><body>')
+ result = load_json_safely(result_file)
+ if result:
+ cache.write(f'<h1>{"atest " + result.get("args")}</h1>')
+ timestamp = datetime.datetime.fromtimestamp(
+ result_file.stat().st_ctime)
+ cache.write(f'<h2>{timestamp}</h2>')
+ for log in logs:
+ cache.write(f'<p><a href="{urllib.parse.quote(log)}">'
+ f'{html.escape(Path(log).name)}</a></p>')
+ cache.write('</body></html>')
+ print(f'\nTo access logs, press "ctrl" and click on\n'
+ f'file://{result_html}\n')
+ except Exception as e:
+ logging.debug('Did not generate log html for reason: %s', e)
+
+# pylint: disable=broad-except
+def prompt_suggestions(result_file: Path):
+ """Generate suggestions when detecting keywords in logs."""
+ result_file = Path(result_file)
+ search_dir = Path(result_file).parent.joinpath('log')
+ logs = sorted(find_files(str(search_dir), file_name='*'))
+ for log in logs:
+ for keyword, suggestion in SUGGESTIONS.items():
+ try:
+ with open(log, 'r') as cache:
+ content = cache.read()
+ if keyword in content:
+ colorful_print(
+ '[Suggestion] ' + suggestion, color=constants.RED)
+ break
+ # If the given is not a plain text, just ignore it.
+ except Exception:
+ pass
+
+def build_files_integrity_is_ok() -> bool:
+ """Return Whether the integrity of build files is OK."""
+ # 0. Inexistence of the checksum file means a fresh repo sync.
+ if not Path(constants.BUILDFILES_MD5).is_file():
+ return False
+ # 1. Ensure no build files were added/deleted.
+ with open(constants.BUILDFILES_MD5, 'r') as cache:
+ recorded_amount = len(json.load(cache).keys())
+ cmd = (f'locate -d{constants.LOCATE_CACHE} --regex '
+ r'"/Android\.(bp|mk)$" | wc -l')
+ if int(subprocess.getoutput(cmd)) != recorded_amount:
+ return False
+ # 2. Ensure the consistency of all build files.
+ return check_md5(constants.BUILDFILES_MD5, missing_ok=False)
+
+
+def _build_env_profiling() -> BuildEnvProfiler:
+ """Determine the status profile before build.
+
+ The BuildEnvProfiler object can help use determine whether a build is:
+ 1. clean build. (empty out/ dir)
+ 2. Build files Integrity (Android.bp/Android.mk changes).
+ 3. Environment variables consistency.
+ 4. New Ninja file generated. (mtime of soong/build.ninja)
+
+ Returns:
+ the BuildProfile object.
+ """
+ out_dir = Path(get_build_out_dir())
+ ninja_file = out_dir.joinpath('soong/build.ninja')
+ mtime = ninja_file.stat().st_mtime if ninja_file.is_file() else 0
+ variables_file = out_dir.joinpath('soong/soong.environment.used.build')
+
+ return BuildEnvProfiler(
+ ninja_file=ninja_file,
+ ninja_file_mtime=mtime,
+ variable_file=variables_file,
+ variable_file_md5=md5sum(variables_file),
+ clean_out=not ninja_file.exists(),
+ build_files_integrity=build_files_integrity_is_ok()
+ )
+
+
+def _send_build_condition_metrics(
+ build_profile: BuildEnvProfiler, cmd: List[str]):
+ """Send build conditions by comparing build env profilers."""
+
+ # when build module-info.json only, 'module-info.json' will be
+ # the last element.
+ m_mod_info_only = 'module-info.json' in cmd.pop()
+
+ def ninja_file_is_changed(env_profiler: BuildEnvProfiler) -> bool:
+ """Determine whether the ninja file had been renewal."""
+ if not env_profiler.ninja_file.is_file():
+ return True
+ return (env_profiler.ninja_file.stat().st_mtime !=
+ env_profiler.ninja_file_mtime)
+
+ def env_var_is_changed(env_profiler: BuildEnvProfiler) -> bool:
+ """Determine whether soong-related variables had changed."""
+ return (md5sum(env_profiler.variable_file) !=
+ env_profiler.variable_file_md5)
+
+ def send_data(detect_type):
+ """A simple wrapper of metrics.LocalDetectEvent."""
+ metrics.LocalDetectEvent(detect_type=detect_type, result=1)
+
+ # Determine the correct detect type before profiling.
+ # (build module-info.json or build dependencies.)
+ clean_out = (DetectType.MODULE_INFO_CLEAN_OUT
+ if m_mod_info_only else DetectType.BUILD_CLEAN_OUT)
+ ninja_generation = (DetectType.MODULE_INFO_GEN_NINJA
+ if m_mod_info_only else DetectType.BUILD_GEN_NINJA)
+ bpmk_change = (DetectType.MODULE_INFO_BPMK_CHANGE
+ if m_mod_info_only else DetectType.BUILD_BPMK_CHANGE)
+ env_change = (DetectType.MODULE_INFO_ENV_CHANGE
+ if m_mod_info_only else DetectType.BUILD_ENV_CHANGE)
+ src_change = (DetectType.MODULE_INFO_SRC_CHANGE
+ if m_mod_info_only else DetectType.BUILD_SRC_CHANGE)
+ other = (DetectType.MODULE_INFO_OTHER
+ if m_mod_info_only else DetectType.BUILD_OTHER)
+ incremental =(DetectType.MODULE_INFO_INCREMENTAL
+ if m_mod_info_only else DetectType.BUILD_INCREMENTAL)
+
+ if build_profile.clean_out:
+ send_data(clean_out)
+ else:
+ send_data(incremental)
+
+ if ninja_file_is_changed(build_profile):
+ send_data(ninja_generation)
+
+ other_condition = True
+ if not build_profile.build_files_integrity:
+ send_data(bpmk_change)
+ other_condition = False
+ if env_var_is_changed(build_profile):
+ send_data(env_change)
+ other_condition = False
+ if bool(get_modified_files(os.getcwd())):
+ send_data(src_change)
+ other_condition = False
+ if other_condition:
+ send_data(other)
+
+
+def get_local_auto_shardable_tests():
+ """Get the auto shardable test names in shardable file.
+
+ The path will be ~/.atest/auto_shard/local_auto_shardable_tests
+
+ Returns:
+ A list of auto shardable test names.
+ """
+ shardable_tests_file = Path(get_misc_dir()).joinpath(
+ '.atest/auto_shard/local_auto_shardable_tests')
+ if not shardable_tests_file.exists():
+ return []
+ return open(shardable_tests_file, 'r').read().split()
+
+def update_shardable_tests(test_name: str, run_time_in_sec: int):
+ """Update local_auto_shardable_test file.
+
+ Strategy:
+ - Determine to add the module by the run time > 10 mins.
+ - local_auto_shardable_test file path :
+ ~/.atest/auto_shard/local_auto_shardable_tests
+ - The file content template is module name per line:
+ <module1>
+ <module2>
+ ...
+ """
+ if run_time_in_sec < 600:
+ return
+ shardable_tests = get_local_auto_shardable_tests()
+ if test_name not in shardable_tests:
+ shardable_tests.append(test_name)
+ logging.info('%s takes %ss (> 600s) to finish. Adding to shardable '
+ 'test list.', test_name, run_time_in_sec)
+
+ if not shardable_tests:
+ logging.info('No shardable tests to run.')
+ return
+ shardable_dir = Path(get_misc_dir()).joinpath('.atest/auto_shard')
+ shardable_dir.mkdir(parents=True, exist_ok=True)
+ shardable_tests_file = shardable_dir.joinpath('local_auto_shardable_tests')
+ with open(shardable_tests_file, 'w') as file:
+ file.write('\n'.join(shardable_tests))
diff --git a/atest/atest_utils_unittest.py b/atest/atest_utils_unittest.py
index c0a5de0..f284948 100755
--- a/atest/atest_utils_unittest.py
+++ b/atest/atest_utils_unittest.py
@@ -27,17 +27,21 @@
import unittest
from io import StringIO
+from pathlib import Path
from unittest import mock
-import atest_error
-import atest_utils
-import constants
-import unittest_utils
-import unittest_constants
+# pylint: disable=import-error
+from pyfakefs import fake_filesystem_unittest
-from test_finders import test_info
-from atest_enum import FilterType
+from atest import atest_arg_parser
+from atest import atest_error
+from atest import atest_utils
+from atest import constants
+from atest import unittest_utils
+from atest import unittest_constants
+from atest.test_finders import test_info
+from atest.atest_enum import FilterType
TEST_MODULE_NAME_A = 'ModuleNameA'
TEST_RUNNER_A = 'FakeTestRunnerA'
@@ -63,7 +67,8 @@
----------------------------
'''
-#pylint: disable=protected-access
+# pylint: disable=protected-access
+# pylint: disable=too-many-public-methods
class AtestUtilsUnittests(unittest.TestCase):
"""Unit tests for atest_utils.py"""
@@ -83,49 +88,59 @@
self.assertEqual(want_list,
atest_utils._capture_fail_section(test_list))
- def test_is_test_mapping(self):
+ def test_is_test_mapping_none_test_mapping_args(self):
"""Test method is_test_mapping."""
- tm_option_attributes = [
- 'test_mapping',
- 'include_subdirs'
- ]
- for attr_to_test in tm_option_attributes:
- args = mock.Mock()
- for attr in tm_option_attributes:
- setattr(args, attr, attr == attr_to_test)
- args.tests = []
- args.host_unit_test_only = False
+ parser = atest_arg_parser.AtestArgParser()
+ parser.add_atest_args()
+ non_tm_args = ['--host-unit-test-only', '--smart-testing-local']
+
+ for argument in non_tm_args:
+ args = parser.parse_args([argument])
+ self.assertFalse(
+ atest_utils.is_test_mapping(args),
+ 'Option %s indicates NOT a test_mapping!' % argument)
+
+ def test_is_test_mapping_test_mapping_args(self):
+ """Test method is_test_mapping."""
+ parser = atest_arg_parser.AtestArgParser()
+ parser.add_atest_args()
+ tm_args = ['--test-mapping', '--include-subdirs']
+
+ for argument in tm_args:
+ args = parser.parse_args([argument])
self.assertTrue(
atest_utils.is_test_mapping(args),
- 'Failed to validate option %s' % attr_to_test)
+ 'Option %s indicates a test_mapping!' % argument)
- args = mock.Mock()
- for attr in tm_option_attributes:
- setattr(args, attr, False)
- args.tests = []
- args.host_unit_test_only = True
- self.assertFalse(atest_utils.is_test_mapping(args))
+ def test_is_test_mapping_implicit_test_mapping(self):
+ """Test method is_test_mapping."""
+ parser = atest_arg_parser.AtestArgParser()
+ parser.add_atest_args()
- args = mock.Mock()
- for attr in tm_option_attributes:
- setattr(args, attr, False)
- args.tests = [':group_name']
- args.host_unit_test_only = False
- self.assertTrue(atest_utils.is_test_mapping(args))
+ args = parser.parse_args(['--test', '--build', ':postsubmit'])
+ self.assertTrue(
+ atest_utils.is_test_mapping(args),
+ 'Option %s indicates a test_mapping!' % args)
- args = mock.Mock()
- for attr in tm_option_attributes:
- setattr(args, attr, False)
- args.tests = [':test1', 'test2']
- args.host_unit_test_only = False
- self.assertFalse(atest_utils.is_test_mapping(args))
+ def test_is_test_mapping_with_testname(self):
+ """Test method is_test_mapping."""
+ parser = atest_arg_parser.AtestArgParser()
+ parser.add_atest_args()
+ irrelevant_args = ['--test', ':postsubmit', 'testname']
- args = mock.Mock()
- for attr in tm_option_attributes:
- setattr(args, attr, False)
- args.tests = ['test2']
- args.host_unit_test_only = False
- self.assertFalse(atest_utils.is_test_mapping(args))
+ args = parser.parse_args(irrelevant_args)
+ self.assertFalse(
+ atest_utils.is_test_mapping(args),
+ 'Option %s indicates a test_mapping!' % args)
+
+ def test_is_test_mapping_false(self):
+ """Test method is_test_mapping."""
+ parser = atest_arg_parser.AtestArgParser()
+ parser.add_atest_args()
+ args = parser.parse_args(['--test', '--build', 'hello_atest'])
+
+ self.assertFalse(
+ atest_utils.is_test_mapping(args))
def test_has_colors(self):
"""Test method _has_colors."""
@@ -145,7 +160,7 @@
self.assertTrue(atest_utils._has_colors(stream))
- @mock.patch('atest_utils._has_colors')
+ @mock.patch('atest.atest_utils._has_colors')
def test_colorize(self, mock_has_colors):
"""Test method colorize."""
original_str = "test string"
@@ -154,25 +169,24 @@
# _has_colors() return False.
mock_has_colors.return_value = False
converted_str = atest_utils.colorize(original_str, green_no,
- highlight=True)
+ bp_color=constants.RED)
self.assertEqual(original_str, converted_str)
- # Green with highlight.
+ # Green text with red background.
mock_has_colors.return_value = True
converted_str = atest_utils.colorize(original_str, green_no,
- highlight=True)
- green_highlight_string = '\x1b[1;42m%s\x1b[0m' % original_str
+ bp_color=constants.RED)
+ green_highlight_string = '\x1b[1;32;41m%s\x1b[0m' % original_str
self.assertEqual(green_highlight_string, converted_str)
- # Green, no highlight.
+ # Green text, no background.
mock_has_colors.return_value = True
- converted_str = atest_utils.colorize(original_str, green_no,
- highlight=False)
+ converted_str = atest_utils.colorize(original_str, green_no)
green_no_highlight_string = '\x1b[1;32m%s\x1b[0m' % original_str
self.assertEqual(green_no_highlight_string, converted_str)
- @mock.patch('atest_utils._has_colors')
+ @mock.patch('atest.atest_utils._has_colors')
def test_colorful_print(self, mock_has_colors):
"""Test method colorful_print."""
testing_str = "color_print_test"
@@ -182,130 +196,58 @@
mock_has_colors.return_value = False
capture_output = StringIO()
sys.stdout = capture_output
- atest_utils.colorful_print(testing_str, green_no, highlight=True,
+ atest_utils.colorful_print(testing_str, green_no,
+ bp_color=constants.RED,
auto_wrap=False)
sys.stdout = sys.__stdout__
uncolored_string = testing_str
self.assertEqual(capture_output.getvalue(), uncolored_string)
- # Green with highlight, but no wrap.
+ # Green text with red background, but no wrap.
mock_has_colors.return_value = True
capture_output = StringIO()
sys.stdout = capture_output
- atest_utils.colorful_print(testing_str, green_no, highlight=True,
+ atest_utils.colorful_print(testing_str, green_no,
+ bp_color=constants.RED,
auto_wrap=False)
sys.stdout = sys.__stdout__
- green_highlight_no_wrap_string = '\x1b[1;42m%s\x1b[0m' % testing_str
+ green_highlight_no_wrap_string = '\x1b[1;32;41m%s\x1b[0m' % testing_str
self.assertEqual(capture_output.getvalue(),
green_highlight_no_wrap_string)
- # Green, no highlight, no wrap.
+ # Green text, no background, no wrap.
mock_has_colors.return_value = True
capture_output = StringIO()
sys.stdout = capture_output
- atest_utils.colorful_print(testing_str, green_no, highlight=False,
+ atest_utils.colorful_print(testing_str, green_no,
auto_wrap=False)
sys.stdout = sys.__stdout__
green_no_high_no_wrap_string = '\x1b[1;32m%s\x1b[0m' % testing_str
self.assertEqual(capture_output.getvalue(),
green_no_high_no_wrap_string)
- # Green with highlight and wrap.
+ # Green text with red background and wrap.
mock_has_colors.return_value = True
capture_output = StringIO()
sys.stdout = capture_output
- atest_utils.colorful_print(testing_str, green_no, highlight=True,
+ atest_utils.colorful_print(testing_str, green_no,
+ bp_color=constants.RED,
auto_wrap=True)
sys.stdout = sys.__stdout__
- green_highlight_wrap_string = '\x1b[1;42m%s\x1b[0m\n' % testing_str
+ green_highlight_wrap_string = '\x1b[1;32;41m%s\x1b[0m\n' % testing_str
self.assertEqual(capture_output.getvalue(), green_highlight_wrap_string)
- # Green with wrap, but no highlight.
+ # Green text with wrap, but no background.
mock_has_colors.return_value = True
capture_output = StringIO()
sys.stdout = capture_output
- atest_utils.colorful_print(testing_str, green_no, highlight=False,
+ atest_utils.colorful_print(testing_str, green_no,
auto_wrap=True)
sys.stdout = sys.__stdout__
green_wrap_no_highlight_string = '\x1b[1;32m%s\x1b[0m\n' % testing_str
self.assertEqual(capture_output.getvalue(),
green_wrap_no_highlight_string)
- @mock.patch('socket.gethostname')
- @mock.patch('subprocess.check_output')
- def test_is_external_run(self, mock_output, mock_hostname):
- """Test method is_external_run."""
- mock_output.return_value = ''
- mock_hostname.return_value = ''
- self.assertTrue(atest_utils.is_external_run())
-
- mock_output.return_value = 'test@other.com'
- mock_hostname.return_value = 'abc.com'
- self.assertTrue(atest_utils.is_external_run())
-
- mock_output.return_value = 'test@other.com'
- mock_hostname.return_value = 'abc.google.com'
- self.assertFalse(atest_utils.is_external_run())
-
- mock_output.return_value = 'test@other.com'
- mock_hostname.return_value = 'abc.google.def.com'
- self.assertTrue(atest_utils.is_external_run())
-
- mock_output.return_value = 'test@google.com'
- self.assertFalse(atest_utils.is_external_run())
-
- mock_output.return_value = 'test@other.com'
- mock_hostname.return_value = 'c.googlers.com'
- self.assertFalse(atest_utils.is_external_run())
-
- mock_output.return_value = 'test@other.com'
- mock_hostname.return_value = 'a.googlers.com'
- self.assertTrue(atest_utils.is_external_run())
-
- mock_output.side_effect = OSError()
- self.assertTrue(atest_utils.is_external_run())
-
- mock_output.side_effect = subprocess.CalledProcessError(1, 'cmd')
- self.assertTrue(atest_utils.is_external_run())
-
- @mock.patch('metrics.metrics_base.get_user_type')
- def test_print_data_collection_notice(self, mock_get_user_type):
- """Test method print_data_collection_notice."""
-
- # get_user_type return 1(external).
- mock_get_user_type.return_value = 1
- notice_str = ('\n==================\nNotice:\n'
- ' We collect anonymous usage statistics'
- ' in accordance with our'
- ' Content Licenses (https://source.android.com/setup/start/licenses),'
- ' Contributor License Agreement (https://opensource.google.com/docs/cla/),'
- ' Privacy Policy (https://policies.google.com/privacy) and'
- ' Terms of Service (https://policies.google.com/terms).'
- '\n==================\n\n')
- capture_output = StringIO()
- sys.stdout = capture_output
- atest_utils.print_data_collection_notice()
- sys.stdout = sys.__stdout__
- uncolored_string = notice_str
- self.assertEqual(capture_output.getvalue(), uncolored_string)
-
- # get_user_type return 0(internal).
- mock_get_user_type.return_value = 0
- notice_str = ('\n==================\nNotice:\n'
- ' We collect usage statistics'
- ' in accordance with our'
- ' Content Licenses (https://source.android.com/setup/start/licenses),'
- ' Contributor License Agreement (https://cla.developers.google.com/),'
- ' Privacy Policy (https://policies.google.com/privacy) and'
- ' Terms of Service (https://policies.google.com/terms).'
- '\n==================\n\n')
- capture_output = StringIO()
- sys.stdout = capture_output
- atest_utils.print_data_collection_notice()
- sys.stdout = sys.__stdout__
- uncolored_string = notice_str
- self.assertEqual(capture_output.getvalue(), uncolored_string)
-
@mock.patch('builtins.input')
@mock.patch('json.load')
def test_update_test_runner_cmd(self, mock_json_load_data, mock_input):
@@ -401,6 +343,7 @@
build_top = '/home/a/b/c'
rel_path = 'd/e'
mock_cwd.return_value = os.path.join(build_top, rel_path)
+ # TODO: (b/264015241) Stop mocking build variables.
os_environ_mock = {constants.ANDROID_BUILD_TOP: build_top}
with mock.patch.dict('os.environ', os_environ_mock, clear=True):
expected_cmd = ['../../build/soong/soong_ui.bash', '--make-mode']
@@ -461,7 +404,7 @@
@mock.patch('os.chmod')
@mock.patch('shutil.copy2')
- @mock.patch('atest_utils.has_valid_cert')
+ @mock.patch('atest.atest_utils.has_valid_cert')
@mock.patch('subprocess.check_output')
@mock.patch('os.path.exists')
def test_get_flakes(self, mock_path_exists, mock_output, mock_valid_cert,
@@ -490,9 +433,9 @@
# raise subprocess.CalledProcessError
mock_call.raiseError.side_effect = subprocess.CalledProcessError
self.assertFalse(atest_utils.has_valid_cert())
- with mock.patch("constants.CERT_STATUS_CMD", ''):
+ with mock.patch("atest.constants.CERT_STATUS_CMD", ''):
self.assertFalse(atest_utils.has_valid_cert())
- with mock.patch("constants.CERT_STATUS_CMD", 'CMD'):
+ with mock.patch("atest.constants.CERT_STATUS_CMD", 'CMD'):
# has valid cert
mock_call.return_value = 0
self.assertTrue(atest_utils.has_valid_cert())
@@ -503,52 +446,84 @@
# pylint: disable=no-member
def test_read_test_record_proto(self):
"""Test method read_test_record."""
- test_record_file_path = os.path.join(unittest_constants.TEST_DATA_DIR,
- "test_record.proto.testonly")
+ test_record_file_path = os.path.join(
+ unittest_constants.TEST_DATA_DIR,
+ "test_record.proto.testonly")
test_record = atest_utils.read_test_record(test_record_file_path)
- self.assertEqual(test_record.children[0].inline_test_record.test_record_id,
- 'x86 hello_world_test')
+ self.assertEqual(
+ test_record.children[0].inline_test_record.test_record_id,
+ 'x86 hello_world_test')
- def test_is_valid_json_file_file_not_exist(self):
- """Test method is_valid_json_file if file not exist."""
- json_file_path = os.path.join(unittest_constants.TEST_DATA_DIR,
- "not_exist.json")
- self.assertFalse(atest_utils.is_valid_json_file(json_file_path))
+ def test_load_json_safely_file_inexistent(self):
+ """Test method load_json_safely if file does not exist."""
+ json_file_path = Path(
+ unittest_constants.TEST_DATA_DIR).joinpath("not_exist.json")
+ self.assertEqual({}, atest_utils.load_json_safely(json_file_path))
- def test_is_valid_json_file_content_valid(self):
- """Test method is_valid_json_file if file exist and content is valid."""
- json_file_path = os.path.join(unittest_constants.TEST_DATA_DIR,
- "module-info.json")
- self.assertTrue(atest_utils.is_valid_json_file(json_file_path))
+ def test_load_json_safely_valid_json_format(self):
+ """Test method load_json_safely if file exists and format is valid."""
+ json_file_path = Path(
+ unittest_constants.TEST_DATA_DIR).joinpath("module-info.json")
+ content = atest_utils.load_json_safely(json_file_path)
+ self.assertEqual('MainModule1', content.get('MainModule1').get('module_name'))
+ self.assertEqual([], content.get('MainModule2').get('test_mainline_modules'))
- def test_is_valid_json_file_content_not_valid(self):
- """Test method is_valid_json_file if file exist but content is valid."""
- json_file_path = os.path.join(unittest_constants.TEST_DATA_DIR,
- "not-valid-module-info.json")
- self.assertFalse(atest_utils.is_valid_json_file(json_file_path))
+ def test_load_json_safely_invalid_json_format(self):
+ """Test method load_json_safely if file exist but content is invalid."""
+ json_file_path = Path(
+ unittest_constants.TEST_DATA_DIR).joinpath("not-valid-module-info.json")
+ self.assertEqual({}, atest_utils.load_json_safely(json_file_path))
- @mock.patch('subprocess.Popen')
@mock.patch('os.getenv')
- def test_get_manifest_branch(self, mock_env, mock_popen):
+ def test_get_manifest_branch(self, mock_env):
"""Test method get_manifest_branch"""
- mock_env.return_value = 'any_path'
- process = mock_popen.return_value
- process.communicate.return_value = (REPO_INFO_OUTPUT, '')
- self.assertEqual('test_branch', atest_utils.get_manifest_branch())
+ build_top = tempfile.TemporaryDirectory()
+ mock_env.return_value = build_top.name
+ repo_dir = Path(build_top.name).joinpath('.repo')
+ portal_xml = repo_dir.joinpath('manifest.xml')
+ manifest_dir = repo_dir.joinpath('manifests')
+ target_xml = manifest_dir.joinpath('Default.xml')
+ repo_dir.mkdir()
+ manifest_dir.mkdir()
+ content_portal = '<manifest><include name="Default.xml" /></manifest>'
+ content_manifest = '''<manifest>
+ <remote name="aosp" fetch=".." review="https://android-review.googlesource.com/" />
+ <default revision="MONSTER-dev" remote="aosp" sync-j="4" />
+ </manifest>'''
- mock_env.return_value = 'any_path'
- process.communicate.return_value = ('not_matched_branch_pattern.', '')
- self.assertEqual(None, atest_utils.get_manifest_branch())
+ # 1. The manifest.xml(portal) contains 'include' directive: 'Default.xml'.
+ # Search revision in .repo/manifests/Default.xml.
+ with open(portal_xml, 'w') as cache:
+ cache.write(content_portal)
+ with open(target_xml, 'w') as cache:
+ cache.write(content_manifest)
+ self.assertEqual("MONSTER-dev", atest_utils.get_manifest_branch())
+ self.assertEqual("aosp-MONSTER-dev", atest_utils.get_manifest_branch(True))
+ os.remove(target_xml)
+ os.remove(portal_xml)
- mock_env.return_value = 'any_path'
- process.communicate.side_effect = subprocess.TimeoutExpired(
- 1,
- 'repo info')
- self.assertEqual(None, atest_utils.get_manifest_branch())
+ # 2. The manifest.xml contains neither 'include' nor 'revision' directive,
+ # keep searching revision in .repo/manifests/default.xml by default.
+ with open(portal_xml, 'w') as cache:
+ cache.write('<manifest></manifest>')
+ default_xml = manifest_dir.joinpath('default.xml')
+ with open(default_xml, 'w') as cache:
+ cache.write(content_manifest)
+ self.assertEqual("MONSTER-dev", atest_utils.get_manifest_branch())
+ os.remove(default_xml)
+ os.remove(portal_xml)
- mock_env.return_value = None
- process.communicate.return_value = (REPO_INFO_OUTPUT, '')
- self.assertEqual(None, atest_utils.get_manifest_branch())
+ # 3. revision was directly defined in 'manifest.xml'.
+ with open(portal_xml, 'w') as cache:
+ cache.write(content_manifest)
+ self.assertEqual("MONSTER-dev", atest_utils.get_manifest_branch())
+ os.remove(portal_xml)
+
+ # 4. Return None if the included xml does not exist.
+ with open(portal_xml, 'w') as cache:
+ cache.write(content_portal)
+ self.assertEqual('', atest_utils.get_manifest_branch())
+ os.remove(portal_xml)
def test_has_wildcard(self):
"""Test method of has_wildcard"""
@@ -609,7 +584,7 @@
inexist_string = os.path.join(unittest_constants.TEST_DATA_DIR,
unittest_constants.CLASS_NAME)
self.assertEqual(
- atest_utils.md5sum(exist_string), 'f02c1a648f16e5e9d7035bb11486ac2b')
+ atest_utils.md5sum(exist_string), '062160df00c20b1ee4d916b7baf71346')
self.assertEqual(
atest_utils.md5sum(inexist_string), '')
@@ -749,5 +724,131 @@
expect_types = set([FilterType.WILDCARD_FILTER.value])
self.assertEqual(atest_utils.get_filter_types(filters), expect_types)
+ def test_get_bp_content(self):
+ """Method get_bp_content."""
+ # 1. "manifest" and "instrumentation_for" are defined.
+ content = '''android_test {
+ // comment
+ instrumentation_for: "AmSlam", // comment
+ manifest: "AndroidManifest-test.xml",
+ name: "AmSlamTests",
+ }'''
+ expected_result = {"AmSlamTests":
+ {"target_module": "AmSlam", "manifest": "AndroidManifest-test.xml"}}
+ temp_dir = tempfile.TemporaryDirectory()
+ tmpbp = Path(temp_dir.name).joinpath('Android.bp')
+ with open(tmpbp, 'w') as cache:
+ cache.write(content)
+ self.assertEqual(atest_utils.get_bp_content(tmpbp, 'android_test'),
+ expected_result)
+ temp_dir.cleanup()
+
+    # 2. Only name is defined; the default manifest and an empty target_module are given.
+ content = '''android_app {
+ // comment
+ name: "AmSlam",
+ srcs: ["src1.java", "src2.java"]
+ }'''
+ expected_result = {"AmSlam":
+ {"target_module": "", "manifest": "AndroidManifest.xml"}}
+ temp_dir = tempfile.TemporaryDirectory()
+ tmpbp = Path(temp_dir.name).joinpath('Android.bp')
+ with open(tmpbp, 'w') as cache:
+ cache.write(content)
+ self.assertEqual(atest_utils.get_bp_content(tmpbp, 'android_app'),
+ expected_result)
+ temp_dir.cleanup()
+
+ # 3. Not even an Android.bp.
+ content = '''LOCAL_PATH := $(call my-dir)
+ # comment
+ include $(call all-subdir-makefiles)
+ LOCAL_MODULE := atest_foo_test
+ }'''
+ temp_dir = tempfile.TemporaryDirectory()
+ tmpbp = Path(temp_dir.name).joinpath('Android.mk')
+ with open(tmpbp, 'w') as cache:
+ cache.write(content)
+ self.assertEqual(atest_utils.get_bp_content(tmpbp, 'android_app'), {})
+ temp_dir.cleanup()
+
+ def test_get_manifest_info(self):
+ """test get_manifest_info method."""
+ # An instrumentation test:
+ test_xml = os.path.join(unittest_constants.TEST_DATA_DIR,
+ 'foo/bar/AmSlam/test/AndroidManifest.xml')
+ expected = {
+ 'package': 'com.android.settings.tests.unit',
+ 'target_package': 'c0m.andr0id.settingS',
+ 'persistent': False
+ }
+ self.assertEqual(expected, atest_utils.get_manifest_info(test_xml))
+
+ # A target module:
+ target_xml = os.path.join(unittest_constants.TEST_DATA_DIR,
+ 'foo/bar/AmSlam/AndroidManifest.xml')
+ expected = {
+ 'package': 'c0m.andr0id.settingS',
+ 'target_package': '',
+ 'persistent': False
+ }
+ self.assertEqual(expected, atest_utils.get_manifest_info(target_xml))
+
+# pylint: disable=missing-function-docstring
+class AutoShardUnittests(fake_filesystem_unittest.TestCase):
+ """Tests for auto shard functions"""
+ def setUp(self):
+ self.setUpPyfakefs()
+
+ def test_get_local_auto_shardable_tests(self):
+ """test get local auto shardable list"""
+ shardable_tests_file = Path(atest_utils.get_misc_dir()).joinpath(
+ '.atest/auto_shard/local_auto_shardable_tests')
+
+ self.fs.create_file(shardable_tests_file, contents='abc\ndef')
+
+ long_duration_tests = atest_utils.get_local_auto_shardable_tests()
+
+ expected_list = ['abc', 'def']
+        self.assertEqual(long_duration_tests, expected_list)
+
+ def test_update_shardable_tests_with_time_less_than_600(self):
+ """test update local auto shardable list"""
+ shardable_tests_file = Path(atest_utils.get_misc_dir()).joinpath(
+ '.atest/auto_shard/local_auto_shardable_tests')
+
+ self.fs.create_file(shardable_tests_file, contents='')
+
+ atest_utils.update_shardable_tests('test1', 10)
+
+ with open(shardable_tests_file) as f:
+ self.assertEqual('', f.read())
+
+ def test_update_shardable_tests_with_time_larger_than_600(self):
+ """test update local auto shardable list"""
+ shardable_tests_file = Path(atest_utils.get_misc_dir()).joinpath(
+ '.atest/auto_shard/local_auto_shardable_tests')
+
+ self.fs.create_file(shardable_tests_file, contents='')
+
+ atest_utils.update_shardable_tests('test2', 1000)
+
+ with open(shardable_tests_file) as f:
+ self.assertEqual('test2', f.read())
+
+ def test_update_shardable_tests_with_time_larger_than_600_twice(self):
+ """test update local auto shardable list"""
+ shardable_tests_file = Path(atest_utils.get_misc_dir()).joinpath(
+ '.atest/auto_shard/local_auto_shardable_tests')
+        # Access the fake filesystem object via self.fs.
+ self.fs.create_file(shardable_tests_file, contents='')
+
+ atest_utils.update_shardable_tests('test3', 1000)
+ atest_utils.update_shardable_tests('test3', 601)
+
+ with open(shardable_tests_file) as f:
+ self.assertEqual('test3', f.read())
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/atest/bazel/WORKSPACE b/atest/bazel/WORKSPACE
deleted file mode 100644
index 996c058..0000000
--- a/atest/bazel/WORKSPACE
+++ /dev/null
@@ -1,3 +0,0 @@
-register_toolchains(
- "//prebuilts/build-tools:py_toolchain"
-)
diff --git a/atest/bazel/configs/rbe/config/BUILD b/atest/bazel/configs/rbe/config/BUILD
deleted file mode 100755
index 3816269..0000000
--- a/atest/bazel/configs/rbe/config/BUILD
+++ /dev/null
@@ -1,14 +0,0 @@
-package(default_visibility = ["//visibility:public"])
-
-platform(
- name = "platform",
- constraint_values = [
- "@bazel_tools//platforms:linux",
- "@bazel_tools//platforms:x86_64",
- ],
- exec_properties = {
- "container-image": "docker://l.gcr.io/google/rbe-ubuntu16-04@sha256:f6568d8168b14aafd1b707019927a63c2d37113a03bcee188218f99bd0327ea1",
- "OSFamily": "Linux",
- },
- parents = ["@local_config_platform//:host"],
-)
diff --git a/atest/bazel/reporter/Android.bp b/atest/bazel/reporter/Android.bp
index f9a1e63..74f8cc7 100644
--- a/atest/bazel/reporter/Android.bp
+++ b/atest/bazel/reporter/Android.bp
@@ -22,6 +22,8 @@
"src/com/android/tradefed/result/BazelExitCodeResultReporter.java",
"src/com/android/tradefed/result/BazelXmlResultReporter.java"
],
+ // b/267831518: Pin tradefed and dependencies to Java 11.
+ java_version: "11",
libs: [
"tradefed",
],
diff --git a/atest/bazel/reporter/BUILD.bazel b/atest/bazel/reporter/BUILD.bazel
new file mode 100644
index 0000000..24c6805
--- /dev/null
+++ b/atest/bazel/reporter/BUILD.bazel
@@ -0,0 +1,18 @@
+package(default_visibility = ["//visibility:public"])
+
+java_library(
+ name = "bazel-result-reporter",
+ srcs = glob(["src/**/*.java"]),
+ target_compatible_with = ["//build/bazel/platforms/os:linux"],
+ deps = [
+ ":tradefed",
+ ],
+)
+
+java_import(
+ name = "tradefed",
+ jars = [
+ "//tools/tradefederation/prebuilts/filegroups/tradefed:tradefed-prebuilt",
+ ],
+ target_compatible_with = ["//build/bazel/platforms/os:linux"],
+)
diff --git a/atest/bazel/reporter/javatests/com/android/tradefed/result/BazelExitCodeResultReporterTest.java b/atest/bazel/reporter/javatests/com/android/tradefed/result/BazelExitCodeResultReporterTest.java
index 59ef2e9..5537aeb 100644
--- a/atest/bazel/reporter/javatests/com/android/tradefed/result/BazelExitCodeResultReporterTest.java
+++ b/atest/bazel/reporter/javatests/com/android/tradefed/result/BazelExitCodeResultReporterTest.java
@@ -54,7 +54,7 @@
reporter.invocationStarted(DEFAULT_CONTEXT);
reporter.invocationEnded(1);
- assertFileContentsEquals("4", exitCodeFile);
+ assertFileContentsEquals("0", exitCodeFile);
}
@Test
diff --git a/atest/bazel/reporter/src/com/android/tradefed/result/BazelExitCodeResultReporter.java b/atest/bazel/reporter/src/com/android/tradefed/result/BazelExitCodeResultReporter.java
index 840818e..cbf9621 100644
--- a/atest/bazel/reporter/src/com/android/tradefed/result/BazelExitCodeResultReporter.java
+++ b/atest/bazel/reporter/src/com/android/tradefed/result/BazelExitCodeResultReporter.java
@@ -49,7 +49,6 @@
private boolean mHasRunFailures;
private boolean mHasTestFailures;
- private int mTestCount = 0;
@VisibleForTesting
BazelExitCodeResultReporter(FileSystem fs) {
@@ -61,21 +60,6 @@
}
@Override
- public void testRunStarted(String name, int numTests) {
- testRunStarted(name, numTests, 0);
- }
-
- @Override
- public void testRunStarted(String name, int numTests, int attemptNumber) {
- testRunStarted(name, numTests, attemptNumber, System.currentTimeMillis());
- }
-
- @Override
- public void testRunStarted(String name, int numTests, int attemptNumber, long startTime) {
- mTestCount += numTests;
- }
-
- @Override
public void testRunFailed(String errorMessage) {
mHasRunFailures = true;
}
@@ -127,18 +111,12 @@
return ExitCode.TESTS_FAILED;
}
- // Return NO_TESTS_FOUND only when there are no run failures.
- if (mTestCount == 0) {
- return ExitCode.NO_TESTS_FOUND;
- }
-
return ExitCode.SUCCESS;
}
private enum ExitCode {
SUCCESS(0),
TESTS_FAILED(3),
- NO_TESTS_FOUND(4),
RUN_FAILURE(6);
private final int value;
diff --git a/atest/bazel/resources/WORKSPACE b/atest/bazel/resources/WORKSPACE
new file mode 100644
index 0000000..55e72ed
--- /dev/null
+++ b/atest/bazel/resources/WORKSPACE
@@ -0,0 +1,12 @@
+register_toolchains(
+ "//prebuilts/build-tools:py_toolchain",
+ "//prebuilts/jdk/jdk17:runtime_toolchain_definition",
+)
+
+# `device_infra` repository provides rules needed to start cuttlefish devices
+# remotely. This repository is loaded only when Bazel needs a target from
+# it; otherwise it is not loaded.
+local_repository(
+ name = "device_infra",
+ path = "vendor/google/tools/atest/device_infra",
+)
diff --git a/atest/bazel/resources/bazel.sh b/atest/bazel/resources/bazel.sh
new file mode 100755
index 0000000..66fcfce
--- /dev/null
+++ b/atest/bazel/resources/bazel.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Script to run Bazel in AOSP.
+#
+# This script sets up startup and environment variables to run Bazel with the
+# AOSP JDK.
+#
+# Usage: bazel.sh [<startup options>] <command> [<args>]
+
+set -eo pipefail
+
+SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &> /dev/null && pwd)
+
+JDK_PATH="${SCRIPT_DIR}"/prebuilts/jdk/jdk17/linux-x86
+BAZEL_BINARY="${SCRIPT_DIR}"/prebuilts/bazel/linux-x86_64/bazel
+
+PROCESS_PATH="${JDK_PATH}"/bin:"${PATH}"
+
+JAVA_HOME="${JDK_PATH}" \
+PATH="${PROCESS_PATH}" \
+ "${BAZEL_BINARY}" \
+ --server_javabase="${JDK_PATH}" \
+ "$@"
diff --git a/atest/bazel/bazelrc b/atest/bazel/resources/bazelrc
similarity index 76%
rename from atest/bazel/bazelrc
rename to atest/bazel/resources/bazelrc
index 3e98d92..b052d94 100644
--- a/atest/bazel/bazelrc
+++ b/atest/bazel/resources/bazelrc
@@ -5,21 +5,19 @@
# Show the full set of flags for observability and debuggability.
common --announce_rc
+# Enforce consistent action environment variables to improve remote cache hit
+# rate.
+build --incompatible_strict_action_env
+
+# Use the JDK defined by local_java_runtime in //prebuilts/jdk/jdk<VERSION>
+build --java_runtime_version=jdk17
+
# Depending on how many machines are in the remote execution instance, setting
# this higher can make builds faster by allowing more jobs to run in parallel.
# Setting it too high can result in jobs that timeout, however, while waiting
# for a remote machine to execute them.
build:remote --jobs=200
-# Set the Java used to launch Tradefed.
-build:remote --action_env=TF_JAVA_HOME=/usr/lib/jvm/11.29.3-ca-jdk11.0.2/reduced
-
-# Set a host platform specifying the Docker container image used by the RBE
-# instance.
-# See https://docs.bazel.build/versions/master/platforms.html for more about
-# platforms.
-build:remote --host_platform=//bazel/configs/rbe/config:platform
-
# Enable the remote cache so that action results can be shared across machines,
# developers, and workspaces.
build:remote --remote_cache=grpcs://remotebuildexecution.googleapis.com
diff --git a/atest/bazel/resources/configs/rbe/config/BUILD b/atest/bazel/resources/configs/rbe/config/BUILD
new file mode 100755
index 0000000..0bf601f
--- /dev/null
+++ b/atest/bazel/resources/configs/rbe/config/BUILD
@@ -0,0 +1,15 @@
+package(default_visibility = ["//visibility:public"])
+
+platform(
+ name = "platform",
+ constraint_values = [
+ "@platforms//os:linux",
+ "@platforms//cpu:x86_64",
+ ],
+ exec_properties = {
+ "container-image": "docker://gcr.io/cloud-marketplace/google/rbe-ubuntu18-04@sha256:48b67b41118dbcdfc265e7335f454fbefa62681ab8d47200971fc7a52fb32054",
+ "gceMachineType": "e2-standard-16",
+ "OSFamily": "Linux",
+ },
+ parents = ["@local_config_platform//:host"],
+)
diff --git a/atest/bazel/resources/device_def/BUILD.bazel b/atest/bazel/resources/device_def/BUILD.bazel
new file mode 100644
index 0000000..3926fc5
--- /dev/null
+++ b/atest/bazel/resources/device_def/BUILD.bazel
@@ -0,0 +1,21 @@
+load("//bazel/rules:soong_prebuilt.bzl", "soong_prebuilt")
+load("//bazel/rules/device:cuttlefish_device.bzl", "cuttlefish_device")
+load("@device_infra//remote_device:download_cvd_artifact.bzl", "build_id", "download_cvd_artifact")
+
+package(default_visibility = ["//visibility:public"])
+
+build_id(
+ name = "cvd_build_id",
+ build_setting_default = "",
+)
+
+download_cvd_artifact(
+ name = "cvd_artifacts",
+ build_id = ":cvd_build_id",
+)
+
+cuttlefish_device(
+ name = "cf_x86_64_phone",
+ out = "android_cuttlefish.sh",
+ build_files = ":cvd_artifacts",
+)
diff --git a/atest/bazel/format_as_soong_module_name.cquery b/atest/bazel/resources/format_as_soong_module_name.cquery
similarity index 75%
rename from atest/bazel/format_as_soong_module_name.cquery
rename to atest/bazel/resources/format_as_soong_module_name.cquery
index 401d31c..7d784b5 100644
--- a/atest/bazel/format_as_soong_module_name.cquery
+++ b/atest/bazel/resources/format_as_soong_module_name.cquery
@@ -6,5 +6,5 @@
soong_prebuilt_info = p.get(
"//bazel/rules:soong_prebuilt.bzl%SoongPrebuiltInfo")
if soong_prebuilt_info:
- return soong_prebuilt_info.module_name
+ return "%s:%s" % (soong_prebuilt_info.module_name, soong_prebuilt_info.platform_flavor)
return ""
diff --git a/atest/bazel/resources/rules/BUILD.bazel b/atest/bazel/resources/rules/BUILD.bazel
new file mode 100644
index 0000000..00cbb2b
--- /dev/null
+++ b/atest/bazel/resources/rules/BUILD.bazel
@@ -0,0 +1,40 @@
+load("//bazel/rules:common_settings.bzl", "string_flag")
+load("//bazel/rules:common_settings.bzl", "string_list_flag")
+load("//bazel/rules/device:single_local_device.bzl", "local_device")
+
+package(default_visibility = ["//visibility:public"])
+
+string_flag(
+ name = "platform_flavor",
+ build_setting_default = "",
+)
+
+local_device(
+ name = "local_device",
+ out = "single_local_device.sh",
+)
+
+label_flag(
+ name = "target_device",
+ build_setting_default = ":local_device",
+)
+
+string_list_flag(
+ name = "extra_tradefed_result_reporters",
+ build_setting_default = [],
+)
+
+config_setting(
+ name = "device",
+ flag_values = {":platform_flavor": "device"},
+)
+
+config_setting(
+ name = "host",
+ flag_values = {":platform_flavor": "host"},
+)
+
+exports_files([
+ "tradefed_test.sh.template",
+ "device_test.sh.template",
+])
diff --git a/atest/bazel/rules/common_settings.bzl b/atest/bazel/resources/rules/common_settings.bzl
similarity index 100%
rename from atest/bazel/rules/common_settings.bzl
rename to atest/bazel/resources/rules/common_settings.bzl
diff --git a/atest/bazel/resources/rules/device/BUILD.bazel b/atest/bazel/resources/rules/device/BUILD.bazel
new file mode 100644
index 0000000..3c25c29
--- /dev/null
+++ b/atest/bazel/resources/rules/device/BUILD.bazel
@@ -0,0 +1,6 @@
+package(default_visibility = ["//visibility:public"])
+
+exports_files([
+ "create_cuttlefish.sh.template",
+ "single_local_device.sh",
+])
diff --git a/atest/bazel/resources/rules/device/create_cuttlefish.sh.template b/atest/bazel/resources/rules/device/create_cuttlefish.sh.template
new file mode 100644
index 0000000..c90b3ae
--- /dev/null
+++ b/atest/bazel/resources/rules/device/create_cuttlefish.sh.template
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+DEVICE_IMAGE_PATH="{img_path}"
+DEVICE_IMAGE_DIR=$(dirname "$DEVICE_IMAGE_PATH")
+CVD_HOST_PACKAGE_PATH="{cvd_host_package_path}"
+
+PATH_ADDITIONS="{path_additions}"
+TEST_EXECUTABLE="$1"
+shift
+
+LOCAL_TOOL="$(dirname "$CVD_HOST_PACKAGE_PATH")"
+
+user="$(whoami)"
+
+su - << EOF
+export PATH="${LOCAL_TOOL}:${PATH_ADDITIONS}:${PATH}"
+/usr/sbin/service rsyslog restart
+/etc/init.d/cuttlefish-common start
+/usr/sbin/usermod -aG kvm "${USER}"
+
+pushd "${LOCAL_TOOL}"
+tar xvf "${CVD_HOST_PACKAGE_PATH}"
+popd
+
+pushd "${DEVICE_IMAGE_DIR}"
+unzip -o "${DEVICE_IMAGE_PATH}"
+popd
+
+HOME="${LOCAL_TOOL}" "${LOCAL_TOOL}"/bin/launch_cvd \
+ -daemon \
+ -config=phone \
+ -system_image_dir "${DEVICE_IMAGE_DIR}" \
+ -undefok=report_anonymous_usage_stats,config \
+ -report_anonymous_usage_stats=y \
+ -instance_dir=/tmp/cvd \
+ -guest_enforce_security=false
+adb connect localhost:6520
+exit
+EOF
+
+"${TEST_EXECUTABLE}" "$@"
\ No newline at end of file
diff --git a/atest/bazel/resources/rules/device/cuttlefish_device.bzl b/atest/bazel/resources/rules/device/cuttlefish_device.bzl
new file mode 100644
index 0000000..2432e82
--- /dev/null
+++ b/atest/bazel/resources/rules/device/cuttlefish_device.bzl
@@ -0,0 +1,82 @@
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Rule used to generate a Cuttlefish device environment.
+
+This rule creates a device environment rule to run tests on a Cuttlefish Android
+Virtual Device. Test targets that run in this environment will start a new
+dedicated virtual device for each execution.
+
+Device properties such as the image used can be configured via an attribute.
+"""
+
+load("//bazel/rules:platform_transitions.bzl", "host_transition")
+load("//bazel/rules:device_test.bzl", "DeviceEnvironment")
+load("@device_infra//remote_device:download_cvd_artifact.bzl", "ImageProvider")
+load(
+ "//:constants.bzl",
+ "adb_label",
+)
+
+_BAZEL_WORK_DIR = "${TEST_SRCDIR}/${TEST_WORKSPACE}/"
+
+def _cuttlefish_device_impl(ctx):
+ path_additions = [_BAZEL_WORK_DIR + ctx.file._adb.dirname]
+ image_file = ctx.attr.build_files[ImageProvider].image
+ cvd_host_file = ctx.attr.build_files[ImageProvider].cvd_host_package
+ ctx.actions.expand_template(
+ template = ctx.file._create_script_template,
+ output = ctx.outputs.out,
+ is_executable = True,
+ substitutions = {
+ "{img_path}": _BAZEL_WORK_DIR + image_file.short_path,
+ "{cvd_host_package_path}": _BAZEL_WORK_DIR + cvd_host_file.short_path,
+ "{path_additions}": ":".join(path_additions),
+ },
+ )
+
+ return DeviceEnvironment(
+ runner = depset([ctx.outputs.out]),
+ data = ctx.runfiles(files = [
+ cvd_host_file,
+ ctx.outputs.out,
+ image_file,
+ ]),
+ )
+
+cuttlefish_device = rule(
+ attrs = {
+ "build_files": attr.label(
+ providers = [ImageProvider],
+ mandatory = True,
+ ),
+ "out": attr.output(mandatory = True),
+ "_create_script_template": attr.label(
+ default = "//bazel/rules/device:create_cuttlefish.sh.template",
+ allow_single_file = True,
+ ),
+ # This attribute is required to use Starlark transitions. It allows
+ # allowlisting usage of this rule. For more information, see
+ # https://docs.bazel.build/versions/master/skylark/config.html#user-defined-transitions
+ "_allowlist_function_transition": attr.label(
+ default = "@bazel_tools//tools/allowlists/function_transition_allowlist",
+ ),
+ "_adb": attr.label(
+ default = adb_label,
+ allow_single_file = True,
+ cfg = host_transition,
+ ),
+ },
+ implementation = _cuttlefish_device_impl,
+)
diff --git a/atest/bazel/resources/rules/device/single_local_device.bzl b/atest/bazel/resources/rules/device/single_local_device.bzl
new file mode 100644
index 0000000..552d1e6
--- /dev/null
+++ b/atest/bazel/resources/rules/device/single_local_device.bzl
@@ -0,0 +1,24 @@
+load("//bazel/rules:device_test.bzl", "DeviceEnvironment")
+
+def _local_device_impl(ctx):
+ ctx.actions.expand_template(
+ template = ctx.file._source_script,
+ output = ctx.outputs.out,
+ is_executable = True,
+ )
+
+ return DeviceEnvironment(
+ runner = depset([ctx.outputs.out]),
+ data = ctx.runfiles(files = [ctx.outputs.out]),
+ )
+
+local_device = rule(
+ attrs = {
+ "_source_script": attr.label(
+ default = "//bazel/rules/device:single_local_device.sh",
+ allow_single_file = True,
+ ),
+ "out": attr.output(mandatory = True),
+ },
+ implementation = _local_device_impl,
+)
diff --git a/atest/bazel/resources/rules/device/single_local_device.sh b/atest/bazel/resources/rules/device/single_local_device.sh
new file mode 100644
index 0000000..c0083c9
--- /dev/null
+++ b/atest/bazel/resources/rules/device/single_local_device.sh
@@ -0,0 +1,3 @@
+TEST_EXECUTABLE="$1"
+shift
+"${TEST_EXECUTABLE}" "$@"
\ No newline at end of file
diff --git a/atest/bazel/resources/rules/device_test.bzl b/atest/bazel/resources/rules/device_test.bzl
new file mode 100644
index 0000000..7ae559c
--- /dev/null
+++ b/atest/bazel/resources/rules/device_test.bzl
@@ -0,0 +1,74 @@
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Rules used to run device tests"""
+
+_TEST_SRCDIR = "${TEST_SRCDIR}"
+_BAZEL_WORK_DIR = "%s/${TEST_WORKSPACE}/" % _TEST_SRCDIR
+_PY_TOOLCHAIN = "@bazel_tools//tools/python:toolchain_type"
+_TOOLCHAINS = [_PY_TOOLCHAIN]
+
+DeviceEnvironment = provider(
+ "Represents the environment a test will run under. Concretely this is an " +
+ "executable and any runfiles required to trigger execution in the " +
+ "environment.",
+ fields = {
+ "runner": "depset of executable to to setup test environment and execute test.",
+ "data": "runfiles of all needed artifacts in the executable.",
+ },
+)
+
+def device_test_impl(ctx):
+ runner_script = _BAZEL_WORK_DIR + ctx.attr.run_with[DeviceEnvironment].runner.to_list()[0].short_path
+ test_script = _BAZEL_WORK_DIR + ctx.file.test.short_path
+ script = ctx.actions.declare_file("device_test_%s.sh" % ctx.label.name)
+ path_additions = []
+
+ ctx.actions.expand_template(
+ template = ctx.file._device_test_template,
+ output = script,
+ is_executable = True,
+ substitutions = {
+ "{runner}": runner_script,
+ "{test_script}": test_script,
+ },
+ )
+
+ test_runfiles = ctx.runfiles().merge(
+ ctx.attr.test[DefaultInfo].default_runfiles,
+ )
+ device_runfiles = ctx.runfiles().merge(
+ ctx.attr.run_with[DeviceEnvironment].data,
+ )
+ all_runfiles = test_runfiles.merge_all([device_runfiles])
+ return [DefaultInfo(
+ executable = script,
+ runfiles = all_runfiles,
+ )]
+
+device_test = rule(
+ attrs = {
+ "run_with": attr.label(default = "//bazel/rules:target_device"),
+ "test": attr.label(
+ allow_single_file = True,
+ ),
+ "_device_test_template": attr.label(
+ default = "//bazel/rules:device_test.sh.template",
+ allow_single_file = True,
+ ),
+ },
+ test = True,
+ implementation = device_test_impl,
+ doc = "Runs a test under a device environment",
+)
diff --git a/atest/bazel/resources/rules/device_test.sh.template b/atest/bazel/resources/rules/device_test.sh.template
new file mode 100644
index 0000000..c0e5480
--- /dev/null
+++ b/atest/bazel/resources/rules/device_test.sh.template
@@ -0,0 +1,14 @@
+#!/bin/bash
+set -e
+set -x
+
+RUNNER_EXECUTABLE="{runner}"
+TEST_EXECUTABLE="{test_script}"
+
+if [ -z "$RUNNER_EXECUTABLE" ]
+then
+ echo "No devices setup script"
+else
+ echo "There is devices setup script"
+ $RUNNER_EXECUTABLE $TEST_EXECUTABLE
+fi
\ No newline at end of file
diff --git a/atest/bazel/rules/platform_transitions.bzl b/atest/bazel/resources/rules/platform_transitions.bzl
similarity index 100%
rename from atest/bazel/rules/platform_transitions.bzl
rename to atest/bazel/resources/rules/platform_transitions.bzl
diff --git a/atest/bazel/resources/rules/soong_prebuilt.bzl b/atest/bazel/resources/rules/soong_prebuilt.bzl
new file mode 100644
index 0000000..d2cd475
--- /dev/null
+++ b/atest/bazel/resources/rules/soong_prebuilt.bzl
@@ -0,0 +1,226 @@
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Rule used to import artifacts prebuilt by Soong into the Bazel workspace.
+
+The rule returns a DefaultInfo provider with all artifacts and runtime dependencies,
+and a SoongPrebuiltInfo provider with the original Soong module name, artifacts,
+runtime dependencies and data dependencies.
+"""
+
+load("//bazel/rules:platform_transitions.bzl", "device_transition")
+load("//bazel/rules:common_settings.bzl", "BuildSettingInfo")
+
+SoongPrebuiltInfo = provider(
+ doc = "Info about a prebuilt Soong build module",
+ fields = {
+ "module_name": "Name of the original Soong build module",
+ # This field contains this target's outputs and all runtime dependency
+ # outputs.
+ "transitive_runtime_outputs": "Files required in the runtime environment",
+ "transitive_test_files": "Files of test modules",
+ "platform_flavor": "The platform flavor that this target will be built on",
+ },
+)
+
+def _soong_prebuilt_impl(ctx):
+ files = ctx.files.files
+
+ # Ensure that soong_prebuilt targets always have at least one file to avoid
+ # evaluation errors when running Bazel cquery on a clean tree to find
+ # dependencies.
+ #
+ # This happens because soong_prebuilt dependency target globs don't match
+ # any files when the workspace symlinks are broken and point to build
+ # artifacts that still don't exist. This in turn causes errors in rules
+ # that reference these targets via attributes with allow_single_file=True
+ # and which expect a file to be present.
+ #
+ # Note that the below action is never really executed during cquery
+ # evaluation but fails when run as part of a test execution to signal that
+ # prebuilts were not correctly imported.
+ if not files:
+ placeholder_file = ctx.actions.declare_file(ctx.label.name + ".missing")
+
+ progress_message = (
+ "Attempting to import missing artifacts for Soong module '%s'; " +
+ "please make sure that the module is built with Soong before " +
+ "running Bazel"
+ ) % ctx.attr.module_name
+
+ # Note that we don't write the file for the action to always be
+ # executed and display the warning message.
+ ctx.actions.run_shell(
+ outputs = [placeholder_file],
+ command = "/bin/false",
+ progress_message = progress_message,
+ )
+ files = [placeholder_file]
+
+ runfiles = ctx.runfiles(files = files).merge_all([
+ dep[DefaultInfo].default_runfiles
+ for dep in ctx.attr.runtime_deps + ctx.attr.data + ctx.attr.device_data
+ ])
+
+ # We exclude the outputs of static dependencies from the runfiles since
+ # they're already embedded in this target's output. Note that this is done
+ # recursively such that only transitive runtime dependency outputs are
+ # included. For example, in a chain A -> B -> C -> D where B and C are
+ # statically linked, only A's and D's outputs would remain in the runfiles.
+ runfiles = runfiles.merge_all([
+ ctx.runfiles(
+ files = _exclude_files(
+ dep[DefaultInfo].default_runfiles.files,
+ dep[DefaultInfo].files,
+ ).to_list(),
+ )
+ for dep in ctx.attr.static_deps
+ ])
+
+ return [
+ _make_soong_prebuilt_info(
+ ctx.attr.module_name,
+ ctx.attr._platform_flavor[BuildSettingInfo].value,
+ files = files,
+ runtime_deps = ctx.attr.runtime_deps,
+ static_deps = ctx.attr.static_deps,
+ data = ctx.attr.data,
+ device_data = ctx.attr.device_data,
+ suites = ctx.attr.suites,
+ ),
+ DefaultInfo(
+ files = depset(files),
+ runfiles = runfiles,
+ ),
+ ]
+
+soong_prebuilt = rule(
+ attrs = {
+ "module_name": attr.string(),
+ # Artifacts prebuilt by Soong.
+ "files": attr.label_list(allow_files = True),
+ # Targets that are needed by this target during runtime.
+ "runtime_deps": attr.label_list(),
+ # Note that while the outputs of static deps are not required for test
+ # execution we include them since they have their own runtime
+ # dependencies.
+ "static_deps": attr.label_list(),
+ "data": attr.label_list(),
+ "device_data": attr.label_list(
+ cfg = device_transition,
+ ),
+ "suites": attr.string_list(),
+ "_platform_flavor": attr.label(default = "//bazel/rules:platform_flavor"),
+ # This attribute is required to use Starlark transitions. It allows
+ # allowlisting usage of this rule. For more information, see
+ # https://docs.bazel.build/versions/master/skylark/config.html#user-defined-transitions
+ "_allowlist_function_transition": attr.label(
+ default = "@bazel_tools//tools/allowlists/function_transition_allowlist",
+ ),
+ },
+ implementation = _soong_prebuilt_impl,
+ doc = "A rule that imports artifacts prebuilt by Soong into the Bazel workspace",
+)
+
+def _soong_uninstalled_prebuilt_impl(ctx):
+ runfiles = ctx.runfiles().merge_all([
+ dep[DefaultInfo].default_runfiles
+ for dep in ctx.attr.runtime_deps
+ ])
+
+ return [
+ _make_soong_prebuilt_info(
+ ctx.attr.module_name,
+ ctx.attr._platform_flavor[BuildSettingInfo].value,
+ runtime_deps = ctx.attr.runtime_deps,
+ ),
+ DefaultInfo(
+ runfiles = runfiles,
+ ),
+ ]
+
+soong_uninstalled_prebuilt = rule(
+ attrs = {
+ "module_name": attr.string(),
+ "runtime_deps": attr.label_list(),
+ "_platform_flavor": attr.label(default = "//bazel/rules:platform_flavor"),
+ },
+ implementation = _soong_uninstalled_prebuilt_impl,
+ doc = "A rule for targets with no runtime outputs",
+)
+
+def _make_soong_prebuilt_info(
+ module_name,
+ platform_flavor,
+ files = [],
+ runtime_deps = [],
+ static_deps = [],
+ data = [],
+ device_data = [],
+ suites = []):
+ """Build a SoongPrebuiltInfo based on the given information.
+
+ Args:
+ runtime_deps: List of runtime dependencies required by this target.
+ static_deps: List of static dependencies required by this target.
+ data: List of data required by this target.
+ device_data: List of data on device variant required by this target.
+ suites: List of test suites this target belongs to.
+
+ Returns:
+ An instance of SoongPrebuiltInfo.
+ """
+ transitive_runtime_outputs = [
+ dep[SoongPrebuiltInfo].transitive_runtime_outputs
+ for dep in runtime_deps
+ ]
+
+ # We exclude the outputs of static dependencies and data dependencies from
+ # the transitive runtime outputs since static dependencies are already
+ # embedded in this target's output and the data dependencies shouldn't be
+ # present in the runtime paths. Note that this is done recursively such that
+ # only transitive runtime dependency outputs are included. For example, in a
+ # chain A -> B -> C -> D where B and C are statically linked or data
+ # dependencies, only A's and D's outputs would remain in the transitive
+ # runtime outputs.
+ transitive_runtime_outputs.extend([
+ _exclude_files(
+ dep[SoongPrebuiltInfo].transitive_runtime_outputs,
+ dep[DefaultInfo].files,
+ )
+ for dep in static_deps + data
+ ])
+ return SoongPrebuiltInfo(
+ module_name = module_name,
+ platform_flavor = platform_flavor,
+ transitive_runtime_outputs = depset(files, transitive = transitive_runtime_outputs),
+ transitive_test_files = depset(
+        # Note that `suites` is never empty for test files. This is because
+ # test build modules that do not explicitly specify a `test_suites`
+ # Soong attribute belong to `null-suite`.
+ files if suites else [],
+ transitive = [
+ dep[SoongPrebuiltInfo].transitive_test_files
+ for dep in data + device_data + runtime_deps
+ ],
+ ),
+ )
+
+def _exclude_files(all_files, files_to_exclude):
+ files = []
+ files_to_exclude = {f: None for f in files_to_exclude.to_list()}
+ for f in all_files.to_list():
+ if f not in files_to_exclude:
+ files.append(f)
+ return depset(files)
diff --git a/atest/bazel/resources/rules/tradefed_test.bzl b/atest/bazel/resources/rules/tradefed_test.bzl
new file mode 100644
index 0000000..f38d4b3
--- /dev/null
+++ b/atest/bazel/resources/rules/tradefed_test.bzl
@@ -0,0 +1,480 @@
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Rules used to run tests using Tradefed."""
+
+load("//bazel/rules:platform_transitions.bzl", "device_transition", "host_transition")
+load("//bazel/rules:tradefed_test_aspects.bzl", "soong_prebuilt_tradefed_test_aspect")
+load("//bazel/rules:tradefed_test_dependency_info.bzl", "TradefedTestDependencyInfo")
+load("//bazel/rules:common_settings.bzl", "BuildSettingInfo")
+load(
+ "//:constants.bzl",
+ "aapt2_label",
+ "aapt_label",
+ "adb_label",
+ "atest_script_help_sh_label",
+ "atest_tradefed_label",
+ "atest_tradefed_sh_label",
+ "bazel_result_reporter_label",
+ "compatibility_tradefed_label",
+ "tradefed_label",
+ "tradefed_test_framework_label",
+ "vts_core_tradefed_harness_label",
+)
+load("//bazel/rules:device_test.bzl", "device_test")
+
+TradefedTestInfo = provider(
+ doc = "Info about a Tradefed test module",
+ fields = {
+ "module_name": "Name of the original Tradefed test module",
+ },
+)
+
+_BAZEL_WORK_DIR = "${TEST_SRCDIR}/${TEST_WORKSPACE}/"
+_PY_TOOLCHAIN = "@bazel_tools//tools/python:toolchain_type"
+_JAVA_TOOLCHAIN = "@bazel_tools//tools/jdk:runtime_toolchain_type"
+_TOOLCHAINS = [_PY_TOOLCHAIN, _JAVA_TOOLCHAIN]
+
+_TRADEFED_TEST_ATTRIBUTES = {
+ "module_name": attr.string(),
+ "_tradefed_test_template": attr.label(
+ default = "//bazel/rules:tradefed_test.sh.template",
+ allow_single_file = True,
+ ),
+ "_tradefed_classpath_jars": attr.label_list(
+ default = [
+ atest_tradefed_label,
+ tradefed_label,
+ tradefed_test_framework_label,
+ bazel_result_reporter_label,
+ ],
+ cfg = host_transition,
+ aspects = [soong_prebuilt_tradefed_test_aspect],
+ ),
+ "_atest_tradefed_launcher": attr.label(
+ default = atest_tradefed_sh_label,
+ allow_single_file = True,
+ cfg = host_transition,
+ aspects = [soong_prebuilt_tradefed_test_aspect],
+ ),
+ "_atest_helper": attr.label(
+ default = atest_script_help_sh_label,
+ allow_single_file = True,
+ cfg = host_transition,
+ aspects = [soong_prebuilt_tradefed_test_aspect],
+ ),
+ "_adb": attr.label(
+ default = adb_label,
+ allow_single_file = True,
+ cfg = host_transition,
+ aspects = [soong_prebuilt_tradefed_test_aspect],
+ ),
+ "_extra_tradefed_result_reporters": attr.label(
+ default = "//bazel/rules:extra_tradefed_result_reporters",
+ ),
+ # This attribute is required to use Starlark transitions. It allows
+ # allowlisting usage of this rule. For more information, see
+ # https://docs.bazel.build/versions/master/skylark/config.html#user-defined-transitions
+ "_allowlist_function_transition": attr.label(
+ default = "@bazel_tools//tools/allowlists/function_transition_allowlist",
+ ),
+}
+
+def _add_dicts(*dictionaries):
+ """Creates a new `dict` that has all the entries of the given dictionaries.
+
+ This function serves as a replacement for the `+` operator which does not
+ work with dictionaries. The implementation is inspired by Skylib's
+ `dict.add` and duplicated to avoid the dependency. See
+ https://github.com/bazelbuild/bazel/issues/6461 for more details.
+
+ Note, if the same key is present in more than one of the input dictionaries,
+ the last of them in the argument list overrides any earlier ones.
+
+ Args:
+ *dictionaries: Dictionaries to be added.
+
+ Returns:
+ A new `dict` that has all the entries of the given dictionaries.
+ """
+ result = {}
+ for d in dictionaries:
+ result.update(d)
+ return result
+
+def _tradefed_deviceless_test_impl(ctx):
+ return _tradefed_test_impl(
+ ctx,
+ tradefed_options = [
+ "-n",
+ "--prioritize-host-config",
+ "--skip-host-arch-check",
+ ],
+ test_host_deps = ctx.attr.test,
+ )
+
+tradefed_deviceless_test = rule(
+ attrs = _add_dicts(
+ _TRADEFED_TEST_ATTRIBUTES,
+ {
+ "test": attr.label(
+ mandatory = True,
+ cfg = host_transition,
+ aspects = [soong_prebuilt_tradefed_test_aspect],
+ ),
+ },
+ ),
+ test = True,
+ implementation = _tradefed_deviceless_test_impl,
+ toolchains = _TOOLCHAINS,
+ doc = "A rule used to run host-side deviceless tests using Tradefed",
+)
+
+def _tradefed_robolectric_test_impl(ctx):
+ def add_android_all_files(ctx, tradefed_test_dir):
+ android_all_files = []
+ for target in ctx.attr._android_all:
+ for f in target.files.to_list():
+ # Tradefed expects a flat `android-all` directory structure for
+ # Robolectric tests.
+ symlink = _symlink(ctx, f, "%s/android-all/%s" % (tradefed_test_dir, f.basename))
+ android_all_files.append(symlink)
+ return android_all_files
+
+ return _tradefed_test_impl(
+ ctx,
+ data = [ctx.attr.jdk],
+ tradefed_options = [
+ "-n",
+ "--prioritize-host-config",
+ "--skip-host-arch-check",
+ "--test-arg",
+ "com.android.tradefed.testtype.IsolatedHostTest:java-folder:%s" % ctx.attr.jdk.label.package,
+ ],
+ test_host_deps = ctx.attr.test,
+ add_extra_tradefed_test_files = add_android_all_files,
+ )
+
+tradefed_robolectric_test = rule(
+ attrs = _add_dicts(
+ _TRADEFED_TEST_ATTRIBUTES,
+ {
+ "test": attr.label(
+ mandatory = True,
+ cfg = host_transition,
+ aspects = [soong_prebuilt_tradefed_test_aspect],
+ ),
+ "jdk": attr.label(
+ mandatory = True,
+ ),
+ "_android_all": attr.label_list(
+ default = ["//android-all:android-all"],
+ ),
+ },
+ ),
+ test = True,
+ implementation = _tradefed_robolectric_test_impl,
+ toolchains = _TOOLCHAINS,
+ doc = "A rule used to run Robolectric tests using Tradefed",
+)
+
+def _tradefed_device_test_impl(ctx):
+ tradefed_deps = []
+ tradefed_deps.extend(ctx.attr._aapt)
+ tradefed_deps.extend(ctx.attr._aapt2)
+ tradefed_deps.extend(ctx.attr.tradefed_deps)
+
+ test_device_deps = []
+ test_host_deps = []
+
+ if ctx.attr.host_test:
+ test_host_deps.extend(ctx.attr.host_test)
+ if ctx.attr.device_test:
+ test_device_deps.extend(ctx.attr.device_test)
+
+ return _tradefed_test_impl(
+ ctx,
+ tradefed_deps = tradefed_deps,
+ test_device_deps = test_device_deps,
+ test_host_deps = test_host_deps,
+ path_additions = [
+ _BAZEL_WORK_DIR + ctx.file._aapt.dirname,
+ _BAZEL_WORK_DIR + ctx.file._aapt2.dirname,
+ ],
+ )
+
+_tradefed_device_test = rule(
+ attrs = _add_dicts(
+ _TRADEFED_TEST_ATTRIBUTES,
+ {
+ "device_test": attr.label(
+ cfg = device_transition,
+ aspects = [soong_prebuilt_tradefed_test_aspect],
+ ),
+ "host_test": attr.label(
+ cfg = host_transition,
+ aspects = [soong_prebuilt_tradefed_test_aspect],
+ ),
+ "tradefed_deps": attr.label_list(
+ cfg = host_transition,
+ aspects = [soong_prebuilt_tradefed_test_aspect],
+ ),
+ "_aapt": attr.label(
+ default = aapt_label,
+ allow_single_file = True,
+ cfg = host_transition,
+ aspects = [soong_prebuilt_tradefed_test_aspect],
+ ),
+ "_aapt2": attr.label(
+ default = aapt2_label,
+ allow_single_file = True,
+ cfg = host_transition,
+ aspects = [soong_prebuilt_tradefed_test_aspect],
+ ),
+ },
+ ),
+ test = True,
+ implementation = _tradefed_device_test_impl,
+ toolchains = _TOOLCHAINS,
+ doc = "A rule used to run device tests using Tradefed",
+)
+
+def tradefed_device_driven_test(
+ name,
+ test,
+ tradefed_deps = [],
+ suites = [],
+ **attrs):
+ tradefed_test_name = "tradefed_test_%s" % name
+ _tradefed_device_test(
+ name = tradefed_test_name,
+ device_test = test,
+ tradefed_deps = _get_tradefed_deps(suites, tradefed_deps),
+ **attrs
+ )
+ device_test(
+ name = name,
+ test = tradefed_test_name,
+ )
+
+def tradefed_host_driven_device_test(test, tradefed_deps = [], suites = [], **attrs):
+ _tradefed_device_test(
+ host_test = test,
+ tradefed_deps = _get_tradefed_deps(suites, tradefed_deps),
+ **attrs
+ )
+
+def _tradefed_test_impl(
+ ctx,
+ tradefed_options = [],
+ tradefed_deps = [],
+ test_host_deps = [],
+ test_device_deps = [],
+ path_additions = [],
+ add_extra_tradefed_test_files = lambda ctx, tradefed_test_dir: [],
+ data = []):
+ path_additions = path_additions + [_BAZEL_WORK_DIR + ctx.file._adb.dirname]
+
+ # Files required to run the host-side test.
+ test_host_runfiles = _collect_runfiles(ctx, test_host_deps)
+ test_host_runtime_jars = _collect_runtime_jars(test_host_deps)
+ test_host_runtime_shared_libs = _collect_runtime_shared_libs(test_host_deps)
+
+ # Files required to run the device-side test.
+ test_device_runfiles = _collect_runfiles(ctx, test_device_deps)
+
+ # Files required to run Tradefed.
+ all_tradefed_deps = []
+ all_tradefed_deps.extend(ctx.attr._tradefed_classpath_jars)
+ all_tradefed_deps.extend(ctx.attr._atest_tradefed_launcher)
+ all_tradefed_deps.extend(ctx.attr._atest_helper)
+ all_tradefed_deps.extend(ctx.attr._adb)
+ all_tradefed_deps.extend(tradefed_deps)
+
+ tradefed_runfiles = _collect_runfiles(ctx, all_tradefed_deps)
+ tradefed_runtime_jars = _collect_runtime_jars(all_tradefed_deps)
+ tradefed_runtime_shared_libs = _collect_runtime_shared_libs(all_tradefed_deps)
+
+ result_reporters_config_file = _generate_reporter_config(ctx)
+ tradefed_runfiles = tradefed_runfiles.merge(
+ ctx.runfiles(files = [result_reporters_config_file]),
+ )
+
+ py_paths, py_runfiles = _configure_python_toolchain(ctx)
+ java_paths, java_runfiles, java_home = _configure_java_toolchain(ctx)
+ path_additions = path_additions + java_paths + py_paths
+ tradefed_runfiles = tradefed_runfiles.merge_all([py_runfiles, java_runfiles])
+
+ tradefed_test_dir = "%s_tradefed_test_dir" % ctx.label.name
+ tradefed_test_files = []
+
+ for dep in tradefed_deps + test_host_deps + test_device_deps:
+ for f in dep[TradefedTestDependencyInfo].transitive_test_files.to_list():
+ symlink = _symlink(ctx, f, "%s/%s" % (tradefed_test_dir, f.short_path))
+ tradefed_test_files.append(symlink)
+
+ tradefed_test_files.extend(add_extra_tradefed_test_files(ctx, tradefed_test_dir))
+
+ script = ctx.actions.declare_file("tradefed_test_%s.sh" % ctx.label.name)
+ ctx.actions.expand_template(
+ template = ctx.file._tradefed_test_template,
+ output = script,
+ is_executable = True,
+ substitutions = {
+ "{module_name}": ctx.attr.module_name,
+ "{atest_tradefed_launcher}": _abspath(ctx.file._atest_tradefed_launcher),
+ "{atest_helper}": _abspath(ctx.file._atest_helper),
+ "{tradefed_test_dir}": _BAZEL_WORK_DIR + "%s/%s" % (
+ ctx.label.package,
+ tradefed_test_dir,
+ ),
+ "{tradefed_classpath}": _classpath([tradefed_runtime_jars, test_host_runtime_jars]),
+ "{shared_lib_dirs}": _ld_library_path([tradefed_runtime_shared_libs, test_host_runtime_shared_libs]),
+ "{path_additions}": ":".join(path_additions),
+ "{additional_tradefed_options}": " ".join(tradefed_options),
+ "{result_reporters_config_file}": _abspath(result_reporters_config_file),
+ "{java_home}": java_home,
+ },
+ )
+
+ return [
+ DefaultInfo(
+ executable = script,
+ runfiles = tradefed_runfiles.merge_all([
+ test_host_runfiles,
+ test_device_runfiles,
+ ctx.runfiles(tradefed_test_files),
+ ] + [ctx.runfiles(d.files.to_list()) for d in data]),
+ ),
+ TradefedTestInfo(
+ module_name = ctx.attr.module_name,
+ ),
+ ]
+
+def _get_tradefed_deps(suites, tradefed_deps = []):
+ suite_to_deps = {
+ "host-unit-tests": [],
+ "null-suite": [],
+ "device-tests": [],
+ "general-tests": [],
+ "vts": [vts_core_tradefed_harness_label],
+ }
+ all_tradefed_deps = {d: None for d in tradefed_deps}
+
+ for s in suites:
+ all_tradefed_deps.update({
+ d: None
+ for d in suite_to_deps.get(s, [compatibility_tradefed_label])
+ })
+
+ # Since `vts-core-tradefed-harness` includes `compatibility-tradefed`, we
+ # will exclude `compatibility-tradefed` if `vts-core-tradefed-harness` exists.
+ if vts_core_tradefed_harness_label in all_tradefed_deps:
+ all_tradefed_deps.pop(compatibility_tradefed_label, default = None)
+
+ return all_tradefed_deps.keys()
+
+def _generate_reporter_config(ctx):
+ result_reporters = [
+ "com.android.tradefed.result.BazelExitCodeResultReporter",
+ "com.android.tradefed.result.BazelXmlResultReporter",
+ ]
+
+ result_reporters.extend(ctx.attr._extra_tradefed_result_reporters[BuildSettingInfo].value)
+
+ result_reporters_config_file = ctx.actions.declare_file("result-reporters-%s.xml" % ctx.label.name)
+ _write_reporters_config_file(
+ ctx,
+ result_reporters_config_file,
+ result_reporters,
+ )
+
+ return result_reporters_config_file
+
+def _write_reporters_config_file(ctx, config_file, result_reporters):
+ config_lines = [
+ "<?xml version=\"1.0\" encoding=\"utf-8\"?>",
+ "<configuration>",
+ ]
+
+ for result_reporter in result_reporters:
+ config_lines.append(" <result_reporter class=\"%s\" />" % result_reporter)
+
+ config_lines.append("</configuration>")
+
+ ctx.actions.write(config_file, "\n".join(config_lines))
+
+def _configure_java_toolchain(ctx):
+ java_runtime = ctx.toolchains[_JAVA_TOOLCHAIN].java_runtime
+ java_home_path = _BAZEL_WORK_DIR + java_runtime.java_home
+ java_runfiles = ctx.runfiles(transitive_files = java_runtime.files)
+ return ([java_home_path + "/bin"], java_runfiles, java_home_path)
+
+def _configure_python_toolchain(ctx):
+ py_toolchain_info = ctx.toolchains[_PY_TOOLCHAIN]
+ py2_interpreter = py_toolchain_info.py2_runtime.interpreter
+ py3_interpreter = py_toolchain_info.py3_runtime.interpreter
+
+ # Create `python` and `python3` symlinks in the runfiles tree and add them
+ # to the executable path. This is required because scripts reference these
+ # commands in their shebang line.
+ py_runfiles = ctx.runfiles(symlinks = {
+ "/".join([py2_interpreter.dirname, "python"]): py2_interpreter,
+ "/".join([py3_interpreter.dirname, "python3"]): py3_interpreter,
+ })
+ py_paths = [
+ _BAZEL_WORK_DIR + py2_interpreter.dirname,
+ _BAZEL_WORK_DIR + py3_interpreter.dirname,
+ ]
+ return (py_paths, py_runfiles)
+
+def _symlink(ctx, target_file, output_path):
+ symlink = ctx.actions.declare_file(output_path)
+ ctx.actions.symlink(output = symlink, target_file = target_file)
+ return symlink
+
+def _collect_runfiles(ctx, targets):
+ return ctx.runfiles().merge_all([
+ target[DefaultInfo].default_runfiles
+ for target in targets
+ ])
+
+def _collect_runtime_jars(deps):
+ return depset(
+ transitive = [
+ d[TradefedTestDependencyInfo].runtime_jars
+ for d in deps
+ ],
+ )
+
+def _collect_runtime_shared_libs(deps):
+ return depset(
+ transitive = [
+ d[TradefedTestDependencyInfo].runtime_shared_libraries
+ for d in deps
+ ],
+ )
+
+def _classpath(deps):
+ runtime_jars = depset(transitive = deps)
+ return ":".join([_abspath(f) for f in runtime_jars.to_list()])
+
+def _ld_library_path(deps):
+ runtime_shared_libs = depset(transitive = deps)
+ return ":".join(
+ [_BAZEL_WORK_DIR + f.dirname for f in runtime_shared_libs.to_list()],
+ )
+
+def _abspath(file):
+ return _BAZEL_WORK_DIR + file.short_path
diff --git a/atest/bazel/rules/tradefed_test.sh.template b/atest/bazel/resources/rules/tradefed_test.sh.template
similarity index 85%
rename from atest/bazel/rules/tradefed_test.sh.template
rename to atest/bazel/resources/rules/tradefed_test.sh.template
index 87932f5..8b15c93 100644
--- a/atest/bazel/rules/tradefed_test.sh.template
+++ b/atest/bazel/resources/rules/tradefed_test.sh.template
@@ -3,13 +3,14 @@
set -x
TEST_MODULE="{module_name}"
-TEST_PATH="{tradefed_tests_dir}"
+TEST_PATH="{tradefed_test_dir}"
ATEST_TF_LAUNCHER="{atest_tradefed_launcher}"
ATEST_HELPER="{atest_helper}"
SHARED_LIB_DIRS="{shared_lib_dirs}"
PATH_ADDITIONS="{path_additions}"
TRADEFED_CLASSPATH="{tradefed_classpath}"
RESULT_REPORTERS_CONFIG_FILE="{result_reporters_config_file}"
+ATEST_JAVA_HOME="{java_home}"
read -a ADDITIONAL_TRADEFED_OPTIONS <<< "{additional_tradefed_options}"
# Export variables expected by the Atest launcher script.
@@ -17,12 +18,7 @@
export TF_PATH="${TRADEFED_CLASSPATH}"
export PATH="${PATH_ADDITIONS}:${PATH}"
export ATEST_HELPER="${ATEST_HELPER}"
-
-# Prepend the TF_JAVA_HOME environment variable to the path to ensure that all Java invocations
-# throughout the test execution flow use the same version.
-if [ ! -z "${TF_JAVA_HOME}" ]; then
- export PATH="${TF_JAVA_HOME}/bin:${PATH}"
-fi
+export JAVA_HOME="${ATEST_JAVA_HOME}"
exit_code_file="$(mktemp /tmp/tf-exec-XXXXXXXXXX)"
@@ -40,6 +36,10 @@
"${ADDITIONAL_TRADEFED_OPTIONS[@]}" \
--bazel-exit-code-result-reporter:file=${exit_code_file} \
--bazel-xml-result-reporter:file=${XML_OUTPUT_FILE} \
+ --proto-output-file="${TEST_UNDECLARED_OUTPUTS_DIR}/proto-results" \
+ --use-delimited-api=true \
+ --log-file-path="${TEST_UNDECLARED_OUTPUTS_DIR}" \
+ --compress-files=false \
"$@"
# Use the TF exit code if it terminates abnormally.
diff --git a/atest/bazel/resources/rules/tradefed_test_aspects.bzl b/atest/bazel/resources/rules/tradefed_test_aspects.bzl
new file mode 100644
index 0000000..58affca
--- /dev/null
+++ b/atest/bazel/resources/rules/tradefed_test_aspects.bzl
@@ -0,0 +1,48 @@
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Aspects used to transform certain providers into a TradefedTestDependencyInfo.
+
+Tradefed tests require a TradefedTestDependencyInfo provider that is not
+usually returned by most rules. Instead of creating custom rules to adapt
+build rule providers, we use Bazel aspects to convert the input rule's
+provider into a suitable type.
+
+See https://docs.bazel.build/versions/main/skylark/aspects.html#aspects
+for more information on how aspects work.
+"""
+
+load("//bazel/rules:soong_prebuilt.bzl", "SoongPrebuiltInfo")
+load("//bazel/rules:tradefed_test_dependency_info.bzl", "TradefedTestDependencyInfo")
+
+def _soong_prebuilt_tradefed_aspect_impl(target, ctx):
+ runtime_jars = []
+ runtime_shared_libraries = []
+ for f in target[SoongPrebuiltInfo].transitive_runtime_outputs.to_list():
+ if f.extension == "so":
+ runtime_shared_libraries.append(f)
+ elif f.extension == "jar":
+ runtime_jars.append(f)
+
+ return [
+ TradefedTestDependencyInfo(
+ runtime_jars = depset(runtime_jars),
+ runtime_shared_libraries = depset(runtime_shared_libraries),
+ transitive_test_files = target[SoongPrebuiltInfo].transitive_test_files,
+ ),
+ ]
+
+soong_prebuilt_tradefed_test_aspect = aspect(
+ implementation = _soong_prebuilt_tradefed_aspect_impl,
+)
diff --git a/atest/bazel/resources/rules/tradefed_test_dependency_info.bzl b/atest/bazel/resources/rules/tradefed_test_dependency_info.bzl
new file mode 100644
index 0000000..2f4689c
--- /dev/null
+++ b/atest/bazel/resources/rules/tradefed_test_dependency_info.bzl
@@ -0,0 +1,33 @@
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Provides dependency information required by Tradefed test rules.
+
+This provider encapsulates information about dependencies that is required for
+setting up the execution environment. Aspects are responsible for converting the
+actual dependency's provider to an instance of this structure. For example, a
+dependency with a `JavaInfo` provider defines several fields for the jars
+required at runtime which is different from what `SoongPrebuiltInfo` exports.
+This essentially shields the test rule's implementation from the different
+provider types.
+"""
+
+TradefedTestDependencyInfo = provider(
+ doc = "Info required by Tradefed rules to run tests",
+ fields = {
+ "runtime_jars": "Jars required on the runtime classpath",
+ "runtime_shared_libraries": "Shared libraries that are required at runtime",
+ "transitive_test_files": "Files of test modules",
+ },
+)
diff --git a/atest/bazel/rules/BUILD.bazel b/atest/bazel/rules/BUILD.bazel
deleted file mode 100644
index d44fd52..0000000
--- a/atest/bazel/rules/BUILD.bazel
+++ /dev/null
@@ -1,19 +0,0 @@
-load("//bazel/rules:common_settings.bzl", "string_flag")
-load("//bazel/rules:common_settings.bzl", "string_list_flag")
-
-package(default_visibility = ["//visibility:public"])
-
-string_flag(name = "platform_flavor", build_setting_default = "")
-string_list_flag(name = "extra_tradefed_result_reporters", build_setting_default = [])
-
-config_setting(
- name = "device",
- flag_values = {":platform_flavor": "device"},
-)
-
-config_setting(
- name = "host",
- flag_values = {":platform_flavor": "host"},
-)
-
-exports_files(["tradefed_test.sh.template"])
diff --git a/atest/bazel/rules/soong_prebuilt.bzl b/atest/bazel/rules/soong_prebuilt.bzl
deleted file mode 100644
index 50c9f19..0000000
--- a/atest/bazel/rules/soong_prebuilt.bzl
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright (C) 2021 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Rule used to import artifacts prebuilt by Soong into the Bazel workspace.
-
-The rule returns a DefaultInfo provider with all artifacts and runtime dependencies,
-and a SoongPrebuiltInfo provider with the original Soong module name and artifacts.
-"""
-
-SoongPrebuiltInfo = provider(
- doc = "Info about a prebuilt Soong build module",
- fields = {
- "files": "Files imported from Soong outputs",
- "module_name": "Name of the original Soong build module",
- },
-)
-
-def _soong_prebuilt_impl(ctx):
-
- files = ctx.files.files
-
- # Ensure that soong_prebuilt targets always have at least one file to avoid
- # evaluation errors when running Bazel cquery on a clean tree to find
- # dependencies.
- #
- # This happens because soong_prebuilt dependency target globs don't match
- # any files when the workspace symlinks are broken and point to build
- # artifacts that still don't exist. This in turn causes errors in rules
- # that reference these targets via attributes with allow_single_file=True
- # and which expect a file to be present.
- #
- # Note that the below action is never really executed during cquery
- # evaluation but fails when run as part of a test execution to signal that
- # prebuilts were not correctly imported.
- if not files:
- placeholder_file = ctx.actions.declare_file(ctx.label.name + ".missing")
-
- progress_message = (
- "Attempting to import missing artifacts for Soong module '%s'; " +
- "please make sure that the module is built with Soong before " +
- "running Bazel"
- ) % ctx.attr.module_name
-
- # Note that we don't write the file for the action to always be
- # executed and display the warning message.
- ctx.actions.run_shell(
- outputs=[placeholder_file],
- command="/bin/false",
- progress_message=progress_message
- )
- files = [placeholder_file]
-
- deps = []
- deps.extend(ctx.attr.runtime_deps)
- deps.extend(ctx.attr.data)
- runfiles = ctx.runfiles(files = files).merge_all([
- dep[DefaultInfo].default_runfiles
- for dep in deps
- ])
-
- return [
- SoongPrebuiltInfo(
- files = depset(files),
- module_name = ctx.attr.module_name,
- ),
- DefaultInfo(
- files = depset(files),
- runfiles = runfiles,
- ),
- ]
-
-soong_prebuilt = rule(
- attrs = {
- "module_name": attr.string(),
- # Artifacts prebuilt by Soong.
- "files": attr.label_list(allow_files = True),
- # Targets that are needed by this target during runtime.
- "runtime_deps": attr.label_list(),
- "data": attr.label_list(),
- },
- implementation = _soong_prebuilt_impl,
- doc = "A rule that imports artifacts prebuilt by Soong into the Bazel workspace",
-)
-
-def _soong_uninstalled_prebuilt_impl(ctx):
-
- runfiles = ctx.runfiles().merge_all([
- dep[DefaultInfo].default_runfiles
- for dep in ctx.attr.runtime_deps
- ])
-
- return [
- SoongPrebuiltInfo(
- module_name = ctx.attr.module_name,
- ),
- DefaultInfo(
- runfiles = runfiles,
- ),
- ]
-
-soong_uninstalled_prebuilt = rule(
- attrs = {
- "module_name": attr.string(),
- "runtime_deps": attr.label_list(),
- },
- implementation = _soong_uninstalled_prebuilt_impl,
- doc = "A rule for targets with no runtime outputs",
-)
diff --git a/atest/bazel/rules/tradefed_test.bzl b/atest/bazel/rules/tradefed_test.bzl
deleted file mode 100644
index 2277bc3..0000000
--- a/atest/bazel/rules/tradefed_test.bzl
+++ /dev/null
@@ -1,256 +0,0 @@
-# Copyright (C) 2021 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Rules used to run tests using Tradefed."""
-
-load("//bazel/rules:platform_transitions.bzl", "host_transition", "device_transition")
-load("//bazel/rules:tradefed_test_aspects.bzl", "soong_prebuilt_tradefed_test_aspect")
-load("//bazel/rules:tradefed_test_info.bzl", "TradefedTestInfo")
-load("//bazel/rules:common_settings.bzl", "BuildSettingInfo")
-load("//:constants.bzl",
- "aapt_label",
- "adb_label",
- "atest_script_help_sh_label",
- "atest_tradefed_label",
- "atest_tradefed_sh_label",
- "bazel_result_reporter_label",
- "tradefed_label",
- "tradefed_test_framework_label"
-)
-
-_BAZEL_WORK_DIR = "${TEST_SRCDIR}/${TEST_WORKSPACE}/"
-_PY_TOOLCHAIN = "@bazel_tools//tools/python:toolchain_type"
-_TOOLCHAINS = [_PY_TOOLCHAIN]
-
-_TRADEFED_TEST_ATTRIBUTES = {
- "_tradefed_test_template": attr.label(
- default = "//bazel/rules:tradefed_test.sh.template",
- allow_single_file = True,
- ),
- "_tradefed_classpath_jars": attr.label_list(
- default = [
- atest_tradefed_label,
- tradefed_label,
- tradefed_test_framework_label,
- bazel_result_reporter_label,
- ],
- cfg = host_transition
- ),
- "_atest_tradefed_launcher": attr.label(
- default = atest_tradefed_sh_label,
- allow_single_file = True,
- cfg = host_transition,
- ),
- "_atest_helper": attr.label(
- default = atest_script_help_sh_label,
- allow_single_file = True,
- cfg = host_transition,
- ),
- "_adb": attr.label(
- default = adb_label,
- allow_single_file = True,
- cfg = host_transition,
- ),
- "_extra_tradefed_result_reporters": attr.label(
- default = "//bazel/rules:extra_tradefed_result_reporters",
- ),
- # This attribute is required to use Starlark transitions. It allows
- # allowlisting usage of this rule. For more information, see
- # https://docs.bazel.build/versions/master/skylark/config.html#user-defined-transitions
- "_allowlist_function_transition": attr.label(
- default = "@bazel_tools//tools/allowlists/function_transition_allowlist",
- ),
-}
-
-def _add_dicts(*dictionaries):
- """Creates a new `dict` that has all the entries of the given dictionaries.
-
- This function serves as a replacement for the `+` operator which does not
- work with dictionaries. The implementation is inspired by Skylib's
- `dict.add` and duplicated to avoid the dependency. See
- https://github.com/bazelbuild/bazel/issues/6461 for more details.
-
- Note, if the same key is present in more than one of the input dictionaries,
- the last of them in the argument list overrides any earlier ones.
-
- Args:
- *dictionaries: Dictionaries to be added.
-
- Returns:
- A new `dict` that has all the entries of the given dictionaries.
- """
- result = {}
- for d in dictionaries:
- result.update(d)
- return result
-
-def _tradefed_deviceless_test_impl(ctx):
- return _tradefed_test_impl(
- ctx,
- tradefed_options = [
- "-n",
- "--prioritize-host-config",
- "--skip-host-arch-check",
- ],
- host_deps = ctx.attr.test
- )
-
-tradefed_deviceless_test = rule(
- attrs = _add_dicts(
- _TRADEFED_TEST_ATTRIBUTES,
- {
- "test": attr.label(
- mandatory = True,
- cfg = host_transition,
- aspects = [soong_prebuilt_tradefed_test_aspect],
- ),
- },
- ),
- test = True,
- implementation = _tradefed_deviceless_test_impl,
- toolchains = _TOOLCHAINS,
- doc = "A rule used to run host-side deviceless tests using Tradefed",
-)
-
-def _tradefed_device_test_impl(ctx):
- return _tradefed_test_impl(
- ctx,
- host_deps = ctx.attr._aapt,
- device_deps = ctx.attr.test,
- path_additions = [
- _BAZEL_WORK_DIR + ctx.file._aapt.dirname,
- ]
- )
-
-tradefed_device_test = rule(
- attrs = _add_dicts(
- _TRADEFED_TEST_ATTRIBUTES,
- {
- "test": attr.label(
- mandatory = True,
- cfg = device_transition,
- aspects = [soong_prebuilt_tradefed_test_aspect],
- ),
- "_aapt": attr.label(
- default = aapt_label,
- allow_single_file = True,
- cfg = host_transition,
- ),
- },
- ),
- test = True,
- implementation = _tradefed_device_test_impl,
- toolchains = _TOOLCHAINS,
- doc = "A rule used to run device tests using Tradefed",
-)
-
-def _tradefed_test_impl(
- ctx,
- tradefed_options=[],
- host_deps=[],
- device_deps=[],
- path_additions=[],
- ):
-
- path_additions = path_additions + [_BAZEL_WORK_DIR + ctx.file._adb.dirname]
-
- tradefed_classpath = []
- for tradefed_classpath_jar in ctx.attr._tradefed_classpath_jars:
- for f in tradefed_classpath_jar.files.to_list():
- tradefed_classpath.append(_BAZEL_WORK_DIR + f.short_path)
- tradefed_classpath = ":".join(tradefed_classpath)
-
- tradefed_host_deps = []
- tradefed_host_deps.extend(ctx.attr._tradefed_classpath_jars)
- tradefed_host_deps.extend(ctx.attr._atest_tradefed_launcher)
- tradefed_host_deps.extend(ctx.attr._atest_helper)
- tradefed_host_deps.extend(ctx.attr._adb)
- host_runfiles = _get_runfiles_from_targets(
- ctx,
- tradefed_host_deps + host_deps,
- )
-
- shared_lib_dirs = []
- for f in host_runfiles.files.to_list():
- if f.extension == "so":
- shared_lib_dirs.append(_BAZEL_WORK_DIR + f.dirname)
- shared_lib_dirs = ":".join(shared_lib_dirs)
-
- # Configure the Python toolchain.
- py_toolchain_info = ctx.toolchains[_PY_TOOLCHAIN]
- py2_interpreter = py_toolchain_info.py2_runtime.interpreter
- py3_interpreter = py_toolchain_info.py3_runtime.interpreter
-
- # Create `python` and `python3` symlinks in the runfiles tree and add them to the executable
- # path. This is required because scripts reference these commands in their shebang line.
- host_runfiles = host_runfiles.merge(ctx.runfiles(symlinks = {
- "/".join([py2_interpreter.dirname, "python"]): py2_interpreter,
- "/".join([py3_interpreter.dirname, "python3"]): py3_interpreter,
- }))
- path_additions = path_additions + [
- _BAZEL_WORK_DIR + py2_interpreter.dirname,
- _BAZEL_WORK_DIR + py3_interpreter.dirname,
- ]
-
- result_reporters = [
- "com.android.tradefed.result.BazelExitCodeResultReporter",
- "com.android.tradefed.result.BazelXmlResultReporter",
- ]
-
- result_reporters.extend(ctx.attr._extra_tradefed_result_reporters[BuildSettingInfo].value)
-
- result_reporters_config_file = ctx.actions.declare_file("result-reporters-%s.xml" % ctx.label.name)
- _write_reporters_config_file(
- ctx, result_reporters_config_file, result_reporters)
- reporter_runfiles = ctx.runfiles(files = [result_reporters_config_file])
-
- script = ctx.actions.declare_file("tradefed_test_%s.sh" % ctx.label.name)
- ctx.actions.expand_template(
- template = ctx.file._tradefed_test_template,
- output = script,
- is_executable = True,
- substitutions = {
- "{module_name}": ctx.attr.test[0][TradefedTestInfo].module_name,
- "{atest_tradefed_launcher}": _BAZEL_WORK_DIR + ctx.file._atest_tradefed_launcher.short_path,
- "{atest_helper}": _BAZEL_WORK_DIR + ctx.file._atest_helper.short_path,
- "{tradefed_tests_dir}": _BAZEL_WORK_DIR + ctx.attr.test[0].label.package,
- "{tradefed_classpath}": tradefed_classpath,
- "{shared_lib_dirs}": shared_lib_dirs,
- "{path_additions}": ":".join(path_additions),
- "{additional_tradefed_options}": " ".join(tradefed_options),
- "{result_reporters_config_file}": _BAZEL_WORK_DIR + result_reporters_config_file.short_path,
- },
- )
-
- device_runfiles = _get_runfiles_from_targets(ctx, device_deps)
- return [DefaultInfo(executable = script,
- runfiles = host_runfiles.merge_all([device_runfiles, reporter_runfiles]))]
-
-def _write_reporters_config_file(ctx, config_file, result_reporters):
- config_lines = [
- "<?xml version=\"1.0\" encoding=\"utf-8\"?>",
- "<configuration>"
- ]
-
- for result_reporter in result_reporters:
- config_lines.append(" <result_reporter class=\"%s\" />" % result_reporter)
-
- config_lines.append("</configuration>")
-
- ctx.actions.write(config_file, "\n".join(config_lines))
-
-def _get_runfiles_from_targets(ctx, targets):
- return ctx.runfiles().merge_all([
- target[DefaultInfo].default_runfiles for target in targets
- ])
diff --git a/atest/bazel/rules/tradefed_test_aspects.bzl b/atest/bazel/rules/tradefed_test_aspects.bzl
deleted file mode 100644
index d233d4a..0000000
--- a/atest/bazel/rules/tradefed_test_aspects.bzl
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright (C) 2021 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Aspects used to transform certain providers into a TradefedTestInfo.
-
-Tradefed tests require a TradefedTestInfo provider that is not usually
-returned by most rules. Instead of creating custom rules to adapt build
-rule providers, we use Bazel aspects to convert the input rule's provider
-into a suitable type.
-
-See https://docs.bazel.build/versions/main/skylark/aspects.html#aspects
-for more information on how aspects work.
-"""
-
-load("//bazel/rules:soong_prebuilt.bzl", "SoongPrebuiltInfo")
-load("//bazel/rules:tradefed_test_info.bzl", "TradefedTestInfo")
-
-def _soong_prebuilt_tradefed_aspect_impl(target, ctx):
- test_config_files = []
- test_binary_files = []
-
- # Partition files into config files and test binaries.
- for f in target[SoongPrebuiltInfo].files.to_list():
- if f.extension == "config" or f.extension == "xml":
- test_config_files.append(f)
- else:
- test_binary_files.append(f)
-
- return [
- TradefedTestInfo(
- module_name = target[SoongPrebuiltInfo].module_name,
- test_binaries = test_binary_files,
- test_configs = test_config_files,
- ),
- ]
-
-soong_prebuilt_tradefed_test_aspect = aspect(
- attr_aspects = ["test"],
- implementation = _soong_prebuilt_tradefed_aspect_impl,
-)
diff --git a/atest/bazel/rules/tradefed_test_info.bzl b/atest/bazel/rules/tradefed_test_info.bzl
deleted file mode 100644
index fbdeec8..0000000
--- a/atest/bazel/rules/tradefed_test_info.bzl
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (C) 2021 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""A provider used to provide test information required by Tradefed rules."""
-
-TradefedTestInfo = provider(
- doc = "Info required by Tradefed rules to run tests",
- fields = {
- "test_binaries": "Test binary files",
- "test_configs": "Tradefed config files",
- "module_name": "Test module name",
- },
-)
diff --git a/atest/bazel/runner/Android.bp b/atest/bazel/runner/Android.bp
new file mode 100644
index 0000000..cc940da
--- /dev/null
+++ b/atest/bazel/runner/Android.bp
@@ -0,0 +1,85 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+java_library_host {
+ name: "bazel-test-runner",
+ srcs: [
+ "src/com/android/tradefed/testtype/bazel/BazelTest.java",
+ "src/com/android/tradefed/testtype/bazel/BepFileTailer.java",
+ "src/main/protobuf/*.proto",
+ ],
+ // b/267831518: Pin tradefed and dependencies to Java 11.
+ java_version: "11",
+ libs: [
+ "tradefed",
+ ],
+ java_resource_dirs: [
+ "config",
+ ],
+ proto: {
+ type: "full",
+ include_dirs: [
+ "external/protobuf/src",
+ ],
+ canonical_path_from_root: false,
+ },
+ // shade guava to avoid conflicts with guava embedded in Error Prone.
+ jarjar_rules: "jarjar-rules.txt",
+}
+
+java_genrule_host {
+ name: "empty-bazel-test-suite",
+ cmd: "BAZEL_SUITE_DIR=$(genDir)/android-bazel-suite && " +
+ "mkdir \"$${BAZEL_SUITE_DIR}\" && " +
+ "mkdir \"$${BAZEL_SUITE_DIR}\"/tools && " +
+ "mkdir \"$${BAZEL_SUITE_DIR}\"/testcases && " +
+ "cp $(location :tradefed) \"$${BAZEL_SUITE_DIR}\"/tools && " +
+ "cp $(location :compatibility-host-util) \"$${BAZEL_SUITE_DIR}\"/tools && " +
+ "cp $(location :compatibility-tradefed) \"$${BAZEL_SUITE_DIR}\"/tools && " +
+ "cp $(location :bazel-test-runner) \"$${BAZEL_SUITE_DIR}\"/testcases && " +
+ "$(location soong_zip) -o $(out) -d -C $(genDir) -D \"$${BAZEL_SUITE_DIR}\" -sha256",
+ out: ["empty-bazel-test-suite.zip"],
+ srcs: [
+ ":tradefed",
+ ":bazel-test-runner",
+ ":compatibility-host-util",
+ ":compatibility-tradefed",
+ ],
+ tools: [
+ "soong_zip",
+ ],
+ dist: {
+ targets: ["empty-bazel-test-suite"],
+ },
+}
+
+java_test_host {
+ name: "bazel-test-runner-tests",
+ srcs: [
+ "tests/src/com/android/tradefed/testtype/bazel/BazelTestTest.java",
+ ],
+ static_libs: [
+ "bazel-test-runner",
+ "tradefed",
+ "mockito",
+ "objenesis",
+ ],
+ test_options: {
+ unit_test: true,
+ },
+}
diff --git a/atest/bazel/runner/config/config/bazel_deviceless_tests.xml b/atest/bazel/runner/config/config/bazel_deviceless_tests.xml
new file mode 100644
index 0000000..e4033cb
--- /dev/null
+++ b/atest/bazel/runner/config/config/bazel_deviceless_tests.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2022 Google Inc. All Rights Reserved. -->
+<configuration description="A suite to run the Bazel tests contained in the Atest-generated workspace archive." >
+ <option name="null-device" value="true" />
+ <test class="com.android.tradefed.testtype.bazel.BazelTest" />
+ <logger class="com.android.tradefed.log.FileLogger" />
+ <template-include name="reporters" default="empty" />
+</configuration>
diff --git a/atest/bazel/runner/config/config/format_module_name_to_test_target.cquery b/atest/bazel/runner/config/config/format_module_name_to_test_target.cquery
new file mode 100644
index 0000000..1cc8ec0
--- /dev/null
+++ b/atest/bazel/runner/config/config/format_module_name_to_test_target.cquery
@@ -0,0 +1,14 @@
+def format(target):
+ """Return a pair of 'module_name target_label' for the given tradefed test target, '' otherwise."""
+ p = providers(target)
+ if not p:
+ return ""
+ tradefed_test_info = p.get(
+ "//bazel/rules:tradefed_test.bzl%TradefedTestInfo")
+ if tradefed_test_info:
+ # Use space as a delimiter as Bazel labels can use many special characters in their target
+ # labels. See: https://bazel.build/concepts/labels#target-names
+ return "%s %s" % (tradefed_test_info.module_name, target.label)
+ else:
+ return ""
+ return ""
diff --git a/atest/bazel/runner/jarjar-rules.txt b/atest/bazel/runner/jarjar-rules.txt
new file mode 100644
index 0000000..de5ffab
--- /dev/null
+++ b/atest/bazel/runner/jarjar-rules.txt
@@ -0,0 +1 @@
+rule com.google.protobuf.** com.android.tradefed.internal.protobuf.@1
\ No newline at end of file
diff --git a/atest/bazel/runner/src/com/android/tradefed/testtype/bazel/BazelTest.java b/atest/bazel/runner/src/com/android/tradefed/testtype/bazel/BazelTest.java
new file mode 100644
index 0000000..1df0abc
--- /dev/null
+++ b/atest/bazel/runner/src/com/android/tradefed/testtype/bazel/BazelTest.java
@@ -0,0 +1,816 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.tradefed.testtype.bazel;
+
+import com.android.annotations.VisibleForTesting;
+import com.android.tradefed.config.Option;
+import com.android.tradefed.config.OptionClass;
+import com.android.tradefed.device.DeviceNotAvailableException;
+import com.android.tradefed.invoker.TestInformation;
+import com.android.tradefed.invoker.tracing.CloseableTraceScope;
+import com.android.tradefed.invoker.tracing.TracePropagatingExecutorService;
+import com.android.tradefed.log.ITestLogger;
+import com.android.tradefed.log.LogUtil.CLog;
+import com.android.tradefed.result.FailureDescription;
+import com.android.tradefed.result.FileInputStreamSource;
+import com.android.tradefed.result.ITestInvocationListener;
+import com.android.tradefed.result.LogDataType;
+import com.android.tradefed.result.error.ErrorIdentifier;
+import com.android.tradefed.result.error.TestErrorIdentifier;
+import com.android.tradefed.result.proto.LogFileProto.LogFileInfo;
+import com.android.tradefed.result.proto.ProtoResultParser;
+import com.android.tradefed.result.proto.TestRecordProto.ChildReference;
+import com.android.tradefed.result.proto.TestRecordProto.FailureStatus;
+import com.android.tradefed.result.proto.TestRecordProto.TestRecord;
+import com.android.tradefed.testtype.IRemoteTest;
+import com.android.tradefed.util.ZipUtil;
+import com.android.tradefed.util.proto.TestRecordProtoUtil;
+
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import com.google.common.collect.SetMultimap;
+import com.google.common.io.CharStreams;
+import com.google.common.io.MoreFiles;
+import com.google.common.io.Resources;
+import com.google.devtools.build.lib.buildeventstream.BuildEventStreamProtos;
+import com.google.protobuf.Any;
+import com.google.protobuf.InvalidProtocolBufferException;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.FileOutputStream;
+import java.lang.ProcessBuilder.Redirect;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import java.util.zip.ZipFile;
+
+/** Test runner for executing Bazel tests. */
+@OptionClass(alias = "bazel-test")
+public final class BazelTest implements IRemoteTest {
+
+ public static final String QUERY_ALL_TARGETS = "query_all_targets";
+ public static final String QUERY_MAP_MODULES_TO_TARGETS = "query_map_modules_to_targets";
+ public static final String RUN_TESTS = "run_tests";
+
+ // Add method excludes to TF's global filters since Bazel doesn't support target-specific
+ // arguments. See https://github.com/bazelbuild/rules_go/issues/2784.
+ // TODO(b/274787592): Integrate with Bazel's test filtering to filter specific test cases.
+ public static final String GLOBAL_EXCLUDE_FILTER_TEMPLATE =
+ "--test_arg=--global-filters:exclude-filter=%s";
+
+ private static final Duration BAZEL_QUERY_TIMEOUT = Duration.ofMinutes(5);
+ private static final String TEST_NAME = BazelTest.class.getName();
+ // Bazel internally calls the test output archive file "test.outputs__outputs.zip", the double
+ // underscore is part of this name.
+ private static final String TEST_UNDECLARED_OUTPUTS_ARCHIVE_NAME = "test.outputs__outputs.zip";
+ private static final String PROTO_RESULTS_FILE_NAME = "proto-results";
+
+ private final List<Path> mTemporaryPaths = new ArrayList<>();
+ private final List<Path> mLogFiles = new ArrayList<>();
+ private final Properties mProperties;
+ private final ProcessStarter mProcessStarter;
+ private final Path mTemporaryDirectory;
+ private final ExecutorService mExecutor;
+
+ private Path mRunTemporaryDirectory;
+
+ private enum FilterType {
+ MODULE,
+ TEST_CASE
+ };
+
+ @Option(
+ name = "bazel-test-command-timeout",
+ description = "Timeout for running the Bazel test.")
+ private Duration mBazelCommandTimeout = Duration.ofHours(1L);
+
+ @Option(
+ name = "bazel-test-suite-root-dir",
+ description =
+ "Name of the environment variable set by CtsTestLauncher indicating the"
+ + " location of the root bazel-test-suite dir.")
+ private String mSuiteRootDirEnvVar = "BAZEL_SUITE_ROOT";
+
+ @Option(
+ name = "bazel-startup-options",
+ description = "List of startup options to be passed to Bazel.")
+ private final List<String> mBazelStartupOptions = new ArrayList<>();
+
+ @Option(
+ name = "bazel-test-extra-args",
+ description = "List of extra arguments to be passed to Bazel")
+ private final List<String> mBazelTestExtraArgs = new ArrayList<>();
+
+ @Option(
+ name = "bazel-max-idle-timout",
+ description = "Max idle timeout in seconds for bazel commands.")
+ private Duration mBazelMaxIdleTimeout = Duration.ofSeconds(5L);
+
+ @Option(name = "exclude-filter", description = "Test modules to exclude when running tests.")
+ private final List<String> mExcludeTargets = new ArrayList<>();
+
+ @Option(name = "include-filter", description = "Test modules to include when running tests.")
+ private final List<String> mIncludeTargets = new ArrayList<>();
+
+ @Option(
+ name = "report-cached-test-results",
+ description = "Whether or not to report cached test results.")
+ private boolean mReportCachedTestResults = true;
+
+ public BazelTest() {
+ this(new DefaultProcessStarter(), System.getProperties());
+ }
+
+ @VisibleForTesting
+ BazelTest(ProcessStarter processStarter, Properties properties) {
+ mProcessStarter = processStarter;
+ mExecutor = TracePropagatingExecutorService.create(Executors.newCachedThreadPool());
+ mProperties = properties;
+ mTemporaryDirectory = Paths.get(properties.getProperty("java.io.tmpdir"));
+ }
+
+ @Override
+ public void run(TestInformation testInfo, ITestInvocationListener listener)
+ throws DeviceNotAvailableException {
+
+ List<FailureDescription> runFailures = new ArrayList<>();
+ long startTime = System.currentTimeMillis();
+
+ try {
+ initialize();
+ runTestsAndParseResults(testInfo, listener, runFailures);
+ } catch (AbortRunException e) {
+ runFailures.add(e.getFailureDescription());
+ } catch (IOException | InterruptedException e) {
+ runFailures.add(throwableToTestFailureDescription(e));
+ }
+
+ listener.testModuleStarted(testInfo.getContext());
+ listener.testRunStarted(TEST_NAME, 0);
+ reportRunFailures(runFailures, listener);
+ listener.testRunEnded(System.currentTimeMillis() - startTime, Collections.emptyMap());
+ listener.testModuleEnded();
+
+ addTestLogs(listener);
+ cleanup();
+ }
+
+ private void initialize() throws IOException {
+ mRunTemporaryDirectory = Files.createTempDirectory(mTemporaryDirectory, "bazel-test-");
+ }
+
+ private void runTestsAndParseResults(
+ TestInformation testInfo,
+ ITestInvocationListener listener,
+ List<FailureDescription> runFailures)
+ throws IOException, InterruptedException {
+
+ Path workspaceDirectory = resolveWorkspacePath();
+
+ Collection<String> testTargets = listTestTargets(workspaceDirectory);
+ if (testTargets.isEmpty()) {
+ throw new AbortRunException(
+ "No targets found, aborting",
+ FailureStatus.DEPENDENCY_ISSUE,
+ TestErrorIdentifier.TEST_ABORTED);
+ }
+
+ Path bepFile = createTemporaryFile("BEP_output");
+
+ Process bazelTestProcess =
+ startTests(testInfo, listener, testTargets, workspaceDirectory, bepFile);
+
+ try (BepFileTailer tailer = BepFileTailer.create(bepFile)) {
+ bazelTestProcess.onExit().thenRun(() -> tailer.stop());
+ reportTestResults(listener, testInfo, runFailures, tailer);
+ }
+
+ // Note that if Bazel exits without writing the 'last' BEP message marker we won't get to
+ // here since the above reporting code throws.
+ waitForProcess(bazelTestProcess, RUN_TESTS);
+ }
+
+ void reportTestResults(
+ ITestInvocationListener listener,
+ TestInformation testInfo,
+ List<FailureDescription> runFailures,
+ BepFileTailer tailer)
+ throws InterruptedException, IOException {
+
+ try (CloseableTraceScope ignored = new CloseableTraceScope("reportTestResults")) {
+ reportTestResultsNoTrace(listener, testInfo, runFailures, tailer);
+ }
+ }
+
+ void reportTestResultsNoTrace(
+ ITestInvocationListener listener,
+ TestInformation testInfo,
+ List<FailureDescription> runFailures,
+ BepFileTailer tailer)
+ throws InterruptedException, IOException {
+
+ ProtoResultParser resultParser =
+ new ProtoResultParser(listener, testInfo.getContext(), false, "tf-test-process-");
+
+ BuildEventStreamProtos.BuildEvent event;
+ while ((event = tailer.nextEvent()) != null) {
+ if (event.getLastMessage()) {
+ return;
+ }
+
+ if (!event.hasTestResult()) {
+ continue;
+ }
+
+ if (!mReportCachedTestResults && isTestResultCached(event.getTestResult())) {
+ continue;
+ }
+
+ try {
+ reportEventsInTestOutputsArchive(event.getTestResult(), resultParser);
+ } catch (IOException | InterruptedException | URISyntaxException e) {
+ runFailures.add(
+ throwableToInfraFailureDescription(e)
+ .setErrorIdentifier(TestErrorIdentifier.OUTPUT_PARSER_ERROR));
+ }
+ }
+
+ throw new AbortRunException(
+ "Unexpectedly hit end of BEP file without receiving last message",
+ FailureStatus.INFRA_FAILURE,
+ TestErrorIdentifier.OUTPUT_PARSER_ERROR);
+ }
+
+ private static boolean isTestResultCached(BuildEventStreamProtos.TestResult result) {
+ return result.getCachedLocally() || result.getExecutionInfo().getCachedRemotely();
+ }
+
+ private ProcessBuilder createBazelCommand(Path workspaceDirectory, String tmpDirPrefix)
+ throws IOException {
+
+ Path javaTmpDir = createTemporaryDirectory("%s-java-tmp-out".formatted(tmpDirPrefix));
+ Path bazelTmpDir = createTemporaryDirectory("%s-bazel-tmp-out".formatted(tmpDirPrefix));
+
+ List<String> command = new ArrayList<>();
+
+ command.add(workspaceDirectory.resolve("bazel.sh").toAbsolutePath().toString());
+ command.add(
+ "--host_jvm_args=-Djava.io.tmpdir=%s"
+ .formatted(javaTmpDir.toAbsolutePath().toString()));
+ command.add("--output_user_root=%s".formatted(bazelTmpDir.toAbsolutePath().toString()));
+ command.add("--max_idle_secs=%d".formatted(mBazelMaxIdleTimeout.toSeconds()));
+
+ ProcessBuilder builder = new ProcessBuilder(command);
+
+ builder.directory(workspaceDirectory.toFile());
+
+ return builder;
+ }
+
+ private Collection<String> listTestTargets(Path workspaceDirectory)
+ throws IOException, InterruptedException {
+
+ try (CloseableTraceScope ignored = new CloseableTraceScope("listTestTargets")) {
+ return listTestTargetsNoTrace(workspaceDirectory);
+ }
+ }
+
+ private Collection<String> listTestTargetsNoTrace(Path workspaceDirectory)
+ throws IOException, InterruptedException {
+
+ // We need to query all tests targets first in a separate Bazel query call since 'cquery
+ // tests(...)' doesn't work in the Atest Bazel workspace.
+ List<String> allTestTargets = queryAllTestTargets(workspaceDirectory);
+ CLog.i("Found %d test targets in workspace", allTestTargets.size());
+
+ Map<String, String> moduleToTarget =
+ queryModulesToTestTargets(workspaceDirectory, allTestTargets);
+
+ Set<String> moduleExcludes = groupTargetsByType(mExcludeTargets).get(FilterType.MODULE);
+ Set<String> moduleIncludes = groupTargetsByType(mIncludeTargets).get(FilterType.MODULE);
+
+ if (!moduleIncludes.isEmpty() && !moduleExcludes.isEmpty()) {
+ throw new AbortRunException(
+ "Invalid options: cannot set both module-level include filters and module-level"
+ + " exclude filters.",
+ FailureStatus.DEPENDENCY_ISSUE,
+ TestErrorIdentifier.TEST_ABORTED);
+ }
+
+ if (!moduleIncludes.isEmpty()) {
+ return Maps.filterKeys(moduleToTarget, s -> moduleIncludes.contains(s)).values();
+ }
+
+ if (!moduleExcludes.isEmpty()) {
+ return Maps.filterKeys(moduleToTarget, s -> !moduleExcludes.contains(s)).values();
+ }
+
+ return moduleToTarget.values();
+ }
+
+ private List<String> queryAllTestTargets(Path workspaceDirectory)
+ throws IOException, InterruptedException {
+
+ Path logFile = createLogFile("%s-log".formatted(QUERY_ALL_TARGETS));
+
+ ProcessBuilder builder = createBazelCommand(workspaceDirectory, QUERY_ALL_TARGETS);
+
+ builder.command().add("query");
+ builder.command().add("tests(...)");
+ builder.redirectError(Redirect.appendTo(logFile.toFile()));
+
+ Process queryProcess = startProcess(QUERY_ALL_TARGETS, builder, BAZEL_QUERY_TIMEOUT);
+ List<String> queryLines = readProcessLines(queryProcess);
+
+ waitForProcess(queryProcess, QUERY_ALL_TARGETS);
+
+ return queryLines;
+ }
+
+ private Map<String, String> queryModulesToTestTargets(
+ Path workspaceDirectory, List<String> allTestTargets)
+ throws IOException, InterruptedException {
+
+ Path cqueryTestTargetsFile = createTemporaryFile("test_targets");
+ Files.write(cqueryTestTargetsFile, String.join("+", allTestTargets).getBytes());
+
+ Path cqueryFormatFile = createTemporaryFile("format_module_name_to_test_target");
+ try (FileOutputStream os = new FileOutputStream(cqueryFormatFile.toFile())) {
+ Resources.copy(
+ Resources.getResource("config/format_module_name_to_test_target.cquery"), os);
+ }
+
+ Path logFile = createLogFile("%s-log".formatted(QUERY_MAP_MODULES_TO_TARGETS));
+ ProcessBuilder builder =
+ createBazelCommand(workspaceDirectory, QUERY_MAP_MODULES_TO_TARGETS);
+
+ builder.command().add("cquery");
+ builder.command().add("--query_file=%s".formatted(cqueryTestTargetsFile.toAbsolutePath()));
+ builder.command().add("--output=starlark");
+ builder.command().add("--starlark:file=%s".formatted(cqueryFormatFile.toAbsolutePath()));
+ builder.redirectError(Redirect.appendTo(logFile.toFile()));
+
+ Process process = startProcess(QUERY_MAP_MODULES_TO_TARGETS, builder, BAZEL_QUERY_TIMEOUT);
+
+ List<String> queryLines = readProcessLines(process);
+
+ waitForProcess(process, QUERY_MAP_MODULES_TO_TARGETS);
+
+ return parseModulesToTargets(queryLines);
+ }
+
+ private List<String> readProcessLines(Process process) throws IOException {
+ return CharStreams.readLines(process.inputReader());
+ }
+
+ private Map<String, String> parseModulesToTargets(Collection<String> lines) {
+ Map<String, String> moduleToTarget = new HashMap<>();
+ StringBuilder errorMessage = new StringBuilder();
+ for (String line : lines) {
+ // Query output format is: "module_name //bazel/test:target" if a test target is a
+ // TF test, "" otherwise, so only count proper targets.
+ if (line.isEmpty()) {
+ continue;
+ }
+
+ String[] splitLine = line.split(" ");
+
+ if (splitLine.length != 2) {
+ throw new AbortRunException(
+ String.format(
+ "Unrecognized output from %s command: %s",
+ QUERY_MAP_MODULES_TO_TARGETS, line),
+ FailureStatus.DEPENDENCY_ISSUE,
+ TestErrorIdentifier.TEST_ABORTED);
+ }
+
+ String moduleName = splitLine[0];
+ String targetName = splitLine[1];
+
+ String duplicateEntry;
+ if ((duplicateEntry = moduleToTarget.get(moduleName)) != null) {
+ errorMessage.append(
+ "Multiple test targets found for module %s: %s, %s\n"
+ .formatted(moduleName, duplicateEntry, targetName));
+ }
+
+ moduleToTarget.put(moduleName, targetName);
+ }
+
+ if (errorMessage.length() != 0) {
+ throw new AbortRunException(
+ errorMessage.toString(),
+ FailureStatus.DEPENDENCY_ISSUE,
+ TestErrorIdentifier.TEST_ABORTED);
+ }
+ return ImmutableMap.copyOf(moduleToTarget);
+ }
+
+ private Process startTests(
+ TestInformation testInfo,
+ ITestInvocationListener listener,
+ Collection<String> testTargets,
+ Path workspaceDirectory,
+ Path bepFile)
+ throws IOException {
+
+ Path logFile = createLogFile("%s-log".formatted(RUN_TESTS));
+
+ ProcessBuilder builder = createBazelCommand(workspaceDirectory, RUN_TESTS);
+
+ builder.command().addAll(mBazelStartupOptions);
+ builder.command().add("test");
+ builder.command().addAll(testTargets);
+
+ builder.command().add("--build_event_binary_file=%s".formatted(bepFile.toAbsolutePath()));
+
+ builder.command().addAll(mBazelTestExtraArgs);
+
+ Set<String> testFilters = groupTargetsByType(mExcludeTargets).get(FilterType.TEST_CASE);
+ for (String test : testFilters) {
+ builder.command().add(GLOBAL_EXCLUDE_FILTER_TEMPLATE.formatted(test));
+ }
+ builder.redirectErrorStream(true);
+ builder.redirectOutput(Redirect.appendTo(logFile.toFile()));
+
+ return startProcess(RUN_TESTS, builder, mBazelCommandTimeout);
+ }
+
+ private static SetMultimap<FilterType, String> groupTargetsByType(List<String> targets) {
+ Map<FilterType, List<String>> groupedMap =
+ targets.stream()
+ .collect(
+ Collectors.groupingBy(
+ s ->
+ s.contains(" ")
+ ? FilterType.TEST_CASE
+ : FilterType.MODULE));
+
+ SetMultimap<FilterType, String> groupedMultiMap = HashMultimap.create();
+ for (Entry<FilterType, List<String>> entry : groupedMap.entrySet()) {
+ groupedMultiMap.putAll(entry.getKey(), entry.getValue());
+ }
+
+ return groupedMultiMap;
+ }
+
+ private Process startAndWaitForProcess(
+ String processTag, ProcessBuilder builder, Duration processTimeout)
+ throws InterruptedException, IOException {
+
+ Process process = startProcess(processTag, builder, processTimeout);
+ waitForProcess(process, processTag);
+ return process;
+ }
+
+ private Process startProcess(String processTag, ProcessBuilder builder, Duration timeout)
+ throws IOException {
+
+ CLog.i("Running command for %s: %s", processTag, new ProcessDebugString(builder));
+ String traceTag = "Process:" + processTag;
+ Process process = mProcessStarter.start(processTag, builder);
+
+ // We wait for the process in a separate thread so that we can trace its execution time.
+ // Another alternative could be to start/stop tracing with explicit calls but these would
+ // have to be done on the same thread as required by the tracing facility.
+ mExecutor.submit(
+ () -> {
+ try (CloseableTraceScope unused = new CloseableTraceScope(traceTag)) {
+ if (waitForProcessUninterruptibly(process, timeout)) {
+ return;
+ }
+
+ CLog.e("%s command timed out and is being destroyed", processTag);
+ process.destroy();
+
+ // Give the process a grace period to properly shut down before forcibly
+ // terminating it. We _could_ deduct this time from the total timeout but
+ // it's overkill.
+ if (!waitForProcessUninterruptibly(process, Duration.ofSeconds(5))) {
+ CLog.w(
+ "%s command did not terminate normally after the grace period"
+ + " and is being forcibly destroyed",
+ processTag);
+ process.destroyForcibly();
+ }
+
+ // We wait for the process as it may take it some time to terminate and
+ // otherwise skew the trace results.
+ waitForProcessUninterruptibly(process);
+ CLog.i("%s command timed out and was destroyed", processTag);
+ }
+ });
+
+ return process;
+ }
+
+ private void waitForProcess(Process process, String processTag) throws InterruptedException {
+
+ if (process.waitFor() == 0) {
+ return;
+ }
+
+ throw new AbortRunException(
+ String.format("%s command failed. Exit code: %d", processTag, process.exitValue()),
+ FailureStatus.DEPENDENCY_ISSUE,
+ TestErrorIdentifier.TEST_ABORTED);
+ }
+
+ private void reportEventsInTestOutputsArchive(
+ BuildEventStreamProtos.TestResult result, ProtoResultParser resultParser)
+ throws IOException, InvalidProtocolBufferException, InterruptedException,
+ URISyntaxException {
+
+ try (CloseableTraceScope ignored =
+ new CloseableTraceScope("reportEventsInTestOutputsArchive")) {
+ reportEventsInTestOutputsArchiveNoTrace(result, resultParser);
+ }
+ }
+
+ private void reportEventsInTestOutputsArchiveNoTrace(
+ BuildEventStreamProtos.TestResult result, ProtoResultParser resultParser)
+ throws IOException, InvalidProtocolBufferException, InterruptedException,
+ URISyntaxException {
+
+ BuildEventStreamProtos.File outputsFile =
+ result.getTestActionOutputList().stream()
+ .filter(file -> file.getName().equals(TEST_UNDECLARED_OUTPUTS_ARCHIVE_NAME))
+ .findAny()
+ .orElseThrow(() -> new IOException("No test output archive found"));
+
+ URI uri = new URI(outputsFile.getUri());
+
+ File zipFile = new File(uri.getPath());
+ Path outputFilesDir = Files.createTempDirectory(mRunTemporaryDirectory, "output_zip-");
+
+ try {
+ ZipUtil.extractZip(new ZipFile(zipFile), outputFilesDir.toFile());
+
+ File protoResult = outputFilesDir.resolve(PROTO_RESULTS_FILE_NAME).toFile();
+ TestRecord record = TestRecordProtoUtil.readFromFile(protoResult);
+
+ TestRecord.Builder recordBuilder = record.toBuilder();
+ //recursivelyUpdateArtifactsRootPath(recordBuilder, outputFilesDir);
+ //moveRootRecordArtifactsToFirstChild(recordBuilder);
+ resultParser.processFinalizedProto(recordBuilder.build());
+ } finally {
+ MoreFiles.deleteRecursively(outputFilesDir);
+ }
+ }
+
+ /*private void recursivelyUpdateArtifactsRootPath(TestRecord.Builder recordBuilder, Path newRoot)
+ throws InvalidProtocolBufferException {
+
+ Map<String, Any> updatedMap = new HashMap<>();
+ for (Entry<String, Any> entry : recordBuilder.getArtifactsMap().entrySet()) {
+ LogFileInfo info = entry.getValue().unpack(LogFileInfo.class);
+
+ Path relativePath = findRelativeArtifactPath(Paths.get(info.getPath()));
+
+ LogFileInfo updatedInfo =
+ info.toBuilder()
+ .setPath(newRoot.resolve(relativePath).toAbsolutePath().toString())
+ .build();
+ updatedMap.put(entry.getKey(), Any.pack(updatedInfo));
+ }
+
+ recordBuilder.putAllArtifacts(updatedMap);
+
+ for (ChildReference.Builder childBuilder : recordBuilder.getChildrenBuilderList()) {
+ recursivelyUpdateArtifactsRootPath(childBuilder.getInlineTestRecordBuilder(), newRoot);
+ }
+ }*/
+
+ private Path findRelativeArtifactPath(Path originalPath) {
+ // The log files are stored under
+ // ${EXTRACTED_UNDECLARED_OUTPUTS}/stub/-1/stub/inv_xxx/inv_xxx/logfile so the new path is
+ // found by trimming down the original path until it starts with "stub/-1/stub" and
+ // appending that to our extracted directory.
+ // TODO(b/251279690) Create a directory within undeclared outputs which we can more
+ // reliably look for to calculate this relative path.
+ Path delimiter = Paths.get("stub/-1/stub");
+
+ Path relativePath = originalPath;
+ while (!relativePath.startsWith(delimiter)
+ && relativePath.getNameCount() > delimiter.getNameCount()) {
+ relativePath = relativePath.subpath(1, relativePath.getNameCount());
+ }
+
+ if (!relativePath.startsWith(delimiter)) {
+ throw new IllegalArgumentException(
+ String.format(
+ "Artifact path '%s' does not contain delimiter '%s' and therefore"
+ + " cannot be found",
+ originalPath, delimiter));
+ }
+
+ return relativePath;
+ }
+
+ /*private void moveRootRecordArtifactsToFirstChild(TestRecord.Builder recordBuilder) {
+ if (recordBuilder.getChildrenCount() == 0) {
+ return;
+ }
+
+ TestRecord.Builder childTestRecordBuilder =
+ recordBuilder.getChildrenBuilder(0).getInlineTestRecordBuilder();
+ for (Entry<String, Any> entry : recordBuilder.getArtifactsMap().entrySet()) {
+ childTestRecordBuilder.putArtifacts(entry.getKey(), entry.getValue());
+ }
+
+ recordBuilder.clearArtifacts();
+ }*/
+
+    /**
+     * Reports aggregated run failures to the given listener.
+     *
+     * <p>Every failure is logged, but only a single {@code testRunFailed} call is made: it
+     * carries the total failure count plus the first failure's message, and reuses that
+     * failure's status and error identifier.
+     *
+     * @param runFailures failures collected during the run; no-op when empty
+     * @param listener the listener to notify
+     */
+    private void reportRunFailures(
+            List<FailureDescription> runFailures, ITestInvocationListener listener) {
+
+        if (runFailures.isEmpty()) {
+            return;
+        }
+
+        for (FailureDescription runFailure : runFailures) {
+            CLog.e(runFailure.getErrorMessage());
+        }
+
+        FailureDescription reportedFailure = runFailures.get(0);
+        listener.testRunFailed(
+                FailureDescription.create(
+                        String.format(
+                                "The run had %d failures, the first of which was: %s\n"
+                                        + "See the subprocess-host_log for more details.",
+                                runFailures.size(), reportedFailure.getErrorMessage()),
+                        reportedFailure.getFailureStatus())
+                        .setErrorIdentifier(reportedFailure.getErrorIdentifier()));
+    }
+
+    /**
+     * Resolves the Bazel workspace root from the configured suite-root property.
+     *
+     * @return the path to the Bazel workspace under the suite root directory
+     * @throws AbortRunException if the suite-root property is unset or empty
+     */
+    private Path resolveWorkspacePath() {
+        String suiteRootPath = mProperties.getProperty(mSuiteRootDirEnvVar);
+        if (suiteRootPath == null || suiteRootPath.isEmpty()) {
+            throw new AbortRunException(
+                    "Bazel Test Suite root directory not set, aborting",
+                    FailureStatus.DEPENDENCY_ISSUE,
+                    TestErrorIdentifier.TEST_ABORTED);
+        }
+
+        // TODO(b/233885171): Remove resolve once workspace archive is updated.
+        return Paths.get(suiteRootPath).resolve("android-bazel-suite/out/atest_bazel_workspace");
+    }
+
+    /**
+     * Uploads every collected log file to the given logger as plain text.
+     *
+     * <p>NOTE(review): the {@code true} flag passed to {@code FileInputStreamSource} presumably
+     * marks the file for deletion once logged — confirm against the TradeFed API.
+     */
+    private void addTestLogs(ITestLogger logger) {
+        for (Path logFile : mLogFiles) {
+            try (FileInputStreamSource source = new FileInputStreamSource(logFile.toFile(), true)) {
+                logger.testLog(logFile.toFile().getName(), LogDataType.TEXT, source);
+            }
+        }
+    }
+
+    /** Best-effort removal of the run's temporary directory; failures are only logged. */
+    private void cleanup() {
+        try {
+            MoreFiles.deleteRecursively(mRunTemporaryDirectory);
+        } catch (IOException e) {
+            CLog.e(e);
+        }
+    }
+
+    /** Indirection point over {@link ProcessBuilder#start()} so process launching can be intercepted. */
+    interface ProcessStarter {
+        Process start(String processTag, ProcessBuilder builder) throws IOException;
+    }
+
+    /** Default starter: delegates directly to {@link ProcessBuilder#start()}, ignoring the tag. */
+    private static final class DefaultProcessStarter implements ProcessStarter {
+        @Override
+        public Process start(String processTag, ProcessBuilder builder) throws IOException {
+            return builder.start();
+        }
+    }
+
+    /** Creates a fresh directory with the given name prefix under the run's temporary directory. */
+    private Path createTemporaryDirectory(String prefix) throws IOException {
+        return Files.createTempDirectory(mRunTemporaryDirectory, prefix);
+    }
+
+    /** Creates an empty, suffix-less temporary file under the run's temporary directory. */
+    private Path createTemporaryFile(String prefix) throws IOException {
+        return Files.createTempFile(mRunTemporaryDirectory, prefix, "");
+    }
+
+    /**
+     * Creates a temporary {@code .txt} log file under the run's temporary directory and
+     * registers it in {@code mLogFiles} for later upload via {@link #addTestLogs}.
+     */
+    private Path createLogFile(String name) throws IOException {
+        Path logFile = Files.createTempFile(mRunTemporaryDirectory, name, ".txt");
+
+        mLogFiles.add(logFile);
+
+        return logFile;
+    }
+
+    /** Wraps a throwable in a {@link FailureDescription} with {@code TEST_FAILURE} status. */
+    private static FailureDescription throwableToTestFailureDescription(Throwable t) {
+        return FailureDescription.create(t.getMessage())
+                .setCause(t)
+                .setFailureStatus(FailureStatus.TEST_FAILURE);
+    }
+
+    /** Wraps an exception in a {@link FailureDescription} with {@code INFRA_FAILURE} status. */
+    private static FailureDescription throwableToInfraFailureDescription(Exception e) {
+        return FailureDescription.create(e.getMessage())
+                .setCause(e)
+                .setFailureStatus(FailureStatus.INFRA_FAILURE);
+    }
+
+    /**
+     * Waits for the process to exit for up to {@code timeout}, retrying through interrupts.
+     *
+     * <p>InterruptedException is swallowed while waiting (the remaining time is recomputed on
+     * each retry) and the thread's interrupt flag is restored before returning.
+     *
+     * @return true if the process exited within the timeout, false otherwise
+     */
+    private static boolean waitForProcessUninterruptibly(Process process, Duration timeout) {
+        long remainingNanos = timeout.toNanos();
+        long end = System.nanoTime() + remainingNanos;
+        boolean interrupted = false;
+
+        try {
+            while (true) {
+                try {
+                    return process.waitFor(remainingNanos, TimeUnit.NANOSECONDS);
+                } catch (InterruptedException e) {
+                    interrupted = true;
+                    remainingNanos = end - System.nanoTime();
+                }
+            }
+        } finally {
+            if (interrupted) {
+                Thread.currentThread().interrupt();
+            }
+        }
+    }
+
+    /**
+     * Waits indefinitely for the process to exit, retrying through interrupts; the thread's
+     * interrupt flag is restored before returning.
+     *
+     * @return the process exit code
+     */
+    private static int waitForProcessUninterruptibly(Process process) {
+        boolean interrupted = false;
+
+        try {
+            while (true) {
+                try {
+                    return process.waitFor();
+                } catch (InterruptedException e) {
+                    interrupted = true;
+                }
+            }
+        } finally {
+            if (interrupted) {
+                Thread.currentThread().interrupt();
+            }
+        }
+    }
+
+    /** Signals that the run must be aborted; carries the failure to report to listeners. */
+    private static final class AbortRunException extends RuntimeException {
+        private final FailureDescription mFailureDescription;
+
+        public AbortRunException(
+                String errorMessage, FailureStatus failureStatus, ErrorIdentifier errorIdentifier) {
+            this(
+                    FailureDescription.create(errorMessage, failureStatus)
+                            .setErrorIdentifier(errorIdentifier));
+        }
+
+        public AbortRunException(FailureDescription failureDescription) {
+            super(failureDescription.getErrorMessage());
+            mFailureDescription = failureDescription;
+        }
+
+        public FailureDescription getFailureDescription() {
+            return mFailureDescription;
+        }
+    }
+
+    /** Lazily renders a ProcessBuilder's command line (space-joined) for log messages. */
+    private static final class ProcessDebugString {
+
+        private final ProcessBuilder mBuilder;
+
+        ProcessDebugString(ProcessBuilder builder) {
+            mBuilder = builder;
+        }
+
+        // NOTE(review): missing @Override annotation on toString — consider adding it.
+        public String toString() {
+            return String.join(" ", mBuilder.command());
+        }
+    }
+}
diff --git a/atest/bazel/runner/src/com/android/tradefed/testtype/bazel/BepFileTailer.java b/atest/bazel/runner/src/com/android/tradefed/testtype/bazel/BepFileTailer.java
new file mode 100644
index 0000000..96c12fc
--- /dev/null
+++ b/atest/bazel/runner/src/com/android/tradefed/testtype/bazel/BepFileTailer.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.tradefed.testtype.bazel;
+
+import com.google.devtools.build.lib.buildeventstream.BuildEventStreamProtos.BuildEvent;
+import com.google.protobuf.InvalidProtocolBufferException;
+
+import java.io.BufferedInputStream;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.time.Duration;
+
+/**
+ * Tails a Build Event Protocol (BEP) file, yielding delimited {@link BuildEvent} messages as
+ * they are appended by a concurrently running Bazel process.
+ */
+final class BepFileTailer implements AutoCloseable {
+    // How long to sleep between parse attempts when no complete event is available yet.
+    private static final Duration BEP_PARSE_SLEEP_TIME = Duration.ofMillis(100);
+
+    private final BufferedInputStream mIn;
+    // Volatile so a stop() issued from another thread is seen by the polling loop.
+    private volatile boolean mStop;
+
+    /** Opens a tailer over the given BEP file. */
+    static BepFileTailer create(Path bepFile) throws FileNotFoundException {
+        return new BepFileTailer(new BufferedInputStream(new FileInputStream(bepFile.toFile())));
+    }
+
+    // NOTE(review): parameter 'In' is unconventionally capitalized for a local name.
+    private BepFileTailer(BufferedInputStream In) {
+        mIn = In;
+        mStop = false;
+    }
+
+    /**
+     * Returns the next event from the file, polling until one is fully written.
+     *
+     * <p>Returns null once {@link #stop()} has been called and end-of-file is reached. A
+     * partially written message is retried after a short sleep; after stop(), a partial message
+     * is treated as an error and the InvalidProtocolBufferException is rethrown.
+     */
+    public BuildEvent nextEvent() throws InterruptedException, IOException {
+        while (true) {
+            // Snapshot the flag before parsing so events written before stop() are still read.
+            boolean stop = mStop;
+
+            // Mark the current position in the input stream.
+            mIn.mark(Integer.MAX_VALUE);
+
+            try {
+                BuildEvent event = BuildEvent.parseDelimitedFrom(mIn);
+
+                // When event is null and we hit EOF, wait for an event to be written and try again.
+                if (event != null) {
+                    return event;
+                }
+                if (stop) {
+                    return null;
+                }
+            } catch (InvalidProtocolBufferException e) {
+                if (stop) {
+                    throw e;
+                }
+                // Partial read. Restore the old position in the input stream.
+                mIn.reset();
+            }
+            Thread.sleep(BEP_PARSE_SLEEP_TIME.toMillis());
+        }
+    }
+
+    @Override
+    public void close() throws IOException {
+        mIn.close();
+    }
+
+    /** Requests the tailer to return null (or rethrow a parse error) at the next EOF. */
+    public void stop() {
+        mStop = true;
+    }
+}
diff --git a/atest/bazel/runner/src/main/protobuf/build_event_stream.proto b/atest/bazel/runner/src/main/protobuf/build_event_stream.proto
new file mode 100644
index 0000000..9866414
--- /dev/null
+++ b/atest/bazel/runner/src/main/protobuf/build_event_stream.proto
@@ -0,0 +1,1178 @@
+// Copyright 2016 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package build_event_stream;
+
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+import "src/main/protobuf/command_line.proto";
+import "src/main/protobuf/failure_details.proto";
+import "src/main/protobuf/invocation_policy.proto";
+
+option java_package = "com.google.devtools.build.lib.buildeventstream";
+option java_outer_classname = "BuildEventStreamProtos";
+
+// Identifier for a build event. It is deliberately structured to also provide
+// information about which build target etc the event is related to.
+//
+// Events are chained via the event id as follows: each event has an id and a
+// set of ids of children events such that apart from the initial event each
+// event has an id that is mentioned as child id in an earlier event and a build
+// invocation is complete if and only if all direct and indirect children of the
+// initial event have been posted.
+message BuildEventId {
+ // Generic identifier for a build event. This is the default type of
+ // BuildEventId, but should not be used outside testing; nevertheless,
+ // tools should handle build events with this kind of id gracefully.
+ message UnknownBuildEventId {
+ string details = 1;
+ }
+
+ // Identifier of an event reporting progress. Those events are also used to
+ // chain in events that come early.
+ message ProgressId {
+ // Unique identifier. No assumption should be made about how the ids are
+ // assigned; the only meaningful operation on this field is test for
+ // equality.
+ int32 opaque_count = 1;
+ }
+
+ // Identifier of an event indicating the beginning of a build; this will
+ // normally be the first event.
+ message BuildStartedId {}
+
+ // Identifier on an event indicating the original commandline received by
+ // the bazel server.
+ message UnstructuredCommandLineId {}
+
+ // Identifier on an event describing the commandline received by Bazel.
+ message StructuredCommandLineId {
+ // A title for this command line value, as there may be multiple.
+ // For example, a single invocation may wish to report both the literal and
+ // canonical command lines, and this label would be used to differentiate
+ // between both versions.
+ string command_line_label = 1;
+ }
+
+ // Identifier of an event indicating the workspace status.
+ message WorkspaceStatusId {}
+
+ // Identifier on an event reporting on the options included in the command
+ // line, both explicitly and implicitly.
+ message OptionsParsedId {}
+
+ // Identifier of an event reporting that an external resource was fetched
+ // from.
+ message FetchId {
+ // The external resource that was fetched from.
+ string url = 1;
+ }
+
+ // Identifier of an event indicating that a target pattern has been expanded
+ // further.
+ // Messages of this shape are also used to describe parts of a pattern that
+ // have been skipped for some reason, if the actual expansion was still
+ // carried out (e.g., if keep_going is set). In this case, the
+ // pattern_skipped choice in the id field is to be made.
+ message PatternExpandedId {
+ repeated string pattern = 1;
+ }
+
+ message WorkspaceConfigId {}
+
+ message BuildMetadataId {}
+
+ // Identifier of an event indicating that a target has been expanded by
+ // identifying for which configurations it should be build.
+ message TargetConfiguredId {
+ string label = 1;
+
+ // If empty, the id refers to the expansion of the target. If not-empty,
+ // the id refers to the expansion of an aspect applied to the (already
+ // expanded) target.
+ //
+ // For example, when building an apple_binary that depends on proto_library
+ // "//:foo_proto", there will be two TargetConfigured events for
+ // "//:foo_proto":
+ //
+ // 1. An event with an empty aspect, corresponding to actions producing
+ // language-agnostic outputs from the proto_library; and
+ // 2. An event with aspect "ObjcProtoAspect", corresponding to Objective-C
+ // code generation.
+ string aspect = 2;
+ }
+
+ // Identifier of an event introducing a named set of files (usually artifacts)
+ // to be referred to in later messages.
+ message NamedSetOfFilesId {
+ // Identifier of the file set; this is an opaque string valid only for the
+ // particular instance of the event stream.
+ string id = 1;
+ }
+
+ // Identifier of an event introducing a configuration.
+ message ConfigurationId {
+ // Identifier of the configuration; users of the protocol should not make
+ // any assumptions about it having any structure, or equality of the
+ // identifier between different streams.
+ string id = 1;
+ }
+
+ // Identifier of an event indicating that a target was built completely; this
+ // does not include running the test if the target is a test target.
+ message TargetCompletedId {
+ string label = 1;
+
+ // The configuration for which the target was built.
+ ConfigurationId configuration = 3;
+
+ // If empty, the id refers to the completion of the target. If not-empty,
+ // the id refers to the completion of an aspect applied to the (already
+ // completed) target.
+ //
+ // For example, when building an apple_binary that depends on proto_library
+ // "//:foo_proto", there will be two TargetCompleted events for
+ // "//:foo_proto":
+ //
+ // 1. An event with an empty aspect, corresponding to actions producing
+ // language-agnostic outputs from the proto_library; and
+ // 2. An event with aspect "ObjcProtoAspect", corresponding to Objective-C
+ // code generation.
+ string aspect = 2;
+ }
+
+ // Identifier of an event reporting that an action was completed (not all
+ // actions are reported, only the ones that can be considered important;
+ // this includes all failed actions).
+ message ActionCompletedId {
+ string primary_output = 1;
+ // Optional, the label of the owner of the action, for reference.
+ string label = 2;
+ // Optional, the id of the configuration of the action owner.
+ ConfigurationId configuration = 3;
+ }
+
+ // Identifier of an event reporting an event associated with an unconfigured
+ // label. Usually, this indicates a failure due to a missing input file. In
+ // any case, it will report some form of error (i.e., the payload will be an
+ // Aborted event); there are no regular events using this identifier. The
+ // purpose of those events is to serve as the root cause of a failed target.
+ message UnconfiguredLabelId {
+ string label = 1;
+ }
+
+ // Identifier of an event reporting an event associated with a configured
+ // label, usually a visibility error. In any case, an event with such an
+ // id will always report some form of error (i.e., the payload will be an
+ // Aborted event); there are no regular events using this identifier.
+ message ConfiguredLabelId {
+ string label = 1;
+ ConfigurationId configuration = 2;
+ }
+
+ // Identifier of an event reporting on an individual test run. The label
+ // identifies the test that is reported about, the remaining fields are
+ // in such a way as to uniquely identify the action within a build. In fact,
+ // attempts for the same test, run, shard triple are counted sequentially,
+ // starting with 1.
+ message TestResultId {
+ string label = 1;
+ ConfigurationId configuration = 5;
+ int32 run = 2;
+ int32 shard = 3;
+ int32 attempt = 4;
+ }
+
+ // Identifier of an event reporting the summary of a test.
+ message TestSummaryId {
+ string label = 1;
+ ConfigurationId configuration = 2;
+ }
+
+ // Identifier of an event reporting the summary of a target.
+ message TargetSummaryId {
+ string label = 1;
+ ConfigurationId configuration = 2;
+ }
+
+ // Identifier of the BuildFinished event, indicating the end of a build.
+ message BuildFinishedId {}
+
+ // Identifier of an event providing additional logs/statistics after
+ // completion of the build.
+ message BuildToolLogsId {}
+
+ // Identifier of an event providing build metrics after completion
+ // of the build.
+ message BuildMetricsId {}
+
+ // Identifier of an event providing convenience symlinks information.
+ message ConvenienceSymlinksIdentifiedId {}
+
+ oneof id {
+ UnknownBuildEventId unknown = 1;
+ ProgressId progress = 2;
+ BuildStartedId started = 3;
+ UnstructuredCommandLineId unstructured_command_line = 11;
+ StructuredCommandLineId structured_command_line = 18;
+ WorkspaceStatusId workspace_status = 14;
+ OptionsParsedId options_parsed = 12;
+ FetchId fetch = 17;
+ ConfigurationId configuration = 15;
+ TargetConfiguredId target_configured = 16;
+ PatternExpandedId pattern = 4;
+ PatternExpandedId pattern_skipped = 10;
+ NamedSetOfFilesId named_set = 13;
+ TargetCompletedId target_completed = 5;
+ ActionCompletedId action_completed = 6;
+ UnconfiguredLabelId unconfigured_label = 19;
+ ConfiguredLabelId configured_label = 21;
+ TestResultId test_result = 8;
+ TestSummaryId test_summary = 7;
+ TargetSummaryId target_summary = 26;
+ BuildFinishedId build_finished = 9;
+ BuildToolLogsId build_tool_logs = 20;
+ BuildMetricsId build_metrics = 22;
+ WorkspaceConfigId workspace = 23;
+ BuildMetadataId build_metadata = 24;
+ ConvenienceSymlinksIdentifiedId convenience_symlinks_identified = 25;
+ }
+}
+
+// Payload of an event summarizing the progress of the build so far. Those
+// events are also used to be parents of events where the more logical parent
+// event cannot be posted yet as the needed information is not yet complete.
+message Progress {
+ // The next chunk of stdout that bazel produced since the last progress event
+ // or the beginning of the build.
+ string stdout = 1;
+
+ // The next chunk of stderr that bazel produced since the last progress event
+ // or the beginning of the build.
+ string stderr = 2;
+}
+
+// Payload of an event indicating that an expected event will not come, as
+// the build is aborted prematurely for some reason.
+message Aborted {
+ enum AbortReason {
+ UNKNOWN = 0;
+
+ // The user requested the build to be aborted (e.g., by hitting Ctl-C).
+ USER_INTERRUPTED = 1;
+
+ // The user requested that no analysis be performed.
+ NO_ANALYZE = 8;
+
+ // The user requested that no build be carried out.
+ NO_BUILD = 9;
+
+ // The build or target was aborted as a timeout was exceeded.
+ TIME_OUT = 2;
+
+ // The build or target was aborted as some remote environment (e.g., for
+ // remote execution of actions) was not available in the expected way.
+ REMOTE_ENVIRONMENT_FAILURE = 3;
+
+ // Failure due to reasons entirely internal to the build tool, i.e. an
+ // unexpected crash due to programmer error.
+ INTERNAL = 4;
+
+ // A Failure occurred in the loading phase of a target.
+ LOADING_FAILURE = 5;
+
+ // A Failure occurred in the analysis phase of a target.
+ ANALYSIS_FAILURE = 6;
+
+ // Target build was skipped (e.g. due to incompatible CPU constraints).
+ SKIPPED = 7;
+
+ // Build incomplete due to an earlier build failure (e.g. --keep_going was
+ // set to false causing the build be ended upon failure).
+ INCOMPLETE = 10;
+
+ // The build tool ran out of memory and crashed.
+ OUT_OF_MEMORY = 11;
+ }
+ AbortReason reason = 1;
+
+  // A human-readable description with more details about the reason, where
+ // available and useful.
+ string description = 2;
+}
+
+// Payload of an event indicating the beginning of a new build. Usually, events
+// of those type start a new build-event stream. The target pattern requested
+// to be build is contained in one of the announced child events; it is an
+// invariant that precisely one of the announced child events has a non-empty
+// target pattern.
+message BuildStarted {
+ string uuid = 1;
+
+ // Start of the build in ms since the epoch.
+ //
+ // Deprecated, use `start_time` instead.
+ //
+ // TODO(yannic): Remove.
+ int64 start_time_millis = 2 [deprecated = true];
+
+ // Start of the build.
+ google.protobuf.Timestamp start_time = 9;
+
+ // Version of the build tool that is running.
+ string build_tool_version = 3;
+
+ // A human-readable description of all the non-default option settings
+ string options_description = 4;
+
+ // The name of the command that the user invoked.
+ string command = 5;
+
+ // The working directory from which the build tool was invoked.
+ string working_directory = 6;
+
+ // The directory of the workspace.
+ string workspace_directory = 7;
+
+ // The process ID of the Bazel server.
+ int64 server_pid = 8;
+}
+
+// Configuration related to the blaze workspace and output tree.
+message WorkspaceConfig {
+ // The root of the local blaze exec root. All output files live underneath
+ // this at "blaze-out/".
+ string local_exec_root = 1;
+}
+
+// Payload of an event reporting the command-line of the invocation as
+// originally received by the server. Note that this is not the command-line
+// given by the user, as the client adds information about the invocation,
+// like name and relevant entries of rc-files and client environment variables.
+// However, it does contain enough information to reproduce the build
+// invocation.
+message UnstructuredCommandLine {
+ repeated string args = 1;
+}
+
+// Payload of an event reporting on the parsed options, grouped in various ways.
+message OptionsParsed {
+ repeated string startup_options = 1;
+ repeated string explicit_startup_options = 2;
+ repeated string cmd_line = 3;
+ repeated string explicit_cmd_line = 4;
+ blaze.invocation_policy.InvocationPolicy invocation_policy = 5;
+ string tool_tag = 6;
+}
+
+// Payload of an event indicating that an external resource was fetched. This
+// event will only occur in streams where an actual fetch happened, not in ones
+// where a cached copy of the entity to be fetched was used.
+message Fetch {
+ bool success = 1;
+}
+
+// Payload of an event reporting the workspace status. Key-value pairs can be
+// provided by specifying the workspace_status_command to an executable that
+// returns one key-value pair per line of output (key and value separated by a
+// space).
+message WorkspaceStatus {
+ message Item {
+ string key = 1;
+ string value = 2;
+ }
+ repeated Item item = 1;
+}
+
+// Payload of an event reporting custom key-value metadata associated with the
+// build.
+message BuildMetadata {
+ // Custom metadata for the build.
+ map<string, string> metadata = 1;
+}
+
+// Payload of an event reporting details of a given configuration.
+message Configuration {
+ string mnemonic = 1;
+ string platform_name = 2;
+ string cpu = 3;
+ map<string, string> make_variable = 4;
+ // Whether this configuration is used for building tools.
+ bool is_tool = 5;
+}
+
+// Payload of the event indicating the expansion of a target pattern.
+// The main information is in the chaining part: the id will contain the
+// target pattern that was expanded and the children id will contain the
+// target or target pattern it was expanded to.
+message PatternExpanded {
+ // Represents a test_suite target and the tests that it expanded to. Nested
+ // test suites are recursively expanded. The test labels only contain the
+ // final test targets, not any nested suites.
+ message TestSuiteExpansion {
+ // The label of the test_suite rule.
+ string suite_label = 1;
+ // Labels of the test targets included in the suite. Includes all tests in
+ // the suite regardless of any filters or negative patterns which may result
+ // in the test not actually being run.
+ repeated string test_labels = 2;
+ }
+
+ // All test suites requested via top-level target patterns. Does not include
+ // test suites whose label matched a negative pattern.
+ repeated TestSuiteExpansion test_suite_expansions = 1;
+}
+
+// Enumeration type characterizing the size of a test, as specified by the
+// test rule.
+enum TestSize {
+ UNKNOWN = 0;
+ SMALL = 1;
+ MEDIUM = 2;
+ LARGE = 3;
+ ENORMOUS = 4;
+}
+
+// Payload of the event indicating that the configurations for a target have
+// been identified. As with pattern expansion the main information is in the
+// chaining part: the id will contain the target that was configured and the
+// children id will contain the configured targets it was configured to.
+message TargetConfigured {
+  // The kind of target (e.g. "cc_library rule", "source file",
+ // "generated file") where the completion is reported.
+ string target_kind = 1;
+
+ // The size of the test, if the target is a test target. Unset otherwise.
+ TestSize test_size = 2;
+
+ // List of all tags associated with this target (for all possible
+ // configurations).
+ repeated string tag = 3;
+}
+
+message File {
+ // A sequence of prefixes to apply to the file name to construct a full path.
+ // In most but not all cases, there will be 3 entries:
+ // 1. A root output directory, eg "bazel-out"
+ // 2. A configuration mnemonic, eg "k8-fastbuild"
+ // 3. An output category, eg "genfiles"
+ repeated string path_prefix = 4;
+
+ // identifier indicating the nature of the file (e.g., "stdout", "stderr")
+ string name = 1;
+
+ oneof file {
+ // A location where the contents of the file can be found. The string is
+ // encoded according to RFC2396.
+ string uri = 2;
+ // The contents of the file, if they are guaranteed to be short.
+ bytes contents = 3;
+ }
+
+ // Digest of the file, using the build tool's configured digest algorithm,
+ // hex-encoded.
+ string digest = 5;
+
+ // Length of the file in bytes.
+ int64 length = 6;
+}
+
+// Payload of a message to describe a set of files, usually build artifacts, to
+// be referred to later by their name. In this way, files that occur identically
+// as outputs of several targets have to be named only once.
+message NamedSetOfFiles {
+ // Files that belong to this named set of files.
+ repeated File files = 1;
+
+ // Other named sets whose members also belong to this set.
+ repeated BuildEventId.NamedSetOfFilesId file_sets = 2;
+}
+
+// Payload of the event indicating the completion of an action. The main purpose
+// of posting those events is to provide details on the root cause for a target
+// failing; however, consumers of the build-event protocol must not assume
+// that only failed actions are posted.
+message ActionExecuted {
+ bool success = 1;
+
+ // The mnemonic of the action that was executed
+ string type = 8;
+
+ // The exit code of the action, if it is available.
+ int32 exit_code = 2;
+
+ // Location where to find the standard output of the action
+ // (e.g., a file path).
+ File stdout = 3;
+
+ // Location where to find the standard error of the action
+ // (e.g., a file path).
+ File stderr = 4;
+
+ // Deprecated. This field is now present on ActionCompletedId.
+ string label = 5 [deprecated = true];
+
+ // Deprecated. This field is now present on ActionCompletedId.
+ BuildEventId.ConfigurationId configuration = 7 [deprecated = true];
+
+ // Primary output; only provided for successful actions.
+ File primary_output = 6;
+
+ // The command-line of the action, if the action is a command.
+ repeated string command_line = 9;
+
+ // List of paths to log files
+ repeated File action_metadata_logs = 10;
+
+ // Only populated if success = false, and sometimes not even then.
+ failure_details.FailureDetail failure_detail = 11;
+}
+
+// Collection of all output files belonging to that output group.
+message OutputGroup {
+ // Ids of fields that have been removed.
+ reserved 2;
+
+ // Name of the output group
+ string name = 1;
+
+ // List of file sets that belong to this output group as well.
+ repeated BuildEventId.NamedSetOfFilesId file_sets = 3;
+
+ // Indicates that one or more of the output group's files were not built
+ // successfully (the generating action failed).
+ bool incomplete = 4;
+}
+
+// Payload of the event indicating the completion of a target. The target is
+// specified in the id. If the target failed the root causes are provided as
+// children events.
+message TargetComplete {
+ bool success = 1;
+
+  // The kind of target (e.g. "cc_library rule", "source file",
+ // "generated file") where the completion is reported.
+ // Deprecated: use the target_kind field in TargetConfigured instead.
+ string target_kind = 5 [deprecated = true];
+
+ // The size of the test, if the target is a test target. Unset otherwise.
+ // Deprecated: use the test_size field in TargetConfigured instead.
+ TestSize test_size = 6 [deprecated = true];
+
+ // The output files are arranged by their output group. If an output file
+ // is part of multiple output groups, it appears once in each output
+ // group.
+ repeated OutputGroup output_group = 2;
+
+ // Temporarily, also report the important outputs directly. This is only to
+ // allow existing clients help transition to the deduplicated representation;
+ // new clients should not use it.
+ repeated File important_output = 4 [deprecated = true];
+
+ // Report output artifacts (referenced transitively via output_group) which
+ // emit directories instead of singleton files. These directory_output entries
+ // will never include a uri.
+ repeated File directory_output = 8;
+
+ // List of tags associated with this configured target.
+ repeated string tag = 3;
+
+ // The timeout specified for test actions under this configured target.
+ //
+ // Deprecated, use `test_timeout` instead.
+ //
+ // TODO(yannic): Remove.
+ int64 test_timeout_seconds = 7 [deprecated = true];
+
+ // The timeout specified for test actions under this configured target.
+ google.protobuf.Duration test_timeout = 10;
+
+ // Failure information about the target, only populated if success is false,
+ // and sometimes not even then. Equal to one of the ActionExecuted
+ // failure_detail fields for one of the root cause ActionExecuted events.
+ failure_details.FailureDetail failure_detail = 9;
+}
+
+enum TestStatus {
+ NO_STATUS = 0;
+ PASSED = 1;
+ FLAKY = 2;
+ TIMEOUT = 3;
+ FAILED = 4;
+ INCOMPLETE = 5;
+ REMOTE_FAILURE = 6;
+ FAILED_TO_BUILD = 7;
+ TOOL_HALTED_BEFORE_TESTING = 8;
+}
+
+// Payload on events reporting about individual test action.
+message TestResult {
+ reserved 1;
+
+ // The status of this test.
+ TestStatus status = 5;
+
+ // Additional details about the status of the test. This is intended for
+ // user display and must not be parsed.
+ string status_details = 9;
+
+ // True, if the reported attempt is taken from the tool's local cache.
+ bool cached_locally = 4;
+
+ // Time in milliseconds since the epoch at which the test attempt was started.
+  // Note: for cached test results, this time can be before the start of the
+ // build.
+ //
+ // Deprecated, use `test_attempt_start` instead.
+ //
+ // TODO(yannic): Remove.
+ int64 test_attempt_start_millis_epoch = 6 [deprecated = true];
+
+ // Time at which the test attempt was started.
+  // Note: for cached test results, this time can be before the start of the
+ // build.
+ google.protobuf.Timestamp test_attempt_start = 10;
+
+ // Time the test took to run. For locally cached results, this is the time
+ // the cached invocation took when it was invoked.
+ //
+ // Deprecated, use `test_attempt_duration` instead.
+ //
+ // TODO(yannic): Remove.
+ int64 test_attempt_duration_millis = 3 [deprecated = true];
+
+ // Time the test took to run. For locally cached results, this is the time
+ // the cached invocation took when it was invoked.
+ google.protobuf.Duration test_attempt_duration = 11;
+
+ // Files (logs, test.xml, undeclared outputs, etc) generated by that test
+ // action.
+ repeated File test_action_output = 2;
+
+ // Warnings generated by that test action.
+ repeated string warning = 7;
+
+ // Message providing optional meta data on the execution of the test action,
+ // if available.
+ message ExecutionInfo {
+ // Deprecated, use TargetComplete.test_timeout instead.
+ int32 timeout_seconds = 1 [deprecated = true];
+
+ // Name of the strategy to execute this test action (e.g., "local",
+ // "remote")
+ string strategy = 2;
+
+ // True, if the reported attempt was a cache hit in a remote cache.
+ bool cached_remotely = 6;
+
+ // The exit code of the test action.
+ int32 exit_code = 7;
+
+ // The hostname of the machine where the test action was executed (in case
+ // of remote execution), if known.
+ string hostname = 3;
+
+ // Represents a hierarchical timing breakdown of an activity.
+ // The top level time should be the total time of the activity.
+ // Invariant: `time` >= sum of `time`s of all direct children.
+ message TimingBreakdown {
+ repeated TimingBreakdown child = 1;
+ string name = 2;
+ // Deprecated, use `time` instead.
+ //
+ // TODO(yannic): Remove.
+ int64 time_millis = 3 [deprecated = true];
+ google.protobuf.Duration time = 4;
+ }
+ // Timing breakdown of this test action; the top-level entry covers the
+ // whole action.
+ TimingBreakdown timing_breakdown = 4;
+
+ // A generic name/value pair for a resource consumed by the test action.
+ message ResourceUsage {
+ string name = 1;
+ int64 value = 2;
+ }
+ repeated ResourceUsage resource_usage = 5;
+ }
+ ExecutionInfo execution_info = 8;
+}
+
+// Payload of the event summarizing a test.
+message TestSummary {
+ // Wrapper around BlazeTestStatus to support importing that enum to proto3.
+ // Overall status of test, accumulated over all runs, shards, and attempts.
+ TestStatus overall_status = 5;
+
+ // Total number of shard attempts.
+ // E.g., if a target has 4 runs, 3 shards, each with 2 attempts,
+ // then total_run_count will be 4*3*2 = 24.
+ int32 total_run_count = 1;
+
+ // Value of runs_per_test for the test.
+ int32 run_count = 10;
+
+ // Number of attempts.
+ // If there are a different number of attempts per shard, the highest attempt
+ // count across all shards for each run is used.
+ int32 attempt_count = 15;
+
+ // Number of shards.
+ int32 shard_count = 11;
+
+ // Path to logs of passed runs.
+ repeated File passed = 3;
+
+ // Path to logs of failed runs.
+ repeated File failed = 4;
+
+ // Total number of cached test actions.
+ int32 total_num_cached = 6;
+
+ // When the test first started running.
+ //
+ // Deprecated, use `first_start_time` instead.
+ //
+ // TODO(yannic): Remove.
+ int64 first_start_time_millis = 7 [deprecated = true];
+
+ // When the test first started running.
+ google.protobuf.Timestamp first_start_time = 13;
+
+ // When the last test action completed.
+ //
+ // Deprecated, use `last_stop_time` instead.
+ //
+ // TODO(yannic): Remove.
+ int64 last_stop_time_millis = 8 [deprecated = true];
+
+ // When the last test action completed.
+ google.protobuf.Timestamp last_stop_time = 14;
+
+ // The total runtime of the test.
+ //
+ // Deprecated, use `total_run_duration` instead.
+ //
+ // TODO(yannic): Remove.
+ int64 total_run_duration_millis = 9 [deprecated = true];
+
+ // The total runtime of the test.
+ google.protobuf.Duration total_run_duration = 12;
+}
+
+// Payload of the event summarizing a target (test or non-test).
+message TargetSummary {
+ // Conjunction of TargetComplete events for this target, including aspects
+ // (i.e., true only if every TargetComplete reported success).
+ bool overall_build_success = 1;
+
+ // Repeats TestSummary's overall_status if available.
+ TestStatus overall_test_status = 2;
+}
+
+// Event indicating the end of a build.
+message BuildFinished {
+ // Exit code of a build. The possible values correspond to the predefined
+ // codes in bazel's lib.ExitCode class, as well as any custom exit code a
+ // module might define. The predefined exit codes are subject to change (but
+ // rarely do) and are not part of the public API.
+ //
+ // A build was successful iff ExitCode.code equals 0.
+ message ExitCode {
+ // The name of the exit code.
+ string name = 1;
+
+ // The exit code.
+ int32 code = 2;
+ }
+
+ // Things that happened during the build that could be of interest.
+ message AnomalyReport {
+ // Was the build suspended at any time during the build.
+ // Examples of suspensions are SIGSTOP, or the hardware being put to sleep.
+ // If was_suspended is true, then most of the timings for this build are
+ // suspect.
+ // NOTE: This is no longer set and is deprecated.
+ bool was_suspended = 1;
+ }
+
+ // If the build succeeded or failed.
+ bool overall_success = 1 [deprecated = true];
+
+ // The overall status of the build. A build was successful iff
+ // ExitCode.code equals 0.
+ ExitCode exit_code = 3;
+
+ // End of the build in ms since the epoch.
+ //
+ // Deprecated, use `finish_time` instead.
+ //
+ // TODO(yannic): Remove.
+ int64 finish_time_millis = 2 [deprecated = true];
+
+ // End of the build.
+ google.protobuf.Timestamp finish_time = 5;
+
+ // Deprecated; no longer set (see the NOTE on AnomalyReport above).
+ AnomalyReport anomaly_report = 4 [deprecated = true];
+}
+
+message BuildMetrics {
+ message ActionSummary {
+ // The total number of actions created and registered during the build,
+ // including both aspects and configured targets. This metric includes
+ // unused actions that were constructed but not executed during this build.
+ // It does not include actions that were created on prior builds that are
+ // still valid, even if those actions had to be re-executed on this build.
+ // For the total number of actions that would be created if this invocation
+ // were "clean", see BuildGraphMetrics below.
+ int64 actions_created = 1;
+
+ // The total number of actions created this build just by configured
+ // targets. Used mainly to allow consumers of actions_created, which used to
+ // not include aspects' actions, to normalize across the Blaze release that
+ // switched actions_created to include all created actions.
+ int64 actions_created_not_including_aspects = 3;
+
+ // The total number of actions executed during the build. This includes any
+ // remote cache hits, but excludes local action cache hits.
+ int64 actions_executed = 2;
+
+ // Per-mnemonic action statistics.
+ message ActionData {
+ string mnemonic = 1;
+
+ // The total number of actions of this type executed during the build. As
+ // above, includes remote cache hits but excludes local action cache hits.
+ int64 actions_executed = 2;
+
+ // When the first action of this type started being executed, in
+ // milliseconds from the epoch.
+ int64 first_started_ms = 3;
+
+ // When the last action of this type ended being executed, in
+ // milliseconds from the epoch.
+ int64 last_ended_ms = 4;
+ }
+ // Contains the top N actions by number of actions executed.
+ repeated ActionData action_data = 4;
+
+ // Deprecated. The total number of remote cache hits.
+ int64 remote_cache_hits = 5 [deprecated = true];
+
+ // Number of actions executed by a named runner (e.g. a spawn strategy).
+ message RunnerCount {
+ string name = 1;
+ int32 count = 2;
+ }
+ repeated RunnerCount runner_count = 6;
+ }
+ ActionSummary action_summary = 1;
+
+ message MemoryMetrics {
+ // Size of the JVM heap post build in bytes. This is only collected if
+ // --memory_profile is set, since it forces a full GC.
+ int64 used_heap_size_post_build = 1;
+
+ // Size of the peak JVM heap size in bytes post GC. Note that this reports 0
+ // if there was no major GC during the build.
+ int64 peak_post_gc_heap_size = 2;
+
+ // Size of the peak tenured space JVM heap size event in bytes post GC. Note
+ // that this reports 0 if there was no major GC during the build.
+ int64 peak_post_gc_tenured_space_heap_size = 4;
+
+ message GarbageMetrics {
+ // Type of garbage collected, e.g. G1 Old Gen.
+ string type = 1;
+ // Number of bytes of garbage of the given type collected during this
+ // invocation.
+ int64 garbage_collected = 2;
+ }
+
+ repeated GarbageMetrics garbage_metrics = 3;
+ }
+ MemoryMetrics memory_metrics = 2;
+
+ message TargetMetrics {
+ // DEPRECATED
+ // No longer populated. It never measured what it was supposed to (targets
+ // loaded): it counted targets that were analyzed even if the underlying
+ // package had not changed.
+ // TODO(janakr): rename and remove.
+ int64 targets_loaded = 1;
+
+ // Number of targets/aspects configured during this build. Does not include
+ // targets/aspects that were configured on prior builds on this server and
+ // were cached. See BuildGraphMetrics below if you need that.
+ int64 targets_configured = 2;
+
+ // Number of configured targets analyzed during this build. Does not include
+ // aspects. Used mainly to allow consumers of targets_configured, which used
+ // to not include aspects, to normalize across the Blaze release that
+ // switched targets_configured to include aspects.
+ int64 targets_configured_not_including_aspects = 3;
+ }
+ TargetMetrics target_metrics = 3;
+
+ message PackageMetrics {
+ // Number of BUILD files (aka packages) successfully loaded during this
+ // build.
+ //
+ // [For Bazel binaries built at source states] Before Dec 2021, this value
+ // was the number of packages attempted to be loaded, for a particular
+ // definition of "attempted".
+ //
+ // After Dec 2021, this value would sometimes overcount because the same
+ // package could sometimes be attempted to be loaded multiple times due to
+ // memory pressure.
+ //
+ // After Feb 2022, this value is the number of packages successfully
+ // loaded.
+ int64 packages_loaded = 1;
+ }
+ PackageMetrics package_metrics = 4;
+
+ message TimingMetrics {
+ // The CPU time in milliseconds consumed during this build.
+ int64 cpu_time_in_ms = 1;
+ // The elapsed wall time in milliseconds during this build.
+ int64 wall_time_in_ms = 2;
+ // The elapsed wall time in milliseconds during the analysis phase.
+ // When analysis and execution phases are interleaved, this measures the
+ // elapsed time from the first analysis work to the last.
+ int64 analysis_phase_time_in_ms = 3;
+ }
+ TimingMetrics timing_metrics = 5;
+
+ message CumulativeMetrics {
+ // One-indexed number of "analyses" the server has run, including the
+ // current one. Will be incremented for every build/test/cquery/etc. command
+ // that reaches the analysis phase.
+ int32 num_analyses = 11;
+ // One-indexed number of "builds" the server has run, including the current
+ // one. Will be incremented for every build/test/run/etc. command that
+ // reaches the execution phase.
+ int32 num_builds = 12;
+ }
+
+ CumulativeMetrics cumulative_metrics = 6;
+
+ message ArtifactMetrics {
+ reserved 1;
+
+ // Aggregate count and total size of a set of files.
+ message FilesMetric {
+ int64 size_in_bytes = 1;
+ int32 count = 2;
+ }
+
+ // Measures all source files newly read this build. Does not include
+ // unchanged sources on incremental builds.
+ FilesMetric source_artifacts_read = 2;
+ // Measures all output artifacts from executed actions. This includes
+ // actions that were cached locally (via the action cache) or remotely (via
+ // a remote cache or executor), but does *not* include outputs of actions
+ // that were cached internally in Skyframe.
+ FilesMetric output_artifacts_seen = 3;
+ // Measures all output artifacts from actions that were cached locally
+ // via the action cache. These artifacts were already present on disk at the
+ // start of the build. Does not include Skyframe-cached actions' outputs.
+ FilesMetric output_artifacts_from_action_cache = 4;
+ // Measures all artifacts that belong to a top-level output group. Does not
+ // deduplicate, so if there are two top-level targets in this build that
+ // share an artifact, it will be counted twice.
+ FilesMetric top_level_artifacts = 5;
+ }
+
+ ArtifactMetrics artifact_metrics = 7;
+
+ // Information about the size and shape of the build graph. Some fields may
+ // not be populated if Bazel was able to skip steps due to caching.
+ message BuildGraphMetrics {
+ // How many configured targets/aspects were in this build, including any
+ // that were analyzed on a prior build and are still valid. May not be
+ // populated if analysis phase was fully cached. Note: for historical
+ // reasons this includes input/output files and other configured targets
+ // that do not actually have associated actions.
+ int32 action_lookup_value_count = 1;
+ // How many configured targets alone were in this build: always at most
+ // action_lookup_value_count. Useful mainly for historical comparisons to
+ // TargetMetrics.targets_configured, which used to not count aspects. This
+ // also includes configured targets that do not have associated actions.
+ int32 action_lookup_value_count_not_including_aspects = 5;
+ // How many actions belonged to the configured targets/aspects above. It may
+ // not be necessary to execute all of these actions to build the requested
+ // targets. May not be populated if analysis phase was fully cached.
+ int32 action_count = 2;
+ // How many actions belonged to configured targets: always at most
+ // action_count. Useful mainly for historical comparisons to
+ // ActionMetrics.actions_created, which used to not count aspects' actions.
+ int32 action_count_not_including_aspects = 6;
+ // How many "input file" configured targets there were: one per source file.
+ // Should agree with artifact_metrics.source_artifacts_read.count above.
+ int32 input_file_configured_target_count = 7;
+ // How many "output file" configured targets there were: output files that
+ // are targets (not implicit outputs).
+ int32 output_file_configured_target_count = 8;
+ // How many "other" configured targets there were (like alias,
+ // package_group, and other non-rule non-file configured targets).
+ int32 other_configured_target_count = 9;
+ // How many artifacts are outputs of the above actions. May not be populated
+ // if analysis phase was fully cached.
+ int32 output_artifact_count = 3;
+ // How many Skyframe nodes there are in memory at the end of the build. This
+ // may underestimate the number of nodes when running with memory-saving
+ // settings or with Skybuild, and may overestimate if there are nodes from
+ // prior evaluations still in the cache.
+ int32 post_invocation_skyframe_node_count = 4;
+ }
+
+ BuildGraphMetrics build_graph_metrics = 8;
+
+ // Information about all workers that were alive during the invocation.
+ message WorkerMetrics {
+ // Unique id of worker.
+ int32 worker_id = 1;
+ // Worker process id. If there is no process for the worker, this equals
+ // zero.
+ uint32 process_id = 2;
+ // Mnemonic of running worker.
+ string mnemonic = 3;
+ // Multiplex or singleplex worker.
+ bool is_multiplex = 4;
+ // Whether the worker uses a sandboxed file system.
+ bool is_sandbox = 5;
+ // Whether worker stats were measured at the end of the invocation.
+ bool is_measurable = 6;
+
+ // Information collected from worker at some point.
+ message WorkerStats {
+ // Epoch unix time of collection of metrics.
+ int64 collect_time_in_ms = 1;
+ // RSS size of worker process.
+ int32 worker_memory_in_kb = 2;
+ // Epoch unix time of last action started on specific worker.
+ int64 last_action_start_time_in_ms = 3;
+ }
+
+ // Combined workers statistics.
+ repeated WorkerStats worker_stats = 7;
+ }
+
+ repeated WorkerMetrics worker_metrics = 9;
+
+ // Information about host network.
+ message NetworkMetrics {
+ // Information for all the network traffic going on on the host machine during the invocation.
+ message SystemNetworkStats {
+ // Total bytes sent during the invocation.
+ uint64 bytes_sent = 1;
+ // Total bytes received during the invocation.
+ uint64 bytes_recv = 2;
+ // Total packets sent during the invocation.
+ uint64 packets_sent = 3;
+ // Total packets received during the invocation.
+ uint64 packets_recv = 4;
+ // Peak bytes/sec sent during the invocation.
+ uint64 peak_bytes_sent_per_sec = 5;
+ // Peak bytes/sec received during the invocation.
+ uint64 peak_bytes_recv_per_sec = 6;
+ // Peak packets/sec sent during the invocation.
+ uint64 peak_packets_sent_per_sec = 7;
+ // Peak packets/sec received during the invocation.
+ uint64 peak_packets_recv_per_sec = 8;
+ }
+
+ SystemNetworkStats system_network_stats = 1;
+ }
+
+ NetworkMetrics network_metrics = 10;
+}
+
+// Event providing additional statistics/logs after completion of the build.
+message BuildToolLogs {
+ // The log and statistics files produced by the build tool.
+ repeated File log = 1;
+}
+
+// Event describing all convenience symlinks (i.e., workspace symlinks) to be
+// created or deleted once the execution phase has begun. Note that this event
+// does not say anything about whether or not the build tool actually executed
+// these filesystem operations; it only says what logical operations should be
+// performed. This event is emitted exactly once per build; if no symlinks are
+// to be modified, the event is still emitted with empty contents.
+message ConvenienceSymlinksIdentified {
+ // The symlink operations to perform; may be empty.
+ repeated ConvenienceSymlink convenience_symlinks = 1;
+}
+
+// The message that contains what type of action to perform on a given path and
+// target of a symlink.
+message ConvenienceSymlink {
+ enum Action {
+ // Unspecified action.
+ UNKNOWN = 0;
+
+ // Indicates a symlink should be created, or overwritten if it already
+ // exists.
+ CREATE = 1;
+
+ // Indicates a symlink should be deleted if it already exists.
+ DELETE = 2;
+ }
+
+ // The path of the symlink to be created or deleted, absolute or relative to
+ // the workspace, creating any directories necessary. If a symlink already
+ // exists at that location, then it should be replaced by a symlink pointing
+ // to the new target.
+ string path = 1;
+
+ // The operation we are performing on the symlink.
+ Action action = 2;
+
+ // If action is CREATE, this is the target path that the symlink should point
+ // to. If the path points underneath the output base, it is relative to the
+ // output base; otherwise it is absolute.
+ //
+ // If action is DELETE, this field is not set.
+ string target = 3;
+}
+
+// Message describing a build event. Events will have an identifier that
+// is unique within a given build invocation; they also announce follow-up
+// events as children. More details, which are specific to the kind of event
+// that is observed, are provided in the payload. More options for the payload
+// might be added in the future.
+message BuildEvent {
+ reserved 11, 19;
+ // Identifier of this event, unique within the invocation.
+ BuildEventId id = 1;
+ // Identifiers of follow-up events announced by this event.
+ repeated BuildEventId children = 2;
+ // Marks the final event of the build event stream.
+ bool last_message = 20;
+ // The event-kind-specific details; exactly one is set.
+ oneof payload {
+ Progress progress = 3;
+ Aborted aborted = 4;
+ BuildStarted started = 5;
+ UnstructuredCommandLine unstructured_command_line = 12;
+ command_line.CommandLine structured_command_line = 22;
+ OptionsParsed options_parsed = 13;
+ WorkspaceStatus workspace_status = 16;
+ Fetch fetch = 21;
+ Configuration configuration = 17;
+ PatternExpanded expanded = 6;
+ TargetConfigured configured = 18;
+ ActionExecuted action = 7;
+ NamedSetOfFiles named_set_of_files = 15;
+ TargetComplete completed = 8;
+ TestResult test_result = 10;
+ TestSummary test_summary = 9;
+ TargetSummary target_summary = 28;
+ BuildFinished finished = 14;
+ BuildToolLogs build_tool_logs = 23;
+ BuildMetrics build_metrics = 24;
+ WorkspaceConfig workspace_info = 25;
+ BuildMetadata build_metadata = 26;
+ ConvenienceSymlinksIdentified convenience_symlinks_identified = 27;
+ }
+}
diff --git a/atest/bazel/runner/src/main/protobuf/command_line.proto b/atest/bazel/runner/src/main/protobuf/command_line.proto
new file mode 100644
index 0000000..d5fa6ac
--- /dev/null
+++ b/atest/bazel/runner/src/main/protobuf/command_line.proto
@@ -0,0 +1,102 @@
+// Copyright 2017 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+package command_line;
+
+// option java_api_version = 2;
+option java_package = "com.google.devtools.build.lib.runtime.proto";
+
+import "src/main/protobuf/option_filters.proto";
+
+// Representation of a Bazel command line.
+message CommandLine {
+ // A title for this command line value, to differentiate it from others.
+ // In particular, a single invocation may wish to report both the literal and
+ // canonical command lines, and this label would be used to differentiate
+ // between the two versions. This is a string for flexibility.
+ string command_line_label = 1;
+
+ // A Bazel command line is made of distinct parts. For example,
+ // `bazel --nomaster_bazelrc test --nocache_test_results //foo:aTest`
+ // has the executable "bazel", a startup flag, a command "test", a command
+ // flag, and a test target. There could be many more flags and targets, or
+ // none (`bazel info` for example), but the basic structure is there. The
+ // command line should be broken down into these logical sections here.
+ repeated CommandLineSection sections = 2;
+}
+
+// A section of the Bazel command line.
+message CommandLineSection {
+ // The name of this section, such as "startup_option" or "command".
+ string section_label = 1;
+
+ // Exactly one of the following is set, depending on the kind of section.
+ oneof section_type {
+ // Sections with non-options, such as the list of targets or the command,
+ // should use simple string chunks.
+ ChunkList chunk_list = 2;
+
+ // Startup and command options are lists of options and belong here.
+ OptionList option_list = 3;
+ }
+}
+
+// Wrapper to allow a list of strings in the "oneof" section_type.
+message ChunkList {
+ // The string chunks, in order.
+ repeated string chunk = 1;
+}
+
+// Wrapper to allow a list of options in the "oneof" section_type.
+message OptionList {
+ // The options, in order.
+ repeated Option option = 1;
+}
+
+// A single command line option.
+//
+// This represents the option itself, but does not take into account the type of
+// option or how the parser interpreted it. If this option is part of a command
+// line that represents the actual input that Bazel received, it would, for
+// example, include expansion flags as they are. However, if this option
+// represents the canonical form of the command line, with the values as Bazel
+// understands them, then the expansion flag, which has no value, would not
+// appear, and the flags it expands to would.
+message Option {
+ // How the option looks with the option and its value combined. Depending on
+ // the purpose of this command line report, this could be the canonical
+ // form, or the way that the flag was set.
+ //
+ // Some examples: this might be `--foo=bar` form, or `--foo bar` with a space;
+ // for boolean flags, `--nobaz` is accepted on top of `--baz=false` and other
+ // negating values, or for a positive value, the unqualified `--baz` form
+ // is also accepted. This could also be a short `-b`, if the flag has an
+ // abbreviated form.
+ string combined_form = 1;
+
+ // The canonical name of the option, without the preceding dashes.
+ string option_name = 2;
+
+ // The value of the flag, or unset for flags that do not take values.
+ // Especially for boolean flags, this should be in canonical form; the
+ // combined_form field above gives room for showing the flag as it was set
+ // if that is preferred.
+ string option_value = 3;
+
+ // This flag's tagged effects. See OptionEffectTag's java documentation for
+ // details.
+ repeated options.OptionEffectTag effect_tags = 4;
+
+ // Metadata about the flag. See OptionMetadataTag's java documentation for
+ // details.
+ repeated options.OptionMetadataTag metadata_tags = 5;
+}
diff --git a/atest/bazel/runner/src/main/protobuf/failure_details.proto b/atest/bazel/runner/src/main/protobuf/failure_details.proto
new file mode 100644
index 0000000..ea0873c
--- /dev/null
+++ b/atest/bazel/runner/src/main/protobuf/failure_details.proto
@@ -0,0 +1,1306 @@
+// Copyright 2020 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file's messages describe any failure(s) that occurred during Bazel's
+// handling of a request. The intent is to provide more detail to a Bazel client
+// than is conveyed with an exit code, to help those clients decide how to
+// respond to, or classify, a failure.
+
+syntax = "proto3";
+
+package failure_details;
+
+option java_package = "com.google.devtools.build.lib.server";
+
+import "google/protobuf/descriptor.proto";
+
+// Metadata attached (via the `metadata` enum-value option declared below) to
+// each failure subcategory enum value in this file.
+message FailureDetailMetadata {
+ // The numeric exit code the build tool associates with this subcategory.
+ uint32 exit_code = 1;
+}
+
+// Custom enum-value option used throughout this file to annotate failure
+// subcategory enum values with their FailureDetailMetadata (e.g. exit code).
+ extend google.protobuf.EnumValueOptions {
+ FailureDetailMetadata metadata = 1078;
+}
+
+// The FailureDetail message type is designed such that consumers can extract a
+// basic classification of a FailureDetail message even if the consumer was
+// built with a stale definition. This forward compatibility is implemented via
+// conventions on FailureDetail and its submessage types, as follows.
+//
+// *** FailureDetail field numbers
+//
+// Field numbers 1 through 100 (inclusive) are reserved for generally applicable
+// values. Any number of these fields may be set on a FailureDetail message.
+//
+// Field numbers 101 through 10,000 (inclusive) are reserved for use inside the
+// "oneof" structure. Only one of these values should be set on a FailureDetail
+// message.
+//
+// Additional field numbers are unlikely to be needed, but, for extreme future-
+// proofing purposes, field numbers 10,001 through 1,000,000 (inclusive;
+// excluding protobuf's reserved range 19000 through 19999) are reserved for
+// additional generally applicable values.
+//
+// *** FailureDetail's "oneof" submessages
+//
+// Each field in the "oneof" structure is a submessage corresponding to a
+// category of failure.
+//
+// In each of these submessage types, field number 1 is an enum whose values
+// correspond to a subcategory of the failure. Generally, the enum's constant
+// which maps to 0 should be interpreted as "unspecified", though this is not
+// required.
+//
+// *** Recommended forward compatibility strategy
+//
+// The recommended forward compatibility strategy is to reduce a FailureDetail
+// message to a pair of integers.
+//
+// The first integer corresponds to the field number of the submessage set
+// inside FailureDetail's "oneof", which corresponds with the failure's
+// category.
+//
+// The second integer corresponds to the value of the enum at field number 1
+// within that submessage, which corresponds with the failure's subcategory.
+//
+// WARNING: This functionality is experimental and should not be relied on at
+// this time.
+// TODO(mschaller): remove experimental warning
+message FailureDetail {
+ // A short human-readable message describing the failure, for debugging.
+ //
+ // This value is *not* intended to be used algorithmically.
+ string message = 1;
+
+ // Reserved for future generally applicable values. Any of these may be set.
+ reserved 2 to 100;
+
+ // The category of the failure; exactly one is set.
+ oneof category {
+ Interrupted interrupted = 101;
+ ExternalRepository external_repository = 103;
+ BuildProgress build_progress = 104;
+ RemoteOptions remote_options = 106;
+ ClientEnvironment client_environment = 107;
+ Crash crash = 108;
+ SymlinkForest symlink_forest = 110;
+ PackageOptions package_options = 114;
+ RemoteExecution remote_execution = 115;
+ Execution execution = 116;
+ Workspaces workspaces = 117;
+ CrashOptions crash_options = 118;
+ Filesystem filesystem = 119;
+ ExecutionOptions execution_options = 121;
+ Command command = 122;
+ Spawn spawn = 123;
+ GrpcServer grpc_server = 124;
+ CanonicalizeFlags canonicalize_flags = 125;
+ BuildConfiguration build_configuration = 126;
+ InfoCommand info_command = 127;
+ MemoryOptions memory_options = 129;
+ Query query = 130;
+ LocalExecution local_execution = 132;
+ ActionCache action_cache = 134;
+ FetchCommand fetch_command = 135;
+ SyncCommand sync_command = 136;
+ Sandbox sandbox = 137;
+ IncludeScanning include_scanning = 139;
+ TestCommand test_command = 140;
+ ActionQuery action_query = 141;
+ TargetPatterns target_patterns = 142;
+ CleanCommand clean_command = 144;
+ ConfigCommand config_command = 145;
+ ConfigurableQuery configurable_query = 146;
+ DumpCommand dump_command = 147;
+ HelpCommand help_command = 148;
+ MobileInstall mobile_install = 150;
+ ProfileCommand profile_command = 151;
+ RunCommand run_command = 152;
+ VersionCommand version_command = 153;
+ PrintActionCommand print_action_command = 154;
+ WorkspaceStatus workspace_status = 158;
+ JavaCompile java_compile = 159;
+ ActionRewinding action_rewinding = 160;
+ CppCompile cpp_compile = 161;
+ StarlarkAction starlark_action = 162;
+ NinjaAction ninja_action = 163;
+ DynamicExecution dynamic_execution = 164;
+ FailAction fail_action = 166;
+ SymlinkAction symlink_action = 167;
+ CppLink cpp_link = 168;
+ LtoAction lto_action = 169;
+ TestAction test_action = 172;
+ Worker worker = 173;
+ Analysis analysis = 174;
+ PackageLoading package_loading = 175;
+ Toolchain toolchain = 177;
+ StarlarkLoading starlark_loading = 179;
+ ExternalDeps external_deps = 181;
+ DiffAwareness diff_awareness = 182;
+ ModqueryCommand modquery_command = 183;
+ BuildReport build_report = 184;
+ }
+
+ reserved 102; // For internal use
+ reserved 105; // For internal use
+ reserved 109; // For internal use
+ reserved 111 to 113; // For internal use
+ reserved 120; // For internal use
+ reserved 128; // For internal use
+ reserved 131; // For internal use
+ reserved 133; // For internal use
+ reserved 138; // For internal use
+ reserved 143; // For internal use
+ reserved 149; // For internal use
+ reserved 155 to 157; // For internal use
+ reserved 165; // For internal use
+ reserved 170 to 171; // For internal use
+ reserved 176; // For internal use
+ reserved 178; // For internal use
+ reserved 180; // For internal use
+}
+
+message Interrupted {
+ enum Code {
+ // Unknown interrupt. Avoid using this code, instead use INTERRUPTED.
+ INTERRUPTED_UNKNOWN = 0 [(metadata) = { exit_code: 8 }];
+
+ // Command was interrupted (cancelled).
+ INTERRUPTED = 28 [(metadata) = { exit_code: 8 }];
+
+ // The following more specific interrupt codes have been deprecated and
+ // consolidated into INTERRUPTED.
+ DEPRECATED_BUILD = 4 [(metadata) = { exit_code: 8 }];
+ DEPRECATED_BUILD_COMPLETION = 5 [(metadata) = { exit_code: 8 }];
+ DEPRECATED_PACKAGE_LOADING_SYNC = 6 [(metadata) = { exit_code: 8 }];
+ DEPRECATED_EXECUTOR_COMPLETION = 7 [(metadata) = { exit_code: 8 }];
+ DEPRECATED_COMMAND_DISPATCH = 8 [(metadata) = { exit_code: 8 }];
+ DEPRECATED_INFO_ITEM = 9 [(metadata) = { exit_code: 8 }];
+ DEPRECATED_AFTER_QUERY = 10 [(metadata) = { exit_code: 8 }];
+ DEPRECATED_FETCH_COMMAND = 17 [(metadata) = { exit_code: 8 }];
+ DEPRECATED_SYNC_COMMAND = 18 [(metadata) = { exit_code: 8 }];
+ DEPRECATED_CLEAN_COMMAND = 20 [(metadata) = { exit_code: 8 }];
+ DEPRECATED_MOBILE_INSTALL_COMMAND = 21 [(metadata) = { exit_code: 8 }];
+ DEPRECATED_QUERY = 22 [(metadata) = { exit_code: 8 }];
+ DEPRECATED_RUN_COMMAND = 23 [(metadata) = { exit_code: 8 }];
+ DEPRECATED_OPTIONS_PARSING = 27 [(metadata) = { exit_code: 8 }];
+
+ reserved 1 to 3; // For internal use
+ reserved 11 to 16; // For internal use
+ reserved 19; // For internal use
+ reserved 24 to 26; // For internal use
+ }
+
+ // The interrupt subcategory.
+ Code code = 1;
+}
+
+message Spawn {
+ enum Code {
+ SPAWN_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ // See the SpawnResult.Status Java enum for definitions of the following
+ // Spawn failure codes.
+ NON_ZERO_EXIT = 1 [(metadata) = { exit_code: 1 }];
+ TIMEOUT = 2 [(metadata) = { exit_code: 1 }];
+ // Note: Spawn OUT_OF_MEMORY leads to a BUILD_FAILURE exit_code because the
+ // build tool itself did not run out of memory.
+ OUT_OF_MEMORY = 3 [(metadata) = { exit_code: 1 }];
+ EXECUTION_FAILED = 4 [(metadata) = { exit_code: 34 }];
+ EXECUTION_DENIED = 5 [(metadata) = { exit_code: 1 }];
+ REMOTE_CACHE_FAILED = 6 [(metadata) = { exit_code: 34 }];
+ COMMAND_LINE_EXPANSION_FAILURE = 7 [(metadata) = { exit_code: 1 }];
+ EXEC_IO_EXCEPTION = 8 [(metadata) = { exit_code: 36 }];
+ INVALID_TIMEOUT = 9 [(metadata) = { exit_code: 1 }];
+ INVALID_REMOTE_EXECUTION_PROPERTIES = 10 [(metadata) = { exit_code: 1 }];
+ NO_USABLE_STRATEGY_FOUND = 11 [(metadata) = { exit_code: 1 }];
+ // TODO(b/138456686): this code should be deprecated when SpawnResult is
+ // refactored to prohibit undetailed failures
+ UNSPECIFIED_EXECUTION_FAILURE = 12 [(metadata) = { exit_code: 1 }];
+ FORBIDDEN_INPUT = 13 [(metadata) = { exit_code: 1 }];
+ }
+ Code code = 1;
+
+ // For Codes describing generic failure to spawn (e.g. EXECUTION_FAILED and
+ // EXECUTION_DENIED) the `catastrophic` field may be set to true indicating a
+ // failure that immediately terminated the entire build tool.
+ bool catastrophic = 2;
+
+ // If Code is NON_ZERO_EXIT, the `spawn_exit_code` field may be set to the
+ // non-zero exit code returned by the spawned process to the OS.
+ //
+ // NOTE: This field must not be confused with the build tool's overall
+ // exit code.
+ int32 spawn_exit_code = 3;
+}
+
+// Failure details for fetching and configuring external repositories
+// (downloader configuration, repository mapping resolution, etc.).
+message ExternalRepository {
+  enum Code {
+    EXTERNAL_REPOSITORY_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    OVERRIDE_DISALLOWED_MANAGED_DIRECTORIES = 1 [(metadata) = { exit_code: 2 }];
+    BAD_DOWNLOADER_CONFIG = 2 [(metadata) = { exit_code: 2 }];
+    REPOSITORY_MAPPING_RESOLUTION_FAILED = 3 [(metadata) = { exit_code: 37 }];
+  }
+  Code code = 1;
+  // Additional data could include external repository names.
+}
+
+// Failure details for build progress reporting; most codes cover BES
+// (build event stream) upload and file-write errors.
+message BuildProgress {
+  enum Code {
+    BUILD_PROGRESS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    OUTPUT_INITIALIZATION = 3 [(metadata) = { exit_code: 36 }];
+    BES_RUNS_PER_TEST_LIMIT_UNSUPPORTED = 4 [(metadata) = { exit_code: 2 }];
+    BES_LOCAL_WRITE_ERROR = 5 [(metadata) = { exit_code: 36 }];
+    BES_INITIALIZATION_ERROR = 6 [(metadata) = { exit_code: 36 }];
+    BES_UPLOAD_TIMEOUT_ERROR = 7 [(metadata) = { exit_code: 38 }];
+    BES_FILE_WRITE_TIMEOUT = 8 [(metadata) = { exit_code: 38 }];
+    BES_FILE_WRITE_IO_ERROR = 9 [(metadata) = { exit_code: 38 }];
+    BES_FILE_WRITE_INTERRUPTED = 10 [(metadata) = { exit_code: 38 }];
+    BES_FILE_WRITE_CANCELED = 11 [(metadata) = { exit_code: 38 }];
+    BES_FILE_WRITE_UNKNOWN_ERROR = 12 [(metadata) = { exit_code: 38 }];
+    BES_UPLOAD_LOCAL_FILE_ERROR = 13 [(metadata) = { exit_code: 38 }];
+    BES_STREAM_NOT_RETRYING_FAILURE = 14 [(metadata) = { exit_code: 45 }];
+    BES_STREAM_COMPLETED_WITH_UNACK_EVENTS_ERROR = 15
+        [(metadata) = { exit_code: 45 }];
+    BES_STREAM_COMPLETED_WITH_UNSENT_EVENTS_ERROR = 16
+        [(metadata) = { exit_code: 45 }];
+    BES_STREAM_COMPLETED_WITH_REMOTE_ERROR = 19
+        [(metadata) = { exit_code: 45 }];
+    BES_UPLOAD_RETRY_LIMIT_EXCEEDED_FAILURE = 17
+        [(metadata) = { exit_code: 38 }];
+    reserved 1, 2, 18; // For internal use
+  }
+  Code code = 1;
+  // Additional data could include the build progress upload endpoint.
+}
+
+// Failure details for remote execution/caching option handling, e.g.
+// credential reads/writes and cache configuration checks.
+message RemoteOptions {
+  enum Code {
+    REMOTE_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    REMOTE_DEFAULT_EXEC_PROPERTIES_LOGIC_ERROR = 1
+        [(metadata) = { exit_code: 2 }];
+    // Credentials could not be read from the requested file/socket/process/etc.
+    CREDENTIALS_READ_FAILURE = 2 [(metadata) = { exit_code: 36 }];
+    // Credentials could not be written to a shared, temporary file.
+    CREDENTIALS_WRITE_FAILURE = 3 [(metadata) = { exit_code: 36 }];
+    DOWNLOADER_WITHOUT_GRPC_CACHE = 4 [(metadata) = { exit_code: 2 }];
+    EXECUTION_WITH_INVALID_CACHE = 5 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details derived from the client's environment, e.g. a malformed
+// current working directory.
+message ClientEnvironment {
+  enum Code {
+    CLIENT_ENVIRONMENT_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    CLIENT_CWD_MALFORMED = 1 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for a crash of the build tool itself, including the
+// throwable cause chain that produced it.
+message Crash {
+  enum Code {
+    CRASH_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    CRASH_OOM = 1 [(metadata) = { exit_code: 33 }];
+  }
+
+  Code code = 1;
+
+  // The cause chain of the crash, with the outermost throwable first. Limited
+  // to the outermost exception and at most 4 nested causes (so, max size of 5).
+  repeated Throwable causes = 2;
+}
+
+// A serialized summary of a java.lang.Throwable: class name, message, and a
+// string-form stack trace. Used by Crash.causes.
+message Throwable {
+  // The class name of the java.lang.Throwable.
+  string throwable_class = 1;
+  // The throwable's message.
+  string message = 2;
+  // The result of calling toString on the deepest (i.e. closest to the
+  // throwable's construction site) 1000 (or fewer) StackTraceElements.
+  // Unstructured to simplify string matching.
+  repeated string stack_trace = 3;
+}
+
+// Failure details for creating the symlink forest (the planted tree of
+// package-path symlinks under the execution root).
+message SymlinkForest {
+  enum Code {
+    SYMLINK_FOREST_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    TOPLEVEL_OUTDIR_PACKAGE_PATH_CONFLICT = 1 [(metadata) = { exit_code: 2 }];
+    TOPLEVEL_OUTDIR_USED_AS_SOURCE = 2 [(metadata) = { exit_code: 2 }];
+    CREATION_FAILED = 3 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for writing/uploading build reports.
+message BuildReport {
+  enum Code {
+    BUILD_REPORT_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    BUILD_REPORT_UPLOADER_NEEDS_PACKAGE_PATHS = 1
+        [(metadata) = { exit_code: 36 }];
+    BUILD_REPORT_WRITE_FAILED = 2 [(metadata) = { exit_code: 36 }];
+  }
+
+  Code code = 1;
+  // Additional data for partial failures might include the build report that
+  // failed to be written.
+}
+
+// Failure details for package-path option validation.
+message PackageOptions {
+  enum Code {
+    reserved 2, 3; // For internal use
+
+    PACKAGE_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    PACKAGE_PATH_INVALID = 1 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for remote execution setup and teardown (capability
+// queries, channel/cache initialization, output download handling).
+message RemoteExecution {
+  // The association of some of these options with exit code 2, "command line
+  // error", seems sketchy. Especially worth reconsidering are the channel init
+  // failure modes, which can correspond to failures occurring in gRPC setup.
+  // These all correspond with current Bazel behavior.
+  enum Code {
+    REMOTE_EXECUTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    CAPABILITIES_QUERY_FAILURE = 1 [(metadata) = { exit_code: 34 }];
+    CREDENTIALS_INIT_FAILURE = 2 [(metadata) = { exit_code: 2 }];
+    CACHE_INIT_FAILURE = 3 [(metadata) = { exit_code: 2 }];
+    RPC_LOG_FAILURE = 4 [(metadata) = { exit_code: 2 }];
+    EXEC_CHANNEL_INIT_FAILURE = 5 [(metadata) = { exit_code: 2 }];
+    CACHE_CHANNEL_INIT_FAILURE = 6 [(metadata) = { exit_code: 2 }];
+    DOWNLOADER_CHANNEL_INIT_FAILURE = 7 [(metadata) = { exit_code: 2 }];
+    LOG_DIR_CLEANUP_FAILURE = 8 [(metadata) = { exit_code: 36 }];
+    CLIENT_SERVER_INCOMPATIBLE = 9 [(metadata) = { exit_code: 34 }];
+    DOWNLOADED_INPUTS_DELETION_FAILURE = 10 [(metadata) = { exit_code: 34 }];
+    REMOTE_DOWNLOAD_OUTPUTS_MINIMAL_WITHOUT_INMEMORY_DOTD = 11
+        [(metadata) = { exit_code: 2 }];
+    REMOTE_DOWNLOAD_OUTPUTS_MINIMAL_WITHOUT_INMEMORY_JDEPS = 12
+        [(metadata) = { exit_code: 2 }];
+    INCOMPLETE_OUTPUT_DOWNLOAD_CLEANUP_FAILURE = 13
+        [(metadata) = { exit_code: 36 }];
+    REMOTE_DEFAULT_PLATFORM_PROPERTIES_PARSE_FAILURE = 14
+        [(metadata) = { exit_code: 1 }];
+    ILLEGAL_OUTPUT = 15 [(metadata) = { exit_code: 1 }];
+    INVALID_EXEC_AND_PLATFORM_PROPERTIES = 16 [(metadata) = { exit_code: 1 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for the execution phase: action setup, symlink-tree and
+// output-directory management, action I/O, and related errors.
+message Execution {
+  enum Code {
+    EXECUTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    EXECUTION_LOG_INITIALIZATION_FAILURE = 1 [(metadata) = { exit_code: 2 }];
+    EXECUTION_LOG_WRITE_FAILURE = 2 [(metadata) = { exit_code: 36 }];
+    EXECROOT_CREATION_FAILURE = 3 [(metadata) = { exit_code: 36 }];
+    TEMP_ACTION_OUTPUT_DIRECTORY_DELETION_FAILURE = 4
+        [(metadata) = { exit_code: 36 }];
+    TEMP_ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE = 5
+        [(metadata) = { exit_code: 36 }];
+    PERSISTENT_ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE = 6
+        [(metadata) = { exit_code: 36 }];
+    LOCAL_OUTPUT_DIRECTORY_SYMLINK_FAILURE = 7 [(metadata) = { exit_code: 36 }];
+    reserved 8; // was ACTION_INPUT_FILES_MISSING, now mostly
+                // SOURCE_INPUT_MISSING
+    LOCAL_TEMPLATE_EXPANSION_FAILURE = 9 [(metadata) = { exit_code: 36 }];
+    INPUT_DIRECTORY_CHECK_IO_EXCEPTION = 10 [(metadata) = { exit_code: 36 }];
+    EXTRA_ACTION_OUTPUT_CREATION_FAILURE = 11 [(metadata) = { exit_code: 36 }];
+    TEST_RUNNER_IO_EXCEPTION = 12 [(metadata) = { exit_code: 36 }];
+    FILE_WRITE_IO_EXCEPTION = 13 [(metadata) = { exit_code: 36 }];
+    TEST_OUT_ERR_IO_EXCEPTION = 14 [(metadata) = { exit_code: 36 }];
+    SYMLINK_TREE_MANIFEST_COPY_IO_EXCEPTION = 15
+        [(metadata) = { exit_code: 36 }];
+    SYMLINK_TREE_MANIFEST_LINK_IO_EXCEPTION = 16
+        [(metadata) = { exit_code: 36 }];
+    SYMLINK_TREE_CREATION_IO_EXCEPTION = 17 [(metadata) = { exit_code: 36 }];
+    SYMLINK_TREE_CREATION_COMMAND_EXCEPTION = 18
+        [(metadata) = { exit_code: 36 }];
+    ACTION_INPUT_READ_IO_EXCEPTION = 19 [(metadata) = { exit_code: 36 }];
+    ACTION_NOT_UP_TO_DATE = 20 [(metadata) = { exit_code: 1 }];
+    PSEUDO_ACTION_EXECUTION_PROHIBITED = 21 [(metadata) = { exit_code: 1 }];
+    DISCOVERED_INPUT_DOES_NOT_EXIST = 22 [(metadata) = { exit_code: 36 }];
+    ACTION_OUTPUTS_DELETION_FAILURE = 23 [(metadata) = { exit_code: 1 }];
+    ACTION_OUTPUTS_NOT_CREATED = 24 [(metadata) = { exit_code: 1 }];
+    ACTION_FINALIZATION_FAILURE = 25 [(metadata) = { exit_code: 1 }];
+    ACTION_INPUT_LOST = 26 [(metadata) = { exit_code: 1 }];
+    FILESYSTEM_CONTEXT_UPDATE_FAILURE = 27 [(metadata) = { exit_code: 1 }];
+    ACTION_OUTPUT_CLOSE_FAILURE = 28 [(metadata) = { exit_code: 1 }];
+    INPUT_DISCOVERY_IO_EXCEPTION = 29 [(metadata) = { exit_code: 1 }];
+    TREE_ARTIFACT_DIRECTORY_CREATION_FAILURE = 30
+        [(metadata) = { exit_code: 1 }];
+    ACTION_OUTPUT_DIRECTORY_CREATION_FAILURE = 31
+        [(metadata) = { exit_code: 1 }];
+    ACTION_FS_OUTPUT_DIRECTORY_CREATION_FAILURE = 32
+        [(metadata) = { exit_code: 1 }];
+    ACTION_FS_OUT_ERR_DIRECTORY_CREATION_FAILURE = 33
+        [(metadata) = { exit_code: 1 }];
+    NON_ACTION_EXECUTION_FAILURE = 34 [(metadata) = { exit_code: 1 }];
+    CYCLE = 35 [(metadata) = { exit_code: 1 }];
+    SOURCE_INPUT_MISSING = 36 [(metadata) = { exit_code: 1 }];
+    UNEXPECTED_EXCEPTION = 37 [(metadata) = { exit_code: 1 }];
+    reserved 38;
+    SOURCE_INPUT_IO_EXCEPTION = 39 [(metadata) = { exit_code: 1 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details about Bazel's WORKSPACE features.
+message Workspaces {
+  enum Code {
+    WORKSPACES_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    WORKSPACES_LOG_INITIALIZATION_FAILURE = 1 [(metadata) = { exit_code: 2 }];
+    WORKSPACES_LOG_WRITE_FAILURE = 2 [(metadata) = { exit_code: 36 }];
+
+    // See `managed_directories` in
+    // https://bazel.build/rules/lib/globals#workspace.
+    ILLEGAL_WORKSPACE_FILE_SYMLINK_WITH_MANAGED_DIRECTORIES = 3
+        [(metadata) = { exit_code: 1 }];
+    WORKSPACE_FILE_READ_FAILURE_WITH_MANAGED_DIRECTORIES = 4
+        [(metadata) = { exit_code: 1 }];
+  }
+
+  // The specific WORKSPACE-related failure mode.
+  Code code = 1;
+}
+
+// Failure details associated with crash-related options; the only non-default
+// code is reserved for internal use.
+message CrashOptions {
+  enum Code {
+    CRASH_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    reserved 1; // For internal use
+  }
+
+  Code code = 1;
+}
+
+// Failure details for filesystem-level operations (server files, embedded
+// binaries, digest hash function configuration).
+message Filesystem {
+  enum Code {
+    FILESYSTEM_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    reserved 1;
+    reserved 2;
+    EMBEDDED_BINARIES_ENUMERATION_FAILURE = 3 [(metadata) = { exit_code: 36 }];
+    SERVER_PID_TXT_FILE_READ_FAILURE = 4 [(metadata) = { exit_code: 36 }];
+    SERVER_FILE_WRITE_FAILURE = 5 [(metadata) = { exit_code: 36 }];
+    DEFAULT_DIGEST_HASH_FUNCTION_INVALID_VALUE = 6
+        [(metadata) = { exit_code: 2 }];
+
+    reserved 7; // For internal use
+  }
+
+  Code code = 1;
+}
+
+// Failure details for execution strategy option validation.
+message ExecutionOptions {
+  // All numerical exit code associations correspond to pre-existing Bazel
+  // behavior. These associations are suspicious:
+  // - REQUESTED_STRATEGY_INCOMPATIBLE_WITH_SANDBOXING (instead: 2?)
+  // - DEPRECATED_LOCAL_RESOURCES_USED (instead: 2?)
+  // TODO(b/138456686): Revise these after the (intentionally non-breaking)
+  // initial rollout of FailureDetail-based encoding.
+  enum Code {
+    EXECUTION_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    INVALID_STRATEGY = 3 [(metadata) = { exit_code: 2 }];
+    REQUESTED_STRATEGY_INCOMPATIBLE_WITH_SANDBOXING = 4
+        [(metadata) = { exit_code: 36 }];
+    DEPRECATED_LOCAL_RESOURCES_USED = 5 [(metadata) = { exit_code: 36 }];
+    INVALID_CYCLIC_DYNAMIC_STRATEGY = 6 [(metadata) = { exit_code: 36 }];
+    RESTRICTION_UNMATCHED_TO_ACTION_CONTEXT = 7 [(metadata) = { exit_code: 2 }];
+    REMOTE_FALLBACK_STRATEGY_NOT_ABSTRACT_SPAWN = 8
+        [(metadata) = { exit_code: 2 }];
+    STRATEGY_NOT_FOUND = 9 [(metadata) = { exit_code: 2 }];
+    DYNAMIC_STRATEGY_NOT_SANDBOXED = 10 [(metadata) = { exit_code: 2 }];
+
+    reserved 1, 2; // For internal use
+  }
+
+  Code code = 1;
+}
+
+// Failure details for command dispatch, option parsing, invocation policy,
+// and workspace-location preconditions.
+message Command {
+  enum Code {
+    // The name "COMMAND_UNKNOWN" might reasonably be interpreted as "command
+    // not found". The enum's default value should represent a lack of knowledge
+    // about the failure instead.
+    COMMAND_FAILURE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    COMMAND_NOT_FOUND = 1 [(metadata) = { exit_code: 2 }];
+    ANOTHER_COMMAND_RUNNING = 2 [(metadata) = { exit_code: 9 }];
+    PREVIOUSLY_SHUTDOWN = 3 [(metadata) = { exit_code: 36 }];
+    STARLARK_CPU_PROFILE_FILE_INITIALIZATION_FAILURE = 4
+        [(metadata) = { exit_code: 36 }];
+    STARLARK_CPU_PROFILING_INITIALIZATION_FAILURE = 5
+        [(metadata) = { exit_code: 36 }];
+    STARLARK_CPU_PROFILE_FILE_WRITE_FAILURE = 6
+        [(metadata) = { exit_code: 36 }];
+    INVOCATION_POLICY_PARSE_FAILURE = 7 [(metadata) = { exit_code: 2 }];
+    INVOCATION_POLICY_INVALID = 8 [(metadata) = { exit_code: 2 }];
+    OPTIONS_PARSE_FAILURE = 9 [(metadata) = { exit_code: 2 }];
+    STARLARK_OPTIONS_PARSE_FAILURE = 10 [(metadata) = { exit_code: 2 }];
+    ARGUMENTS_NOT_RECOGNIZED = 11 [(metadata) = { exit_code: 2 }];
+    NOT_IN_WORKSPACE = 12 [(metadata) = { exit_code: 2 }];
+    SPACES_IN_WORKSPACE_PATH = 13 [(metadata) = { exit_code: 36 }];
+    IN_OUTPUT_DIRECTORY = 14 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for the gRPC command server (bind failures, client
+// handshake problems).
+message GrpcServer {
+  enum Code {
+    GRPC_SERVER_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    GRPC_SERVER_NOT_COMPILED_IN = 1 [(metadata) = { exit_code: 37 }];
+    SERVER_BIND_FAILURE = 2 [(metadata) = { exit_code: 1 }];
+    BAD_COOKIE = 3 [(metadata) = { exit_code: 36 }];
+    NO_CLIENT_DESCRIPTION = 4 [(metadata) = { exit_code: 36 }];
+    reserved 5; // For internal use
+  }
+
+  Code code = 1;
+}
+
+// Failure details for the canonicalize-flags command.
+message CanonicalizeFlags {
+  enum Code {
+    CANONICALIZE_FLAGS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    FOR_COMMAND_INVALID = 1 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure modes described by this category pertain to the Bazel invocation
+// configuration consumed by Bazel's analysis phase. This category is not
+// intended as a grab-bag for all Bazel flag value constraint violations, which
+// instead generally belong in the category for the subsystem whose flag values
+// participate in the constraint.
+message BuildConfiguration {
+  enum Code {
+    BUILD_CONFIGURATION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    PLATFORM_MAPPING_EVALUATION_FAILURE = 1 [(metadata) = { exit_code: 2 }];
+    PLATFORM_MAPPINGS_FILE_IS_DIRECTORY = 2 [(metadata) = { exit_code: 1 }];
+    PLATFORM_MAPPINGS_FILE_NOT_FOUND = 3 [(metadata) = { exit_code: 1 }];
+    TOP_LEVEL_CONFIGURATION_CREATION_FAILURE = 4
+        [(metadata) = { exit_code: 1 }];
+    INVALID_CONFIGURATION = 5 [(metadata) = { exit_code: 2 }];
+    INVALID_BUILD_OPTIONS = 6 [(metadata) = { exit_code: 2 }];
+    MULTI_CPU_PREREQ_UNMET = 7 [(metadata) = { exit_code: 2 }];
+    HEURISTIC_INSTRUMENTATION_FILTER_INVALID = 8
+        [(metadata) = { exit_code: 2 }];
+    CYCLE = 9 [(metadata) = { exit_code: 2 }];
+    CONFLICTING_CONFIGURATIONS = 10 [(metadata) = { exit_code: 2 }];
+    // This can come from either an invalid user-specified option or a
+    // configuration transition. There's no sure-fire way to distinguish the two
+    // possibilities in Bazel, so we go with the more straightforward
+    // command-line error exit code 2.
+    INVALID_OUTPUT_DIRECTORY_MNEMONIC = 11 [(metadata) = { exit_code: 2 }];
+  }
+
+  // The specific build-configuration failure mode.
+  Code code = 1;
+}
+
+// Failure details for the info command.
+message InfoCommand {
+  // The distinction between a failure to write a single info item and a failure
+  // to write them all seems sketchy. Why do they have different exit codes?
+  // This reflects current Bazel behavior, but deserves more thought.
+  enum Code {
+    INFO_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    TOO_MANY_KEYS = 1 [(metadata) = { exit_code: 2 }];
+    KEY_NOT_RECOGNIZED = 2 [(metadata) = { exit_code: 2 }];
+    INFO_BLOCK_WRITE_FAILURE = 3 [(metadata) = { exit_code: 7 }];
+    ALL_INFO_WRITE_FAILURE = 4 [(metadata) = { exit_code: 36 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for memory-related options (experimental OOM-more-eagerly
+// settings).
+message MemoryOptions {
+  enum Code {
+    MEMORY_OPTIONS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    EXPERIMENTAL_OOM_MORE_EAGERLY_THRESHOLD_INVALID_VALUE = 1
+        [(metadata) = { exit_code: 2 }];
+    EXPERIMENTAL_OOM_MORE_EAGERLY_NO_TENURED_COLLECTORS_FOUND = 2
+        [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for the query command and query expression evaluation.
+message Query {
+  enum Code {
+    QUERY_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    QUERY_FILE_WITH_COMMAND_LINE_EXPRESSION = 1 [(metadata) = { exit_code: 2 }];
+    QUERY_FILE_READ_FAILURE = 2 [(metadata) = { exit_code: 2 }];
+    COMMAND_LINE_EXPRESSION_MISSING = 3 [(metadata) = { exit_code: 2 }];
+    OUTPUT_FORMAT_INVALID = 4 [(metadata) = { exit_code: 2 }];
+    GRAPHLESS_PREREQ_UNMET = 5 [(metadata) = { exit_code: 2 }];
+    QUERY_OUTPUT_WRITE_FAILURE = 6 [(metadata) = { exit_code: 36 }];
+    QUERY_STDOUT_FLUSH_FAILURE = 13 [(metadata) = { exit_code: 36 }];
+    ANALYSIS_QUERY_PREREQ_UNMET = 14 [(metadata) = { exit_code: 2 }];
+    QUERY_RESULTS_FLUSH_FAILURE = 15 [(metadata) = { exit_code: 36 }];
+    // Deprecated - folded into SYNTAX_ERROR.
+    DEPRECATED_UNCLOSED_QUOTATION_EXPRESSION_ERROR = 16
+        [(metadata) = { exit_code: 2 }];
+    VARIABLE_NAME_INVALID = 17 [(metadata) = { exit_code: 7 }];
+    VARIABLE_UNDEFINED = 18 [(metadata) = { exit_code: 7 }];
+    BUILDFILES_AND_LOADFILES_CANNOT_USE_OUTPUT_LOCATION_ERROR = 19
+        [(metadata) = { exit_code: 2 }];
+    BUILD_FILE_ERROR = 20 [(metadata) = { exit_code: 7 }];
+    CYCLE = 21 [(metadata) = { exit_code: 7 }];
+    UNIQUE_SKYKEY_THRESHOLD_EXCEEDED = 22 [(metadata) = { exit_code: 7 }];
+    TARGET_NOT_IN_UNIVERSE_SCOPE = 23 [(metadata) = { exit_code: 2 }];
+    INVALID_FULL_UNIVERSE_EXPRESSION = 24 [(metadata) = { exit_code: 7 }];
+    UNIVERSE_SCOPE_LIMIT_EXCEEDED = 25 [(metadata) = { exit_code: 7 }];
+    INVALIDATION_LIMIT_EXCEEDED = 26 [(metadata) = { exit_code: 7 }];
+    OUTPUT_FORMAT_PREREQ_UNMET = 27 [(metadata) = { exit_code: 2 }];
+    ARGUMENTS_MISSING = 28 [(metadata) = { exit_code: 7 }];
+    RBUILDFILES_FUNCTION_REQUIRES_SKYQUERY = 29 [(metadata) = { exit_code: 7 }];
+    FULL_TARGETS_NOT_SUPPORTED = 30 [(metadata) = { exit_code: 7 }];
+    // Deprecated - folded into SYNTAX_ERROR.
+    DEPRECATED_UNEXPECTED_TOKEN_ERROR = 31 [(metadata) = { exit_code: 2 }];
+    // Deprecated - folded into SYNTAX_ERROR.
+    DEPRECATED_INTEGER_LITERAL_MISSING = 32 [(metadata) = { exit_code: 2 }];
+    // Deprecated - folded into SYNTAX_ERROR.
+    DEPRECATED_INVALID_STARTING_CHARACTER_ERROR = 33
+        [(metadata) = { exit_code: 2 }];
+    // Deprecated - folded into SYNTAX_ERROR.
+    DEPRECATED_PREMATURE_END_OF_INPUT_ERROR = 34
+        [(metadata) = { exit_code: 2 }];
+    // Indicates the user specified invalid query syntax.
+    SYNTAX_ERROR = 35 [(metadata) = { exit_code: 2 }];
+    OUTPUT_FORMATTER_IO_EXCEPTION = 36 [(metadata) = { exit_code: 36 }];
+    SKYQUERY_TRANSITIVE_TARGET_ERROR = 37 [(metadata) = { exit_code: 7 }];
+    SKYQUERY_TARGET_EXCEPTION = 38 [(metadata) = { exit_code: 7 }];
+    INVALID_LABEL_IN_TEST_SUITE = 39 [(metadata) = { exit_code: 7 }];
+    // Indicates any usage of flags that must not be combined.
+    ILLEGAL_FLAG_COMBINATION = 40 [(metadata) = { exit_code: 2 }];
+    // Indicates a non-detailed exception that halted a query. This is a
+    // deficiency in Blaze/Bazel and code should be changed to attach a detailed
+    // exit code to this failure mode.
+    NON_DETAILED_ERROR = 41 [(metadata) = { exit_code: 1 }];
+
+    reserved 7 to 12; // For internal use
+  }
+
+  Code code = 1;
+}
+
+// Failure details for local execution.
+message LocalExecution {
+  enum Code {
+    LOCAL_EXECUTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    LOCKFREE_OUTPUT_PREREQ_UNMET = 1 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for the action cache.
+message ActionCache {
+  enum Code {
+    ACTION_CACHE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    INITIALIZATION_FAILURE = 1 [(metadata) = { exit_code: 36 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for the fetch command.
+message FetchCommand {
+  enum Code {
+    FETCH_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    EXPRESSION_MISSING = 1 [(metadata) = { exit_code: 2 }];
+    OPTIONS_INVALID = 2 [(metadata) = { exit_code: 2 }];
+    QUERY_PARSE_ERROR = 3 [(metadata) = { exit_code: 2 }];
+    QUERY_EVALUATION_ERROR = 4 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for the sync command.
+message SyncCommand {
+  enum Code {
+    SYNC_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    PACKAGE_LOOKUP_ERROR = 1 [(metadata) = { exit_code: 7 }];
+    WORKSPACE_EVALUATION_ERROR = 2 [(metadata) = { exit_code: 7 }];
+    REPOSITORY_FETCH_ERRORS = 3 [(metadata) = { exit_code: 7 }];
+    REPOSITORY_NAME_INVALID = 4 [(metadata) = { exit_code: 7 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for sandboxed execution, including Docker-based
+// sandboxing and bind-mount setup.
+message Sandbox {
+  enum Code {
+    SANDBOX_FAILURE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    INITIALIZATION_FAILURE = 1 [(metadata) = { exit_code: 36 }];
+    EXECUTION_IO_EXCEPTION = 2 [(metadata) = { exit_code: 36 }];
+    DOCKER_COMMAND_FAILURE = 3 [(metadata) = { exit_code: 1 }];
+    NO_DOCKER_IMAGE = 4 [(metadata) = { exit_code: 1 }];
+    DOCKER_IMAGE_PREPARATION_FAILURE = 5 [(metadata) = { exit_code: 1 }];
+    BIND_MOUNT_ANALYSIS_FAILURE = 6 [(metadata) = { exit_code: 1 }];
+    MOUNT_SOURCE_DOES_NOT_EXIST = 7 [(metadata) = { exit_code: 1 }];
+    MOUNT_SOURCE_TARGET_TYPE_MISMATCH = 8 [(metadata) = { exit_code: 1 }];
+    MOUNT_TARGET_DOES_NOT_EXIST = 9 [(metadata) = { exit_code: 1 }];
+    SUBPROCESS_START_FAILED = 10 [(metadata) = { exit_code: 36 }];
+    FORBIDDEN_INPUT = 11 [(metadata) = { exit_code: 1 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for include scanning (include-hints processing and
+// package loading triggered by scanning).
+message IncludeScanning {
+  enum Code {
+    INCLUDE_SCANNING_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    INITIALIZE_INCLUDE_HINTS_ERROR = 1 [(metadata) = { exit_code: 36 }];
+    SCANNING_IO_EXCEPTION = 2 [(metadata) = { exit_code: 36 }];
+    INCLUDE_HINTS_FILE_NOT_IN_PACKAGE = 3 [(metadata) = { exit_code: 36 }];
+    INCLUDE_HINTS_READ_FAILURE = 4 [(metadata) = { exit_code: 36 }];
+    ILLEGAL_ABSOLUTE_PATH = 5 [(metadata) = { exit_code: 1 }];
+    // TODO(b/166268889): this code should be deprecated in favor of more finely
+    // resolved loading-phase codes.
+    PACKAGE_LOAD_FAILURE = 6 [(metadata) = { exit_code: 1 }];
+    USER_PACKAGE_LOAD_FAILURE = 7 [(metadata) = { exit_code: 1 }];
+    SYSTEM_PACKAGE_LOAD_FAILURE = 8 [(metadata) = { exit_code: 36 }];
+    UNDIFFERENTIATED_PACKAGE_LOAD_FAILURE = 9 [(metadata) = { exit_code: 1 }];
+  }
+
+  Code code = 1;
+  PackageLoading.Code package_loading_code = 2;
+}
+
+// Failure details for the test command.
+message TestCommand {
+  enum Code {
+    TEST_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    NO_TEST_TARGETS = 1 [(metadata) = { exit_code: 4 }];
+    TEST_WITH_NOANALYZE = 2 [(metadata) = { exit_code: 1 }];
+    TESTS_FAILED = 3 [(metadata) = { exit_code: 3 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for the aquery (action query) command.
+message ActionQuery {
+  // All numerical exit code associations correspond to pre-existing Bazel
+  // behavior. These associations are suspicious:
+  // - COMMAND_LINE_EXPANSION_FAILURE: this is associated with 2, the numerical
+  //   exit code for "bad Bazel command line", but is generated when an
+  //   action's command line fails to expand, which sounds similar but is
+  //   completely different.
+  // - OUTPUT_FAILURE: this is associated with 6, an undocumented exit code.
+  // - INVALID_AQUERY_EXPRESSION: this is associated with 1, which is not
+  //   documented for (a)query.
+  // TODO(b/138456686): Revise these after the (intentionally non-breaking)
+  // initial rollout of FailureDetail-based encoding.
+  enum Code {
+    ACTION_QUERY_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    COMMAND_LINE_EXPANSION_FAILURE = 1 [(metadata) = { exit_code: 2 }];
+    OUTPUT_FAILURE = 2 [(metadata) = { exit_code: 6 }];
+    COMMAND_LINE_EXPRESSION_MISSING = 3 [(metadata) = { exit_code: 2 }];
+    EXPRESSION_PARSE_FAILURE = 4 [(metadata) = { exit_code: 2 }];
+    SKYFRAME_STATE_WITH_COMMAND_LINE_EXPRESSION = 5
+        [(metadata) = { exit_code: 2 }];
+    INVALID_AQUERY_EXPRESSION = 6 [(metadata) = { exit_code: 1 }];
+    SKYFRAME_STATE_PREREQ_UNMET = 7 [(metadata) = { exit_code: 2 }];
+    AQUERY_OUTPUT_TOO_BIG = 8 [(metadata) = { exit_code: 7 }];
+    ILLEGAL_PATTERN_SYNTAX = 9 [(metadata) = { exit_code: 2 }];
+    INCORRECT_ARGUMENTS = 10 [(metadata) = { exit_code: 2 }];
+    TOP_LEVEL_TARGETS_WITH_SKYFRAME_STATE_NOT_SUPPORTED = 11
+        [(metadata) = { exit_code: 2 }];
+    SKYFRAME_STATE_AFTER_EXECUTION = 12 [(metadata) = { exit_code: 1 }];
+    LABELS_FUNCTION_NOT_SUPPORTED = 13 [(metadata) = { exit_code: 2 }];
+    TEMPLATE_EXPANSION_FAILURE = 14 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for target pattern parsing and resolution.
+message TargetPatterns {
+  enum Code {
+    TARGET_PATTERNS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    TARGET_PATTERN_FILE_WITH_COMMAND_LINE_PATTERN = 1
+        [(metadata) = { exit_code: 2 }];
+    TARGET_PATTERN_FILE_READ_FAILURE = 2 [(metadata) = { exit_code: 2 }];
+    TARGET_PATTERN_PARSE_FAILURE = 3 [(metadata) = { exit_code: 1 }];
+    PACKAGE_NOT_FOUND = 4 [(metadata) = { exit_code: 1 }];
+    TARGET_FORMAT_INVALID = 5 [(metadata) = { exit_code: 1 }];
+    ABSOLUTE_TARGET_PATTERN_INVALID = 6 [(metadata) = { exit_code: 1 }];
+    CANNOT_DETERMINE_TARGET_FROM_FILENAME = 7 [(metadata) = { exit_code: 1 }];
+    LABEL_SYNTAX_ERROR = 8 [(metadata) = { exit_code: 1 }];
+    TARGET_CANNOT_BE_EMPTY_STRING = 9 [(metadata) = { exit_code: 1 }];
+    PACKAGE_PART_CANNOT_END_IN_SLASH = 10 [(metadata) = { exit_code: 1 }];
+    CYCLE = 11 [(metadata) = { exit_code: 1 }];
+    CANNOT_PRELOAD_TARGET = 12 [(metadata) = { exit_code: 1 }];
+    TARGETS_MISSING = 13 [(metadata) = { exit_code: 1 }];
+    RECURSIVE_TARGET_PATTERNS_NOT_ALLOWED = 14 [(metadata) = { exit_code: 1 }];
+    UP_LEVEL_REFERENCES_NOT_ALLOWED = 15 [(metadata) = { exit_code: 1 }];
+    NEGATIVE_TARGET_PATTERN_NOT_ALLOWED = 16 [(metadata) = { exit_code: 1 }];
+    TARGET_MUST_BE_A_FILE = 17 [(metadata) = { exit_code: 1 }];
+    DEPENDENCY_NOT_FOUND = 18 [(metadata) = { exit_code: 1 }];
+    PACKAGE_NAME_INVALID = 19 [(metadata) = { exit_code: 1 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for the clean command (output base / execroot deletion,
+// both synchronous and asynchronous).
+message CleanCommand {
+  enum Code {
+    CLEAN_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    OUTPUT_SERVICE_CLEAN_FAILURE = 1 [(metadata) = { exit_code: 6 }];
+    ACTION_CACHE_CLEAN_FAILURE = 2 [(metadata) = { exit_code: 36 }];
+    OUT_ERR_CLOSE_FAILURE = 3 [(metadata) = { exit_code: 36 }];
+    OUTPUT_BASE_DELETE_FAILURE = 4 [(metadata) = { exit_code: 36 }];
+    OUTPUT_BASE_TEMP_MOVE_FAILURE = 5 [(metadata) = { exit_code: 36 }];
+    ASYNC_OUTPUT_BASE_DELETE_FAILURE = 6 [(metadata) = { exit_code: 6 }];
+    EXECROOT_DELETE_FAILURE = 7 [(metadata) = { exit_code: 36 }];
+    EXECROOT_TEMP_MOVE_FAILURE = 8 [(metadata) = { exit_code: 36 }];
+    ASYNC_EXECROOT_DELETE_FAILURE = 9 [(metadata) = { exit_code: 6 }];
+    ARGUMENTS_NOT_RECOGNIZED = 10 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for the config command.
+message ConfigCommand {
+  enum Code {
+    CONFIG_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    TOO_MANY_CONFIG_IDS = 1 [(metadata) = { exit_code: 2 }];
+    CONFIGURATION_NOT_FOUND = 2 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for configurable query (cquery) evaluation.
+message ConfigurableQuery {
+  enum Code {
+    CONFIGURABLE_QUERY_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    COMMAND_LINE_EXPRESSION_MISSING = 1 [(metadata) = { exit_code: 2 }];
+    EXPRESSION_PARSE_FAILURE = 2 [(metadata) = { exit_code: 2 }];
+    FILTERS_NOT_SUPPORTED = 3 [(metadata) = { exit_code: 2 }];
+    BUILDFILES_FUNCTION_NOT_SUPPORTED = 4 [(metadata) = { exit_code: 2 }];
+    SIBLINGS_FUNCTION_NOT_SUPPORTED = 5 [(metadata) = { exit_code: 2 }];
+    VISIBLE_FUNCTION_NOT_SUPPORTED = 6 [(metadata) = { exit_code: 2 }];
+    ATTRIBUTE_MISSING = 7 [(metadata) = { exit_code: 2 }];
+    INCORRECT_CONFIG_ARGUMENT_ERROR = 8 [(metadata) = { exit_code: 2 }];
+    TARGET_MISSING = 9 [(metadata) = { exit_code: 2 }];
+    STARLARK_SYNTAX_ERROR = 10 [(metadata) = { exit_code: 2 }];
+    STARLARK_EVAL_ERROR = 11 [(metadata) = { exit_code: 2 }];
+    // Indicates failure to correctly define a format function
+    FORMAT_FUNCTION_ERROR = 12 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for the dump command.
+message DumpCommand {
+  enum Code {
+    DUMP_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    NO_OUTPUT_SPECIFIED = 1 [(metadata) = { exit_code: 7 }];
+    ACTION_CACHE_DUMP_FAILED = 2 [(metadata) = { exit_code: 7 }];
+    COMMAND_LINE_EXPANSION_FAILURE = 3 [(metadata) = { exit_code: 7 }];
+    ACTION_GRAPH_DUMP_FAILED = 4 [(metadata) = { exit_code: 7 }];
+    STARLARK_HEAP_DUMP_FAILED = 5 [(metadata) = { exit_code: 8 }];
+    reserved 6; // For internal use
+  }
+
+  Code code = 1;
+}
+
+// Failure details for the help command.
+message HelpCommand {
+  enum Code {
+    HELP_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    MISSING_ARGUMENT = 1 [(metadata) = { exit_code: 2 }];
+    COMMAND_NOT_FOUND = 2 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for the mobile-install command.
+message MobileInstall {
+  enum Code {
+    MOBILE_INSTALL_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    CLASSIC_UNSUPPORTED = 1 [(metadata) = { exit_code: 2 }];
+    NO_TARGET_SPECIFIED = 2 [(metadata) = { exit_code: 2 }];
+    MULTIPLE_TARGETS_SPECIFIED = 3 [(metadata) = { exit_code: 2 }];
+    TARGET_TYPE_INVALID = 4 [(metadata) = { exit_code: 6 }];
+    NON_ZERO_EXIT = 5 [(metadata) = { exit_code: 6 }];
+    ERROR_RUNNING_PROGRAM = 6 [(metadata) = { exit_code: 6 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for the profile command.
+message ProfileCommand {
+  enum Code {
+    PROFILE_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    OLD_BINARY_FORMAT_UNSUPPORTED = 1 [(metadata) = { exit_code: 1 }];
+    FILE_READ_FAILURE = 2 [(metadata) = { exit_code: 1 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for the run command (target selection, runfiles setup,
+// and launch-script preparation).
+message RunCommand {
+  enum Code {
+    RUN_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    NO_TARGET_SPECIFIED = 1 [(metadata) = { exit_code: 2 }];
+    TOO_MANY_TARGETS_SPECIFIED = 2 [(metadata) = { exit_code: 2 }];
+    TARGET_NOT_EXECUTABLE = 3 [(metadata) = { exit_code: 2 }];
+    TARGET_BUILT_BUT_PATH_NOT_EXECUTABLE = 4 [(metadata) = { exit_code: 1 }];
+    TARGET_BUILT_BUT_PATH_VALIDATION_FAILED = 5
+        [(metadata) = { exit_code: 36 }];
+    RUN_UNDER_TARGET_NOT_BUILT = 6 [(metadata) = { exit_code: 2 }];
+    RUN_PREREQ_UNMET = 7 [(metadata) = { exit_code: 2 }];
+    TOO_MANY_TEST_SHARDS_OR_RUNS = 8 [(metadata) = { exit_code: 2 }];
+    TEST_ENVIRONMENT_SETUP_FAILURE = 9 [(metadata) = { exit_code: 36 }];
+    COMMAND_LINE_EXPANSION_FAILURE = 10 [(metadata) = { exit_code: 36 }];
+    NO_SHELL_SPECIFIED = 11 [(metadata) = { exit_code: 2 }];
+    SCRIPT_WRITE_FAILURE = 12 [(metadata) = { exit_code: 6 }];
+    RUNFILES_DIRECTORIES_CREATION_FAILURE = 13 [(metadata) = { exit_code: 36 }];
+    RUNFILES_SYMLINKS_CREATION_FAILURE = 14 [(metadata) = { exit_code: 36 }];
+    TEST_ENVIRONMENT_SETUP_INTERRUPTED = 15 [(metadata) = { exit_code: 8 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for the version command.
+message VersionCommand {
+  enum Code {
+    VERSION_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    NOT_AVAILABLE = 1 [(metadata) = { exit_code: 2 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for the print_action command.
+message PrintActionCommand {
+  enum Code {
+    PRINT_ACTION_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    TARGET_NOT_FOUND = 1 [(metadata) = { exit_code: 1 }];
+    COMMAND_LINE_EXPANSION_FAILURE = 2 [(metadata) = { exit_code: 1 }];
+    TARGET_KIND_UNSUPPORTED = 3 [(metadata) = { exit_code: 1 }];
+    ACTIONS_NOT_FOUND = 4 [(metadata) = { exit_code: 1 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for running, parsing, and validating the workspace status
+// program's output.
+message WorkspaceStatus {
+  enum Code {
+    WORKSPACE_STATUS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    NON_ZERO_EXIT = 1 [(metadata) = { exit_code: 1 }];
+    ABNORMAL_TERMINATION = 2 [(metadata) = { exit_code: 1 }];
+    EXEC_FAILED = 3 [(metadata) = { exit_code: 1 }];
+    PARSE_FAILURE = 4 [(metadata) = { exit_code: 36 }];
+    VALIDATION_FAILURE = 5 [(metadata) = { exit_code: 1 }];
+    CONTENT_UPDATE_IO_EXCEPTION = 6 [(metadata) = { exit_code: 1 }];
+    STDERR_IO_EXCEPTION = 7 [(metadata) = { exit_code: 1 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for Java compilation actions.
+message JavaCompile {
+  enum Code {
+    JAVA_COMPILE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    REDUCED_CLASSPATH_FAILURE = 1 [(metadata) = { exit_code: 1 }];
+    COMMAND_LINE_EXPANSION_FAILURE = 2 [(metadata) = { exit_code: 1 }];
+    JDEPS_READ_IO_EXCEPTION = 3 [(metadata) = { exit_code: 36 }];
+    REDUCED_CLASSPATH_FALLBACK_CLEANUP_FAILURE = 4
+        [(metadata) = { exit_code: 36 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for action rewinding (recovering from lost action inputs).
+message ActionRewinding {
+  enum Code {
+    ACTION_REWINDING_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    LOST_INPUT_TOO_MANY_TIMES = 1 [(metadata) = { exit_code: 1 }];
+    LOST_INPUT_IS_SOURCE = 2 [(metadata) = { exit_code: 1 }];
+  }
+
+  Code code = 1;
+}
+
+// Failure details for C++ compilation actions (header discovery, .d file
+// handling, module expansion).
+message CppCompile {
+  enum Code {
+    CPP_COMPILE_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+    FIND_USED_HEADERS_IO_EXCEPTION = 1 [(metadata) = { exit_code: 36 }];
+    COPY_OUT_ERR_FAILURE = 2 [(metadata) = { exit_code: 36 }];
+    D_FILE_READ_FAILURE = 3 [(metadata) = { exit_code: 36 }];
+    COMMAND_GENERATION_FAILURE = 4 [(metadata) = { exit_code: 1 }];
+    MODULE_EXPANSION_TIMEOUT = 5 [(metadata) = { exit_code: 1 }];
+    INCLUDE_PATH_OUTSIDE_EXEC_ROOT = 6 [(metadata) = { exit_code: 1 }];
+    FAKE_COMMAND_GENERATION_FAILURE = 7 [(metadata) = { exit_code: 1 }];
+    UNDECLARED_INCLUSIONS = 8 [(metadata) = { exit_code: 1 }];
+    D_FILE_PARSE_FAILURE = 9 [(metadata) = { exit_code: 1 }];
+    COVERAGE_NOTES_CREATION_FAILURE = 10 [(metadata) = { exit_code: 1 }];
+    MODULE_EXPANSION_MISSING_DATA = 11 [(metadata) = { exit_code: 1 }];
+  }
+
+  Code code = 1;
+}
+
+message StarlarkAction {
+ enum Code {
+ STARLARK_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ UNUSED_INPUT_LIST_READ_FAILURE = 1 [(metadata) = { exit_code: 36 }];
+ UNUSED_INPUT_LIST_FILE_NOT_FOUND = 2 [(metadata) = { exit_code: 1 }];
+ }
+
+ Code code = 1;
+}
+
+message NinjaAction {
+ enum Code {
+ NINJA_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ INVALID_DEPFILE_DECLARED_DEPENDENCY = 1 [(metadata) = { exit_code: 36 }];
+ D_FILE_PARSE_FAILURE = 2 [(metadata) = { exit_code: 36 }];
+ }
+
+ Code code = 1;
+}
+
+message DynamicExecution {
+ enum Code {
+ DYNAMIC_EXECUTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ XCODE_RELATED_PREREQ_UNMET = 1 [(metadata) = { exit_code: 36 }];
+ ACTION_LOG_MOVE_FAILURE = 2 [(metadata) = { exit_code: 1 }];
+ RUN_FAILURE = 3 [(metadata) = { exit_code: 1 }];
+ NO_USABLE_STRATEGY_FOUND = 4 [(metadata) = { exit_code: 2 }];
+ }
+
+ Code code = 1;
+}
+
+message FailAction {
+ enum Code {
+ FAIL_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ INTENTIONAL_FAILURE = 1 [(metadata) = { exit_code: 1 }];
+ INCORRECT_PYTHON_VERSION = 2 [(metadata) = { exit_code: 1 }];
+ PROGUARD_SPECS_MISSING = 3 [(metadata) = { exit_code: 1 }];
+ DYNAMIC_LINKING_NOT_SUPPORTED = 4 [(metadata) = { exit_code: 1 }];
+ SOURCE_FILES_MISSING = 5 [(metadata) = { exit_code: 1 }];
+ INCORRECT_TOOLCHAIN = 6 [(metadata) = { exit_code: 1 }];
+ FRAGMENT_CLASS_MISSING = 7 [(metadata) = { exit_code: 1 }];
+ reserved 8, 9; // For internal use
+ CANT_BUILD_INCOMPATIBLE_TARGET = 10 [(metadata) = { exit_code: 1 }];
+ }
+
+ Code code = 1;
+}
+
+message SymlinkAction {
+ enum Code {
+ SYMLINK_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ EXECUTABLE_INPUT_NOT_FILE = 1 [(metadata) = { exit_code: 1 }];
+ EXECUTABLE_INPUT_IS_NOT = 2 [(metadata) = { exit_code: 1 }];
+ EXECUTABLE_INPUT_CHECK_IO_EXCEPTION = 3 [(metadata) = { exit_code: 1 }];
+ LINK_CREATION_IO_EXCEPTION = 4 [(metadata) = { exit_code: 1 }];
+ LINK_TOUCH_IO_EXCEPTION = 5 [(metadata) = { exit_code: 1 }];
+ }
+
+ Code code = 1;
+}
+
+message CppLink {
+ enum Code {
+ CPP_LINK_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ COMMAND_GENERATION_FAILURE = 1 [(metadata) = { exit_code: 1 }];
+ FAKE_COMMAND_GENERATION_FAILURE = 2 [(metadata) = { exit_code: 1 }];
+ }
+
+ Code code = 1;
+}
+
+message LtoAction {
+ enum Code {
+ LTO_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ INVALID_ABSOLUTE_PATH_IN_IMPORTS = 1 [(metadata) = { exit_code: 1 }];
+ MISSING_BITCODE_FILES = 2 [(metadata) = { exit_code: 1 }];
+ IMPORTS_READ_IO_EXCEPTION = 3 [(metadata) = { exit_code: 1 }];
+ }
+
+ Code code = 1;
+}
+
+message TestAction {
+ enum Code {
+ TEST_ACTION_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ NO_KEEP_GOING_TEST_FAILURE = 1 [(metadata) = { exit_code: 1 }];
+ LOCAL_TEST_PREREQ_UNMET = 2 [(metadata) = { exit_code: 1 }];
+ COMMAND_LINE_EXPANSION_FAILURE = 3 [(metadata) = { exit_code: 1 }];
+ DUPLICATE_CPU_TAGS = 4 [(metadata) = { exit_code: 1 }];
+ INVALID_CPU_TAG = 5 [(metadata) = { exit_code: 1 }];
+ }
+
+ Code code = 1;
+}
+
+message Worker {
+ enum Code {
+ WORKER_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ MULTIPLEXER_INSTANCE_REMOVAL_FAILURE = 1 [(metadata) = { exit_code: 1 }];
+ MULTIPLEXER_DOES_NOT_EXIST = 2 [(metadata) = { exit_code: 1 }];
+ NO_TOOLS = 3 [(metadata) = { exit_code: 1 }];
+ NO_FLAGFILE = 4 [(metadata) = { exit_code: 1 }];
+ VIRTUAL_INPUT_MATERIALIZATION_FAILURE = 5 [(metadata) = { exit_code: 1 }];
+ BORROW_FAILURE = 6 [(metadata) = { exit_code: 1 }];
+ PREFETCH_FAILURE = 7 [(metadata) = { exit_code: 36 }];
+ PREPARE_FAILURE = 8 [(metadata) = { exit_code: 1 }];
+ REQUEST_FAILURE = 9 [(metadata) = { exit_code: 1 }];
+ PARSE_RESPONSE_FAILURE = 10 [(metadata) = { exit_code: 1 }];
+ NO_RESPONSE = 11 [(metadata) = { exit_code: 1 }];
+ FINISH_FAILURE = 12 [(metadata) = { exit_code: 1 }];
+ FORBIDDEN_INPUT = 13 [(metadata) = { exit_code: 1 }];
+ }
+
+ Code code = 1;
+}
+
+message Analysis {
+ enum Code {
+ ANALYSIS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ LOAD_FAILURE = 1 [(metadata) = { exit_code: 1 }];
+ // TODO(b/138456686): this code should be deprecated in favor of more finely
+ // resolved loading-phase codes.
+ GENERIC_LOADING_PHASE_FAILURE = 2 [(metadata) = { exit_code: 1 }];
+ NOT_ALL_TARGETS_ANALYZED = 3 [(metadata) = { exit_code: 1 }];
+ CYCLE = 4 [(metadata) = { exit_code: 1 }];
+ PARAMETERIZED_TOP_LEVEL_ASPECT_INVALID = 5 [(metadata) = { exit_code: 1 }];
+ ASPECT_LABEL_SYNTAX_ERROR = 6 [(metadata) = { exit_code: 1 }];
+ ASPECT_PREREQ_UNMET = 7 [(metadata) = { exit_code: 1 }];
+ ASPECT_NOT_FOUND = 8 [(metadata) = { exit_code: 1 }];
+ ACTION_CONFLICT = 9 [(metadata) = { exit_code: 1 }];
+ ARTIFACT_PREFIX_CONFLICT = 10 [(metadata) = { exit_code: 1 }];
+ UNEXPECTED_ANALYSIS_EXCEPTION = 11 [(metadata) = { exit_code: 1 }];
+ TARGETS_MISSING_ENVIRONMENTS = 12 [(metadata) = { exit_code: 1 }];
+ INVALID_ENVIRONMENT = 13 [(metadata) = { exit_code: 1 }];
+ ENVIRONMENT_MISSING_FROM_GROUPS = 14 [(metadata) = { exit_code: 1 }];
+ EXEC_GROUP_MISSING = 15 [(metadata) = { exit_code: 1 }];
+ INVALID_EXECUTION_PLATFORM = 16 [(metadata) = { exit_code: 1 }];
+ ASPECT_CREATION_FAILED = 17 [(metadata) = { exit_code: 1 }];
+ CONFIGURED_VALUE_CREATION_FAILED = 18 [(metadata) = { exit_code: 1 }];
+ INCOMPATIBLE_TARGET_REQUESTED = 19 [(metadata) = { exit_code: 1 }];
+ ANALYSIS_FAILURE_PROPAGATION_FAILED = 20 [(metadata) = { exit_code: 1 }];
+ }
+
+ Code code = 1;
+}
+
+message PackageLoading {
+ enum Code {
+ PACKAGE_LOADING_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ WORKSPACE_FILE_ERROR = 1 [(metadata) = { exit_code: 1 }];
+ MAX_COMPUTATION_STEPS_EXCEEDED = 2 [(metadata) = { exit_code: 1 }];
+ BUILD_FILE_MISSING = 3 [(metadata) = { exit_code: 1 }];
+ REPOSITORY_MISSING = 4 [(metadata) = { exit_code: 1 }];
+ PERSISTENT_INCONSISTENT_FILESYSTEM_ERROR = 5
+ [(metadata) = { exit_code: 36 }];
+ TRANSIENT_INCONSISTENT_FILESYSTEM_ERROR = 6
+ [(metadata) = { exit_code: 36 }];
+ INVALID_NAME = 7 [(metadata) = { exit_code: 1 }];
+ // was: PRELUDE_FILE_READ_ERROR. Replaced by IMPORT_STARLARK_FILE_ERROR
+ // when the prelude was changed to be loaded as a Starlark module.
+ reserved 8;
+ EVAL_GLOBS_SYMLINK_ERROR = 9 [(metadata) = { exit_code: 1 }];
+ IMPORT_STARLARK_FILE_ERROR = 10 [(metadata) = { exit_code: 1 }];
+ PACKAGE_MISSING = 11 [(metadata) = { exit_code: 1 }];
+ TARGET_MISSING = 12 [(metadata) = { exit_code: 1 }];
+ NO_SUCH_THING = 13 [(metadata) = { exit_code: 1 }];
+ GLOB_IO_EXCEPTION = 14 [(metadata) = { exit_code: 36 }];
+ DUPLICATE_LABEL = 15 [(metadata) = { exit_code: 1 }];
+ INVALID_PACKAGE_SPECIFICATION = 16 [(metadata) = { exit_code: 1 }];
+ SYNTAX_ERROR = 17 [(metadata) = { exit_code: 1 }];
+ ENVIRONMENT_IN_DIFFERENT_PACKAGE = 18 [(metadata) = { exit_code: 1 }];
+ DEFAULT_ENVIRONMENT_UNDECLARED = 19 [(metadata) = { exit_code: 1 }];
+ ENVIRONMENT_IN_MULTIPLE_GROUPS = 20 [(metadata) = { exit_code: 1 }];
+ ENVIRONMENT_DOES_NOT_EXIST = 21 [(metadata) = { exit_code: 1 }];
+ ENVIRONMENT_INVALID = 22 [(metadata) = { exit_code: 1 }];
+ ENVIRONMENT_NOT_IN_GROUP = 23 [(metadata) = { exit_code: 1 }];
+ PACKAGE_NAME_INVALID = 24 [(metadata) = { exit_code: 1 }];
+ STARLARK_EVAL_ERROR = 25 [(metadata) = { exit_code: 1 }];
+ LICENSE_PARSE_FAILURE = 26 [(metadata) = { exit_code: 1 }];
+ DISTRIBUTIONS_PARSE_FAILURE = 27 [(metadata) = { exit_code: 1 }];
+ LABEL_CROSSES_PACKAGE_BOUNDARY = 28 [(metadata) = { exit_code: 1 }];
+ // Failure while evaluating or applying @_builtins injection. Since the
+ // builtins .bzl files are always packaged with Blaze in production, a
+ // failure here generally indicates a bug in Blaze.
+ BUILTINS_INJECTION_FAILURE = 29 [(metadata) = { exit_code: 1 }];
+ SYMLINK_CYCLE_OR_INFINITE_EXPANSION = 30 [(metadata) = { exit_code: 1 }];
+ OTHER_IO_EXCEPTION = 31 [(metadata) = { exit_code: 36 }];
+ }
+
+ Code code = 1;
+}
+
+message Toolchain {
+ enum Code {
+ TOOLCHAIN_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ MISSING_PROVIDER = 1 [(metadata) = { exit_code: 1 }];
+ INVALID_CONSTRAINT_VALUE = 2 [(metadata) = { exit_code: 1 }];
+ INVALID_PLATFORM_VALUE = 3 [(metadata) = { exit_code: 1 }];
+ INVALID_TOOLCHAIN = 4 [(metadata) = { exit_code: 1 }];
+ NO_MATCHING_EXECUTION_PLATFORM = 5 [(metadata) = { exit_code: 1 }];
+ NO_MATCHING_TOOLCHAIN = 6 [(metadata) = { exit_code: 1 }];
+ INVALID_TOOLCHAIN_TYPE = 7 [(metadata) = { exit_code: 1 }];
+ }
+
+ Code code = 1;
+}
+
+message StarlarkLoading {
+ enum Code {
+ STARLARK_LOADING_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ CYCLE = 1 [(metadata) = { exit_code: 1 }];
+ COMPILE_ERROR = 2 [(metadata) = { exit_code: 1 }];
+ PARSE_ERROR = 3 [(metadata) = { exit_code: 1 }];
+ EVAL_ERROR = 4 [(metadata) = { exit_code: 1 }];
+ CONTAINING_PACKAGE_NOT_FOUND = 5 [(metadata) = { exit_code: 1 }];
+ PACKAGE_NOT_FOUND = 6 [(metadata) = { exit_code: 1 }];
+ IO_ERROR = 7 [(metadata) = { exit_code: 1 }];
+ LABEL_CROSSES_PACKAGE_BOUNDARY = 8 [(metadata) = { exit_code: 1 }];
+ BUILTINS_ERROR = 9 [(metadata) = { exit_code: 1 }];
+ VISIBILITY_ERROR = 10 [(metadata) = { exit_code: 1 }];
+ }
+
+ Code code = 1;
+}
+
+message ExternalDeps {
+ enum Code {
+ EXTERNAL_DEPS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ MODULE_NOT_FOUND = 1 [(metadata) = { exit_code: 48 }];
+ BAD_MODULE = 2 [(metadata) = { exit_code: 48 }];
+ VERSION_RESOLUTION_ERROR = 3 [(metadata) = { exit_code: 48 }];
+ INVALID_REGISTRY_URL = 4 [(metadata) = { exit_code: 48 }];
+ ERROR_ACCESSING_REGISTRY = 5 [(metadata) = { exit_code: 32 }];
+ }
+
+ Code code = 1;
+}
+
+message DiffAwareness {
+ enum Code {
+ DIFF_AWARENESS_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ DIFF_STAT_FAILED = 1 [(metadata) = { exit_code: 36 }];
+ }
+
+ Code code = 1;
+}
+
+message ModqueryCommand {
+ enum Code {
+ MODQUERY_COMMAND_UNKNOWN = 0 [(metadata) = { exit_code: 37 }];
+ MISSING_ARGUMENTS = 1 [(metadata) = { exit_code: 2 }];
+ TOO_MANY_ARGUMENTS = 2 [(metadata) = { exit_code: 2 }];
+ INVALID_ARGUMENTS = 3 [(metadata) = { exit_code: 2 }];
+ }
+
+ Code code = 1;
+}
diff --git a/atest/bazel/runner/src/main/protobuf/invocation_policy.proto b/atest/bazel/runner/src/main/protobuf/invocation_policy.proto
new file mode 100644
index 0000000..f54a0f5
--- /dev/null
+++ b/atest/bazel/runner/src/main/protobuf/invocation_policy.proto
@@ -0,0 +1,202 @@
+// Copyright 2015 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto2";
+package blaze.invocation_policy;
+
+// option java_api_version = 2;
+option java_package = "com.google.devtools.build.lib.runtime.proto";
+
+// The --invocation_policy flag takes a base64-encoded binary-serialized or text
+// formatted InvocationPolicy message.
+message InvocationPolicy {
+ // Order matters.
+ // After expanding policies on expansion flags or flags with implicit
+ // requirements, only the final policy on a specific flag will be enforced
+ // onto the user's command line.
+ repeated FlagPolicy flag_policies = 1;
+}
+
+// A policy for controlling the value of a flag.
+message FlagPolicy {
+ // The name of the flag to enforce this policy on.
+ //
+ // Note that this should be the full name of the flag, not the abbreviated
+ // name of the flag. If the user specifies the abbreviated name of a flag,
+ // that flag will be matched using its full name.
+ //
+ // The "no" prefix will not be parsed, so for boolean flags, use
+ // the flag's full name and explicitly set it to true or false.
+ optional string flag_name = 1;
+
+ // If set, this flag policy is applied only if one of the given commands or a
+ // command that inherits from one of the given commands is being run. For
+ // instance, if "build" is one of the commands here, then this policy will
+ // apply to any command that inherits from build, such as info, coverage, or
+ // test. If empty, this flag policy is applied for all commands. This allows
+ // the policy setter to add all policies to the proto without having to
+ // determine which Bazel command the user is actually running. Additionally,
+ // Bazel allows multiple flags to be defined by the same name, and the
+ // specific flag definition is determined by the command.
+ repeated string commands = 2;
+
+ oneof operation {
+ SetValue set_value = 3;
+ UseDefault use_default = 4;
+ DisallowValues disallow_values = 5;
+ AllowValues allow_values = 6;
+ }
+}
+
+message SetValue {
+ // Use this value for the specified flag, overriding any default or user-set
+ // value (unless behavior = APPEND for repeatable flags).
+ //
+ // This field is repeated for repeatable flags. It is an error to set
+ // multiple values for a flag that is not actually a repeatable flag.
+  // At least one value is required, even if it is only the empty string.
+ //
+ // If the flag allows multiple values, all of its values are replaced with the
+ // value or values from the policy (i.e., no diffing or merging is performed),
+ // unless behavior = APPEND (see below).
+ //
+ // Note that some flags are tricky. For example, some flags look like boolean
+ // flags, but are actually Void expansion flags that expand into other flags.
+ // The Bazel flag parser will accept "--void_flag=false", but because
+ // the flag is Void, the "=false" is ignored. It can get even trickier, like
+ // "--novoid_flag" which is also an expansion flag with the type Void whose
+ // name is explicitly "novoid_flag" and which expands into other flags that
+ // are the opposite of "--void_flag". For expansion flags, it's best to
+ // explicitly override the flags they expand into.
+ //
+ // Other flags may be differently tricky: A flag could have a converter that
+ // converts some string to a list of values, but that flag may not itself have
+ // allowMultiple set to true.
+ //
+ // An example is "--test_tag_filters": this flag sets its converter to
+ // CommaSeparatedOptionListConverter, but does not set allowMultiple to true.
+ // So "--test_tag_filters=foo,bar" results in ["foo", "bar"], however
+ // "--test_tag_filters=foo --test_tag_filters=bar" results in just ["bar"]
+ // since the 2nd value overrides the 1st.
+ //
+ // Similarly, "--test_tag_filters=foo,bar --test_tag_filters=baz,qux" results
+ // in ["baz", "qux"]. For flags like these, the policy should specify
+ // "foo,bar" instead of separately specifying "foo" and "bar" so that the
+ // converter is appropriately invoked.
+ //
+ // Note that the opposite is not necessarily
+ // true: for a flag that specifies allowMultiple=true, "--flag=foo,bar"
+ // may fail to parse or result in an unexpected value.
+ repeated string flag_value = 1;
+
+ // Obsolete overridable and append fields.
+ reserved 2, 3;
+
+ enum Behavior {
+ UNDEFINED = 0;
+ // Change the flag value but allow it to be overridden by explicit settings
+ // from command line/config expansion/rc files.
+ // Matching old flag values: append = false, overridable = true.
+ ALLOW_OVERRIDES = 1;
+ // Append a new value for a repeatable flag, leave old values and allow
+ // further overrides.
+ // Matching old flag values: append = true, overridable = false.
+ APPEND = 2;
+ // Set a final value of the flag. Any overrides provided by the user for
+ // this flag will be ignored.
+ // Matching old flag values: append = false, overridable = false.
+ FINAL_VALUE_IGNORE_OVERRIDES = 3;
+ }
+
+ // Defines how invocation policy should interact with user settings for the
+ // same flag.
+ optional Behavior behavior = 4;
+}
+
+message UseDefault {
+ // Use the default value of the flag, as defined by Bazel (or equivalently, do
+ // not allow the user to set this flag).
+ //
+ // Note on implementation: UseDefault sets the default by clearing the flag,
+ // so that when the value is requested and no flag is found, the flag parser
+ // returns the default. This is mostly relevant for expansion flags: it will
+ // erase user values in *all* flags that the expansion flag expands to. Only
+ // use this on expansion flags if this is acceptable behavior. Since the last
+ // policy wins, later policies on this same flag will still remove the
+ // expanded UseDefault, so there is a way around, but it's really best not to
+ // use this on expansion flags at all.
+}
+
+message DisallowValues {
+ // Obsolete new_default_value field.
+ reserved 2;
+
+ // It is an error for the user to use any of these values (that is, the Bazel
+ // command will fail), unless new_value or use_default is set.
+ //
+ // For repeatable flags, if any one of the values in the flag matches a value
+ // in the list of disallowed values, an error is thrown.
+ //
+ // Care must be taken for flags with complicated converters. For example,
+ // it's possible for a repeated flag to be of type List<List<T>>, so that
+ // "--foo=a,b --foo=c,d" results in foo=[["a","b"], ["c", "d"]]. In this case,
+ // it is not possible to disallow just "b", nor will ["b", "a"] match, nor
+ // will ["b", "c"] (but ["a", "b"] will still match).
+ repeated string disallowed_values = 1;
+
+ oneof replacement_value {
+ // If set and if the value of the flag is disallowed (including the default
+ // value of the flag if the user doesn't specify a value), use this value as
+ // the value of the flag instead of raising an error. This does not apply to
+ // repeatable flags and is ignored if the flag is a repeatable flag.
+ string new_value = 3;
+
+ // If set and if the value of the flag is disallowed, use the default value
+ // of the flag instead of raising an error. Unlike new_value, this works for
+ // repeatable flags, but note that the default value for repeatable flags is
+ // always empty.
+ //
+ // Note that it is an error to disallow the default value of the flag and
+ // to set use_default, unless the flag is a repeatable flag where the
+ // default value is always the empty list.
+ UseDefault use_default = 4;
+ }
+}
+
+message AllowValues {
+ // Obsolete new_default_value field.
+ reserved 2;
+
+ // It is an error for the user to use any value not in this list, unless
+ // new_value or use_default is set.
+ repeated string allowed_values = 1;
+
+ oneof replacement_value {
+    // If set and if the value of the flag is not allowed (including the default
+ // value of the flag if the user doesn't specify a value), use this value as
+ // the value of the flag instead of raising an error. This does not apply to
+ // repeatable flags and is ignored if the flag is a repeatable flag.
+ string new_value = 3;
+
+    // If set and if the value of the flag is not allowed, use the default value
+ // of the flag instead of raising an error. Unlike new_value, this works for
+ // repeatable flags, but note that the default value for repeatable flags is
+ // always empty.
+ //
+ // Note that it is an error to disallow the default value of the flag and
+ // to set use_default, unless the flag is a repeatable flag where the
+ // default value is always the empty list.
+ UseDefault use_default = 4;
+ }
+}
diff --git a/atest/bazel/runner/src/main/protobuf/option_filters.proto b/atest/bazel/runner/src/main/protobuf/option_filters.proto
new file mode 100644
index 0000000..d931083
--- /dev/null
+++ b/atest/bazel/runner/src/main/protobuf/option_filters.proto
@@ -0,0 +1,59 @@
+// Copyright 2017 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+syntax = "proto3";
+
+package options;
+
+// option java_api_version = 2;
+option java_package = "com.google.devtools.common.options.proto";
+
+// IMPORTANT NOTE: These two enums must be kept in sync with their Java
+// equivalents in src/main/java/com/google/devtools/common/options.
+// Changing this proto has specific compatibility requirements, please see the
+// Java documentation for details.
+
+// Docs in java enum.
+enum OptionEffectTag {
+ // This option's effect or intent is unknown.
+ UNKNOWN = 0;
+
+ // This flag has literally no effect.
+ NO_OP = 1;
+
+ LOSES_INCREMENTAL_STATE = 2;
+ CHANGES_INPUTS = 3;
+ AFFECTS_OUTPUTS = 4;
+ BUILD_FILE_SEMANTICS = 5;
+ BAZEL_INTERNAL_CONFIGURATION = 6;
+ LOADING_AND_ANALYSIS = 7;
+ EXECUTION = 8;
+ HOST_MACHINE_RESOURCE_OPTIMIZATIONS = 9;
+ EAGERNESS_TO_EXIT = 10;
+ BAZEL_MONITORING = 11;
+ TERMINAL_OUTPUT = 12;
+ ACTION_COMMAND_LINES = 13;
+ TEST_RUNNER = 14;
+}
+
+// Docs in java enum.
+enum OptionMetadataTag {
+ EXPERIMENTAL = 0;
+ INCOMPATIBLE_CHANGE = 1;
+ DEPRECATED = 2;
+ HIDDEN = 3;
+ INTERNAL = 4;
+ reserved "TRIGGERED_BY_ALL_INCOMPATIBLE_CHANGES";
+ reserved 5;
+ EXPLICIT_IN_OUTPUT_PATH = 6;
+}
diff --git a/atest/bazel/runner/tests/src/com/android/tradefed/testtype/bazel/BazelTestTest.java b/atest/bazel/runner/tests/src/com/android/tradefed/testtype/bazel/BazelTestTest.java
new file mode 100644
index 0000000..b30da9a
--- /dev/null
+++ b/atest/bazel/runner/tests/src/com/android/tradefed/testtype/bazel/BazelTestTest.java
@@ -0,0 +1,838 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.tradefed.testtype.bazel;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.anyMap;
+import static org.mockito.Mockito.argThat;
+import static org.mockito.Mockito.contains;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import com.android.tradefed.config.ConfigurationException;
+import com.android.tradefed.config.OptionSetter;
+import com.android.tradefed.invoker.InvocationContext;
+import com.android.tradefed.invoker.TestInformation;
+import com.android.tradefed.log.LogUtil.CLog;
+import com.android.tradefed.result.FailureDescription;
+import com.android.tradefed.result.ILogSaverListener;
+import com.android.tradefed.result.LogDataType;
+import com.android.tradefed.result.LogFile;
+import com.android.tradefed.result.TestDescription;
+import com.android.tradefed.result.error.ErrorIdentifier;
+import com.android.tradefed.result.error.TestErrorIdentifier;
+import com.android.tradefed.result.proto.FileProtoResultReporter;
+import com.android.tradefed.result.proto.TestRecordProto.FailureStatus;
+import com.android.tradefed.util.ZipUtil;
+
+import com.google.common.base.Splitter;
+import com.google.common.io.MoreFiles;
+import com.google.common.util.concurrent.Uninterruptibles;
+import com.google.devtools.build.lib.buildeventstream.BuildEventStreamProtos;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+import org.mockito.ArgumentMatcher;
+import org.mockito.InOrder;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+@RunWith(JUnit4.class)
+public final class BazelTestTest {
+
+ private ILogSaverListener mMockListener;
+ private TestInformation mTestInfo;
+ private Path mBazelTempPath;
+
+ private static final String BAZEL_TEST_TARGETS_OPTION = "bazel-test-target-patterns";
+ private static final String BEP_FILE_OPTION_NAME = "--build_event_binary_file";
+ private static final String REPORT_CACHED_TEST_RESULTS_OPTION = "report-cached-test-results";
+ private static final long RANDOM_SEED = 1234567890L;
+
+ @Rule public final TemporaryFolder tempDir = new TemporaryFolder();
+
+ @Before
+ public void setUp() throws Exception {
+ mMockListener = mock(ILogSaverListener.class);
+ InvocationContext context = new InvocationContext();
+ context.addInvocationAttribute("module-id", "bazel-test-module-id");
+ mTestInfo = TestInformation.newBuilder().setInvocationContext(context).build();
+ mBazelTempPath =
+ Files.createDirectory(tempDir.getRoot().toPath().resolve("bazel_temp_dir"));
+ }
+
+ @Test
+ public void runSucceeds_invokesListenerEvents() throws Exception {
+ BazelTest bazelTest = newBazelTest();
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ verify(mMockListener).testRunStarted(eq(BazelTest.class.getName()), eq(0));
+ verify(mMockListener).testRunEnded(anyLong(), anyMap());
+ }
+
+ @Test
+ public void runSucceeds_tempDirEmptied() throws Exception {
+ BazelTest bazelTest = newBazelTest();
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ assertThat(listDirContents(mBazelTempPath)).isEmpty();
+ }
+
+ @Test
+ public void runSucceeds_logsSaved() throws Exception {
+ BazelTest bazelTest = newBazelTest();
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ verify(mMockListener)
+ .testLog(
+ contains(String.format("%s-log", BazelTest.QUERY_ALL_TARGETS)),
+ any(),
+ any());
+ verify(mMockListener)
+ .testLog(
+ contains(String.format("%s-log", BazelTest.QUERY_MAP_MODULES_TO_TARGETS)),
+ any(),
+ any());
+ verify(mMockListener)
+ .testLog(contains(String.format("%s-log", BazelTest.RUN_TESTS)), any(), any());
+ }
+
+    /* TODO: disabled test — re-enable with a tracking bug, or delete. @Test
+ public void runSucceeds_testLogsReportedUnderModule() throws Exception {
+ BazelTest bazelTest = newBazelTest();
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ InOrder inOrder = inOrder(mMockListener);
+ inOrder.verify(mMockListener).testModuleStarted(any());
+ inOrder.verify(mMockListener)
+ .testLog(eq("tf-test-process-module-log"), eq(LogDataType.TAR_GZ), any());
+ inOrder.verify(mMockListener)
+ .testLog(eq("tf-test-process-invocation-log"), eq(LogDataType.XML), any());
+ inOrder.verify(mMockListener).testModuleEnded();
+ }*/
+
+ @Test
+ public void malformedProtoResults_runFails() throws Exception {
+ FakeProcessStarter processStarter = newFakeProcessStarter();
+ processStarter.put(
+ BazelTest.RUN_TESTS,
+ builder -> {
+ return new FakeBazelTestProcess(builder, mBazelTempPath) {
+ @Override
+ public void writeSingleTestOutputs(Path outputsDir, String testName)
+ throws IOException, ConfigurationException {
+
+ super.writeSingleTestOutputs(outputsDir, testName);
+
+ Path outputFile = outputsDir.resolve("proto-results");
+ Files.write(outputFile, "Malformed Proto File".getBytes());
+ }
+ };
+ });
+ BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ verify(mMockListener).testRunFailed(hasFailureStatus(FailureStatus.INFRA_FAILURE));
+ }
+
+ @Test
+ public void malformedBepFile_runFails() throws Exception {
+ FakeProcessStarter processStarter = newFakeProcessStarter();
+ processStarter.put(
+ BazelTest.RUN_TESTS,
+ builder -> {
+ return new FakeBazelTestProcess(builder, mBazelTempPath) {
+ @Override
+ public void writeSingleTestResultEvent(File outputsZipFile, Path bepFile)
+ throws IOException {
+
+ Files.write(bepFile, "Malformed BEP File".getBytes());
+ }
+ };
+ });
+ BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ verify(mMockListener).testRunFailed(hasFailureStatus(FailureStatus.TEST_FAILURE));
+ }
+
+ @Test
+ public void bepFileMissingLastMessage_runFails() throws Exception {
+ FakeProcessStarter processStarter = newFakeProcessStarter();
+ processStarter.put(
+ BazelTest.RUN_TESTS,
+ builder -> {
+ return new FakeBazelTestProcess(builder, mBazelTempPath) {
+ @Override
+ public void writeLastEvent() throws IOException {
+ // Do nothing.
+ }
+ };
+ });
+ BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ verify(mMockListener).testRunFailed(hasFailureStatus(FailureStatus.INFRA_FAILURE));
+ }
+
+ @Test
+ public void targetsNotSet_testsAllTargets() throws Exception {
+ List<String> command = new ArrayList<>();
+ FakeProcessStarter processStarter = newFakeProcessStarter();
+ processStarter.put(
+ BazelTest.QUERY_ALL_TARGETS,
+ newPassingProcessWithStdout("//bazel/target:default_target_host"));
+ processStarter.put(
+ BazelTest.QUERY_MAP_MODULES_TO_TARGETS,
+ newPassingProcessWithStdout("default_target //bazel/target:default_target_host"));
+ processStarter.put(
+ BazelTest.RUN_TESTS,
+ builder -> {
+ command.addAll(builder.command());
+ return new FakeBazelTestProcess(builder, mBazelTempPath);
+ });
+ BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ assertThat(command).contains("//bazel/target:default_target_host");
+ }
+
+ @Test
+ public void archiveRootPathNotSet_runAborted() throws Exception {
+ Properties properties = bazelTestProperties();
+ properties.remove("BAZEL_SUITE_ROOT");
+ BazelTest bazelTest = newBazelTestWithProperties(properties);
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ verify(mMockListener).testRunFailed(hasFailureStatus(FailureStatus.DEPENDENCY_ISSUE));
+ }
+
+ @Test
+ public void archiveRootPathEmptyString_runAborted() throws Exception {
+ Properties properties = bazelTestProperties();
+ properties.put("BAZEL_SUITE_ROOT", "");
+ BazelTest bazelTest = newBazelTestWithProperties(properties);
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ verify(mMockListener).testRunFailed(hasFailureStatus(FailureStatus.DEPENDENCY_ISSUE));
+ }
+
+ @Test
+ public void bazelQueryAllTargetsFails_runAborted() throws Exception {
+ FakeProcessStarter processStarter = newFakeProcessStarter();
+ processStarter.put(BazelTest.QUERY_ALL_TARGETS, newFailingProcess());
+ BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ verify(mMockListener).testRunFailed(hasErrorIdentifier(TestErrorIdentifier.TEST_ABORTED));
+ }
+
+ @Test
+ public void bazelQueryMapModuleToTargetsFails_runAborted() throws Exception {
+ FakeProcessStarter processStarter = newFakeProcessStarter();
+ processStarter.put(BazelTest.QUERY_MAP_MODULES_TO_TARGETS, newFailingProcess());
+ BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ verify(mMockListener).testRunFailed(hasErrorIdentifier(TestErrorIdentifier.TEST_ABORTED));
+ }
+
+ @Test
+ public void testTimeout_causesTestFailure() throws Exception {
+ FakeProcessStarter processStarter = newFakeProcessStarter();
+ processStarter.put(
+ BazelTest.RUN_TESTS,
+ builder -> {
+ return new FakeBazelTestProcess(builder, mBazelTempPath) {
+ @Override
+ public boolean waitFor(long timeout, TimeUnit unit) {
+ return false;
+ }
+ };
+ });
+ BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ verify(mMockListener).testRunFailed(hasFailureStatus(FailureStatus.DEPENDENCY_ISSUE));
+ }
+
+ @Test
+ public void includeTestModule_runsOnlyThatModule() throws Exception {
+ String moduleInclude = "custom_module";
+ List<String> command = new ArrayList<>();
+ FakeProcessStarter processStarter = newFakeProcessStarter();
+ processStarter.put(
+ BazelTest.QUERY_ALL_TARGETS,
+ newPassingProcessWithStdout(
+ "//bazel/target:default_target_host\n//bazel/target:custom_module_host"));
+ processStarter.put(
+ BazelTest.QUERY_MAP_MODULES_TO_TARGETS,
+ newPassingProcessWithStdout(
+ "default_target //bazel/target:default_target_host\n"
+ + "custom_module //bazel/target:custom_module_host"));
+ processStarter.put(
+ BazelTest.RUN_TESTS,
+ builder -> {
+ command.addAll(builder.command());
+ return new FakeBazelTestProcess(builder, mBazelTempPath);
+ });
+ BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+ OptionSetter setter = new OptionSetter(bazelTest);
+ setter.setOptionValue("include-filter", moduleInclude);
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ assertThat(command).contains("//bazel/target:custom_module_host");
+ assertThat(command).doesNotContain("//bazel/target:default_target_host");
+ }
+
+ @Test
+ public void excludeTestModule_doesNotRunTestModule() throws Exception {
+ String moduleExclude = "custom_module";
+ List<String> command = new ArrayList<>();
+ FakeProcessStarter processStarter = newFakeProcessStarter();
+ processStarter.put(
+ BazelTest.QUERY_ALL_TARGETS,
+ newPassingProcessWithStdout(
+ "//bazel/target:default_target_host\n//bazel/target:custom_module_host"));
+ processStarter.put(
+ BazelTest.QUERY_MAP_MODULES_TO_TARGETS,
+ newPassingProcessWithStdout(
+ "default_target //bazel/target:default_target_host\n"
+ + "custom_module //bazel/target:custom_module_host"));
+ processStarter.put(
+ BazelTest.RUN_TESTS,
+ builder -> {
+ command.addAll(builder.command());
+ return new FakeBazelTestProcess(builder, mBazelTempPath);
+ });
+ BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+ OptionSetter setter = new OptionSetter(bazelTest);
+ setter.setOptionValue("exclude-filter", moduleExclude);
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ assertThat(command).doesNotContain("//bazel/target:custom_module_host");
+ assertThat(command).contains("//bazel/target:default_target_host");
+ }
+
+ @Test
+ public void excludeTestFunction_generatesExcludeFilter() throws Exception {
+ String functionExclude = "custom_module custom_module.customClass#customFunction";
+ List<String> command = new ArrayList<>();
+ FakeProcessStarter processStarter = newFakeProcessStarter();
+ processStarter.put(
+ BazelTest.RUN_TESTS,
+ builder -> {
+ command.addAll(builder.command());
+ return new FakeBazelTestProcess(builder, mBazelTempPath);
+ });
+ BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+ OptionSetter setter = new OptionSetter(bazelTest);
+ setter.setOptionValue("exclude-filter", functionExclude);
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ assertThat(command)
+ .contains(
+ "--test_arg=--global-filters:exclude-filter=custom_module"
+ + " custom_module.customClass#customFunction");
+ }
+
+ @Test
+ public void excludeAndIncludeFiltersSet_testRunAborted() throws Exception {
+ String moduleExclude = "custom_module";
+ BazelTest bazelTest = newBazelTest();
+ OptionSetter setter = new OptionSetter(bazelTest);
+ setter.setOptionValue("exclude-filter", moduleExclude);
+ setter.setOptionValue("include-filter", moduleExclude);
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ verify(mMockListener).testRunFailed(hasErrorIdentifier(TestErrorIdentifier.TEST_ABORTED));
+ }
+
+ @Test
+ public void queryMapModulesToTargetsEmpty_abortsRun() throws Exception {
+ FakeProcessStarter processStarter = newFakeProcessStarter();
+ processStarter.put(BazelTest.QUERY_MAP_MODULES_TO_TARGETS, newPassingProcessWithStdout(""));
+ BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ verify(mMockListener).testRunFailed(hasErrorIdentifier(TestErrorIdentifier.TEST_ABORTED));
+ }
+
+ @Test
+ public void multipleTargetsMappedToSingleModule_abortsRun() throws Exception {
+ FakeProcessStarter processStarter = newFakeProcessStarter();
+ processStarter.put(
+ BazelTest.QUERY_MAP_MODULES_TO_TARGETS,
+ newPassingProcessWithStdout(
+ "default_target //bazel/target:default_target_1\n"
+ + "default_target //bazel/target:default_target_2"));
+ BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ verify(mMockListener).testRunFailed(hasErrorIdentifier(TestErrorIdentifier.TEST_ABORTED));
+ }
+
+ @Test
+ public void queryMapModulesToTargetsBadOutput_abortsRun() throws Exception {
+ FakeProcessStarter processStarter = newFakeProcessStarter();
+ processStarter.put(
+ BazelTest.QUERY_MAP_MODULES_TO_TARGETS,
+ newPassingProcessWithStdout(
+ "default_target //bazel/target:default_target incorrect_field"));
+ BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ verify(mMockListener).testRunFailed(hasErrorIdentifier(TestErrorIdentifier.TEST_ABORTED));
+ }
+
+ @Test
+ public void multipleTestsRun_reportsAllResults() throws Exception {
+ int testCount = 3;
+ Duration testDelay = Duration.ofMillis(10);
+ final AtomicLong testTime = new AtomicLong();
+ FakeProcessStarter processStarter = newFakeProcessStarter();
+ byte[] bytes = logFileContents();
+
+ processStarter.put(
+ BazelTest.RUN_TESTS,
+ builder -> {
+ return new FakeBazelTestProcess(builder, mBazelTempPath) {
+ @Override
+ public Path createLogFile(String testName, Path logDir) throws IOException {
+ Path logFile = logDir.resolve(testName);
+ Files.write(logFile, bytes);
+ return logFile;
+ }
+
+ @Override
+ public void runTests() throws IOException, ConfigurationException {
+ long start = System.nanoTime();
+ for (int i = 0; i < testCount; i++) {
+ runSingleTest("test-" + i);
+ }
+ testTime.set((System.nanoTime() - start) / 1000000);
+ }
+
+ @Override
+ void singleTestBody() {
+ Uninterruptibles.sleepUninterruptibly(
+ testDelay.toMillis(), TimeUnit.MILLISECONDS);
+ }
+ };
+ });
+ BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+
+ long start = System.nanoTime();
+ bazelTest.run(mTestInfo, mMockListener);
+ long totalTime = ((System.nanoTime() - start) / 1000000);
+
+ // TODO(b/267378279): Consider converting this test to a proper benchmark instead of using
+ // logging.
+ CLog.i("Total runtime: " + totalTime + "ms, test time: " + testTime.get() + "ms.");
+
+ verify(mMockListener, times(testCount)).testStarted(any(), anyLong());
+ }
+
+ @Test
+ public void reportCachedTestResultsDisabled_cachedTestResultNotReported() throws Exception {
+ FakeProcessStarter processStarter = newFakeProcessStarter();
+ processStarter.put(
+ BazelTest.RUN_TESTS,
+ builder -> {
+ return new FakeBazelTestProcess(builder, mBazelTempPath) {
+ @Override
+ public void writeSingleTestResultEvent(File outputsZipFile, Path bepFile)
+ throws IOException {
+
+ writeSingleTestResultEvent(outputsZipFile, bepFile, /* cached */ true);
+ }
+ };
+ });
+ BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+ OptionSetter setter = new OptionSetter(bazelTest);
+ setter.setOptionValue(REPORT_CACHED_TEST_RESULTS_OPTION, "false");
+
+ bazelTest.run(mTestInfo, mMockListener);
+
+ verify(mMockListener, never()).testStarted(any(), anyLong());
+ }
+
+ private static byte[] logFileContents() {
+ // Seed Random to always get the same sequence of values.
+ Random rand = new Random(RANDOM_SEED);
+ byte[] bytes = new byte[1024 * 1024];
+ rand.nextBytes(bytes);
+ return bytes;
+ }
+
+ private static FakeProcess newPassingProcess() {
+ return new FakeProcess() {
+ @Override
+ public int exitValue() {
+ return 0;
+ }
+ };
+ }
+
+ private static FakeProcess newFailingProcess() {
+ return new FakeProcess() {
+ @Override
+ public int exitValue() {
+ return -1;
+ }
+ };
+ }
+
+ private static FakeProcess newPassingProcessWithStdout(String stdOut) {
+ return new FakeProcess() {
+ @Override
+ public int exitValue() {
+ return 0;
+ }
+
+ @Override
+ public InputStream getInputStream() {
+ return new ByteArrayInputStream(stdOut.getBytes());
+ }
+ };
+ }
+
+ private BazelTest newBazelTestWithProperties(Properties properties) throws Exception {
+ return new BazelTest(newFakeProcessStarter(), properties);
+ }
+
+ private BazelTest newBazelTestWithProcessStarter(BazelTest.ProcessStarter starter)
+ throws Exception {
+
+ return new BazelTest(starter, bazelTestProperties());
+ }
+
+ private BazelTest newBazelTest() throws Exception {
+ return newBazelTestWithProcessStarter(newFakeProcessStarter());
+ }
+
+ private Properties bazelTestProperties() {
+ Properties properties = new Properties();
+ properties.put("BAZEL_SUITE_ROOT", "/phony/path/to/bazel/test/suite");
+ properties.put("java.io.tmpdir", mBazelTempPath.toAbsolutePath().toString());
+
+ return properties;
+ }
+
+ private static FailureDescription hasErrorIdentifier(ErrorIdentifier error) {
+ return argThat(
+ new ArgumentMatcher<FailureDescription>() {
+ @Override
+ public boolean matches(FailureDescription right) {
+ return right.getErrorIdentifier().equals(error);
+ }
+
+ @Override
+ public String toString() {
+ return "hasErrorIdentifier(" + error.toString() + ")";
+ }
+ });
+ }
+
+ private static FailureDescription hasFailureStatus(FailureStatus status) {
+ return argThat(
+ new ArgumentMatcher<FailureDescription>() {
+ @Override
+ public boolean matches(FailureDescription right) {
+ return right.getFailureStatus().equals(status);
+ }
+
+ @Override
+ public String toString() {
+ return "hasFailureStatus(" + status.toString() + ")";
+ }
+ });
+ }
+
+ private FakeProcessStarter newFakeProcessStarter() throws IOException {
+ String targetName = "//bazel/target:default_target_host";
+ FakeProcessStarter processStarter = new FakeProcessStarter();
+ processStarter.put(BazelTest.QUERY_ALL_TARGETS, newPassingProcessWithStdout(targetName));
+ processStarter.put(
+ BazelTest.QUERY_MAP_MODULES_TO_TARGETS,
+ newPassingProcessWithStdout("default_target " + targetName));
+ processStarter.put(
+ BazelTest.RUN_TESTS,
+ builder -> {
+ return new FakeBazelTestProcess(builder, mBazelTempPath);
+ });
+ return processStarter;
+ }
+
+ private static List<Path> listDirContents(Path dir) throws IOException {
+ try (Stream<Path> fileStream = Files.list(dir)) {
+ return fileStream.collect(Collectors.toList());
+ }
+ }
+
+ private static final class FakeProcessStarter implements BazelTest.ProcessStarter {
+ private final Map<String, Function<ProcessBuilder, FakeProcess>> mTagToProcess =
+ new HashMap<>();
+
+ @Override
+ public Process start(String tag, ProcessBuilder builder) throws IOException {
+ FakeProcess process = mTagToProcess.get(tag).apply(builder);
+ process.start();
+ return process;
+ }
+
+ public void put(String tag, FakeProcess process) {
+ mTagToProcess.put(
+ tag,
+ b -> {
+ return process;
+ });
+ }
+
+ public void put(String tag, Function<ProcessBuilder, FakeProcess> process) {
+ mTagToProcess.put(tag, process);
+ }
+ }
+
+ private abstract static class FakeProcess extends Process {
+
+ private volatile boolean destroyed;
+
+ @Override
+ public void destroy() {
+ destroyed = true;
+ }
+
+ @Override
+ public int exitValue() {
+ return destroyed ? 42 : 0;
+ }
+
+ @Override
+ public InputStream getErrorStream() {
+ return new ByteArrayInputStream("".getBytes());
+ }
+
+ @Override
+ public InputStream getInputStream() {
+ return new ByteArrayInputStream("".getBytes());
+ }
+
+ @Override
+ public OutputStream getOutputStream() {
+ return new ByteArrayOutputStream(0);
+ }
+
+ @Override
+ public int waitFor() {
+ return exitValue();
+ }
+
+ public void start() throws IOException {
+ return;
+ }
+ }
+
+ private static class FakeBazelTestProcess extends FakeProcess {
+ private final Path mBepFile;
+ private final Path mBazelTempDirectory;
+
+ public FakeBazelTestProcess(ProcessBuilder builder, Path bazelTempDir) {
+ mBepFile =
+ Paths.get(
+ builder.command().stream()
+ .map(s -> Splitter.on('=').splitToList(s))
+ .filter(s -> s.get(0).equals(BEP_FILE_OPTION_NAME))
+ .findFirst()
+ .get()
+ .get(1));
+ mBazelTempDirectory = bazelTempDir;
+ }
+
+ @Override
+ public void start() throws IOException {
+ try {
+ runTests();
+ writeLastEvent();
+ } catch (ConfigurationException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ void runTests() throws IOException, ConfigurationException {
+ runSingleTest("test-1");
+ }
+
+ void runSingleTest(String testName) throws IOException, ConfigurationException {
+ Path outputDir = Files.createTempDirectory(mBazelTempDirectory, testName);
+ try {
+ singleTestBody();
+ writeSingleTestOutputs(outputDir, testName);
+ File outputsZipFile = zipSingleTestOutputsDirectory(outputDir);
+ writeSingleTestResultEvent(outputsZipFile, mBepFile);
+ } finally {
+ MoreFiles.deleteRecursively(outputDir);
+ }
+ }
+
+ void singleTestBody() {
+ // Do nothing.
+ }
+
+ void writeSingleTestOutputs(Path outputsDir, String testName)
+ throws IOException, ConfigurationException {
+
+ FileProtoResultReporter reporter = new FileProtoResultReporter();
+ OptionSetter setter = new OptionSetter(reporter);
+ Path outputFile = outputsDir.resolve("proto-results");
+ setter.setOptionValue("proto-output-file", outputFile.toAbsolutePath().toString());
+
+ Path logDir = Files.createDirectories(outputsDir.resolve("stub/-1/stub"));
+ Path isolatedJavaLog = createLogFile("isolated-java-logs.tar.gz", logDir);
+ Path tfConfig = createLogFile("tradefed-expanded-config.xml", logDir);
+
+ InvocationContext context = new InvocationContext();
+ context.addInvocationAttribute("module-id", "single-tradefed-test-module-id");
+
+ reporter.invocationStarted(context);
+ reporter.testModuleStarted(context);
+ reporter.testRunStarted("test-run", 1);
+ TestDescription testD = new TestDescription("class-name", testName);
+ reporter.testStarted(testD);
+ reporter.testEnded(testD, Collections.emptyMap());
+ reporter.testRunEnded(0, Collections.emptyMap());
+ reporter.logAssociation(
+ "module-log",
+ new LogFile(
+ isolatedJavaLog.toAbsolutePath().toString(), "", LogDataType.TAR_GZ));
+ reporter.testModuleEnded();
+ reporter.logAssociation(
+ "invocation-log",
+ new LogFile(tfConfig.toAbsolutePath().toString(), "", LogDataType.XML));
+ reporter.invocationEnded(0);
+ }
+
+ Path createLogFile(String testName, Path logDir) throws IOException {
+ Path logFile = logDir.resolve(testName);
+ Files.write(logFile, testName.getBytes());
+ return logFile;
+ }
+
+ File zipSingleTestOutputsDirectory(Path outputsDir) throws IOException {
+ List<File> files =
+ listDirContents(outputsDir).stream()
+ .map(f -> f.toFile())
+ .collect(Collectors.toList());
+ return ZipUtil.createZip(files);
+ }
+
+ void writeSingleTestResultEvent(File outputsZipFile, Path bepFile) throws IOException {
+ writeSingleTestResultEvent(outputsZipFile, bepFile, false);
+ }
+
+ void writeSingleTestResultEvent(File outputsZipFile, Path bepFile, boolean cached)
+ throws IOException {
+ try (FileOutputStream bepOutputStream = new FileOutputStream(bepFile.toFile(), true)) {
+ BuildEventStreamProtos.BuildEvent.newBuilder()
+ .setId(
+ BuildEventStreamProtos.BuildEventId.newBuilder()
+ .setTestResult(
+ BuildEventStreamProtos.BuildEventId.TestResultId
+ .getDefaultInstance())
+ .build())
+ .setTestResult(
+ BuildEventStreamProtos.TestResult.newBuilder()
+ .addTestActionOutput(
+ BuildEventStreamProtos.File.newBuilder()
+ .setName("test.outputs__outputs.zip")
+ .setUri(outputsZipFile.getAbsolutePath())
+ .build())
+ .setExecutionInfo(
+ BuildEventStreamProtos.TestResult.ExecutionInfo
+ .newBuilder()
+ .setCachedRemotely(cached)
+ .build())
+ .build())
+ .build()
+ .writeDelimitedTo(bepOutputStream);
+ }
+ }
+
+ void writeLastEvent() throws IOException {
+ try (FileOutputStream bepOutputStream = new FileOutputStream(mBepFile.toFile(), true)) {
+ BuildEventStreamProtos.BuildEvent.newBuilder()
+ .setId(BuildEventStreamProtos.BuildEventId.getDefaultInstance())
+ .setProgress(BuildEventStreamProtos.Progress.getDefaultInstance())
+ .setLastMessage(true)
+ .build()
+ .writeDelimitedTo(bepOutputStream);
+ }
+ }
+ }
+}
diff --git a/atest/bazel/runner/update_bes_protos.sh b/atest/bazel/runner/update_bes_protos.sh
new file mode 100755
index 0000000..d09f84b
--- /dev/null
+++ b/atest/bazel/runner/update_bes_protos.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Updater script for Bazel BES protos for BazelTest
+#
+# Usage: update_bes_protos.sh <commit>
+#
+# TODO(b/254334040): Move protos to prebuilts/bazel/common and update alongside
+# bazel.
+
+set -euo pipefail
+
+COMMIT="$1"; shift
+
+SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &> /dev/null && pwd)
+DEST_DIR="${SCRIPT_DIR}/src/main/protobuf"
+
+echo "Updating proto files..."
+wget -P "${DEST_DIR}" https://raw.githubusercontent.com/bazelbuild/bazel/"${COMMIT}"/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto
+wget -P "${DEST_DIR}" https://raw.githubusercontent.com/bazelbuild/bazel/"${COMMIT}"/src/main/protobuf/command_line.proto
+wget -P "${DEST_DIR}" https://raw.githubusercontent.com/bazelbuild/bazel/"${COMMIT}"/src/main/protobuf/failure_details.proto
+wget -P "${DEST_DIR}" https://raw.githubusercontent.com/bazelbuild/bazel/"${COMMIT}"/src/main/protobuf/invocation_policy.proto
+wget -P "${DEST_DIR}" https://raw.githubusercontent.com/bazelbuild/bazel/"${COMMIT}"/src/main/protobuf/option_filters.proto
+echo "Done!"
diff --git a/atest/bazel/scripts/gen_workspace_archive.sh b/atest/bazel/scripts/gen_workspace_archive.sh
index 46047ba..915c85c 100755
--- a/atest/bazel/scripts/gen_workspace_archive.sh
+++ b/atest/bazel/scripts/gen_workspace_archive.sh
@@ -16,6 +16,12 @@
# A script to generate an Atest Bazel workspace for execution on the Android CI.
+# Exit immediately on failures and disallow undefined variables.
+set -euo pipefail
+# List commands as they are executed. This helps debug the error
+# if the script exits mid-way through.
+set -x
+
function check_env_var()
{
if [ ! -n "${!1}" ] ; then
@@ -35,12 +41,14 @@
}
out=$(get_build_var PRODUCT_OUT)
-JDK_PATH="${ANDROID_BUILD_TOP}/prebuilts/jdk/jdk11/linux-x86"
-BAZEL_BINARY="${ANDROID_BUILD_TOP}/prebuilts/bazel/linux-x86_64/bazel"
-# Use the versioned JDK and Python binaries in prebuilts/ for a reproducible
+# ANDROID_BUILD_TOP is deprecated, so don't use it throughout the script.
+# But if someone sets it, we'll respect it.
+cd ${ANDROID_BUILD_TOP:-.}
+
+# Use the versioned Python binaries in prebuilts/ for a reproducible
# build with minimal reliance on host tools.
-export PATH=${ANDROID_BUILD_TOP}/prebuilts/build-tools/path/linux-x86:${ANDROID_BUILD_TOP}/prebuilts/jdk/jdk11/linux-x86/bin:${PATH}
+export PATH=`pwd`/prebuilts/build-tools/path/linux-x86:${PATH}
export \
ANDROID_PRODUCT_OUT=${out} \
@@ -48,36 +56,41 @@
ANDROID_HOST_OUT=$(get_build_var HOST_OUT) \
ANDROID_TARGET_OUT_TESTCASES=$(get_build_var TARGET_OUT_TESTCASES)
-if [ ! -n "$OUT_DIR" ] ; then
+if [ ! -n "${OUT_DIR:-}" ] ; then
OUT_DIR=$(get_build_var "OUT_DIR")
fi
-if [ ! -n "$DIST_DIR" ] ; then
+if [ ! -n "${DIST_DIR:-}" ] ; then
echo "dist dir not defined, defaulting to OUT_DIR/dist."
export DIST_DIR=${OUT_DIR}/dist
fi
-# Build Atest from source to pick up the latest changes.
-${ANDROID_BUILD_TOP}/build/soong/soong_ui.bash --make-mode atest
+# Build:
+# - Atest from source to pick up the latest changes
+# - Bazel test suite needed by BazelTest
+# - EXTRA_TARGETS requested on the commandline (used by git_master.gcl)
+targets="atest dist empty-bazel-test-suite ${EXTRA_TARGETS:-}"
+build/soong/soong_ui.bash --make-mode $targets
+
+# TODO(b/277656887): Fix the underlying atest issue that causes the workspace to not be
+# regenerated.
+rm -rf ${OUT_DIR}/atest_bazel_workspace
# Generate the initial workspace via Atest Bazel mode.
-${OUT_DIR}/host/linux-x86/bin/atest-dev --bazel-mode --dry-run -m
+${OUT_DIR}/host/linux-x86/bin/atest-dev \
+ --bazel-mode \
+ --host-unit-test-only \
+ --host \
+ -c \
+ -b # Builds dependencies without running tests.
-# Copy over some needed dependencies. We need Bazel for querying dependencies
-# and actually running the test. The JDK is for the Tradefed test runner and
-# Java tests.
-cp -L ${BAZEL_BINARY} ${OUT_DIR}/atest_bazel_workspace/bazelbin
-mkdir ${OUT_DIR}/atest_bazel_workspace/prebuilts/jdk
-cp -a ${JDK_PATH}/* ${OUT_DIR}/atest_bazel_workspace/prebuilts/jdk/.
-
-pushd ${OUT_DIR}/atest_bazel_workspace
# TODO(b/201242197): Create a stub workspace for the remote_coverage_tools
# package so that Bazel does not attempt to fetch resources online which is not
# allowed on build bots.
-mkdir remote_coverage_tools
-touch remote_coverage_tools/WORKSPACE
-cat << EOF > remote_coverage_tools/BUILD
+mkdir -p ${OUT_DIR}/atest_bazel_workspace/remote_coverage_tools
+touch ${OUT_DIR}/atest_bazel_workspace/remote_coverage_tools/WORKSPACE
+cat << EOF > ${OUT_DIR}/atest_bazel_workspace/remote_coverage_tools/BUILD
package(default_visibility = ["//visibility:public"])
filegroup(
@@ -86,39 +99,25 @@
)
EOF
-# Make directories for temporary output.
-JAVA_TEMP_DIR=$(mktemp -d)
-trap "rm -rf ${JAVA_TEMP_DIR}" EXIT
+# Create the workspace archive.
+prebuilts/build-tools/linux-x86/bin/soong_zip \
+ -o ${DIST_DIR}/atest_bazel_workspace.zip \
+ -P android-bazel-suite/ \
+ -D out/atest_bazel_workspace/ \
+ -f "out/atest_bazel_workspace/**/.*" \
+ -symlinks=false `# Follow symlinks and store the referenced files.` \
+ -sha256 `# Store SHA256 checksum for each file to enable CAS.` \
+ `# Avoid failing for dangling symlinks since these are expected` \
+ `# because we don't build all targets.` \
+ -ignore_missing_files
-BAZEL_TEMP_DIR=$(mktemp -d)
-trap "rm -rf ${BAZEL_TEMP_DIR}" EXIT
+# Merge the workspace into bazel-test-suite.
+prebuilts/build-tools/linux-x86/bin/merge_zips \
+ ${DIST_DIR}/bazel-test-suite.zip \
+ ${DIST_DIR}/empty-bazel-test-suite.zip \
+ ${DIST_DIR}/atest_bazel_workspace.zip
-# Query the list of dependencies needed by the tests.
-# TODO(b/217658764): Consolidate Bazel query functions into a separate script
-# that other components can use.
-JAVA_HOME="${JDK_PATH}" \
- "${BAZEL_BINARY}" \
- --server_javabase="${JDK_PATH}" \
- --host_jvm_args=-Djava.io.tmpdir=${JAVA_TEMP_DIR} \
- --output_user_root=${BAZEL_TEMP_DIR} \
- --max_idle_secs=5 \
- cquery \
- --override_repository=remote_coverage_tools=${ANDROID_BUILD_TOP}/out/atest_bazel_workspace/remote_coverage_tools \
- --output=starlark \
- --starlark:file=${ANDROID_BUILD_TOP}/tools/asuite/atest/bazel/format_as_soong_module_name.cquery \
- "deps( $(${BAZEL_BINARY} \
- --server_javabase="${JDK_PATH}" \
- --host_jvm_args=-Djava.io.tmpdir=${JAVA_TEMP_DIR} \
- --output_user_root=${BAZEL_TEMP_DIR} \
- --max_idle_secs=5 query "tests(...)" | paste -sd "+" -) )" | \
- sed '/^$/d' | \
- sort -u \
-> build_targets
-
-popd
-
-# Build all test dependencies.
-${ANDROID_BUILD_TOP}/build/soong/soong_ui.bash --make-mode $(cat $OUT_DIR/atest_bazel_workspace/build_targets)
-
-# Create the workspace archive which will be downloaded by the Tradefed hosts.
-tar zcfh ${DIST_DIR}/atest_bazel_workspace.tar.gz out/atest_bazel_workspace/
+# Remove the old archives we no longer need
+rm -f \
+ ${DIST_DIR}/atest_bazel_workspace.zip \
+ ${DIST_DIR}/empty-bazel-test-suite.zip
diff --git a/atest/bazel_mode.py b/atest/bazel_mode.py
index 0aeeba1..58e183f 100644
--- a/atest/bazel_mode.py
+++ b/atest/bazel_mode.py
@@ -30,28 +30,42 @@
import dataclasses
import enum
import functools
+import logging
import os
import re
+import shlex
import shutil
import subprocess
+import tempfile
+import time
import warnings
from abc import ABC, abstractmethod
from collections import defaultdict, deque, OrderedDict
+from collections.abc import Iterable
from pathlib import Path
from types import MappingProxyType
from typing import Any, Callable, Dict, IO, List, Set
+from xml.etree import ElementTree as ET
-import atest_utils
-import constants
-import module_info
+from google.protobuf.message import DecodeError
-from atest_enum import ExitCode
-from test_finders import test_finder_base
-from test_finders import test_info
-from test_runners import test_runner_base as trb
-from test_runners import atest_tf_test_runner as tfr
+from atest import atest_utils
+from atest import constants
+from atest import module_info
+from atest.atest_enum import DetectType, ExitCode
+from atest.metrics import metrics
+from atest.proto import file_md5_pb2
+from atest.test_finders import test_finder_base
+from atest.test_finders import test_info
+from atest.test_runners import test_runner_base as trb
+from atest.test_runners import atest_tf_test_runner as tfr
+
+
+JDK_PACKAGE_NAME = 'prebuilts/robolectric_jdk'
+JDK_NAME = 'jdk'
+ROBOLECTRIC_CONFIG = 'build/make/core/robolectric_test_config_template.xml'
_BAZEL_WORKSPACE_DIR = 'atest_bazel_workspace'
_SUPPORTED_BAZEL_ARGS = MappingProxyType({
@@ -65,10 +79,23 @@
# https://docs.bazel.build/versions/main/command-line-reference.html#flag--flaky_test_attempts
constants.RETRY_ANY_FAILURE:
lambda arg_value: [f'--flaky_test_attempts={str(arg_value)}'],
+ # https://docs.bazel.build/versions/main/command-line-reference.html#flag--test_output
+ constants.VERBOSE:
+ lambda arg_value: ['--test_output=all'] if arg_value else [],
constants.BAZEL_ARG:
lambda arg_value: [item for sublist in arg_value for item in sublist]
})
+# Maps Bazel configuration names to Soong variant names.
+_CONFIG_TO_VARIANT = {
+ 'host': 'host',
+ 'device': 'target',
+}
+
+
+class AbortRunException(Exception):
+ pass
+
@enum.unique
class Features(enum.Enum):
@@ -79,12 +106,37 @@
EXPERIMENTAL_BES_PUBLISH = ('--experimental-bes-publish',
'Upload test results via BES in Bazel mode.',
False)
+ EXPERIMENTAL_JAVA_RUNTIME_DEPENDENCIES = (
+ '--experimental-java-runtime-dependencies',
+ 'Mirrors Soong Java `libs` and `static_libs` as Bazel target '
+ 'dependencies in the generated workspace. Tradefed test rules use '
+ 'these dependencies to set up the execution environment and ensure '
+ 'that all transitive runtime dependencies are present.',
+ True)
+ EXPERIMENTAL_REMOTE = (
+ '--experimental-remote',
+ 'Use Bazel remote execution and caching where supported.',
+ False)
+ EXPERIMENTAL_HOST_DRIVEN_TEST = (
+ '--experimental-host-driven-test',
+ 'Enables running host-driven device tests in Bazel mode.', True)
+ EXPERIMENTAL_ROBOLECTRIC_TEST = (
+ '--experimental-robolectric-test',
+ 'Enables running Robolectric tests in Bazel mode.', True)
def __init__(self, arg_flag, description, affects_workspace):
- self.arg_flag = arg_flag
- self.description = description
+ self._arg_flag = arg_flag
+ self._description = description
self.affects_workspace = affects_workspace
+ @property
+ def arg_flag(self):
+ return self._arg_flag
+
+ @property
+ def description(self):
+ return self._description
+
def add_parser_arguments(parser: argparse.ArgumentParser, dest: str):
for _, member in Features.__members__.items():
@@ -105,14 +157,25 @@
src_root_path = Path(os.environ.get(constants.ANDROID_BUILD_TOP))
workspace_path = get_bazel_workspace_dir()
+ resource_manager = ResourceManager(
+ src_root_path=src_root_path,
+ resource_root_path=_get_resource_root(),
+ product_out_path=Path(
+ os.environ.get(constants.ANDROID_PRODUCT_OUT)),
+ md5_checksum_file_path=workspace_path.joinpath(
+ 'workspace_md5_checksum'),
+ )
+ jdk_path = _read_robolectric_jdk_path(
+ resource_manager.get_src_file_path(ROBOLECTRIC_CONFIG, True))
+
workspace_generator = WorkspaceGenerator(
- src_root_path,
- workspace_path,
- Path(os.environ.get(constants.ANDROID_PRODUCT_OUT)),
- Path(os.environ.get(constants.ANDROID_HOST_OUT)),
- Path(atest_utils.get_build_out_dir()),
- mod_info,
- enabled_features,
+ resource_manager=resource_manager,
+ workspace_out_path=workspace_path,
+ host_out_path=Path(os.environ.get(constants.ANDROID_HOST_OUT)),
+ build_out_dir=Path(atest_utils.get_build_out_dir()),
+ mod_info=mod_info,
+ jdk_path=jdk_path,
+ enabled_features=enabled_features,
)
workspace_generator.generate()
@@ -122,33 +185,184 @@
atest_utils.get_build_target())
+class ResourceManager:
+ """Class for managing files required to generate a Bazel Workspace."""
+
+ def __init__(self,
+ src_root_path: Path,
+ resource_root_path: Path,
+ product_out_path: Path,
+ md5_checksum_file_path: Path):
+ self._root_type_to_path = {
+ file_md5_pb2.RootType.SRC_ROOT: src_root_path,
+ file_md5_pb2.RootType.RESOURCE_ROOT: resource_root_path,
+ file_md5_pb2.RootType.ABS_PATH: Path(),
+ file_md5_pb2.RootType.PRODUCT_OUT: product_out_path,
+ }
+ self._md5_checksum_file = md5_checksum_file_path
+ self._file_checksum_list = file_md5_pb2.FileChecksumList()
+
+ def get_src_file_path(
+ self,
+ rel_path: Path=None,
+ affects_workspace: bool=False
+ ) -> Path:
+ """Get the abs file path from the relative path of source_root.
+
+ Args:
+ rel_path: A relative path of the source_root.
+ affects_workspace: A boolean of whether the file affects the
+ workspace.
+
+ Returns:
+            An abs path of the file.
+ """
+ return self._get_file_path(
+ file_md5_pb2.RootType.SRC_ROOT, rel_path, affects_workspace)
+
+ def get_resource_file_path(
+ self,
+ rel_path: Path=None,
+ affects_workspace: bool=False,
+ ) -> Path:
+ """Get the abs file path from the relative path of resource_root.
+
+ Args:
+ rel_path: A relative path of the resource_root.
+ affects_workspace: A boolean of whether the file affects the
+ workspace.
+
+ Returns:
+            An abs path of the file.
+ """
+ return self._get_file_path(
+ file_md5_pb2.RootType.RESOURCE_ROOT, rel_path, affects_workspace)
+
+ def get_product_out_file_path(
+ self,
+ rel_path: Path=None,
+ affects_workspace: bool=False
+ ) -> Path:
+ """Get the abs file path from the relative path of product out.
+
+ Args:
+ rel_path: A relative path to the product out.
+ affects_workspace: A boolean of whether the file affects the
+ workspace.
+
+ Returns:
+ An abs path of the file.
+ """
+ return self._get_file_path(
+ file_md5_pb2.RootType.PRODUCT_OUT, rel_path, affects_workspace)
+
+ def _get_file_path(
+ self,
+ root_type: file_md5_pb2.RootType,
+ rel_path: Path,
+ affects_workspace: bool=True
+ ) -> Path:
+ abs_path = self._root_type_to_path[root_type].joinpath(
+ rel_path or Path())
+
+ if not affects_workspace:
+ return abs_path
+
+ if abs_path.is_dir():
+ for file in abs_path.glob('**/*'):
+ self._register_file(root_type, file)
+ else:
+ self._register_file(root_type, abs_path)
+ return abs_path
+
+ def _register_file(
+ self,
+ root_type: file_md5_pb2.RootType,
+ abs_path: Path
+ ):
+ if not abs_path.is_file():
+ logging.debug(' ignore %s: not a file.', abs_path)
+ return
+
+ rel_path = abs_path
+ if abs_path.is_relative_to(self._root_type_to_path[root_type]):
+ rel_path = abs_path.relative_to(self._root_type_to_path[root_type])
+
+ self._file_checksum_list.file_checksums.append(
+ file_md5_pb2.FileChecksum(
+ root_type=root_type,
+ rel_path=str(rel_path),
+ md5sum=atest_utils.md5sum(abs_path)
+ )
+ )
+
+ def register_file_with_abs_path(self, abs_path: Path):
+ """Register a file which affects the workspace.
+
+ Args:
+            abs_path: An abs path of the file.
+ """
+ self._register_file(file_md5_pb2.RootType.ABS_PATH, abs_path)
+
+ def save_affects_files_md5(self):
+ with open(self._md5_checksum_file, 'wb') as f:
+ f.write(self._file_checksum_list.SerializeToString())
+
+ def check_affects_files_md5(self):
+        """Check that all affected files are consistent with the actual MD5."""
+ if not self._md5_checksum_file.is_file():
+ return False
+
+ with open(self._md5_checksum_file, 'rb') as f:
+ file_md5_list = file_md5_pb2.FileChecksumList()
+
+ try:
+ file_md5_list.ParseFromString(f.read())
+ except DecodeError:
+ logging.warning(
+ 'Failed to parse the workspace md5 checksum file.')
+ return False
+
+ for file_md5 in file_md5_list.file_checksums:
+ abs_path = (Path(self._root_type_to_path[file_md5.root_type])
+ .joinpath(file_md5.rel_path))
+ if not abs_path.is_file():
+ return False
+ if atest_utils.md5sum(abs_path) != file_md5.md5sum:
+ return False
+ return True
+
+
class WorkspaceGenerator:
"""Class for generating a Bazel workspace."""
# pylint: disable=too-many-arguments
- def __init__(self, src_root_path: Path, workspace_out_path: Path,
- product_out_path: Path, host_out_path: Path,
- build_out_dir: Path, mod_info: module_info.ModuleInfo,
- enabled_features: Set[Features] = None):
+ def __init__(self,
+ resource_manager: ResourceManager,
+ workspace_out_path: Path,
+ host_out_path: Path,
+ build_out_dir: Path,
+ mod_info: module_info.ModuleInfo,
+ jdk_path: Path=None,
+ enabled_features: Set[Features] = None,
+ ):
"""Initializes the generator.
Args:
- src_root_path: Path of the ANDROID_BUILD_TOP.
workspace_out_path: Path where the workspace will be output.
- product_out_path: Path of the ANDROID_PRODUCT_OUT.
host_out_path: Path of the ANDROID_HOST_OUT.
build_out_dir: Path of OUT_DIR
mod_info: ModuleInfo object.
enabled_features: Set of enabled features.
"""
self.enabled_features = enabled_features or set()
- self.src_root_path = src_root_path
+ self.resource_manager = resource_manager
self.workspace_out_path = workspace_out_path
- self.product_out_path = product_out_path
self.host_out_path = host_out_path
self.build_out_dir = build_out_dir
self.mod_info = mod_info
self.path_to_package = {}
+ self.jdk_path = jdk_path
def generate(self):
"""Generate a Bazel workspace.
@@ -157,8 +371,7 @@
workspace will be generated. Otherwise, the existing workspace will be
reused.
"""
- workspace_md5_checksum_file = self.workspace_out_path.joinpath(
- 'workspace_md5_checksum')
+ start = time.time()
enabled_features_file = self.workspace_out_path.joinpath(
'atest_bazel_mode_enabled_features')
enabled_features_file_contents = '\n'.join(sorted(
@@ -168,7 +381,7 @@
# Update the file with the set of the currently enabled features to
# make sure that changes are detected in the workspace checksum.
enabled_features_file.write_text(enabled_features_file_contents)
- if atest_utils.check_md5(workspace_md5_checksum_file):
+ if self.resource_manager.check_affects_files_md5():
return
# We raise an exception if rmtree fails to avoid leaving stale
@@ -186,13 +399,16 @@
# Note that we write the set of enabled features despite having written
# it above since the workspace no longer exists at this point.
enabled_features_file.write_text(enabled_features_file_contents)
- atest_utils.save_md5(
- [
- self.mod_info.mod_info_file_path,
- enabled_features_file,
- ],
- workspace_md5_checksum_file
- )
+
+ self.resource_manager.get_product_out_file_path(
+ self.mod_info.mod_info_file_path.relative_to(
+ self.resource_manager.get_product_out_file_path()), True)
+ self.resource_manager.register_file_with_abs_path(
+ enabled_features_file)
+ self.resource_manager.save_affects_files_md5()
+ metrics.LocalDetectEvent(
+ detect_type=DetectType.FULL_GENERATE_BAZEL_WORKSPACE_TIME,
+ result=int(time.time() - start))
def _add_test_module_targets(self):
seen = set()
@@ -211,15 +427,21 @@
self.enabled_features and
self.mod_info.is_device_driven_test(info)):
self._resolve_dependencies(
- self._add_test_target(
- info, 'device',
- TestTarget.create_device_test_target), seen)
+ self._add_device_test_target(info, False), seen)
- if self.is_host_unit_test(info):
+ if self.mod_info.is_host_unit_test(info):
self._resolve_dependencies(
- self._add_test_target(
- info, 'host',
- TestTarget.create_deviceless_test_target), seen)
+ self._add_deviceless_test_target(info), seen)
+ elif (Features.EXPERIMENTAL_ROBOLECTRIC_TEST in
+ self.enabled_features and
+ self.mod_info.is_modern_robolectric_test(info)):
+ self._resolve_dependencies(
+ self._add_tradefed_robolectric_test_target(info), seen)
+ elif (Features.EXPERIMENTAL_HOST_DRIVEN_TEST in
+ self.enabled_features and
+ self.mod_info.is_host_driven_test(info)):
+ self._resolve_dependencies(
+ self._add_device_test_target(info, True), seen)
def _resolve_dependencies(
self, top_level_target: Target, seen: Set[Target]):
@@ -252,13 +474,28 @@
stack.append(next_top)
- def _add_test_target(self, info: Dict[str, Any], name_suffix: str,
- create_fn: Callable) -> Target:
+ def _add_device_test_target(self, info: Dict[str, Any],
+ is_host_driven: bool) -> Target:
package_name = self._get_module_path(info)
- name = f'{info["module_name"]}_{name_suffix}'
+ name_suffix = 'host' if is_host_driven else 'device'
+ name = f'{info[constants.MODULE_INFO_ID]}_{name_suffix}'
def create():
- return create_fn(
+ return TestTarget.create_device_test_target(
+ name,
+ package_name,
+ info,
+ is_host_driven,
+ )
+
+ return self._add_target(package_name, name, create)
+
+ def _add_deviceless_test_target(self, info: Dict[str, Any]) -> Target:
+ package_name = self._get_module_path(info)
+ name = f'{info[constants.MODULE_INFO_ID]}_host'
+
+ def create():
+ return TestTarget.create_deviceless_test_target(
name,
package_name,
info,
@@ -266,9 +503,21 @@
return self._add_target(package_name, name, create)
+ def _add_tradefed_robolectric_test_target(
+ self, info: Dict[str, Any]) -> Target:
+ package_name = self._get_module_path(info)
+ name = f'{info[constants.MODULE_INFO_ID]}_host'
+
+ return self._add_target(
+ package_name,
+ name,
+ lambda : TestTarget.create_tradefed_robolectric_test_target(
+ name, package_name, info, f'//{JDK_PACKAGE_NAME}:{JDK_NAME}')
+ )
+
def _add_prebuilt_target(self, info: Dict[str, Any]) -> Target:
package_name = self._get_module_path(info)
- name = info['module_name']
+ name = info[constants.MODULE_INFO_ID]
def create():
return SoongPrebuiltTarget.create(
@@ -326,27 +575,61 @@
return mod_path[0]
- def is_host_unit_test(self, info: Dict[str, Any]) -> bool:
- return self.mod_info.is_testable_module(
- info) and self.mod_info.is_host_unit_test(info)
-
def _generate_artifacts(self):
"""Generate workspace files on disk."""
self._create_base_files()
- self._symlink(src='tools/asuite/atest/bazel/rules',
- target='bazel/rules')
- self._symlink(src='tools/asuite/atest/bazel/configs',
- target='bazel/configs')
+
+ self._add_workspace_resource(src='rules', dst='bazel/rules')
+ self._add_workspace_resource(src='configs', dst='bazel/configs')
+
+ self._add_bazel_bootstrap_files()
+
# Symlink to package with toolchain definitions.
self._symlink(src='prebuilts/build-tools',
target='prebuilts/build-tools')
+
+ device_infra_path = 'vendor/google/tools/atest/device_infra'
+ if self.resource_manager.get_src_file_path(device_infra_path).exists():
+ self._symlink(src=device_infra_path,
+ target=device_infra_path)
+
self._create_constants_file()
+ self._generate_robolectric_resources()
+
for package in self.path_to_package.values():
package.generate(self.workspace_out_path)
+ def _generate_robolectric_resources(self):
+ if not self.jdk_path:
+ return
+ self._generate_jdk_resources()
+ self._generate_android_all_resources()
+
+ def _generate_jdk_resources(self):
+ # TODO(b/265596946): Create the JDK toolchain instead of using
+ # a filegroup.
+ return self._add_target(
+ JDK_PACKAGE_NAME,
+ JDK_NAME,
+ lambda : FilegroupTarget(
+ JDK_PACKAGE_NAME, JDK_NAME,
+ self.resource_manager.get_src_file_path(self.jdk_path))
+ )
+
+ def _generate_android_all_resources(self):
+ package_name = 'android-all'
+ name = 'android-all'
+
+ return self._add_target(
+ package_name,
+ name,
+ lambda : FilegroupTarget(
+ package_name, name,
+ self.host_out_path.joinpath(f'testcases/{name}'))
+ )
def _symlink(self, *, src, target):
"""Create a symbolic link in workspace pointing to source file/dir.
@@ -360,15 +643,45 @@
"""
symlink = self.workspace_out_path.joinpath(target)
symlink.parent.mkdir(parents=True, exist_ok=True)
- symlink.symlink_to(self.src_root_path.joinpath(src))
+ symlink.symlink_to(self.resource_manager.get_src_file_path(src))
def _create_base_files(self):
- self._symlink(src='tools/asuite/atest/bazel/WORKSPACE',
- target='WORKSPACE')
- self._symlink(src='tools/asuite/atest/bazel/bazelrc',
- target='.bazelrc')
+ self._add_workspace_resource(src='WORKSPACE', dst='WORKSPACE')
+ self._add_workspace_resource(src='bazelrc', dst='.bazelrc')
+
self.workspace_out_path.joinpath('BUILD.bazel').touch()
+ def _add_bazel_bootstrap_files(self):
+ self._symlink(src='tools/asuite/atest/bazel/resources/bazel.sh',
+ target='bazel.sh')
+ # TODO(b/256924541): Consolidate the JDK with the version the Roboleaf
+ # team uses.
+ self._symlink(src='prebuilts/jdk/jdk17/BUILD.bazel',
+ target='prebuilts/jdk/jdk17/BUILD.bazel')
+ self._symlink(src='prebuilts/jdk/jdk17/linux-x86',
+ target='prebuilts/jdk/jdk17/linux-x86')
+ self._symlink(src='prebuilts/bazel/linux-x86_64/bazel',
+ target='prebuilts/bazel/linux-x86_64/bazel')
+
+ def _add_workspace_resource(self, src, dst):
+ """Add resource to the given destination in workspace.
+
+ Args:
+ src: A string of a relative path to root of Bazel artifacts. This is
+ the source file/dir path that will be added to workspace.
+ dst: A string of a relative path to workspace root. This is the
+ destination file/dir path where the artifacts will be added.
+ """
+ src = self.resource_manager.get_resource_file_path(src, True)
+ dst = self.workspace_out_path.joinpath(dst)
+ dst.parent.mkdir(parents=True, exist_ok=True)
+
+ if src.is_file():
+ shutil.copy(src, dst)
+ else:
+ shutil.copytree(src, dst,
+ ignore=shutil.ignore_patterns('__init__.py'))
+
def _create_constants_file(self):
def variable_name(target_name):
@@ -385,7 +698,7 @@
targets.append(target)
with self.workspace_out_path.joinpath(
- 'constants.bzl').open('w') as f:
+ 'constants.bzl').open('w') as f:
writer = IndentWriter(f)
for target in targets:
writer.write_line(
@@ -394,6 +707,10 @@
)
+def _get_resource_root():
+ return Path(os.path.dirname(__file__)).joinpath('bazel/resources')
+
+
class Package:
"""Class for generating an entire Package on disk."""
@@ -471,8 +788,8 @@
def target(self) -> Target:
if not self._target:
- module_name = self.info['module_name']
- raise Exception(f'Target not set for ref `{module_name}`')
+ target_name = self.info[constants.MODULE_INFO_ID]
+ raise Exception(f'Target not set for ref `{target_name}`')
return self._target
@@ -509,6 +826,42 @@
pass
+class FilegroupTarget(Target):
+
+ def __init__(
+ self,
+ package_name: str,
+ target_name: str,
+ srcs_root: Path
+ ):
+ self._package_name = package_name
+ self._target_name = target_name
+ self._srcs_root = srcs_root
+
+ def name(self) -> str:
+ return self._target_name
+
+ def package_name(self) -> str:
+ return self._package_name
+
+ def write_to_build_file(self, f: IO):
+ writer = IndentWriter(f)
+ build_file_writer = BuildFileWriter(writer)
+
+ writer.write_line('filegroup(')
+
+ with writer.indent():
+ build_file_writer.write_string_attribute('name', self._target_name)
+ build_file_writer.write_glob_attribute(
+ 'srcs', [f'{self._target_name}_files/**'])
+
+ writer.write_line(')')
+
+ def create_filesystem_layout(self, package_dir: Path):
+ symlink = package_dir.joinpath(f'{self._target_name}_files')
+ symlink.symlink_to(self._srcs_root)
+
+
class TestTarget(Target):
"""Class for generating a test target."""
@@ -522,31 +875,81 @@
'bazel-result-reporter'
})
- DEVICE_TEST_PREREQUISITES = frozenset(
- {'aapt'}).union(DEVICELESS_TEST_PREREQUISITES)
+ DEVICE_TEST_PREREQUISITES = frozenset(DEVICELESS_TEST_PREREQUISITES.union(
+ frozenset({
+ 'aapt',
+ 'aapt2',
+ 'compatibility-tradefed',
+ 'vts-core-tradefed-harness',
+ })))
@staticmethod
def create_deviceless_test_target(name: str, package_name: str,
info: Dict[str, Any]):
- return TestTarget(name, package_name, info, 'tradefed_deviceless_test',
- TestTarget.DEVICELESS_TEST_PREREQUISITES)
+ return TestTarget(
+ package_name,
+ 'tradefed_deviceless_test',
+ {
+ 'name': name,
+ 'test': ModuleRef.for_info(info),
+ 'module_name': info["module_name"],
+ 'tags': info.get(constants.MODULE_TEST_OPTIONS_TAGS, []),
+ },
+ TestTarget.DEVICELESS_TEST_PREREQUISITES,
+ )
@staticmethod
def create_device_test_target(name: str, package_name: str,
- info: Dict[str, Any]):
- return TestTarget(name, package_name, info, 'tradefed_device_test',
- TestTarget.DEVICE_TEST_PREREQUISITES)
+ info: Dict[str, Any], is_host_driven: bool):
+ rule = ('tradefed_host_driven_device_test' if is_host_driven
+ else 'tradefed_device_driven_test')
- def __init__(self, name: str, package_name: str, info: Dict[str, Any],
- rule_name: str, prerequisites=frozenset()):
- self._name = name
+ return TestTarget(
+ package_name,
+ rule,
+ {
+ 'name': name,
+ 'test': ModuleRef.for_info(info),
+ 'module_name': info["module_name"],
+ 'suites': set(
+ info.get(constants.MODULE_COMPATIBILITY_SUITES, [])),
+ 'tradefed_deps': list(map(
+ ModuleRef.for_name,
+ info.get(constants.MODULE_HOST_DEPS, []))),
+ 'tags': info.get(constants.MODULE_TEST_OPTIONS_TAGS, []),
+ },
+ TestTarget.DEVICE_TEST_PREREQUISITES,
+ )
+
+ @staticmethod
+ def create_tradefed_robolectric_test_target(
+ name: str,
+ package_name: str,
+ info: Dict[str, Any],
+ jdk_label: str
+ ):
+ return TestTarget(
+ package_name,
+ 'tradefed_robolectric_test',
+ {
+ 'name': name,
+ 'test': ModuleRef.for_info(info),
+ 'module_name': info["module_name"],
+ 'tags': info.get(constants.MODULE_TEST_OPTIONS_TAGS, []),
+ 'jdk' : jdk_label,
+ },
+ TestTarget.DEVICELESS_TEST_PREREQUISITES,
+ )
+
+ def __init__(self, package_name: str, rule_name: str,
+ attributes: Dict[str, Any], prerequisites=frozenset()):
+ self._attributes = attributes
self._package_name = package_name
- self._test_module_ref = ModuleRef.for_info(info)
self._rule_name = rule_name
self._prerequisites = prerequisites
def name(self) -> str:
- return self._name
+ return self._attributes['name']
def package_name(self) -> str:
return self._package_name
@@ -556,21 +959,127 @@
def dependencies(self) -> List[ModuleRef]:
prerequisite_refs = map(ModuleRef.for_name, self._prerequisites)
- return [self._test_module_ref] + list(prerequisite_refs)
+
+ declared_dep_refs = []
+ for value in self._attributes.values():
+ if isinstance(value, Iterable):
+ declared_dep_refs.extend(
+ [dep for dep in value if isinstance(dep, ModuleRef)])
+ elif isinstance(value, ModuleRef):
+ declared_dep_refs.append(value)
+
+ return declared_dep_refs + list(prerequisite_refs)
def write_to_build_file(self, f: IO):
- prebuilt_target_name = self._test_module_ref.target().qualified_name()
+ prebuilt_target_name = self._attributes['test'].target(
+ ).qualified_name()
writer = IndentWriter(f)
+ build_file_writer = BuildFileWriter(writer)
writer.write_line(f'{self._rule_name}(')
with writer.indent():
- writer.write_line(f'name = "{self._name}",')
- writer.write_line(f'test = "{prebuilt_target_name}",')
+ build_file_writer.write_string_attribute(
+ 'name', self._attributes['name'])
+
+ build_file_writer.write_string_attribute(
+ 'module_name', self._attributes['module_name'])
+
+ build_file_writer.write_string_attribute(
+ 'test', prebuilt_target_name)
+
+ build_file_writer.write_label_list_attribute(
+ 'tradefed_deps', self._attributes.get('tradefed_deps'))
+
+ build_file_writer.write_string_list_attribute(
+ 'suites', sorted(self._attributes.get('suites', [])))
+
+ build_file_writer.write_string_list_attribute(
+ 'tags', sorted(self._attributes.get('tags', [])))
+
+ build_file_writer.write_label_attribute(
+ 'jdk', self._attributes.get('jdk', None))
writer.write_line(')')
+def _read_robolectric_jdk_path(test_xml_config_template: Path) -> Path:
+ if not test_xml_config_template.is_file():
+ return None
+
+ xml_root = ET.parse(test_xml_config_template).getroot()
+ option = xml_root.find(".//option[@name='java-folder']")
+ jdk_path = Path(option.get('value', ''))
+
+ if not jdk_path.is_relative_to('prebuilts/jdk'):
+ raise Exception(f'Failed to get "java-folder" from '
+ f'`{test_xml_config_template}`')
+
+ return jdk_path
+
+
+class BuildFileWriter:
+ """Class for writing BUILD files."""
+
+ def __init__(self, underlying: IndentWriter):
+ self._underlying = underlying
+
+ def write_string_attribute(self, attribute_name, value):
+ if value is None:
+ return
+
+ self._underlying.write_line(f'{attribute_name} = "{value}",')
+
+ def write_label_attribute(self, attribute_name: str, label_name: str):
+ if label_name is None:
+ return
+
+ self._underlying.write_line(f'{attribute_name} = "{label_name}",')
+
+ def write_string_list_attribute(self, attribute_name, values):
+ if not values:
+ return
+
+ self._underlying.write_line(f'{attribute_name} = [')
+
+ with self._underlying.indent():
+ for value in values:
+ self._underlying.write_line(f'"{value}",')
+
+ self._underlying.write_line('],')
+
+ def write_label_list_attribute(
+ self, attribute_name: str, modules: List[ModuleRef]):
+ if not modules:
+ return
+
+ self._underlying.write_line(f'{attribute_name} = [')
+
+ with self._underlying.indent():
+ for label in sorted(set(
+ m.target().qualified_name() for m in modules)):
+ self._underlying.write_line(f'"{label}",')
+
+ self._underlying.write_line('],')
+
+ def write_glob_attribute(self, attribute_name: str, patterns: List[str]):
+ self._underlying.write_line(f'{attribute_name} = glob([')
+
+ with self._underlying.indent():
+ for pattern in patterns:
+ self._underlying.write_line(f'"{pattern}",')
+
+ self._underlying.write_line(']),')
+
+
+@dataclasses.dataclass(frozen=True)
+class Dependencies:
+ static_dep_refs: List[ModuleRef]
+ runtime_dep_refs: List[ModuleRef]
+ data_dep_refs: List[ModuleRef]
+ device_data_dep_refs: List[ModuleRef]
+
+
class SoongPrebuiltTarget(Target):
"""Class for generating a Soong prebuilt target on disk."""
@@ -582,56 +1091,59 @@
configs = [
Config('host', gen.host_out_path),
- Config('device', gen.product_out_path),
+ Config('device', gen.resource_manager.get_product_out_file_path()),
]
- installed_paths = get_module_installed_paths(info, gen.src_root_path)
+ installed_paths = get_module_installed_paths(
+ info, gen.resource_manager.get_src_file_path())
config_files = group_paths_by_config(configs, installed_paths)
# For test modules, we only create symbolic link to the 'testcases'
# directory since the information in module-info is not accurate.
- #
- # Note that we use is_tf_testable_module here instead of ModuleInfo
- # class's is_testable_module method to avoid misadding a shared library
- # as a test module.
- # e.g.
- # 1. test_module A has a shared_lib (or RLIB, DYLIB) of B
- # 2. We create target B as a result of method _resolve_dependencies for
- # target A
- # 3. B matches the conditions of is_testable_module:
- # a. B has installed path.
- # b. has_config return True
- # Note that has_config method also looks for AndroidTest.xml in the
- # dir of B. If there is a test module in the same dir, B could be
- # added as a test module.
- # 4. We create symbolic link to the 'testcases' for non test target B
- # and cause errors.
- if is_tf_testable_module(gen.mod_info, info):
+ if gen.mod_info.is_tradefed_testable_module(info):
config_files = {c: [c.out_path.joinpath(f'testcases/{module_name}')]
for c in config_files.keys()}
+ enabled_features = gen.enabled_features
+
return SoongPrebuiltTarget(
- module_name,
+ info,
package_name,
config_files,
- find_runtime_dep_refs(gen.mod_info, info, configs,
- gen.src_root_path),
- find_data_dep_refs(gen.mod_info, info, configs,
- gen.src_root_path)
+ Dependencies(
+ static_dep_refs = find_static_dep_refs(
+ gen.mod_info, info, configs,
+ gen.resource_manager.get_src_file_path(), enabled_features),
+ runtime_dep_refs = find_runtime_dep_refs(
+ gen.mod_info, info, configs,
+ gen.resource_manager.get_src_file_path(), enabled_features),
+ data_dep_refs = find_data_dep_refs(
+ gen.mod_info, info, configs,
+ gen.resource_manager.get_src_file_path()),
+ device_data_dep_refs = find_device_data_dep_refs(gen, info),
+ ),
+ [
+ c for c in configs if c.name in map(
+ str.lower, info.get(constants.MODULE_SUPPORTED_VARIANTS, []))
+ ],
)
- def __init__(self, name: str, package_name: str,
+ def __init__(self,
+ info: Dict[str, Any],
+ package_name: str,
config_files: Dict[Config, List[Path]],
- runtime_dep_refs: List[ModuleRef],
- data_dep_refs: List[ModuleRef]):
- self._name = name
+ deps: Dependencies,
+ supported_configs: List[Config]):
+ self._target_name = info[constants.MODULE_INFO_ID]
+ self._module_name = info[constants.MODULE_NAME]
self._package_name = package_name
self.config_files = config_files
- self.runtime_dep_refs = runtime_dep_refs
- self.data_dep_refs = data_dep_refs
+ self.deps = deps
+ self.suites = info.get(constants.MODULE_COMPATIBILITY_SUITES, [])
+ self._supported_configs = supported_configs
def name(self) -> str:
- return self._name
+ return self._target_name
def package_name(self) -> str:
return self._package_name
@@ -641,44 +1153,52 @@
Import('//bazel/rules:soong_prebuilt.bzl', self._rule_name()),
}
- @functools.lru_cache(maxsize=None)
+ @functools.lru_cache(maxsize=128)
def supported_configs(self) -> Set[Config]:
+ # We deduce the supported configs from the installed paths since the
+ # build exports incorrect metadata for some module types such as
+ # Robolectric. The information exported from the build is only used if
+ # the module does not have any installed paths.
+ # TODO(b/232929584): Remove this once all modules correctly export the
+ # supported variants.
supported_configs = set(self.config_files.keys())
-
if supported_configs:
return supported_configs
- # If a target has no installed files, then it supports the same
- # configurations as its dependencies. This is required because some
- # build modules are just intermediate targets that don't produce any
- # output but that still have transitive dependencies.
- for ref in self.runtime_dep_refs:
- supported_configs.update(ref.target().supported_configs())
-
- return supported_configs
+ return self._supported_configs
def dependencies(self) -> List[ModuleRef]:
- all_deps = set(self.runtime_dep_refs)
- all_deps.update(self.data_dep_refs)
+ all_deps = set(self.deps.runtime_dep_refs)
+ all_deps.update(self.deps.data_dep_refs)
+ all_deps.update(self.deps.device_data_dep_refs)
+ all_deps.update(self.deps.static_dep_refs)
return list(all_deps)
def write_to_build_file(self, f: IO):
writer = IndentWriter(f)
+ build_file_writer = BuildFileWriter(writer)
writer.write_line(f'{self._rule_name()}(')
with writer.indent():
- writer.write_line(f'name = "{self._name}",')
- writer.write_line(f'module_name = "{self._name}",')
+ writer.write_line(f'name = "{self._target_name}",')
+ writer.write_line(f'module_name = "{self._module_name}",')
self._write_files_attribute(writer)
+ self._write_deps_attribute(writer, 'static_deps',
+ self.deps.static_dep_refs)
self._write_deps_attribute(writer, 'runtime_deps',
- self.runtime_dep_refs)
- self._write_deps_attribute(writer, 'data', self.data_dep_refs)
+ self.deps.runtime_dep_refs)
+ self._write_deps_attribute(writer, 'data', self.deps.data_dep_refs)
+
+ build_file_writer.write_label_list_attribute(
+ 'device_data', self.deps.device_data_dep_refs)
+ build_file_writer.write_string_list_attribute(
+ 'suites', sorted(self.suites))
writer.write_line(')')
def create_filesystem_layout(self, package_dir: Path):
- prebuilts_dir = package_dir.joinpath(self._name)
+ prebuilts_dir = package_dir.joinpath(self._target_name)
prebuilts_dir.mkdir()
for config, files in self.config_files.items():
@@ -699,13 +1219,12 @@
if not self.config_files:
return
- name = self._name
-
writer.write('files = ')
write_config_select(
writer,
self.config_files,
- lambda c, _: writer.write(f'glob(["{name}/{c.name}/**/*"])'),
+ lambda c, _: writer.write(
+ f'glob(["{self._target_name}/{c.name}/**/*"])'),
)
writer.write_line(',')
@@ -801,6 +1320,7 @@
info: module_info.Module,
configs: List[Config],
src_root_path: Path,
+ enabled_features: List[Features],
) -> List[ModuleRef]:
"""Return module references for runtime dependencies."""
@@ -815,6 +1335,10 @@
libs = set()
libs.update(info.get(constants.MODULE_SHARED_LIBS, []))
libs.update(info.get(constants.MODULE_RUNTIME_DEPS, []))
+
+ if Features.EXPERIMENTAL_JAVA_RUNTIME_DEPENDENCIES in enabled_features:
+ libs.update(info.get(constants.MODULE_LIBS, []))
+
runtime_dep_refs = _find_module_refs(mod_info, configs, src_root_path, libs)
runtime_library_class = {'RLIB_LIBRARIES', 'DYLIB_LIBRARIES'}
@@ -849,6 +1373,41 @@
info.get(constants.MODULE_DATA_DEPS, []))
+def find_device_data_dep_refs(
+ gen: WorkspaceGenerator,
+ info: module_info.Module,
+) -> List[ModuleRef]:
+ """Return module references for device data dependencies."""
+
+ return _find_module_refs(
+ gen.mod_info,
+ [Config('device', gen.resource_manager.get_product_out_file_path())],
+ gen.resource_manager.get_src_file_path(),
+ info.get(constants.MODULE_TARGET_DEPS, []))
+
+
+def find_static_dep_refs(
+ mod_info: module_info.ModuleInfo,
+ info: module_info.Module,
+ configs: List[Config],
+ src_root_path: Path,
+ enabled_features: List[Features],
+) -> List[ModuleRef]:
+ """Return module references for static libraries."""
+
+ if Features.EXPERIMENTAL_JAVA_RUNTIME_DEPENDENCIES not in enabled_features:
+ return []
+
+ static_libs = set()
+ static_libs.update(info.get(constants.MODULE_STATIC_LIBS, []))
+ static_libs.update(info.get(constants.MODULE_STATIC_DEPS, []))
+
+ return _find_module_refs(mod_info,
+ configs,
+ src_root_path,
+ static_libs)
+
+
def _find_module_refs(
mod_info: module_info.ModuleInfo,
configs: List[Config],
@@ -931,21 +1490,6 @@
writer.write(']')
-def is_tf_testable_module(mod_info: module_info.ModuleInfo,
- info: Dict[str, Any]):
- """Check if the module is a Tradefed runnable test module.
-
- ModuleInfo.is_testable_module() is from ATest's point of view. It only
- checks if a module has installed path and has local config files. This
- way is not reliable since some libraries might match these two conditions
- and be included mistakenly. Robolectric_utils is an example that matched
- these two conditions but not testable. This function make sure the module
- is a TF runnable test module.
- """
- return (mod_info.is_testable_module(info)
- and info.get(constants.MODULE_COMPATIBILITY_SUITES))
-
-
def _decorate_find_method(mod_info, finder_method_func, host, enabled_features):
"""A finder_method decorator to override TestInfo properties."""
@@ -956,6 +1500,17 @@
for tinfo in test_infos:
m_info = mod_info.get_module_info(tinfo.test_name)
+ # TODO(b/262200630): Refactor the duplicated logic in
+ # _decorate_find_method() and _add_test_module_targets() to
+ # determine whether a test should run with Atest Bazel Mode.
+
+ # Only enable modern Robolectric tests since those are the only ones
+ # TF currently supports.
+ if mod_info.is_modern_robolectric_test(m_info):
+ if Features.EXPERIMENTAL_ROBOLECTRIC_TEST in enabled_features:
+ tinfo.test_runner = BazelTestRunner.NAME
+ continue
+
# Only run device-driven tests in Bazel mode when '--host' is not
# specified and the feature is enabled.
if not host and mod_info.is_device_driven_test(m_info):
@@ -964,7 +1519,9 @@
continue
if mod_info.is_suite_in_compatibility_suites(
- 'host-unit-tests', m_info):
+ 'host-unit-tests', m_info) or (
+ Features.EXPERIMENTAL_HOST_DRIVEN_TEST in enabled_features
+ and mod_info.is_host_driven_test(m_info)):
tinfo.test_runner = BazelTestRunner.NAME
return test_infos
return use_bazel_runner
@@ -994,13 +1551,27 @@
finder.finder_info)
+class RunCommandError(subprocess.CalledProcessError):
+ """CalledProcessError but including debug information when it fails."""
+ def __str__(self):
+ return f'{super().__str__()}\n' \
+ f'stdout={self.stdout}\n\n' \
+ f'stderr={self.stderr}'
+
+
def default_run_command(args: List[str], cwd: Path) -> str:
- return subprocess.check_output(
+ result = subprocess.run(
args=args,
cwd=cwd,
text=True,
- stderr=subprocess.DEVNULL,
+ capture_output=True,
+ check=False,
)
+ if result.returncode:
+ # Provide a more detailed log message including stdout and stderr.
+ raise RunCommandError(result.returncode, result.args, result.stdout,
+ result.stderr)
+ return result.stdout
@dataclasses.dataclass
@@ -1021,7 +1592,6 @@
results_dir,
mod_info: module_info.ModuleInfo,
extra_args: Dict[str, Any]=None,
- test_infos: List[test_info.TestInfo]=None,
src_top: Path=None,
workspace_path: Path=None,
run_command: Callable=default_run_command,
@@ -1030,15 +1600,14 @@
**kwargs):
super().__init__(results_dir, **kwargs)
self.mod_info = mod_info
- self.test_infos = test_infos
self.src_top = src_top or Path(os.environ.get(
constants.ANDROID_BUILD_TOP))
- self.starlark_file = self.src_top.joinpath(
- 'tools/asuite/atest/bazel/format_as_soong_module_name.cquery')
+ self.starlark_file = _get_resource_root().joinpath(
+ 'format_as_soong_module_name.cquery')
- self.bazel_binary = self.src_top.joinpath(
- 'prebuilts/bazel/linux-x86_64/bazel')
self.bazel_workspace = workspace_path or get_bazel_workspace_dir()
+ self.bazel_binary = self.bazel_workspace.joinpath(
+ 'bazel.sh')
self.run_command = run_command
self._extra_args = extra_args or {}
self.build_metadata = build_metadata or get_default_build_metadata()
@@ -1056,27 +1625,51 @@
reporter.register_unsupported_runner(self.NAME)
ret_code = ExitCode.SUCCESS
- run_cmds = self.generate_run_commands(test_infos, extra_args)
+ try:
+ run_cmds = self.generate_run_commands(test_infos, extra_args)
+ except AbortRunException as e:
+ atest_utils.colorful_print(f'Stop running test(s): {e}',
+ constants.RED)
+ return ExitCode.ERROR
+
for run_cmd in run_cmds:
subproc = self.run(run_cmd, output_to_stdout=True)
ret_code |= self.wait_for_subprocess(subproc)
return ret_code
- def _get_bes_publish_args(self):
- args = []
+ def _get_feature_config_or_warn(self, feature, env_var_name):
+ feature_config = self.env.get(env_var_name)
+ if not feature_config:
+ logging.warning(
+ 'Ignoring `%s` because the `%s`'
+ ' environment variable is not set.',
+ # pylint: disable=no-member
+ feature, env_var_name
+ )
+ return feature_config
- if not self.env.get("ATEST_BAZEL_BES_PUBLISH_CONFIG"):
- return args
+ def _get_bes_publish_args(self, feature):
+ bes_publish_config = self._get_feature_config_or_warn(
+ feature, 'ATEST_BAZEL_BES_PUBLISH_CONFIG')
- config = self.env["ATEST_BAZEL_BES_PUBLISH_CONFIG"]
+ if not bes_publish_config:
+ return []
+
branch = self.build_metadata.build_branch
target = self.build_metadata.build_target
- args.append(f'--config={config}')
- args.append(f'--build_metadata=ab_branch={branch}')
- args.append(f'--build_metadata=ab_target={target}')
+ return [
+ f'--config={bes_publish_config}',
+ f'--build_metadata=ab_branch={branch}',
+ f'--build_metadata=ab_target={target}'
+ ]
- return args
+ def _get_remote_args(self, feature):
+ remote_config = self._get_feature_config_or_warn(
+ feature, 'ATEST_BAZEL_REMOTE_CONFIG')
+ if not remote_config:
+ return []
+ return [f'--config={remote_config}']
def host_env_check(self):
"""Check that host env has everything we need.
@@ -1086,25 +1679,51 @@
if that changes.
"""
- def get_test_runner_build_reqs(self) -> Set[str]:
- if not self.test_infos:
+ def get_test_runner_build_reqs(self, test_infos) -> Set[str]:
+ if not test_infos:
return set()
deps_expression = ' + '.join(
- sorted(self.test_info_target_label(i) for i in self.test_infos)
+ sorted(self.test_info_target_label(i) for i in test_infos)
)
- query_args = [
- self.bazel_binary,
- 'cquery',
- f'deps(tests({deps_expression}))',
- '--output=starlark',
- f'--starlark:file={self.starlark_file}',
- ]
+ with tempfile.NamedTemporaryFile() as query_file:
+ with open(query_file.name, 'w', encoding='utf-8') as _query_file:
+ _query_file.write(f'deps(tests({deps_expression}))')
- output = self.run_command(query_args, self.bazel_workspace)
+ query_args = [
+ str(self.bazel_binary),
+ 'cquery',
+ f'--query_file={query_file.name}',
+ '--output=starlark',
+ f'--starlark:file={self.starlark_file}',
+ ]
- return set(filter(bool, map(str.strip, output.splitlines())))
+ output = self.run_command(query_args, self.bazel_workspace)
+
+ targets = set()
+ robolectric_tests = set(filter(
+ self._is_robolectric_test_suite,
+ [test.test_name for test in test_infos]))
+
+ modules_to_variant = _parse_cquery_output(output)
+
+ for module, variants in modules_to_variant.items():
+
+ # Skip specifying the build variant for Robolectric test modules
+ # since they are special. Soong builds them with the `target`
+ # variant although are installed as 'host' modules.
+ if module in robolectric_tests:
+ targets.add(module)
+ continue
+
+ targets.add(_soong_target_for_variants(module, variants))
+
+ return targets
+
+ def _is_robolectric_test_suite(self, module_name: str) -> bool:
+ return self.mod_info.is_robolectric_test_suite(
+ self.mod_info.get_module_info(module_name))
def test_info_target_label(self, test: test_info.TestInfo) -> str:
module_name = test.test_name
@@ -1119,6 +1738,11 @@
return f'//{package_name}:{module_name}_{target_suffix}'
+ def _get_bazel_feature_args(self, feature, extra_args, generator):
+ if feature not in extra_args.get('BAZEL_MODE_FEATURES', []):
+ return []
+ return generator(feature)
+
# pylint: disable=unused-argument
def generate_run_commands(self, test_infos, extra_args, port=None):
"""Generate a list of run commands from TestInfos.
@@ -1140,13 +1764,22 @@
target_patterns = ' '.join(
self.test_info_target_label(i) for i in test_infos)
- bazel_args = self._parse_extra_args(test_infos, extra_args)
+ bazel_args = parse_args(test_infos, extra_args, self.mod_info)
- if Features.EXPERIMENTAL_BES_PUBLISH in extra_args.get(
- 'BAZEL_MODE_FEATURES', []):
- bazel_args.extend(self._get_bes_publish_args())
+ bazel_args.extend(
+ self._get_bazel_feature_args(
+ Features.EXPERIMENTAL_BES_PUBLISH,
+ extra_args,
+ self._get_bes_publish_args))
+ bazel_args.extend(
+ self._get_bazel_feature_args(
+ Features.EXPERIMENTAL_REMOTE,
+ extra_args,
+ self._get_remote_args))
- bazel_args_str = ' '.join(bazel_args)
+ # This is an alternative to shlex.join that doesn't exist in Python
+ # versions < 3.8.
+ bazel_args_str = ' '.join(shlex.quote(arg) for arg in bazel_args)
# Use 'cd' instead of setting the working directory in the subprocess
# call for a working --dry-run command that users can run.
@@ -1156,36 +1789,81 @@
f'test {target_patterns} {bazel_args_str}'
]
- def _parse_extra_args(self, test_infos: List[test_info.TestInfo],
- extra_args: trb.ARGS) -> trb.ARGS:
- args_to_append = []
- # Make a copy of the `extra_args` dict to avoid modifying it for other
- # Atest runners.
- extra_args_copy = extra_args.copy()
- # Map args to their native Bazel counterparts.
- for arg in _SUPPORTED_BAZEL_ARGS:
- if arg not in extra_args_copy:
- continue
- args_to_append.extend(
- self.map_to_bazel_args(arg, extra_args_copy[arg]))
- # Remove the argument since we already mapped it to a Bazel option
- # and no longer need it mapped to a Tradefed argument below.
- del extra_args_copy[arg]
+def parse_args(
+ test_infos: List[test_info.TestInfo],
+ extra_args: Dict[str, Any],
+ mod_info: module_info.ModuleInfo) -> Dict[str, Any]:
+ """Parse commandline args and passes supported args to bazel.
- # TODO(b/215461642): Store the extra_args in the top-level object so
- # that we don't have to re-parse the extra args to get BAZEL_ARG again.
- tf_args, _ = tfr.extra_args_to_tf_args(
- self.mod_info, test_infos, extra_args_copy)
+ Args:
+ test_infos: A set of TestInfo instances.
+ extra_args: A Dict of extra args to append.
+ mod_info: A ModuleInfo object.
- # Add ATest include filter argument to allow testcase filtering.
- tf_args.extend(tfr.get_include_filter(test_infos))
+ Returns:
+ A list of args to append to the run command.
+ """
- args_to_append.extend([f'--test_arg={i}' for i in tf_args])
+ args_to_append = []
+ # Make a copy of the `extra_args` dict to avoid modifying it for other
+ # Atest runners.
+ extra_args_copy = extra_args.copy()
- return args_to_append
+ # Remove the `--host` flag since we already pass that in the rule's
+ # implementation.
+ extra_args_copy.pop(constants.HOST, None)
- @staticmethod
- def map_to_bazel_args(arg: str, arg_value: Any) -> List[str]:
- return _SUPPORTED_BAZEL_ARGS[arg](
- arg_value) if arg in _SUPPORTED_BAZEL_ARGS else []
+ # Map args to their native Bazel counterparts.
+ for arg in _SUPPORTED_BAZEL_ARGS:
+ if arg not in extra_args_copy:
+ continue
+ args_to_append.extend(
+ _map_to_bazel_args(arg, extra_args_copy[arg]))
+ # Remove the argument since we already mapped it to a Bazel option
+ # and no longer need it mapped to a Tradefed argument below.
+ del extra_args_copy[arg]
+
+ # TODO(b/215461642): Store the extra_args in the top-level object so
+ # that we don't have to re-parse the extra args to get BAZEL_ARG again.
+ tf_args, _ = tfr.extra_args_to_tf_args(
+ mod_info, test_infos, extra_args_copy)
+
+ # Add ATest include filter argument to allow testcase filtering.
+ tf_args.extend(tfr.get_include_filter(test_infos))
+
+ args_to_append.extend([f'--test_arg={i}' for i in tf_args])
+
+ # Default to --test_output=errors unless specified otherwise
+ if not any(arg.startswith('--test_output=') for arg in args_to_append):
+ args_to_append.append('--test_output=errors')
+
+ return args_to_append
+
+def _map_to_bazel_args(arg: str, arg_value: Any) -> List[str]:
+ return _SUPPORTED_BAZEL_ARGS[arg](
+ arg_value) if arg in _SUPPORTED_BAZEL_ARGS else []
+
+
+def _parse_cquery_output(output: str) -> Dict[str, Set[str]]:
+ module_to_build_variants = defaultdict(set)
+
+ for line in filter(bool, map(str.strip, output.splitlines())):
+ module_name, build_variant = line.split(':')
+ module_to_build_variants[module_name].add(build_variant)
+
+ return module_to_build_variants
+
+
+def _soong_target_for_variants(
+ module_name: str,
+ build_variants: Set[str]) -> str:
+
+ if not build_variants:
+ raise Exception(f'Missing the build variants for module {module_name} '
+ f'in cquery output!')
+
+ if len(build_variants) > 1:
+ return module_name
+
+ return f'{module_name}-{_CONFIG_TO_VARIANT[list(build_variants)[0]]}'
diff --git a/atest/bazel_mode_unittest.py b/atest/bazel_mode_unittest.py
index a62b5b1..b61794d 100755
--- a/atest/bazel_mode_unittest.py
+++ b/atest/bazel_mode_unittest.py
@@ -23,9 +23,11 @@
import re
import shlex
import shutil
+import subprocess
import tempfile
import unittest
+from io import StringIO
from pathlib import Path
from typing import List
from unittest import mock
@@ -33,12 +35,12 @@
# pylint: disable=import-error
from pyfakefs import fake_filesystem_unittest
-import bazel_mode
-import constants
-import module_info
+from atest import bazel_mode
+from atest import constants
+from atest import module_info
-from test_finders import example_finder, test_finder_base, test_info
-from test_runners import atest_tf_test_runner
+from atest.test_finders import example_finder, test_finder_base, test_info
+from atest.test_runners import atest_tf_test_runner
ATEST_TF_RUNNER = atest_tf_test_runner.AtestTradefedTestRunner.NAME
@@ -53,47 +55,76 @@
def setUp(self):
self.setUpPyfakefs()
- self.src_root_path = Path('/src')
- self.out_dir_path = self.src_root_path.joinpath('out')
+ self._src_root_path = Path('/src')
+ self.out_dir_path = self._src_root_path.joinpath('out')
self.out_dir_path.mkdir(parents=True)
self.product_out_path = self.out_dir_path.joinpath('product')
self.host_out_path = self.out_dir_path.joinpath('host')
self.workspace_out_path = self.out_dir_path.joinpath('workspace')
- def create_workspace_generator(self, modules=None, enabled_features=None):
+ self._resource_root = self._src_root_path.joinpath(
+ 'tools/asuite/atest/bazel')
+
+ self.workspace_md5_checksum = self.workspace_out_path.joinpath(
+ 'workspace_md5_checksum')
+ self.resource_manager = bazel_mode.ResourceManager(
+ src_root_path=self._src_root_path,
+ resource_root_path=self._resource_root,
+ product_out_path=self.product_out_path,
+ md5_checksum_file_path = self.workspace_md5_checksum
+ )
+
+ bazel_rules = self.resource_manager.get_resource_file_path('rules')
+ bazel_rules.mkdir(parents=True)
+ self.rules_bzl_file = bazel_rules.joinpath('rules.bzl')
+ self.rules_bzl_file.touch()
+
+ bazel_configs = self.resource_manager.get_resource_file_path('configs')
+ bazel_configs.mkdir(parents=True)
+ bazel_configs.joinpath('configs.bzl').touch()
+
+ self.resource_manager.get_resource_file_path('WORKSPACE').touch()
+ self.resource_manager.get_resource_file_path('bazelrc').touch()
+
+ def create_workspace_generator(
+ self,
+ modules=None,
+ enabled_features=None,
+ jdk_path=None,
+ ):
mod_info = self.create_module_info(modules)
generator = bazel_mode.WorkspaceGenerator(
- self.src_root_path,
- self.workspace_out_path,
- self.product_out_path,
- self.host_out_path,
- self.out_dir_path,
- mod_info,
+ resource_manager=self.resource_manager,
+ workspace_out_path=self.workspace_out_path,
+ host_out_path=self.host_out_path,
+ build_out_dir=self.out_dir_path,
+ mod_info=mod_info,
+ jdk_path=jdk_path,
enabled_features=enabled_features,
)
return generator
- def run_generator(self, mod_info, enabled_features=None):
+ def run_generator(self, mod_info, enabled_features=None, jdk_path=None):
generator = bazel_mode.WorkspaceGenerator(
- self.src_root_path,
- self.workspace_out_path,
- self.product_out_path,
- self.host_out_path,
- self.out_dir_path,
- mod_info,
+ resource_manager=self.resource_manager,
+ workspace_out_path=self.workspace_out_path,
+ host_out_path=self.host_out_path,
+ build_out_dir=self.out_dir_path,
+ mod_info=mod_info,
+ jdk_path=jdk_path,
enabled_features=enabled_features,
)
generator.generate()
# pylint: disable=protected-access
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP: '/'})
def create_empty_module_info(self):
- fake_temp_file_name = next(tempfile._get_candidate_names())
- self.fs.create_file(fake_temp_file_name, contents='{}')
- return module_info.ModuleInfo(module_file=fake_temp_file_name)
+ fake_temp_file = self.product_out_path.joinpath(
+ next(tempfile._get_candidate_names()))
+ self.fs.create_file(fake_temp_file, contents='{}')
+ return module_info.ModuleInfo(module_file=fake_temp_file)
def create_module_info(self, modules=None):
mod_info = self.create_empty_module_info()
@@ -105,10 +136,17 @@
for module_name in prerequisites:
info = host_module(name=module_name, path='prebuilts')
+ info[constants.MODULE_INFO_ID] = module_name
mod_info.name_to_module_info[module_name] = info
for m in modules:
+ m[constants.MODULE_INFO_ID] = m['module_name']
mod_info.name_to_module_info[m['module_name']] = m
+ for path in m['path']:
+ if path in mod_info.path_to_module_info:
+ mod_info.path_to_module_info[path].append(m)
+ else:
+ mod_info.path_to_module_info[path] = [m]
return mod_info
@@ -156,6 +194,10 @@
path = self.workspace_out_path.joinpath(package, relative_path)
self.assertTrue(path.exists())
+ def assertDirInWorkspace(self, relative_path, package=''):
+ path = self.workspace_out_path.joinpath(package, relative_path)
+ self.assertTrue(path.is_dir())
+
def assertFileNotInWorkspace(self, relative_path, package=''):
path = self.workspace_out_path.joinpath(package, relative_path)
self.assertFalse(path.exists())
@@ -275,13 +317,36 @@
workspace_generator.generate()
workspace_stat = workspace_generator.workspace_out_path.stat()
- workspace_generator.mod_info.mod_info_file_path.unlink()
+ self.workspace_md5_checksum.unlink()
workspace_generator = self.create_workspace_generator()
workspace_generator.generate()
-
new_workspace_stat = workspace_generator.workspace_out_path.stat()
+
self.assertNotEqual(workspace_stat, new_workspace_stat)
+ def test_regenerate_workspace_when_md5_file_is_broken(self):
+ workspace_generator = self.create_workspace_generator()
+ workspace_generator.generate()
+ workspace_stat = workspace_generator.workspace_out_path.stat()
+
+ self.workspace_md5_checksum.write_text('broken checksum file')
+ workspace_generator = self.create_workspace_generator()
+ workspace_generator.generate()
+ new_workspace_stat = workspace_generator.workspace_out_path.stat()
+
+ self.assertNotEqual(workspace_stat, new_workspace_stat)
+
+ def test_not_regenerate_workspace_when_workspace_files_unaffected(self):
+ workspace_generator = self.create_workspace_generator()
+ workspace_generator.generate()
+ workspace_stat = workspace_generator.workspace_out_path.stat()
+
+ workspace_generator = self.create_workspace_generator()
+ workspace_generator.generate()
+ new_workspace_stat = workspace_generator.workspace_out_path.stat()
+
+ self.assertEqual(workspace_stat, new_workspace_stat)
+
def test_scrub_old_workspace_when_regenerating(self):
workspace_generator = self.create_workspace_generator()
workspace_generator.generate()
@@ -289,56 +354,59 @@
some_file.touch()
self.assertTrue(some_file.is_file())
- # Remove the md5 file to regenerate the workspace.
+ # Remove the module_info file to regenerate the workspace.
workspace_generator.mod_info.mod_info_file_path.unlink()
workspace_generator = self.create_workspace_generator()
workspace_generator.generate()
self.assertFalse(some_file.is_file())
- def test_generate_workspace_file(self):
+ def test_regenerate_workspace_when_resource_file_changed(self):
+ workspace_generator = self.create_workspace_generator()
+ workspace_generator.generate()
+ workspace_stat = workspace_generator.workspace_out_path.stat()
+
+ with open(self.rules_bzl_file, 'a', encoding='utf8') as f:
+ f.write(' ')
+ workspace_generator = self.create_workspace_generator()
+ workspace_generator.generate()
+
+ new_workspace_stat = workspace_generator.workspace_out_path.stat()
+ self.assertNotEqual(workspace_stat, new_workspace_stat)
+
+ def test_not_regenerate_workspace_when_resource_file_only_touched(self):
+ workspace_generator = self.create_workspace_generator()
+ workspace_generator.generate()
+ workspace_stat = workspace_generator.workspace_out_path.stat()
+
+ self.rules_bzl_file.touch()
+ workspace_generator = self.create_workspace_generator()
+ workspace_generator.generate()
+
+ new_workspace_stat = workspace_generator.workspace_out_path.stat()
+ self.assertEqual(workspace_stat, new_workspace_stat)
+
+ def test_copy_workspace_resources(self):
gen = self.create_workspace_generator()
- workspace_path = gen.workspace_out_path.joinpath('WORKSPACE')
gen.generate()
- self.assertSymlinkTo(
- workspace_path,
- self.src_root_path.joinpath('tools/asuite/atest/bazel/WORKSPACE')
- )
+ self.assertFileInWorkspace('WORKSPACE')
+ self.assertFileInWorkspace('.bazelrc')
+ self.assertDirInWorkspace('bazel/rules')
+ self.assertDirInWorkspace('bazel/configs')
- def test_generate_bazelrc_file(self):
- gen = self.create_workspace_generator()
- bazelrc_path = gen.workspace_out_path.joinpath('.bazelrc')
+ def test_generated_target_name(self):
+ mod_info = self.create_module_info(modules=[
+ host_unit_test_module(name='hello_world_test')
+ ])
+ info = mod_info.get_module_info('hello_world_test')
+ info[constants.MODULE_INFO_ID] = 'new_hello_world_test'
- gen.generate()
+ self.run_generator(mod_info)
- self.assertSymlinkTo(
- bazelrc_path,
- self.src_root_path.joinpath('tools/asuite/atest/bazel/bazelrc')
- )
-
- def test_generate_rules_dir(self):
- gen = self.create_workspace_generator()
- rules_dir_path = gen.workspace_out_path.joinpath('bazel/rules')
-
- gen.generate()
-
- self.assertSymlinkTo(
- rules_dir_path,
- self.src_root_path.joinpath('tools/asuite/atest/bazel/rules')
- )
-
- def test_generate_configs_dir(self):
- gen = self.create_workspace_generator()
- configs_dir_path = gen.workspace_out_path.joinpath('bazel/configs')
-
- gen.generate()
-
- self.assertSymlinkTo(
- configs_dir_path,
- self.src_root_path.joinpath('tools/asuite/atest/bazel/configs')
- )
+ self.assertTargetInWorkspace('new_hello_world_test')
+ self.assertTargetNotInWorkspace('hello_world_test')
def test_generate_host_unit_test_module_target(self):
mod_info = self.create_module_info(modules=[
@@ -377,7 +445,7 @@
self.assertFileInWorkspace('constants.bzl')
-class MultiConfigTestModuleTestTargetGenerationTest(GenerationTestFixture):
+class MultiConfigUnitTestModuleTestTargetGenerationTest(GenerationTestFixture):
"""Tests for test target generation of test modules with multi-configs."""
def test_generate_test_rule_imports(self):
@@ -391,7 +459,7 @@
self.assertInBuildFile(
'load("//bazel/rules:tradefed_test.bzl",'
- ' "tradefed_device_test", "tradefed_deviceless_test")\n',
+ ' "tradefed_device_driven_test", "tradefed_deviceless_test")\n',
package='example/tests',
)
@@ -451,12 +519,136 @@
self.assertInBuildFile(
'load("//bazel/rules:tradefed_test.bzl",'
- ' "tradefed_device_test")\n',
+ ' "tradefed_device_driven_test")\n',
package='example/tests',
)
self.assertTargetInWorkspace('hello_world_test_device',
package='example/tests')
+ def test_generate_target_with_suites(self):
+ mod_info = self.create_module_info(modules=[
+ device_test_module(
+ name='hello_world_test',
+ path='example/tests',
+ compatibility_suites=['cts', 'mts']),
+ ])
+
+ self.run_generator(mod_info, enabled_features=set([
+ bazel_mode.Features.EXPERIMENTAL_DEVICE_DRIVEN_TEST]))
+
+ self.assertInBuildFile(
+ ' suites = [\n'
+ ' "cts",\n'
+ ' "mts",\n'
+ ' ],\n',
+ package='example/tests',
+ )
+
+ def test_generate_target_with_host_dependencies(self):
+ mod_info = self.create_module_info(modules=[
+ device_test_module(
+ name='hello_world_test',
+ path='example/tests',
+ host_dependencies=['vts_dep', 'cts_dep']),
+ host_module(name='vts_dep'),
+ host_module(name='cts_dep'),
+ ])
+
+ self.run_generator(mod_info, enabled_features=set([
+ bazel_mode.Features.EXPERIMENTAL_DEVICE_DRIVEN_TEST]))
+
+ self.assertInBuildFile(
+ ' tradefed_deps = [\n'
+ ' "//:cts_dep",\n'
+ ' "//:vts_dep",\n'
+ ' ],\n',
+ package='example/tests',
+ )
+
+ def test_generate_target_with_device_dependencies(self):
+ mod_info = self.create_module_info(modules=[
+ host_test_module(
+ name='hello_world_test',
+ path='example/tests',
+ target_dependencies=['helper_app']),
+ device_module(name='helper_app'),
+ ])
+
+ self.run_generator(mod_info, enabled_features=set([
+ bazel_mode.Features.EXPERIMENTAL_HOST_DRIVEN_TEST]))
+
+ self.assertInBuildFile(
+ ' device_data = [\n'
+ ' "//:helper_app",\n'
+ ' ],\n',
+ package='example/tests',
+ )
+
+ def test_generate_target_with_tags(self):
+ mod_info = self.create_module_info(modules=[
+ device_test_module(
+ name='hello_world_test',
+ path='example/tests',
+ test_options_tags=['no-remote']),
+ ])
+
+ self.run_generator(mod_info, enabled_features=set([
+ bazel_mode.Features.EXPERIMENTAL_DEVICE_DRIVEN_TEST]))
+
+ self.assertInBuildFile(
+ ' tags = [\n'
+ ' "no-remote",\n'
+ ' ],\n',
+ package='example/tests',
+ )
+
+ def test_generate_host_driven_test_target(self):
+ mod_info = self.create_module_info(modules=[
+ host_test_module(
+ name='hello_world_test', path='example/tests'),
+ ])
+
+ self.run_generator(mod_info, enabled_features=set([
+ bazel_mode.Features.EXPERIMENTAL_HOST_DRIVEN_TEST]))
+
+ self.assertInBuildFile(
+ 'tradefed_host_driven_device_test(', package='example/tests')
+
+ def test_generate_multi_config_device_test_target(self):
+ mod_info = self.create_module_info(modules=[
+ multi_config(test_module(
+ name='hello_world_test', path='example/tests')),
+ ])
+
+ self.run_generator(mod_info, enabled_features=set([
+ bazel_mode.Features.EXPERIMENTAL_HOST_DRIVEN_TEST,
+ bazel_mode.Features.EXPERIMENTAL_DEVICE_DRIVEN_TEST]))
+
+ self.assertInBuildFile(
+ 'load("//bazel/rules:tradefed_test.bzl", '
+ '"tradefed_device_driven_test", '
+ '"tradefed_host_driven_device_test")\n',
+ package='example/tests',
+ )
+ self.assertTargetInWorkspace('hello_world_test_device',
+ package='example/tests')
+ self.assertTargetInWorkspace('hello_world_test_host',
+ package='example/tests')
+
+ def test_not_generate_host_driven_test_target_when_feature_disabled(self):
+ mod_info = self.create_module_info(modules=[
+ multi_config(test_module(
+ name='hello_world_test', path='example/tests')),
+ ])
+
+ self.run_generator(mod_info, enabled_features=set([
+ bazel_mode.Features.EXPERIMENTAL_DEVICE_DRIVEN_TEST]))
+
+ self.assertTargetInWorkspace('hello_world_test_device',
+ package='example/tests')
+ self.assertTargetNotInWorkspace('hello_world_test_host',
+ package='example/tests')
+
def test_raise_when_prerequisite_not_in_module_info(self):
mod_info = self.create_module_info(modules=[
device_test_module(),
@@ -496,11 +688,29 @@
self.assertInBuildFile(
'tradefed_deviceless_test(\n'
' name = "hello_world_test_host",\n'
+ ' module_name = "hello_world_test",\n'
' test = "//example/tests:hello_world_test",\n'
')',
package='example/tests',
)
+ def test_generate_target_with_tags(self):
+ mod_info = self.create_module_info(modules=[
+ host_unit_test_module(
+ name='hello_world_test',
+ path='example/tests',
+ test_options_tags=['no-remote']),
+ ])
+
+ self.run_generator(mod_info)
+
+ self.assertInBuildFile(
+ ' tags = [\n'
+ ' "no-remote",\n'
+ ' ],\n',
+ package='example/tests',
+ )
+
def test_generate_test_module_prebuilt(self):
mod_info = self.create_module_info(modules=[
host_unit_test_module(name='hello_world_test'),
@@ -543,6 +753,152 @@
self.assertIn('adb', str(context.warnings[0].message))
+
+class RobolectricTestModuleTestTargetGenerationTest(GenerationTestFixture):
+ """Tests for robolectric test module test target generation."""
+
+ def setUp(self):
+ super().setUp()
+ self.robolectric_template_path = self.resource_manager.\
+ get_resource_file_path(bazel_mode.ROBOLECTRIC_CONFIG, True)
+ self.fs.create_file(self.robolectric_template_path, contents='')
+ # ResourceManager only calculates md5 when registering files. So, it is
+ # necessary to call get_resource_file_path() again after writing files.
+ self.resource_manager.get_resource_file_path(
+ bazel_mode.ROBOLECTRIC_CONFIG, True)
+
+ def test_generate_robolectric_test_target(self):
+ module_name = 'hello_world_test'
+ mod_info = self.create_module_info(modules=[
+ robolectric_test_module(
+ name=f'{module_name}',
+ compatibility_suites='robolectric-tests'),
+ ])
+
+ self.run_generator(mod_info, enabled_features=set([
+ bazel_mode.Features.EXPERIMENTAL_ROBOLECTRIC_TEST]))
+
+ self.assertInBuildFile(
+ 'load("//bazel/rules:tradefed_test.bzl",'
+ ' "tradefed_robolectric_test")\n',
+ )
+ self.assertTargetInWorkspace(f'{module_name}_host')
+
+ def test_not_generate_when_feature_disabled(self):
+ module_name = 'hello_world_test'
+ mod_info = self.create_module_info(modules=[
+ robolectric_test_module(
+ name=f'{module_name}',
+ compatibility_suites='robolectric-tests'),
+ ])
+
+ self.run_generator(mod_info)
+
+ self.assertTargetNotInWorkspace(f'{module_name}_host')
+
+ def test_not_generate_for_legacy_robolectric_test_type(self):
+ module_name = 'hello_world_test'
+ module_path = 'example/tests'
+ mod_info = self.create_module_info(modules=[
+ robolectric_test_module(
+ name=f'{module_name}', path=module_path),
+ ])
+
+ self.run_generator(mod_info, enabled_features=set([
+ bazel_mode.Features.EXPERIMENTAL_ROBOLECTRIC_TEST]))
+
+ self.assertFileNotInWorkspace('BUILD.bazel', package=f'{module_path}')
+
+ def test_generate_jdk_target(self):
+ gen = self.create_workspace_generator(jdk_path=Path('jdk_src_root'))
+
+ gen.generate()
+
+ self.assertInBuildFile(
+ 'filegroup(\n'
+ f' name = "{bazel_mode.JDK_NAME}",\n'
+ ' srcs = glob([\n'
+ f' "{bazel_mode.JDK_NAME}_files/**",\n',
+ package=f'{bazel_mode.JDK_PACKAGE_NAME}'
+ )
+
+ def test_not_generate_jdk_target_when_no_jdk_path(self):
+ gen = self.create_workspace_generator(jdk_path=None)
+
+ gen.generate()
+
+ self.assertFileNotInWorkspace(
+ 'BUILD.bazel', package=f'{bazel_mode.JDK_PACKAGE_NAME}')
+
+ def test_create_symlinks_to_jdk(self):
+ jdk_path = Path('jdk_path')
+ gen = self.create_workspace_generator(jdk_path=jdk_path)
+
+ gen.generate()
+
+ self.assertSymlinkTo(
+ self.workspace_out_path.joinpath(
+ f'{bazel_mode.JDK_PACKAGE_NAME}/{bazel_mode.JDK_NAME}_files'),
+ self.resource_manager.get_src_file_path(f'{jdk_path}'))
+
+ def test_generate_android_all_target(self):
+ gen = self.create_workspace_generator(jdk_path=Path('jdk_src_root'))
+
+ gen.generate()
+
+ self.assertInBuildFile(
+ 'filegroup(\n'
+ ' name = "android-all",\n'
+ ' srcs = glob([\n'
+ ' "android-all_files/**",\n',
+ package='android-all'
+ )
+
+ def test_not_generate_android_all_target_when_no_jdk_path(self):
+ gen = self.create_workspace_generator(jdk_path=None)
+
+ gen.generate()
+
+ self.assertFileNotInWorkspace(
+ 'BUILD.bazel', package='android-all')
+
+ def test_create_symlinks_to_android_all(self):
+ module_name = 'android-all'
+ gen = self.create_workspace_generator(jdk_path=Path('jdk_src_root'))
+
+ gen.generate()
+
+ self.assertSymlinkTo(
+ self.workspace_out_path.joinpath(
+ f'{module_name}/{module_name}_files'),
+ self.host_out_path.joinpath(f'testcases/{module_name}'))
+
+ def test_regenerate_workspace_when_robolectric_template_changed(self):
+ workspace_generator = self.create_workspace_generator()
+ workspace_generator.generate()
+ workspace_stat = workspace_generator.workspace_out_path.stat()
+
+ with open(self.robolectric_template_path, 'a', encoding='utf8') as f:
+ f.write(' ')
+ workspace_generator = self.create_workspace_generator()
+ workspace_generator.generate()
+
+ new_workspace_stat = workspace_generator.workspace_out_path.stat()
+ self.assertNotEqual(workspace_stat, new_workspace_stat)
+
+ def test_not_regenerate_workspace_when_robolectric_template_touched(self):
+ workspace_generator = self.create_workspace_generator()
+ workspace_generator.generate()
+ workspace_stat = workspace_generator.workspace_out_path.stat()
+
+ self.robolectric_template_path.touch()
+ workspace_generator = self.create_workspace_generator()
+ workspace_generator.generate()
+
+ new_workspace_stat = workspace_generator.workspace_out_path.stat()
+ self.assertEqual(workspace_stat, new_workspace_stat)
+
+
class ModulePrebuiltTargetGenerationTest(GenerationTestFixture):
"""Tests for module prebuilt target generation."""
@@ -572,6 +928,9 @@
' "//bazel/rules:device": glob(["libhello/device/**/*"]),\n'
' "//bazel/rules:host": glob(["libhello/host/**/*"]),\n'
' }),\n'
+ ' suites = [\n'
+ ' "host-unit-tests",\n'
+ ' ],\n'
')\n'
)
@@ -821,18 +1180,37 @@
def test_generate_target_for_rlib_dependency(self):
mod_info = self.create_module_info(modules=[
- supported_test_module(dependencies=['libhello']),
- rlib(module(name='libhello'))
+ multi_config(host_unit_suite(module(
+ name='hello_world_test',
+ dependencies=['libhost', 'libdevice']))),
+ rlib(module(name='libhost', supported_variants=['HOST'])),
+ rlib(module(name='libdevice', supported_variants=['DEVICE'])),
])
self.run_generator(mod_info)
self.assertInBuildFile(
'soong_uninstalled_prebuilt(\n'
- ' name = "libhello",\n'
- ' module_name = "libhello",\n'
+ ' name = "libhost",\n'
+ ' module_name = "libhost",\n'
')\n'
)
+ self.assertInBuildFile(
+ 'soong_uninstalled_prebuilt(\n'
+ ' name = "libdevice",\n'
+ ' module_name = "libdevice",\n'
+ ')\n'
+ )
+ self.assertInBuildFile(
+ ' runtime_deps = select({\n'
+ ' "//bazel/rules:device": [\n'
+ ' "//:libdevice",\n'
+ ' ],\n'
+ ' "//bazel/rules:host": [\n'
+ ' "//:libhost",\n'
+ ' ],\n'
+ ' }),\n'
+ )
def test_generate_target_for_rlib_dylib_dependency(self):
mod_info = self.create_module_info(modules=[
@@ -935,7 +1313,6 @@
host_module(
name='libhello',
installed=[str(host_file)],
- auto_test_config=['true']
)
])
package_path = self.workspace_out_path
@@ -1026,7 +1403,6 @@
self.assertTargetNotInWorkspace('libdata')
-@mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP: '/'})
def create_empty_module_info():
with fake_filesystem_unittest.Patcher() as patcher:
# pylint: disable=protected-access
@@ -1064,6 +1440,11 @@
return device_only_config(test_module(**kwargs))
+def robolectric_test_module(**kwargs):
+ kwargs.setdefault('name', 'hello_world_test')
+ return host_only_config(robolectric(test_module(**kwargs)))
+
+
def host_module(**kwargs):
m = module(**kwargs)
@@ -1096,7 +1477,10 @@
return test(module(**kwargs))
+# TODO(b/274822450): Using a builder pattern to reduce the number of parameters
+# instead of disabling the warning.
# pylint: disable=too-many-arguments
+# pylint: disable=too-many-locals
def module(
name=None,
path=None,
@@ -1108,13 +1492,18 @@
runtime_dependencies=None,
data=None,
data_dependencies=None,
+ compatibility_suites=None,
+ host_dependencies=None,
+ target_dependencies=None,
+ test_options_tags=None,
+ supported_variants=None,
):
name = name or 'libhello'
m = {}
m['module_name'] = name
- m['class'] = classes
+ m['class'] = classes or ['']
m['path'] = [path or '']
m['installed'] = installed or []
m['is_unit_test'] = 'false'
@@ -1124,6 +1513,11 @@
m['dependencies'] = dependencies or []
m['data'] = data or []
m['data_dependencies'] = data_dependencies or []
+ m['compatibility_suites'] = compatibility_suites or []
+ m['host_dependencies'] = host_dependencies or []
+ m['target_dependencies'] = target_dependencies or []
+ m['test_options_tags'] = test_options_tags or []
+ m['supported_variants'] = supported_variants or []
return m
@@ -1148,6 +1542,11 @@
return info
+def robolectric(info):
+ info['class'] = ['ROBOLECTRIC']
+ return info
+
+
def host_unit_suite(info):
info = test(info)
info.setdefault('compatibility_suites', []).append('host-unit-tests')
@@ -1297,13 +1696,11 @@
class DecorateFinderMethodTest(GenerationTestFixture):
"""Tests for _decorate_find_method()."""
- def setUp(self):
- self.setUpPyfakefs()
-
def test_host_unit_test_with_host_arg_runner_is_overridden(self):
- original_find_method = lambda obj, test_id:(
- self.create_single_test_infos(obj, test_id, test_name=MODULE_NAME,
- runner=ATEST_TF_RUNNER))
+ def original_find_method(obj, test_id):
+ return self.create_single_test_infos(
+ obj, test_id, test_name=MODULE_NAME,
+ runner=ATEST_TF_RUNNER)
mod_info = self.create_module_info(modules=[
host_unit_test_module(name=MODULE_NAME)
])
@@ -1318,9 +1715,10 @@
self.assertEqual(test_infos[0].test_runner, BAZEL_RUNNER)
def test_host_unit_test_without_host_arg_runner_is_overridden(self):
- original_find_method = lambda obj, test_id:(
- self.create_single_test_infos(obj, test_id, test_name=MODULE_NAME,
- runner=ATEST_TF_RUNNER))
+ def original_find_method(obj, test_id):
+ return self.create_single_test_infos(
+ obj, test_id, test_name=MODULE_NAME,
+ runner=ATEST_TF_RUNNER)
mod_info = self.create_module_info(modules=[
host_unit_test_module(name=MODULE_NAME)
])
@@ -1335,9 +1733,10 @@
self.assertEqual(test_infos[0].test_runner, BAZEL_RUNNER)
def test_device_test_with_host_arg_runner_is_preserved(self):
- original_find_method = lambda obj, test_id:(
- self.create_single_test_infos(obj, test_id, test_name=MODULE_NAME,
- runner=ATEST_TF_RUNNER))
+ def original_find_method(obj, test_id):
+ return self.create_single_test_infos(
+ obj, test_id, test_name=MODULE_NAME,
+ runner=ATEST_TF_RUNNER)
mod_info = self.create_module_info(modules=[
device_test_module(name=MODULE_NAME)
])
@@ -1358,9 +1757,10 @@
self.assertEqual(test_infos[0].test_runner, ATEST_TF_RUNNER)
def test_device_test_without_host_arg_runner_is_overridden(self):
- original_find_method = lambda obj, test_id:(
- self.create_single_test_infos(obj, test_id, test_name=MODULE_NAME,
- runner=ATEST_TF_RUNNER))
+ def original_find_method(obj, test_id):
+ return self.create_single_test_infos(
+ obj, test_id, test_name=MODULE_NAME,
+ runner=ATEST_TF_RUNNER)
mod_info = self.create_module_info(modules=[
device_test_module(name=MODULE_NAME)
])
@@ -1381,9 +1781,10 @@
self.assertEqual(test_infos[0].test_runner, BAZEL_RUNNER)
def test_multi_config_test_with_host_arg_runner_is_overridden(self):
- original_find_method = lambda obj, test_id:(
- self.create_single_test_infos(obj, test_id, test_name=MODULE_NAME,
- runner=ATEST_TF_RUNNER))
+ def original_find_method(obj, test_id):
+ return self.create_single_test_infos(
+ obj, test_id, test_name=MODULE_NAME,
+ runner=ATEST_TF_RUNNER)
mod_info = self.create_module_info(modules=[
multi_config(supported_test_module(name=MODULE_NAME))
])
@@ -1404,9 +1805,10 @@
self.assertEqual(test_infos[0].test_runner, BAZEL_RUNNER)
def test_multi_config_test_without_host_arg_runner_is_overridden(self):
- original_find_method = lambda obj, test_id:(
- self.create_single_test_infos(obj, test_id, test_name=MODULE_NAME,
- runner=ATEST_TF_RUNNER))
+ def original_find_method(obj, test_id):
+ return self.create_single_test_infos(
+ obj, test_id, test_name=MODULE_NAME,
+ runner=ATEST_TF_RUNNER)
mod_info = self.create_module_info(modules=[
multi_config(supported_test_module(name=MODULE_NAME))
])
@@ -1426,10 +1828,11 @@
self.assertEqual(len(test_infos), 1)
self.assertEqual(test_infos[0].test_runner, BAZEL_RUNNER)
- def test_host_non_unit_test_with_host_arg_runner_is_preserved(self):
- original_find_method = lambda obj, test_id:(
- self.create_single_test_infos(obj, test_id, test_name=MODULE_NAME,
- runner=ATEST_TF_RUNNER))
+ def test_host_non_unit_test_with_host_arg_runner_is_overridden(self):
+ def original_find_method(obj, test_id):
+ return self.create_single_test_infos(
+ obj, test_id, test_name=MODULE_NAME,
+ runner=ATEST_TF_RUNNER)
mod_info = self.create_module_info(modules=[
host_test_module(name=MODULE_NAME)
])
@@ -1439,7 +1842,7 @@
original_finder,
host=True,
enabled_features=[
- bazel_mode.Features.EXPERIMENTAL_DEVICE_DRIVEN_TEST
+ bazel_mode.Features.EXPERIMENTAL_HOST_DRIVEN_TEST
]
)
@@ -1447,12 +1850,13 @@
new_finder.test_finder_instance, MODULE_NAME)
self.assertEqual(len(test_infos), 1)
- self.assertEqual(test_infos[0].test_runner, ATEST_TF_RUNNER)
+ self.assertEqual(test_infos[0].test_runner, BAZEL_RUNNER)
def test_disable_device_driven_test_feature_runner_is_preserved(self):
- original_find_method = lambda obj, test_id:(
- self.create_single_test_infos(obj, test_id, test_name=MODULE_NAME,
- runner=ATEST_TF_RUNNER))
+ def original_find_method(obj, test_id):
+ return self.create_single_test_infos(
+ obj, test_id, test_name=MODULE_NAME,
+ runner=ATEST_TF_RUNNER)
mod_info = self.create_module_info(modules=[
device_test_module(name=MODULE_NAME)
])
@@ -1466,6 +1870,24 @@
self.assertEqual(len(test_infos), 1)
self.assertEqual(test_infos[0].test_runner, ATEST_TF_RUNNER)
+ def test_disable_host_driven_test_feature_runner_is_preserved(self):
+ def original_find_method(obj, test_id):
+ return self.create_single_test_infos(
+ obj, test_id, test_name=MODULE_NAME,
+ runner=ATEST_TF_RUNNER)
+ mod_info = self.create_module_info(modules=[
+ host_test_module(name=MODULE_NAME)
+ ])
+ original_finder = self.create_finder(mod_info, original_find_method)
+ new_finder = bazel_mode.create_new_finder(
+ mod_info, original_finder, host=True)
+
+ test_infos = new_finder.find_method(
+ new_finder.test_finder_instance, MODULE_NAME)
+
+ self.assertEqual(len(test_infos), 1)
+ self.assertEqual(test_infos[0].test_runner, ATEST_TF_RUNNER)
+
# pylint: disable=unused-argument
def create_single_test_infos(self, obj, test_id, test_name=MODULE_NAME,
runner=ATEST_TF_RUNNER):
@@ -1486,73 +1908,109 @@
modules=[
supported_test_module(name='test1', path='path1'),
],
- test_infos=[],
run_command=run_command,
)
- reqs = runner.get_test_runner_build_reqs()
+ reqs = runner.get_test_runner_build_reqs([])
self.assertFalse(reqs)
def test_query_bazel_test_targets_deps_with_host_arg(self):
- run_command = self.mock_run_command()
+ query_file_contents = StringIO()
+ def get_query_file_content(args: List[str], _) -> str:
+ query_file_contents.write(_get_query_file_content(args))
+ return ''
+
runner = self.create_bazel_test_runner(
modules=[
multi_config(host_unit_test_module(name='test1', path='path1')),
multi_config(host_unit_test_module(name='test2', path='path2')),
+ multi_config(test_module(name='test3', path='path3')),
],
- test_infos = [
- test_info_of('test2'),
- test_info_of('test1'), # Intentionally out of order.
- ],
- run_command=run_command,
+ run_command=get_query_file_content,
host=True,
)
- runner.get_test_runner_build_reqs()
+ runner.get_test_runner_build_reqs([
+ test_info_of('test2'),
+ test_info_of('test1'), # Intentionally out of order.
+ test_info_of('test3'),
+ ])
- call_args = run_command.call_args[0][0]
- self.assertIn(
- 'deps(tests(//path1:test1_host + //path2:test2_host))',
- call_args,
- )
+ self.assertEqual(
+ 'deps(tests(//path1:test1_host + '
+ '//path2:test2_host + '
+ '//path3:test3_host))',
+ query_file_contents.getvalue())
def test_query_bazel_test_targets_deps_without_host_arg(self):
- run_command = self.mock_run_command()
+ query_file_contents = StringIO()
+ def get_query_file_content(args: List[str], _) -> str:
+ query_file_contents.write(_get_query_file_content(args))
+ return ''
+
runner = self.create_bazel_test_runner(
modules=[
multi_config(host_unit_test_module(name='test1', path='path1')),
host_unit_test_module(name='test2', path='path2'),
+ multi_config(test_module(name='test3', path='path3')),
],
- test_infos = [
- test_info_of('test2'),
- test_info_of('test1'),
- ],
- run_command=run_command,
+ run_command=get_query_file_content,
)
- runner.get_test_runner_build_reqs()
+ runner.get_test_runner_build_reqs([
+ test_info_of('test2'),
+ test_info_of('test1'),
+ test_info_of('test3'),
+ ])
- call_args = run_command.call_args[0][0]
- self.assertIn(
- 'deps(tests(//path1:test1_device + //path2:test2_host))',
- call_args,
- )
+ self.assertEqual(
+ 'deps(tests(//path1:test1_device + '
+ '//path2:test2_host + '
+ '//path3:test3_device))',
+ query_file_contents.getvalue())
def test_trim_whitespace_in_bazel_query_output(self):
run_command = self.mock_run_command(
- return_value='\n'.join([' test1 ', 'test2 ', ' ']))
+ return_value='\n'.join([' test1:host ', 'test2:device ', ' ']))
runner = self.create_bazel_test_runner(
modules=[
supported_test_module(name='test1', path='path1'),
],
- test_infos = [test_info_of('test1')],
run_command=run_command,
)
- reqs = runner.get_test_runner_build_reqs()
+ reqs = runner.get_test_runner_build_reqs([test_info_of('test1')])
- self.assertSetEqual({'test1', 'test2'}, reqs)
+ self.assertSetEqual({'test1-host', 'test2-target'}, reqs)
+
+ def test_build_variants_in_bazel_query_output(self):
+ run_command = self.mock_run_command(
+ return_value='\n'.join([
+ 'test1:host',
+ 'test2:host', 'test2:device',
+ 'test3:device',
+ 'test4:host', 'test4:host',
+ ]))
+ runner = self.create_bazel_test_runner(
+ modules=[
+ supported_test_module(name='test1', path='path1'),
+ supported_test_module(name='test2', path='path2'),
+ supported_test_module(name='test3', path='path3'),
+ supported_test_module(name='test4', path='path4'),
+ ],
+ run_command = run_command,
+ )
+
+ reqs = runner.get_test_runner_build_reqs([
+ test_info_of('test1'),
+ test_info_of('test2'),
+ test_info_of('test3'),
+ test_info_of('test4')])
+
+ self.assertSetEqual(
+ {'test1-host', 'test2', 'test3-target', 'test4-host'},
+ reqs)
def test_generate_single_run_command(self):
test_infos = [test_info_of('test1')]
@@ -1563,19 +2021,23 @@
self.assertEqual(1, len(cmd))
def test_generate_run_command_containing_targets_with_host_arg(self):
- test_infos = [test_info_of('test1'), test_info_of('test2')]
+ test_infos = [test_info_of('test1'),
+ test_info_of('test2'),
+ test_info_of('test3')]
runner = self.create_bazel_test_runner(
[
multi_config(host_unit_test_module(name='test1', path='path')),
multi_config(host_unit_test_module(name='test2', path='path')),
+ multi_config(test_module(name='test3', path='path')),
],
- test_infos,
host=True
)
cmd = runner.generate_run_commands(test_infos, {})
- self.assertTokensIn(['//path:test1_host', '//path:test2_host'], cmd[0])
+ self.assertTokensIn(
+ ['//path:test1_host', '//path:test2_host', '//path:test3_host'],
+ cmd[0])
def test_generate_run_command_containing_targets_without_host_arg(self):
test_infos = [test_info_of('test1'), test_info_of('test2')]
@@ -1584,7 +2046,6 @@
multi_config(host_unit_test_module(name='test1', path='path')),
host_unit_test_module(name='test2', path='path'),
],
- test_infos,
)
cmd = runner.generate_run_commands(test_infos, {})
@@ -1624,16 +2085,14 @@
'--test_arg=--world=value',
'--option1=value1'], cmd[0])
- def test_generate_run_command_with_tf_supported_host_arg(self):
+ def test_generate_run_command_with_tf_supported_all_abi_arg(self):
test_infos = [test_info_of('test1')]
runner = self.create_bazel_test_runner_for_tests(test_infos)
- extra_args = {constants.HOST: True}
+ extra_args = {constants.ALL_ABI: True}
cmd = runner.generate_run_commands(test_infos, extra_args)
- self.assertTokensIn(['--test_arg=-n',
- '--test_arg=--prioritize-host-config',
- '--test_arg=--skip-host-arch-check'], cmd[0])
+ self.assertTokensIn(['--test_arg=--all-abi'], cmd[0])
def test_generate_run_command_with_iterations_args(self):
test_infos = [test_info_of('test1')]
@@ -1683,9 +2142,40 @@
'--build_metadata=ab_target=aosp_cf_x86_64_phone-userdebug'
], cmd[0])
+ def test_generate_run_command_with_remote_enabled(self):
+ test_infos = [test_info_of('test1')]
+ extra_args = {
+ constants.BAZEL_MODE_FEATURES: [
+ bazel_mode.Features.EXPERIMENTAL_REMOTE
+ ]
+ }
+ env = {
+ 'ATEST_BAZELRC': '/dir/atest.bazelrc',
+ 'ATEST_BAZEL_REMOTE_CONFIG': 'remote'
+ }
+ runner = self.create_bazel_test_runner_for_tests(
+ test_infos, env=env)
+
+ cmd = runner.generate_run_commands(
+ test_infos,
+ extra_args,
+ )
+
+ self.assertTokensIn([
+ '--config=remote',
+ ], cmd[0])
+
+ def test_generate_run_command_with_verbose_args(self):
+ test_infos = [test_info_of('test1')]
+ runner = self.create_bazel_test_runner_for_tests(test_infos)
+ extra_args = {constants.VERBOSE: True}
+
+ cmd = runner.generate_run_commands(test_infos, extra_args)
+
+ self.assertTokensIn(['--test_output=all'], cmd[0])
+
def create_bazel_test_runner(self,
modules,
- test_infos,
run_command=None,
host=False,
build_metadata=None,
@@ -1693,7 +2183,6 @@
return bazel_mode.BazelTestRunner(
'result_dir',
mod_info=create_module_info(modules),
- test_infos=test_infos,
src_top=Path('/src'),
workspace_path=Path('/src/workspace'),
run_command=run_command or self.mock_run_command(),
@@ -1709,11 +2198,13 @@
return self.create_bazel_test_runner(
modules=[supported_test_module(name=t.test_name, path='path')
for t in test_infos],
- test_infos=test_infos,
build_metadata=build_metadata,
env=env
)
+ def create_completed_process(self, args, returncode, stdout):
+ return subprocess.CompletedProcess(args, returncode, stdout)
+
def mock_run_command(self, **kwargs):
return mock.create_autospec(bazel_mode.default_run_command, **kwargs)
@@ -1759,5 +2250,13 @@
class_name, frozenset(methods) if methods else frozenset())
+def _get_query_file_content(args: List[str]) -> str:
+ for arg in args:
+ if arg.startswith('--query_file='):
+ return Path(arg.split('=')[1]).read_text()
+
+ raise Exception('Query file not found!')
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/atest/bug_detector.py b/atest/bug_detector.py
index b286140..cfd114b 100644
--- a/atest/bug_detector.py
+++ b/atest/bug_detector.py
@@ -21,10 +21,10 @@
import json
import os
-import atest_utils
-import constants
+from atest import atest_utils
+from atest import constants
-from metrics import metrics_utils
+from atest.metrics import metrics_utils
_META_FILE = os.path.join(atest_utils.get_misc_dir(),
'.config', 'asuite', 'atest_history.json')
diff --git a/atest/bug_detector_unittest.py b/atest/bug_detector_unittest.py
index dd9e8f6..7d26f75 100755
--- a/atest/bug_detector_unittest.py
+++ b/atest/bug_detector_unittest.py
@@ -25,9 +25,9 @@
from unittest import mock
-import bug_detector
-import constants
-import unittest_constants as uc
+from atest import bug_detector
+from atest import constants
+from atest import unittest_constants as uc
TEST_DICT = {
'test1': {
diff --git a/atest/cli_translator.py b/atest/cli_translator.py
index a7554c3..c67587d 100644
--- a/atest/cli_translator.py
+++ b/atest/cli_translator.py
@@ -27,20 +27,23 @@
import sys
import time
-from typing import List
+from dataclasses import dataclass
+from pathlib import Path
+from typing import List, Set
-import atest_error
-import atest_utils
-import bazel_mode
-import constants
-import test_finder_handler
-import test_mapping
+from atest import atest_error
+from atest import atest_utils
+from atest import bazel_mode
+from atest import constants
+from atest import test_finder_handler
+from atest import test_mapping
-from atest_enum import DetectType, ExitCode
-from metrics import metrics
-from metrics import metrics_utils
-from test_finders import module_finder
-from test_finders import test_finder_utils
+from atest.atest_enum import DetectType, ExitCode
+from atest.metrics import metrics
+from atest.metrics import metrics_utils
+from atest.test_finders import module_finder
+from atest.test_finders import test_info
+from atest.test_finders import test_finder_utils
FUZZY_FINDER = 'FUZZY'
CACHE_FINDER = 'CACHE'
@@ -50,8 +53,13 @@
_COMMENTS_RE = re.compile(r'(?m)[\s\t]*(#|//).*|(\".*?\")')
_COMMENTS = frozenset(['//', '#'])
+@dataclass
+class TestIdentifier:
+ """Class that stores test and the corresponding mainline modules (if any)."""
+ test_name: str
+ module_names: List[str]
+ binary_names: List[str]
-#pylint: disable=no-self-use
class CLITranslator:
"""
CLITranslator class contains public method translate() and some private
@@ -82,6 +90,7 @@
bazel_mode_features: List of args.bazel_mode_features.
"""
self.mod_info = mod_info
+ self.root_dir = os.getenv(constants.ANDROID_BUILD_TOP, os.sep)
self._bazel_mode = bazel_mode_enabled
self._bazel_mode_features = bazel_mode_features or []
self._host = host
@@ -89,13 +98,14 @@
self.msg = ''
if print_cache_msg:
self.msg = ('(Test info has been cached for speeding up the next '
- 'run, if test info need to be updated, please add -c '
+ 'run, if test info needs to be updated, please add -c '
'to clean the old cache.)')
+ self.fuzzy_search = True
# pylint: disable=too-many-locals
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
- def _find_test_infos(self, test, tm_test_detail):
+ def _find_test_infos(self, test, tm_test_detail) -> Set[test_info.TestInfo]:
"""Return set of TestInfos based on a given test.
Args:
@@ -112,9 +122,16 @@
test_finders = []
test_info_str = ''
find_test_err_msg = None
- test_name, mainline_modules = atest_utils.parse_mainline_modules(test)
- if not self._verified_mainline_modules(test_name, mainline_modules):
+ test_identifier = parse_test_identifier(test)
+ test_name = test_identifier.test_name
+ if not self._verified_mainline_modules(test_identifier):
return test_infos
+ if self.mod_info and test in self.mod_info.roboleaf_tests:
+ # Roboleaf bazel will discover and build dependencies so we can
+ # skip finding dependencies.
+ print(f'Found \'{atest_utils.colorize(test, constants.GREEN)}\''
+ ' as ROBOLEAF_CONVERTED_MODULE')
+ return [self.mod_info.roboleaf_tests[test]]
find_methods = test_finder_handler.get_find_methods_for_test(
self.mod_info, test)
if self._bazel_mode:
@@ -135,34 +152,42 @@
find_test_err_msg = e
if found_test_infos:
finder_info = finder.finder_info
- for test_info in found_test_infos:
+ for t_info in found_test_infos:
test_deps = set()
if self.mod_info:
test_deps = self.mod_info.get_install_module_dependency(
- test_info.test_name)
+ t_info.test_name)
logging.debug('(%s) Test dependencies: %s',
- test_info.test_name, test_deps)
+ t_info.test_name, test_deps)
if tm_test_detail:
- test_info.data[constants.TI_MODULE_ARG] = (
+ t_info.data[constants.TI_MODULE_ARG] = (
tm_test_detail.options)
- test_info.from_test_mapping = True
- test_info.host = tm_test_detail.host
+ t_info.from_test_mapping = True
+ t_info.host = tm_test_detail.host
if finder_info != CACHE_FINDER:
- test_info.test_finder = finder_info
+ t_info.test_finder = finder_info
+ mainline_modules = test_identifier.module_names
if mainline_modules:
- test_info.test_name = test
+ t_info.test_name = test
+ # TODO(b/261607500): Replace usages of raw_test_name
+ # with test_name once we can ensure that it doesn't
+ # break any code that expects Mainline modules in the
+ # string.
+ t_info.raw_test_name = test_name
# TODO: remove below statement when soong can also
# parse TestConfig and inject mainline modules information
# to module-info.
- test_info.mainline_modules = mainline_modules
+ for mod in mainline_modules:
+ t_info.add_mainline_module(mod)
+
# Only add dependencies to build_targets when they are in
# module info
test_deps_in_mod_info = [
test_dep for test_dep in test_deps
if self.mod_info.is_module(test_dep)]
- test_info.build_targets = set(test_info.build_targets)
- test_info.build_targets.update(test_deps_in_mod_info)
- test_infos.add(test_info)
+ for dep in test_deps_in_mod_info:
+ t_info.add_build_target(dep)
+ test_infos.add(t_info)
test_found = True
print("Found '%s' as %s" % (
atest_utils.colorize(test, constants.GREEN),
@@ -173,11 +198,14 @@
test_info_str = ','.join([str(x) for x in found_test_infos])
break
if not test_found:
- f_results = self._fuzzy_search_and_msg(test, find_test_err_msg)
- if f_results:
- test_infos.update(f_results)
- test_found = True
- test_finders.append(FUZZY_FINDER)
+ print('No test found for: {}'.format(
+ atest_utils.colorize(test, constants.RED)))
+ if self.fuzzy_search:
+ f_results = self._fuzzy_search_and_msg(test, find_test_err_msg)
+ if f_results:
+ test_infos.update(f_results)
+ test_found = True
+ test_finders.append(FUZZY_FINDER)
metrics.FindTestFinishEvent(
duration=metrics_utils.convert_duration(
time.time() - test_find_starts),
@@ -194,7 +222,7 @@
print(self.msg)
return test_infos
- def _verified_mainline_modules(self, test, mainline_modules):
+ def _verified_mainline_modules(self, test_identifier: TestIdentifier) -> bool:
""" Verify the test with mainline modules is acceptable.
The test must be a module and mainline modules are in module-info.
@@ -203,24 +231,39 @@
and no duplication.
Args:
- test: A string representing test references
- mainline_modules: A string of mainline_modules.
+ test_identifier: a TestIdentifier object.
Returns:
True if this test is acceptable. Otherwise, print the reason and
return False.
"""
- if not mainline_modules:
+ mainline_binaries = test_identifier.binary_names
+ if not mainline_binaries:
return True
+
+ def mark_red(items):
+ return atest_utils.colorize(items, constants.RED)
+ test = test_identifier.test_name
if not self.mod_info.is_module(test):
- print('Error: "%s" is not a testable module.'
- % atest_utils.colorize(test, constants.RED))
+ print('Error: "{}" is not a testable module.'.format(
+ mark_red(test)))
return False
- if not self.mod_info.has_mainline_modules(test, mainline_modules):
- print('Error: Mainline modules "%s" were not defined for %s in '
- 'neither build file nor test config.'
- % (atest_utils.colorize(mainline_modules, constants.RED),
- atest_utils.colorize(test, constants.RED)))
+ # Exit earlier if the given mainline modules are unavailable in the
+ # branch.
+ unknown_modules = [module for module in test_identifier.module_names
+ if not self.mod_info.is_module(module)]
+ if unknown_modules:
+ print('Error: Cannot find {} in module info!'.format(
+ mark_red(', '.join(unknown_modules))))
+ return False
+ # Exit earlier if Atest cannot find relationship between the test and
+ # the mainline binaries.
+ mainline_binaries = test_identifier.binary_names
+ if not self.mod_info.has_mainline_modules(test, mainline_binaries):
+ print('Error: Mainline modules "{}" were not defined for {} in '
+ 'neither build file nor test config.'.format(
+ mark_red(', '.join(mainline_binaries)),
+ mark_red(test)))
return False
return True
@@ -234,8 +277,6 @@
Returns:
A list of TestInfos if found, otherwise None.
"""
- print('No test found for: %s' %
- atest_utils.colorize(test, constants.RED))
# Currently we focus on guessing module names. Append names on
# results if more finders support fuzzy searching.
if atest_utils.has_chars(test, TESTNAME_CHARS):
@@ -357,8 +398,10 @@
not test_mapping.is_match_file_patterns(
test_mapping_file, test)):
continue
+ test_name = parse_test_identifier(
+ test['name']).test_name
test_mod_info = self.mod_info.name_to_module_info.get(
- atest_utils.parse_mainline_modules(test['name'])[0])
+ test_name)
if not test_mod_info :
print('WARNING: %s is not a valid build target and '
'may not be discoverable by TreeHugger. If you '
@@ -369,8 +412,9 @@
'if the test module is not built for your '
'current lunch target.\n' %
atest_utils.colorize(test['name'], constants.RED))
- elif not any(x in test_mod_info['compatibility_suites'] for
- x in constants.TEST_MAPPING_SUITES):
+ elif not any(
+ x in test_mod_info.get('compatibility_suites', []) for
+ x in constants.TEST_MAPPING_SUITES):
print('WARNING: Please add %s to either suite: %s for '
'this TEST_MAPPING file to work with TreeHugger.' %
(atest_utils.colorize(test['name'],
@@ -459,8 +503,7 @@
if include_subdirs:
test_mapping_files.update(atest_utils.find_files(path, file_name))
# Include all possible TEST_MAPPING files in parent directories.
- root_dir = os.environ.get(constants.ANDROID_BUILD_TOP, os.sep)
- while path not in (root_dir, os.sep):
+ while path not in (self.root_dir, os.sep):
path = os.path.dirname(path)
test_mapping_file = os.path.join(path, file_name)
if os.path.exists(test_mapping_file):
@@ -498,12 +541,6 @@
return tests, all_tests
- def _gather_build_targets(self, test_infos):
- targets = set()
- for test_info in test_infos:
- targets |= test_info.build_targets
- return targets
-
def _get_test_mapping_tests(self, args, exit_if_no_test_found=True):
"""Find the tests in TEST_MAPPING files.
@@ -610,22 +647,41 @@
A tuple with set of build_target strings and list of TestInfos.
"""
tests = args.tests
- # Test details from TEST_MAPPING files
- test_details_list = None
- # Loading Host Unit Tests.
- host_unit_tests = []
+ # Disable fuzzy searching when running with test mapping related args.
+ self.fuzzy_search = args.fuzzy_search
detect_type = DetectType.TEST_WITH_ARGS
if not args.tests or atest_utils.is_test_mapping(args):
+ self.fuzzy_search = False
detect_type = DetectType.TEST_NULL_ARGS
start = time.time()
- if not args.tests:
+ # Not including host unit tests if user specify --test-mapping or
+ # --smart-testing-local arg.
+ host_unit_tests = []
+ if not any((
+ args.tests, args.test_mapping, args.smart_testing_local)):
logging.debug('Finding Host Unit Tests...')
- path = os.path.relpath(
- os.path.realpath(''),
- os.environ.get(constants.ANDROID_BUILD_TOP, ''))
host_unit_tests = test_finder_utils.find_host_unit_tests(
- self.mod_info, path)
+ self.mod_info,
+ str(Path(os.getcwd()).relative_to(self.root_dir)))
logging.debug('Found host_unit_tests: %s', host_unit_tests)
+ if args.smart_testing_local:
+ modified_files = set()
+ if args.tests:
+ for test_path in args.tests:
+ if not Path(test_path).is_dir():
+ atest_utils.colorful_print(
+ f'Found invalid dir {test_path}'
+ r'Please specify test paths for probing.',
+ constants.RED)
+ sys.exit(ExitCode.INVALID_SMART_TESTING_PATH)
+ modified_files |= atest_utils.get_modified_files(test_path)
+ else:
+ modified_files = atest_utils.get_modified_files(os.getcwd())
+ logging.info('Found modified files: %s...',
+ ', '.join(modified_files))
+ tests = list(modified_files)
+ # Test details from TEST_MAPPING files
+ test_details_list = None
if atest_utils.is_test_mapping(args):
if args.enable_file_patterns:
self.enable_file_patterns = True
@@ -657,12 +713,25 @@
metrics.LocalDetectEvent(
detect_type=detect_type,
result=int(finished_time))
- for test_info in test_infos:
- logging.debug('%s\n', test_info)
- build_targets = self._gather_build_targets(test_infos)
+ for t_info in test_infos:
+ logging.debug('%s\n', t_info)
if not self._bazel_mode:
if host_unit_tests or self._has_host_unit_test(tests):
msg = (r"It is recommended to run host unit tests with "
r"--bazel-mode.")
atest_utils.colorful_print(msg, constants.YELLOW)
- return build_targets, test_infos
+ return test_infos
+
+
+# TODO: (b/265359291) Raise Exception when the brackets are not in pair.
+def parse_test_identifier(test: str) -> TestIdentifier:
+ """Get mainline module names and binaries information."""
+ result = constants.TEST_WITH_MAINLINE_MODULES_RE.match(test)
+ if not result:
+ return TestIdentifier(test, [], [])
+ test_name = result.group('test')
+ mainline_binaries = result.group('mainline_modules').split('+')
+ mainline_modules = [re.sub(atest_utils.MAINLINE_MODULES_EXT_RE, '', m)
+ for m in mainline_binaries]
+ logging.debug('mainline_modules: %s', mainline_modules)
+ return TestIdentifier(test_name, mainline_modules, mainline_binaries)
diff --git a/atest/cli_translator_unittest.py b/atest/cli_translator_unittest.py
index 172f4b1..4b61f58 100755
--- a/atest/cli_translator_unittest.py
+++ b/atest/cli_translator_unittest.py
@@ -29,19 +29,20 @@
from io import StringIO
from unittest import mock
-import atest_utils
-import cli_translator as cli_t
-import constants
-import module_info
-import test_finder_handler
-import test_mapping
-import unittest_constants as uc
-import unittest_utils
+from atest import atest_arg_parser
+from atest import atest_utils
+from atest import cli_translator as cli_t
+from atest import constants
+from atest import module_info
+from atest import test_finder_handler
+from atest import test_mapping
+from atest import unittest_constants as uc
+from atest import unittest_utils
-from metrics import metrics
-from test_finders import module_finder
-from test_finders import test_finder_base
-from test_finders import test_finder_utils
+from atest.metrics import metrics
+from atest.test_finders import module_finder
+from atest.test_finders import test_finder_base
+from atest.test_finders import test_finder_utils
# TEST_MAPPING related consts
@@ -61,6 +62,7 @@
SEARCH_DIR_RE = re.compile(r'^find ([^ ]*).*$')
BUILD_TOP_DIR = tempfile.TemporaryDirectory().name
PRODUCT_OUT_DIR = os.path.join(BUILD_TOP_DIR, 'out/target/product/vsoc_x86_64')
+HOST_OUT_DIR = os.path.join(BUILD_TOP_DIR, 'out/host/linux-x86')
#pylint: disable=unused-argument
def gettestinfos_side_effect(test_names, test_mapping_test_details=None,
@@ -80,7 +82,6 @@
#pylint: disable=protected-access
-#pylint: disable=no-self-use
class CLITranslatorUnittests(unittest.TestCase):
"""Unit tests for cli_t.py"""
@@ -89,7 +90,9 @@
self.ctr = cli_t.CLITranslator()
# Create a mock of args.
- self.args = mock.Mock
+ parser = atest_arg_parser.AtestArgParser()
+ parser.add_atest_args()
+ self.args = parser.parse_args()
self.args.tests = []
# Test mapping related args
self.args.test_mapping = False
@@ -229,8 +232,6 @@
test_detail2.options,
test_info.data[constants.TI_MODULE_ARG])
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
@mock.patch.object(module_finder.ModuleFinder, 'get_fuzzy_searching_results')
@mock.patch.object(metrics, 'FindTestFinishEvent')
@mock.patch.object(test_finder_handler, 'get_find_methods_for_test')
@@ -238,7 +239,8 @@
self, mock_getfindmethods, _metrics, mock_getfuzzyresults,):
"""Test _get_test_infos method."""
mod_info = module_info.ModuleInfo(
- module_file=os.path.join(uc.TEST_DATA_DIR, uc.JSON_FILE))
+ module_file=os.path.join(uc.TEST_DATA_DIR, uc.JSON_FILE),
+ index_dir=HOST_OUT_DIR)
ctr = cli_t.CLITranslator(mod_info=mod_info)
null_test_info = set()
mock_getfindmethods.return_value = []
@@ -246,32 +248,40 @@
unittest_utils.assert_strict_equal(
self, ctr._get_test_infos('not_exist_module'), null_test_info)
+ @mock.patch.object(test_finder_utils, 'find_host_unit_tests',
+ return_value=set())
@mock.patch.object(cli_t.CLITranslator, '_has_host_unit_test')
@mock.patch.object(cli_t.CLITranslator, '_get_test_infos',
side_effect=gettestinfos_side_effect)
- def test_translate_class(self, _info, host_unit_tests):
+ def test_translate_class(self, _info, host_unit_tests, _find):
"""Test translate method for tests by class name."""
# Check that we can find a class.
host_unit_tests.return_value = False
self.args.tests = [uc.CLASS_NAME]
self.args.host_unit_test_only = False
- targets, test_infos = self.ctr.translate(self.args)
+ test_infos = self.ctr.translate(self.args)
unittest_utils.assert_strict_equal(
- self, targets, uc.CLASS_BUILD_TARGETS)
+ self,
+ _gather_build_targets(test_infos),
+ uc.CLASS_BUILD_TARGETS)
unittest_utils.assert_strict_equal(self, test_infos, {uc.CLASS_INFO})
+ @mock.patch.object(test_finder_utils, 'find_host_unit_tests',
+ return_value=set())
@mock.patch.object(cli_t.CLITranslator, '_has_host_unit_test')
@mock.patch.object(cli_t.CLITranslator, '_get_test_infos',
side_effect=gettestinfos_side_effect)
- def test_translate_module(self, _info, host_unit_tests):
+ def test_translate_module(self, _info, host_unit_tests, _find):
"""Test translate method for tests by module or class name."""
# Check that we get all the build targets we expect.
host_unit_tests.return_value = []
self.args.tests = [uc.MODULE_NAME, uc.CLASS_NAME]
self.args.host_unit_test_only = False
- targets, test_infos = self.ctr.translate(self.args)
+ test_infos = self.ctr.translate(self.args)
unittest_utils.assert_strict_equal(
- self, targets, uc.MODULE_CLASS_COMBINED_BUILD_TARGETS)
+ self,
+ _gather_build_targets(test_infos),
+ uc.MODULE_CLASS_COMBINED_BUILD_TARGETS)
unittest_utils.assert_strict_equal(self, test_infos, {uc.MODULE_INFO,
uc.CLASS_INFO})
@@ -291,9 +301,11 @@
self.args.host = False
self.args.host_unit_test_only = False
host_unit_tests.return_value = False
- targets, test_infos = self.ctr.translate(self.args)
+ test_infos = self.ctr.translate(self.args)
unittest_utils.assert_strict_equal(
- self, targets, uc.MODULE_CLASS_COMBINED_BUILD_TARGETS)
+ self,
+ _gather_build_targets(test_infos),
+ uc.MODULE_CLASS_COMBINED_BUILD_TARGETS)
unittest_utils.assert_strict_equal(self, test_infos, {uc.MODULE_INFO,
uc.CLASS_INFO})
@@ -311,14 +323,17 @@
self.args.test_mapping = True
self.args.host = False
host_unit_tests.return_value = False
- targets, test_infos = self.ctr.translate(self.args)
+ test_infos = self.ctr.translate(self.args)
unittest_utils.assert_strict_equal(
- self, targets, uc.MODULE_CLASS_COMBINED_BUILD_TARGETS)
+ self,
+ _gather_build_targets(test_infos),
+ uc.MODULE_CLASS_COMBINED_BUILD_TARGETS)
unittest_utils.assert_strict_equal(self, test_infos, {uc.MODULE_INFO,
uc.CLASS_INFO})
def test_find_tests_by_test_mapping_presubmit(self):
"""Test _find_tests_by_test_mapping method to locate presubmit tests."""
+ # TODO: (b/264015241) Stop mocking build variables.
os_environ_mock = {constants.ANDROID_BUILD_TOP: uc.TEST_DATA_DIR}
with mock.patch.dict('os.environ', os_environ_mock, clear=True):
tests, all_tests = self.ctr._find_tests_by_test_mapping(
@@ -335,6 +350,7 @@
def test_find_tests_by_test_mapping_postsubmit(self):
"""Test _find_tests_by_test_mapping method to locate postsubmit tests.
"""
+ # TODO: (b/264015241) Stop mocking build variables.
os_environ_mock = {constants.ANDROID_BUILD_TOP: uc.TEST_DATA_DIR}
with mock.patch.dict('os.environ', os_environ_mock, clear=True):
tests, all_tests = self.ctr._find_tests_by_test_mapping(
@@ -353,6 +369,7 @@
def test_find_tests_by_test_mapping_all_group(self):
"""Test _find_tests_by_test_mapping method to locate postsubmit tests.
"""
+ # TODO: (b/264015241) Stop mocking build variables.
os_environ_mock = {constants.ANDROID_BUILD_TOP: uc.TEST_DATA_DIR}
with mock.patch.dict('os.environ', os_environ_mock, clear=True):
tests, all_tests = self.ctr._find_tests_by_test_mapping(
@@ -371,6 +388,7 @@
def test_find_tests_by_test_mapping_include_subdir(self):
"""Test _find_tests_by_test_mapping method to include sub directory."""
+ # TODO: (b/264015241) Stop mocking build variables.
os_environ_mock = {constants.ANDROID_BUILD_TOP: uc.TEST_DATA_DIR}
with mock.patch.dict('os.environ', os_environ_mock, clear=True):
tests, all_tests = self.ctr._find_tests_by_test_mapping(
@@ -418,13 +436,12 @@
self.assertEqual(test_mapping_dict, test_mapping_dict_gloden)
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
@mock.patch.object(module_info.ModuleInfo, 'get_testable_modules')
def test_extract_testable_modules_by_wildcard(self, mock_mods):
"""Test _extract_testable_modules_by_wildcard method."""
mod_info = module_info.ModuleInfo(
- module_file=os.path.join(uc.TEST_DATA_DIR, uc.JSON_FILE))
+ module_file=os.path.join(uc.TEST_DATA_DIR, uc.JSON_FILE),
+ index_dir=HOST_OUT_DIR)
ctr = cli_t.CLITranslator(mod_info=mod_info)
mock_mods.return_value = ['test1', 'test2', 'test3', 'test11',
'Test22', 'Test100', 'aTest101']
@@ -444,14 +461,16 @@
self.assertEqual(ctr._extract_testable_modules_by_wildcard(expr3),
result3)
+ @mock.patch.object(cli_t.CLITranslator, '_has_host_unit_test',
+ return_value=True)
@mock.patch.object(test_finder_utils, 'find_host_unit_tests',
return_value=[uc.HOST_UNIT_TEST_NAME_1,
uc.HOST_UNIT_TEST_NAME_2])
@mock.patch.object(cli_t.CLITranslator, '_find_tests_by_test_mapping')
@mock.patch.object(cli_t.CLITranslator, '_get_test_infos',
side_effect=gettestinfos_side_effect)
- def test_translate_test_mapping_host_unit_test(self, _info, mock_testmapping,
- _find_unit_tests):
+ def test_translate_test_mapping_host_unit_test(
+ self, _info, mock_testmapping, _find_unit_tests, _has_host_unit_test):
"""Test translate method for tests belong to host unit tests."""
# Check that test mappings feeds into get_test_info properly.
test_detail1 = test_mapping.TestDetail(uc.TEST_MAPPING_TEST)
@@ -460,7 +479,7 @@
self.args.tests = []
self.args.host = False
self.args.host_unit_test_only = False
- _, test_infos = self.ctr.translate(self.args)
+ test_infos = self.ctr.translate(self.args)
unittest_utils.assert_strict_equal(self,
test_infos,
{uc.MODULE_INFO,
@@ -468,5 +487,84 @@
uc.MODULE_INFO_HOST_1,
uc.MODULE_INFO_HOST_2})
+ @mock.patch.object(cli_t.CLITranslator, '_has_host_unit_test',
+ return_value=True)
+ @mock.patch.object(test_finder_utils, 'find_host_unit_tests',
+ return_value=[uc.HOST_UNIT_TEST_NAME_1,
+ uc.HOST_UNIT_TEST_NAME_2])
+ @mock.patch.object(cli_t.CLITranslator, '_find_tests_by_test_mapping')
+ @mock.patch.object(cli_t.CLITranslator, '_get_test_infos',
+ side_effect=gettestinfos_side_effect)
+ def test_translate_test_mapping_without_host_unit_test(
+ self, _info, mock_testmapping, _find_unit_tests, _has_host_unit_test):
+ """Test translate method not using host unit tests if test_mapping arg .
+ """
+ # Check that test mappings feeds into get_test_info properly.
+ test_detail1 = test_mapping.TestDetail(uc.TEST_MAPPING_TEST)
+ test_detail2 = test_mapping.TestDetail(uc.TEST_MAPPING_TEST_WITH_OPTION)
+ mock_testmapping.return_value = ([test_detail1, test_detail2], None)
+ self.args.tests = []
+ self.args.host = False
+ self.args.test_mapping = True
+ self.args.host_unit_test_only = False
+ test_infos = self.ctr.translate(self.args)
+ unittest_utils.assert_strict_equal(
+ self,
+ test_infos,
+ {uc.MODULE_INFO, uc.CLASS_INFO})
+
+
+class ParseTestIdentifierTest(unittest.TestCase):
+ """Test parse_test_identifier with different test names."""
+
+ def test_no_mainline_modules(self):
+ """non-mainline module testing."""
+ given = 'testName'
+
+ identifier = cli_t.parse_test_identifier(given)
+
+ self.assertEqual('testName', identifier.test_name)
+ self.assertEqual([], identifier.module_names)
+ self.assertEqual([], identifier.binary_names)
+
+ def test_single_mainline_module(self):
+ """only one mainline module."""
+ given = 'testName[Module1.apk]'
+
+ identifier = cli_t.parse_test_identifier(given)
+
+ self.assertEqual('testName', identifier.test_name)
+ self.assertEqual(['Module1'], identifier.module_names)
+ self.assertEqual(['Module1.apk'], identifier.binary_names)
+
+ def test_multiple_mainline_modules(self):
+ """multiple mainline modules."""
+ given = 'testName[Module1.apk+Module2.apex]'
+
+ identifier = cli_t.parse_test_identifier(given)
+
+ self.assertEqual('testName', identifier.test_name)
+ self.assertEqual(
+ ['Module1', 'Module2'], identifier.module_names)
+ self.assertEqual(
+ ['Module1.apk', 'Module2.apex'], identifier.binary_names)
+
+ def test_missing_closing_bracket(self):
+ """test the brackets are not in pair"""
+ given = 'testName[Module1.apk+Module2.apex'
+
+ identifier = cli_t.parse_test_identifier(given)
+
+ self.assertEqual(given, identifier.test_name)
+ self.assertEqual([], identifier.module_names)
+ self.assertEqual([], identifier.binary_names)
+
if __name__ == '__main__':
unittest.main()
+
+
+def _gather_build_targets(test_infos):
+ targets = set()
+ for t_info in test_infos:
+ targets |= t_info.build_targets
+ return targets
diff --git a/atest/constants.py b/atest/constants.py
index 3d7a6cf..b9d0b5d 100644
--- a/atest/constants.py
+++ b/atest/constants.py
@@ -18,7 +18,7 @@
# pylint: disable=wildcard-import
# pylint: disable=unused-wildcard-import
-from constants_default import *
+from atest.constants_default import *
# Now try to import the various constant files outside this repo to overwrite
diff --git a/atest/constants_default.py b/atest/constants_default.py
index e3122a0..34e2f77 100644
--- a/atest/constants_default.py
+++ b/atest/constants_default.py
@@ -45,6 +45,7 @@
SHARDING = 'SHARDING'
ALL_ABI = 'ALL_ABI'
HOST = 'HOST'
+DEVICE_ONLY = 'DEVICE_ONLY'
CUSTOM_ARGS = 'CUSTOM_ARGS'
DRY_RUN = 'DRY_RUN'
ANDROID_SERIAL = 'ANDROID_SERIAL'
@@ -70,8 +71,11 @@
ENABLE_DEVICE_PREPARER = 'ENABLE_DEVICE_PREPARER'
ANNOTATION_FILTER = 'ANNOTATION_FILTER'
BAZEL_ARG = 'BAZEL_ARG'
+COVERAGE = 'COVERAGE'
TEST_FILTER = 'TEST_FILTER'
TEST_TIMEOUT = 'TEST_TIMEOUT'
+VERBOSE = 'VERBOSE'
+LD_LIBRARY_PATH = 'LD_LIBRARY_PATH'
# Robolectric Types:
ROBOTYPE_MODERN = 1
@@ -101,8 +105,15 @@
MODULE_IS_UNIT_TEST = 'is_unit_test'
MODULE_SHARED_LIBS = 'shared_libs'
MODULE_RUNTIME_DEPS = 'runtime_dependencies'
+MODULE_STATIC_DEPS = 'static_dependencies'
MODULE_DATA_DEPS = 'data_dependencies'
MODULE_SUPPORTED_VARIANTS = 'supported_variants'
+MODULE_LIBS = 'libs'
+MODULE_STATIC_LIBS = 'static_libs'
+MODULE_HOST_DEPS = 'host_dependencies'
+MODULE_TARGET_DEPS = 'target_dependencies'
+MODULE_TEST_OPTIONS_TAGS = 'test_options_tags'
+MODULE_INFO_ID = 'module_info_id'
# Env constants
@@ -186,13 +197,6 @@
INTERNAL = 'INTERNAL_RUN'
INTERNAL_EMAIL = '@google.com'
INTERNAL_HOSTNAME = ['.google.com', 'c.googlers.com']
-CONTENT_LICENSES_URL = 'https://source.android.com/setup/start/licenses'
-CONTRIBUTOR_AGREEMENT_URL = {
- 'INTERNAL': 'https://cla.developers.google.com/',
- 'EXTERNAL': 'https://opensource.google.com/docs/cla/'
-}
-PRIVACY_POLICY_URL = 'https://policies.google.com/privacy'
-TERMS_SERVICE_URL = 'https://policies.google.com/terms'
TOOL_NAME = 'atest'
SUB_TOOL_NAME = ''
USER_FROM_TOOL = 'USER_FROM_TOOL'
@@ -237,7 +241,6 @@
# Atest index path and relative dirs/caches.
INDEX_DIR = os.path.join(os.getenv(ANDROID_HOST_OUT, ''), 'indexes')
LOCATE_CACHE = os.path.join(INDEX_DIR, 'plocate.db')
-LOCATE_CACHE_MD5 = os.path.join(INDEX_DIR, 'plocate.md5')
BUILDFILES_MD5 = os.path.join(INDEX_DIR, 'buildfiles.md5')
INT_INDEX = os.path.join(INDEX_DIR, 'integration.idx')
CLASS_INDEX = os.path.join(INDEX_DIR, 'classes.idx')
@@ -246,7 +249,6 @@
QCLASS_INDEX = os.path.join(INDEX_DIR, 'fqcn.idx')
MODULE_INDEX = 'modules.idx'
MODULE_INFO_MD5 = 'module-info.md5'
-VERSION_FILE = os.path.join(os.path.dirname(__file__), 'VERSION')
# Regeular Expressions
CC_EXT_RE = re.compile(r'.*\.(cc|cpp)$')
@@ -257,8 +259,6 @@
r'(?P<test_name>\w+)\s*,\s*(?P<method_name>\w+)\)\s*\{')
# Used by locate command.
CC_GREP_RE = r'^\s*(TYPED_TEST(_P)*|TEST(_F|_P)*)\s*\(\w+,'
-# Used by find command.
-CC_GREP_KWRE = r'^\s*(TYPED_TEST(_P)*|TEST(_F|_P)*)\s*\({2},'
# e.g. /path/to/Javafile.java:package com.android.settings.accessibility
# grab the path, Javafile(class) and com.android.settings.accessibility(package)
CLASS_OUTPUT_RE = re.compile(r'(?P<java_path>.*/(?P<class>[A-Z]\w+)\.\w+)[:].*')
@@ -270,18 +270,13 @@
ATEST_RESULT_ROOT = '/tmp/atest_result'
ATEST_TEST_RECORD_PROTO = 'test_record.proto'
LATEST_RESULT_FILE = os.path.join(ATEST_RESULT_ROOT, 'LATEST', 'test_result')
-ACLOUD_REPORT_FILE_RE = re.compile(r'.*--report[_-]file(=|\s+)(?P<report_file>[\w/.]+)')
TEST_WITH_MAINLINE_MODULES_RE = re.compile(r'(?P<test>.*)\[(?P<mainline_modules>.*'
r'[.](apk|apks|apex))\]$')
-# Tests list which need vts_kernel_tests as test dependency
-REQUIRED_KERNEL_TEST_MODULES = [
+# Tests that need vts_ltp_tests as a test dependency
+REQUIRED_LTP_TEST_MODULES = [
'vts_ltp_test_arm',
'vts_ltp_test_arm_64',
- 'vts_linux_kselftest_arm_32',
- 'vts_linux_kselftest_arm_64',
- 'vts_linux_kselftest_x86_32',
- 'vts_linux_kselftest_x86_64',
'vts_ltp_test_arm_64_lowmem',
'vts_ltp_test_arm_64_hwasan',
'vts_ltp_test_arm_64_lowmem_hwasan',
@@ -289,6 +284,13 @@
'vts_ltp_test_x86_64',
'vts_ltp_test_x86'
]
+# Tests that need vts_kselftest_tests as a test dependency
+REQUIRED_KSELFTEST_TEST_MODULES = [
+ 'vts_linux_kselftest_arm_32',
+ 'vts_linux_kselftest_arm_64',
+ 'vts_linux_kselftest_x86_32',
+ 'vts_linux_kselftest_x86_64',
+]
# XTS suite set dependency.
SUITE_DEPS = {}
@@ -376,15 +378,15 @@
'not_' + TF_PARA_MULTIABI}
# ATest integration test related constants.
-INTEGRATION_TESTS = [os.path.join(
- os.environ.get(ANDROID_BUILD_TOP, os.getcwd()),
- 'tools/asuite/atest/test_plans/INTEGRATION_TESTS')]
VERIFY_DATA_PATH = os.path.join(
os.environ.get(ANDROID_BUILD_TOP, os.getcwd()),
'tools/asuite/atest/test_data/test_commands.json')
VERIFY_ENV_PATH = os.path.join(
os.environ.get(ANDROID_BUILD_TOP, os.getcwd()),
'tools/asuite/atest/test_data/test_environ.json')
+RUNNER_COMMAND_PATH = os.path.join(
+ os.environ.get(ANDROID_BUILD_TOP, os.getcwd()),
+ 'tools/asuite/atest/test_data/runner_commands.json')
# Gtest Types
GTEST_REGULAR = 'regular native test'
@@ -402,3 +404,6 @@
REQUIRE_DEVICES_MSG = (
'Please ensure there is at least one connected device via:\n'
' $ adb devices')
+
+# Default shard num.
+SHARD_NUM = 2
diff --git a/atest/coverage/Android.bp b/atest/coverage/Android.bp
new file mode 100644
index 0000000..0e6c8f1
--- /dev/null
+++ b/atest/coverage/Android.bp
@@ -0,0 +1,15 @@
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+java_binary_host {
+ name: "jacoco_to_lcov_converter",
+ main_class: "com/android/jacocolcov/JacocoToLcovConverter",
+ srcs: ["com/android/jacocolcov/JacocoToLcovConverter.java"],
+ static_libs: [
+ "commons-cli-1.2",
+ "error_prone_annotations",
+ "guava",
+ "jacocoagent",
+ ],
+}
diff --git a/atest/coverage/com/android/jacocolcov/JacocoToLcovConverter.java b/atest/coverage/com/android/jacocolcov/JacocoToLcovConverter.java
new file mode 100644
index 0000000..93abb14
--- /dev/null
+++ b/atest/coverage/com/android/jacocolcov/JacocoToLcovConverter.java
@@ -0,0 +1,376 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.jacocolcov;
+
+import static org.jacoco.core.analysis.ICounter.EMPTY;
+import static org.jacoco.core.analysis.ISourceNode.UNKNOWN_LINE;
+
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.SetMultimap;
+import com.google.errorprone.annotations.FormatMethod;
+import com.google.errorprone.annotations.FormatString;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.jacoco.core.analysis.Analyzer;
+import org.jacoco.core.analysis.CoverageBuilder;
+import org.jacoco.core.analysis.IClassCoverage;
+import org.jacoco.core.analysis.ILine;
+import org.jacoco.core.analysis.IMethodCoverage;
+import org.jacoco.core.tools.ExecFileLoader;
+
+import java.io.BufferedWriter;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.stream.Stream;
+
+/** Converts JaCoCo measurements and class files into a lcov-format coverage report. */
+final class JacocoToLcovConverter {
+
+ // Command line flags.
+ private static final String CLASSFILES_OPTION = "classfiles";
+ private static final String SOURCEPATH_OPTION = "sourcepath";
+ private static final String OUTPUT_OPTION = "o";
+ private static final String VERBOSE_OPTION = "v";
+ private static final String STRICT_OPTION = "strict";
+
+ public static void main(String[] args) {
+ Options options = new Options();
+
+ options.addOption(
+ OptionBuilder.hasArg()
+ .withArgName("<path>")
+ .withDescription("location of the Java class files")
+ .create(CLASSFILES_OPTION));
+
+ options.addOption(
+ OptionBuilder.hasArg()
+ .withArgName("<path>")
+ .withDescription("location of the source files")
+ .create(SOURCEPATH_OPTION));
+
+ options.addOption(
+ OptionBuilder.isRequired()
+ .hasArg()
+ .withArgName("<destfile>")
+ .withDescription("location to write lcov data")
+ .create(OUTPUT_OPTION));
+
+ options.addOption(OptionBuilder.withDescription("verbose logging").create(VERBOSE_OPTION));
+
+ options.addOption(
+ OptionBuilder.withDescription("fail if any error is encountered")
+ .create(STRICT_OPTION));
+
+ CommandLineParser parser = new PosixParser();
+ CommandLine cmd;
+
+ try {
+ cmd = parser.parse(options, args);
+ } catch (ParseException e) {
+ logError("error parsing command line options: %s", e.getMessage());
+ System.exit(1);
+ return;
+ }
+
+ String[] classFiles = cmd.getOptionValues(CLASSFILES_OPTION);
+ String[] sourcePaths = cmd.getOptionValues(SOURCEPATH_OPTION);
+ String outputFile = cmd.getOptionValue(OUTPUT_OPTION);
+ boolean verbose = cmd.hasOption(VERBOSE_OPTION);
+ boolean strict = cmd.hasOption(STRICT_OPTION);
+ String[] execFiles = cmd.getArgs();
+
+ JacocoToLcovConverter converter = new JacocoToLcovConverter(verbose, strict);
+
+ try {
+ if (sourcePaths != null) {
+ for (String sourcePath : sourcePaths) {
+ converter.indexSourcePath(Paths.get(sourcePath));
+ }
+ }
+
+ if (execFiles != null) {
+ for (String execFile : execFiles) {
+ converter.loadExecFile(Paths.get(execFile));
+ }
+ }
+
+ if (classFiles != null) {
+ for (String classFile : classFiles) {
+ converter.loadClassFile(Paths.get(classFile));
+ }
+ }
+
+ converter.write(Paths.get(outputFile));
+ } catch (IOException e) {
+ logError("failed to generate a coverage report: %s", e.getMessage());
+ System.exit(2);
+ }
+ }
+
+ private Analyzer analyzer;
+ private final CoverageBuilder builder;
+ private final ExecFileLoader loader;
+ private final boolean verbose;
+ private final boolean strict;
+
+ private int execFilesLoaded;
+ private int classFilesLoaded;
+ private SetMultimap<String, Path> sourceFiles;
+
+ JacocoToLcovConverter(final boolean verbose, final boolean strict) {
+ this.verbose = verbose;
+ this.strict = strict;
+ analyzer = null;
+ builder = new CoverageBuilder();
+ loader = new ExecFileLoader();
+ execFilesLoaded = 0;
+ classFilesLoaded = 0;
+ sourceFiles = HashMultimap.create();
+ }
+
+ /**
+ * Indexes the path and all subdirectories for Java or Kotlin files.
+ *
+ * @param path the path to search for files
+ */
+ void indexSourcePath(final Path path) throws IOException {
+ try (Stream<Path> stream = Files.walk(path)) {
+ stream.filter(Files::isRegularFile)
+ .filter(p -> p.toString().endsWith(".java") || p.toString().endsWith(".kt"))
+ .forEach(p -> sourceFiles.put(p.getFileName().toString(), p.toAbsolutePath()));
+ }
+ }
+
+ /**
+ * Loads JaCoCo execution data files.
+ *
+ * <p>If strict is not set, logs any exception thrown and returns. If strict is set, rethrows
+ * any exception encountered while loading the file. Execution data files are occasionally
+ * malformed and will cause the tool to fail if strict is set.
+ *
+ * @param execFile the file to load
+ * @throws IOException on error reading file or incorrect file format
+ */
+ void loadExecFile(final Path execFile) throws IOException {
+ try {
+ logVerbose("Loading exec file %s", execFile);
+ loader.load(execFile.toFile());
+ execFilesLoaded++;
+ } catch (IOException e) {
+ logError("Failed to load exec file %s", execFile);
+ if (strict) {
+ throw e;
+ }
+ logError(e.getMessage());
+ }
+ }
+
+ /**
+ * Loads uninstrumented Java class files.
+ *
+ * <p>This should be run only after loading all exec files, otherwise coverage data may be
+ * incorrect.
+ *
+ * @param classFile the class file or class file archive to load
+ * @throws IOException on error reading file or incorrect file format
+ */
+ void loadClassFile(final Path classFile) throws IOException {
+ if (analyzer == null) {
+ analyzer = new Analyzer(loader.getExecutionDataStore(), builder);
+ }
+
+ logVerbose("Loading class file %s", classFile);
+ analyzer.analyzeAll(classFile.toFile());
+ classFilesLoaded++;
+ }
+
+ /**
+ * Writes out the lcov format file based on the exec data and class files loaded.
+ *
+ * @param outputFile the file to write to
+ * @throws IOException on error writing to the output file
+ */
+ void write(final Path outputFile) throws IOException {
+ logVerbose(
+ "%d exec files loaded and %d class files loaded.",
+ execFilesLoaded, classFilesLoaded);
+
+ try (BufferedWriter writer = Files.newBufferedWriter(outputFile, StandardCharsets.UTF_8)) {
+ // Write lcov header test name: <test name>. Displayed on the front page but otherwise
+ // not used for anything important.
+ writeLine(writer, "TN:%s", outputFile.getFileName());
+
+ for (IClassCoverage coverage : builder.getClasses()) {
+ if (coverage.isNoMatch()) {
+ String message = "Mismatch in coverage data for " + coverage.getName();
+ logVerbose(message);
+ if (strict) {
+ throw new IOException(message);
+ }
+ }
+ // Looping over coverage.getMethods() is done multiple times below due to lcov
+ // ordering requirements.
+ // lcov was designed around native code, and uses functions rather than methods as
+ // its terminology of choice. We use methods here as we are working with Java code.
+ int methodsFound = 0;
+ int methodsHit = 0;
+ int linesFound = 0;
+ int linesHit = 0;
+
+ // Sourcefile information: <absolute path to sourcefile>. If the sourcefile does not
+ // match any file given on --sourcepath, it will not be included in the coverage
+ // report.
+ String sourcefile = findSourceFileMatching(sourcefile(coverage));
+ if (sourcefile == null) {
+ continue;
+ }
+ writeLine(writer, "SF:%s", sourcefile);
+
+ // Function information: <starting line>,<name>.
+ for (IMethodCoverage method : coverage.getMethods()) {
+ writeLine(writer, "FN:%d,%s", method.getFirstLine(), name(method));
+ }
+
+ // Function coverage information: <execution count>,<name>.
+ for (IMethodCoverage method : coverage.getMethods()) {
+ int count = method.getMethodCounter().getCoveredCount();
+ writeLine(writer, "FNDA:%d,%s", count, name(method));
+
+ methodsFound++;
+ if (count > 0) {
+ methodsHit++;
+ }
+ }
+
+ // Write the count of methods(functions) found and hit.
+ writeLine(writer, "FNF:%d", methodsFound);
+ writeLine(writer, "FNH:%d", methodsHit);
+
+ // TODO: Write branch coverage information.
+
+ // Write line coverage information.
+ for (IMethodCoverage method : coverage.getMethods()) {
+ int start = method.getFirstLine();
+ int end = method.getLastLine();
+
+ if (start == UNKNOWN_LINE || end == UNKNOWN_LINE) {
+ continue;
+ }
+
+ for (int i = start; i <= end; i++) {
+ ILine line = method.getLine(i);
+ if (line.getStatus() == EMPTY) {
+ continue;
+ }
+ int count = line.getInstructionCounter().getCoveredCount();
+ writeLine(writer, "DA:%d,%d", i, count);
+
+ linesFound++;
+ if (count > 0) {
+ linesHit++;
+ }
+ }
+ }
+
+ // Write the count of lines hit and found.
+ writeLine(writer, "LH:%d", linesHit);
+ writeLine(writer, "LF:%d", linesFound);
+
+ // End of the sourcefile block.
+ writeLine(writer, "end_of_record");
+ }
+ }
+
+ log("Coverage data written to %s", outputFile);
+ }
+
+ /**
+ * Finds the absolute path to the sourcefile that ends with the given file path.
+ *
+ * <p>Searches all the files indexed on -sourcepath and returns the first file that matches the
+ * package and class name. The input is the full Java class name, separated by `/` rather than
+ * `.`
+ *
+ * @param filename the filename to match
+ * @return the absolute path to the file, or null if none was found
+ */
+ private String findSourceFileMatching(String filename) {
+ String key = Paths.get(filename).getFileName().toString();
+ for (Path path : sourceFiles.get(key)) {
+ if (path.endsWith(filename)) {
+ logVerbose("%s matched to %s", filename, path);
+ return path.toAbsolutePath().toString();
+ }
+ }
+ logVerbose("%s did not match any source path", filename);
+ return null;
+ }
+
+ /** Writes a line to the file. */
+ @FormatMethod
+ private static void writeLine(
+ BufferedWriter writer, @FormatString String format, Object... args) throws IOException {
+ writer.write(String.format(format, args));
+ writer.newLine();
+ }
+
+ /** Prints log message. */
+ @FormatMethod
+ private static void log(@FormatString String format, Object... args) {
+ System.out.println(String.format(format, args));
+ }
+
+ /** Prints verbose log. */
+ @FormatMethod
+ private void logVerbose(@FormatString String format, Object... args) {
+ logVerbose(String.format(format, args));
+ }
+
+ /** Prints verbose log. */
+ private void logVerbose(String message) {
+ if (verbose) {
+ System.out.println(message);
+ }
+ }
+
+ /** Prints format string error message. */
+ @FormatMethod
+ private static void logError(@FormatString String format, Object... args) {
+ logError(String.format(format, args));
+ }
+
+ /** Prints error message. */
+ private static void logError(String message) {
+ System.err.println(message);
+ }
+
+ /** Converts IClassCoverage to a sourcefile path. */
+ private static String sourcefile(IClassCoverage coverage) {
+ return coverage.getPackageName() + "/" + coverage.getSourceFileName();
+ }
+
+ /** Converts IMethodCoverage to a unique method descriptor. */
+ private static String name(IMethodCoverage coverage) {
+ return coverage.getName() + coverage.getDesc();
+ }
+}
diff --git a/atest/coverage/coverage.py b/atest/coverage/coverage.py
new file mode 100644
index 0000000..d1b80ee
--- /dev/null
+++ b/atest/coverage/coverage.py
@@ -0,0 +1,272 @@
+# Copyright 2022, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Code coverage instrumentation and collection functionality."""
+
+import logging
+import os
+import subprocess
+
+from pathlib import Path
+from typing import List, Set
+
+from atest import atest_utils
+from atest import constants
+from atest import module_info
+from atest.test_finders import test_info
+
+CLANG_VERSION='r475365b'
+
+def build_env_vars():
+ """Environment variables for building with code coverage instrumentation.
+
+ Returns:
+ A dict with the environment variables to set.
+ """
+ env_vars = {
+ 'CLANG_COVERAGE': 'true',
+ 'NATIVE_COVERAGE_PATHS': '*',
+ 'EMMA_INSTRUMENT': 'true',
+ }
+ return env_vars
+
+
+def tf_args(*value):
+ """TradeFed command line arguments needed to collect code coverage.
+
+ Returns:
+ A list of the command line arguments to append.
+ """
+ del value
+ build_top = Path(os.environ.get(constants.ANDROID_BUILD_TOP))
+ llvm_profdata = build_top.joinpath(
+ f'prebuilts/clang/host/linux-x86/clang-{CLANG_VERSION}')
+ return ('--coverage',
+ '--coverage-toolchain', 'JACOCO',
+ '--coverage-toolchain', 'CLANG',
+ '--auto-collect', 'JAVA_COVERAGE',
+ '--auto-collect', 'CLANG_COVERAGE',
+ '--llvm-profdata-path', str(llvm_profdata))
+
+
+def generate_coverage_report(results_dir: str,
+ test_infos: List[test_info.TestInfo],
+ mod_info: module_info.ModuleInfo):
+ """Generates HTML code coverage reports based on the test info."""
+
+ soong_intermediates = Path(
+ atest_utils.get_build_out_dir()).joinpath('soong/.intermediates')
+
+ # Collect dependency and source file information for the tests and any
+ # Mainline modules.
+ dep_modules = _get_test_deps(test_infos, mod_info)
+ src_paths = _get_all_src_paths(dep_modules, mod_info)
+
+ # Collect JaCoCo class jars from the build for coverage report generation.
+ jacoco_report_jars = {}
+ unstripped_native_binaries = set()
+ for module in dep_modules:
+ for path in mod_info.get_paths(module):
+ module_dir = soong_intermediates.joinpath(path, module)
+ # Check for uninstrumented Java class files to report coverage.
+ classfiles = list(
+ module_dir.rglob('jacoco-report-classes/*.jar'))
+ if classfiles:
+ jacoco_report_jars[module] = classfiles
+
+ # Check for unstripped native binaries to report coverage.
+ unstripped_native_binaries.update(
+ module_dir.glob('*cov*/unstripped/*'))
+
+ if jacoco_report_jars:
+ _generate_java_coverage_report(jacoco_report_jars, src_paths,
+ results_dir, mod_info)
+
+ if unstripped_native_binaries:
+ _generate_native_coverage_report(unstripped_native_binaries,
+ results_dir)
+
+
+def _get_test_deps(test_infos, mod_info):
+ """Gets all dependencies of the TestInfo, including Mainline modules."""
+ deps = set()
+
+ for info in test_infos:
+ deps.add(info.raw_test_name)
+ deps |= _get_transitive_module_deps(
+ mod_info.get_module_info(info.raw_test_name), mod_info, deps)
+
+ # Include dependencies of any Mainline modules specified as well.
+ if not info.mainline_modules:
+ continue
+
+ for mainline_module in info.mainline_modules:
+ deps.add(mainline_module)
+ deps |= _get_transitive_module_deps(
+ mod_info.get_module_info(mainline_module), mod_info, deps)
+
+ return deps
+
+
+def _get_transitive_module_deps(info,
+ mod_info: module_info.ModuleInfo,
+ seen: Set[str]) -> Set[str]:
+ """Gets all dependencies of the module, including .impl versions."""
+ deps = set()
+
+ for dep in info.get(constants.MODULE_DEPENDENCIES, []):
+ if dep in seen:
+ continue
+
+ seen.add(dep)
+
+ dep_info = mod_info.get_module_info(dep)
+
+ # Mainline modules sometimes depend on `java_sdk_library` modules that
+ # generate synthetic build modules ending in `.impl` which do not appear
+ # in the ModuleInfo. Strip this suffix to prevent incomplete dependency
+ # information when generating coverage reports.
+ # TODO(olivernguyen): Reconcile this with
+ # ModuleInfo.get_module_dependency(...).
+ if not dep_info:
+ dep = dep.removesuffix('.impl')
+ dep_info = mod_info.get_module_info(dep)
+
+ if not dep_info:
+ continue
+
+ deps.add(dep)
+ deps |= _get_transitive_module_deps(dep_info, mod_info, seen)
+
+ return deps
+
+
+def _get_all_src_paths(modules, mod_info):
+ """Gets the set of directories containing any source files from the modules.
+ """
+ src_paths = set()
+
+ for module in modules:
+ info = mod_info.get_module_info(module)
+ if not info:
+ continue
+
+ # Do not report coverage for test modules.
+ if mod_info.is_testable_module(info):
+ continue
+
+ src_paths.update(
+ os.path.dirname(f) for f in info.get(constants.MODULE_SRCS, []))
+
+ src_paths = {p for p in src_paths if not _is_generated_code(p)}
+ return src_paths
+
+
+def _is_generated_code(path):
+ return 'soong/.intermediates' in path
+
+
+def _generate_java_coverage_report(report_jars, src_paths, results_dir,
+ mod_info):
+ build_top = os.environ.get(constants.ANDROID_BUILD_TOP)
+ out_dir = os.path.join(results_dir, 'java_coverage')
+ jacoco_files = atest_utils.find_files(results_dir, '*.ec')
+
+ os.mkdir(out_dir)
+ jacoco_lcov = mod_info.get_module_info('jacoco_to_lcov_converter')
+ jacoco_lcov = os.path.join(build_top, jacoco_lcov['installed'][0])
+ lcov_reports = []
+
+ for name, classfiles in report_jars.items():
+ dest = f'{out_dir}/{name}.info'
+ cmd = [jacoco_lcov, '-o', dest]
+ for classfile in classfiles:
+ cmd.append('-classfiles')
+ cmd.append(str(classfile))
+ for src_path in src_paths:
+ cmd.append('-sourcepath')
+ cmd.append(src_path)
+ cmd.extend(jacoco_files)
+ try:
+ subprocess.run(cmd, check=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as err:
+ atest_utils.colorful_print(
+ f'Failed to generate coverage for {name}:', constants.RED)
+ logging.exception(err.stdout)
+ atest_utils.colorful_print(f'Coverage for {name} written to {dest}.',
+ constants.GREEN)
+ lcov_reports.append(dest)
+
+ _generate_lcov_report(out_dir, lcov_reports, build_top)
+
+
+def _generate_native_coverage_report(unstripped_native_binaries, results_dir):
+ build_top = os.environ.get(constants.ANDROID_BUILD_TOP)
+ out_dir = os.path.join(results_dir, 'native_coverage')
+ profdata_files = atest_utils.find_files(results_dir, '*.profdata')
+
+ os.mkdir(out_dir)
+ cmd = ['llvm-cov',
+ 'show',
+ '-format=html',
+ f'-output-dir={out_dir}',
+ f'-path-equivalence=/proc/self/cwd,{build_top}']
+ for profdata in profdata_files:
+ cmd.append('--instr-profile')
+ cmd.append(profdata)
+ for binary in unstripped_native_binaries:
+ # Exclude .rsp files. These are files containing the command line used
+ # to generate the unstripped binaries, but are stored in the same
+ # directory as the actual output binary.
+ if not binary.match('*.rsp'):
+ cmd.append(f'--object={str(binary)}')
+
+ try:
+ subprocess.run(cmd, check=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ atest_utils.colorful_print(f'Native coverage written to {out_dir}.',
+ constants.GREEN)
+ except subprocess.CalledProcessError as err:
+ atest_utils.colorful_print('Failed to generate native code coverage.',
+ constants.RED)
+ logging.exception(err.stdout)
+
+
+def _generate_lcov_report(out_dir, reports, root_dir=None):
+ cmd = ['genhtml', '-q', '-o', out_dir]
+ if root_dir:
+ cmd.extend(['-p', root_dir])
+ cmd.extend(reports)
+ try:
+ subprocess.run(cmd, check=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ atest_utils.colorful_print(
+ f'Code coverage report written to {out_dir}.',
+ constants.GREEN)
+ atest_utils.colorful_print(
+ f'To open, Ctrl+Click on file://{out_dir}/index.html',
+ constants.GREEN)
+ except subprocess.CalledProcessError as err:
+ atest_utils.colorful_print('Failed to generate HTML coverage report.',
+ constants.RED)
+ logging.exception(err.stdout)
+ except FileNotFoundError:
+ atest_utils.colorful_print('genhtml is not on the $PATH.',
+ constants.RED)
+ atest_utils.colorful_print(
+ 'Run `sudo apt-get install lcov -y` to install this tool.',
+ constants.RED)
diff --git a/atest/logstorage/__init__.py b/atest/logstorage/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/atest/logstorage/__init__.py
+++ /dev/null
diff --git a/atest/logstorage/atest_gcp_utils.py b/atest/logstorage/atest_gcp_utils.py
index a1cc684..6202511 100644
--- a/atest/logstorage/atest_gcp_utils.py
+++ b/atest/logstorage/atest_gcp_utils.py
@@ -49,9 +49,9 @@
except ModuleNotFoundError as e:
logging.debug('Import error due to %s', e)
-from logstorage import logstorage_utils
-import atest_utils
-import constants
+from atest.logstorage import logstorage_utils
+from atest import atest_utils
+from atest import constants
class RunFlowFlags():
"""Flags for oauth2client.tools.run_flow."""
@@ -172,7 +172,8 @@
flow=flow, storage=storage, flags=flags)
return credentials
- def _get_sso_access_token(self):
+ @staticmethod
+ def _get_sso_access_token():
"""Use stubby command line to exchange corp sso to a scoped oauth
token.
@@ -244,10 +245,11 @@
if os.path.exists(not_upload_file):
os.remove(not_upload_file)
else:
- if extra_args.get(constants.DISABLE_UPLOAD_RESULT):
- if os.path.exists(creds_f):
- os.remove(creds_f)
- Path(not_upload_file).touch()
+ # TODO(b/275113186): Change back to default upload after AnTS upload
+ # extremely slow problem be solved.
+ if os.path.exists(creds_f):
+ os.remove(creds_f)
+ Path(not_upload_file).touch()
# If DO_NOT_UPLOAD not exist, ATest will try to get the credential
# from the file.
@@ -257,11 +259,12 @@
client_secret=constants.CLIENT_SECRET,
user_agent='atest').get_credential_with_auth_flow(creds_f)
+ # TODO(b/275113186): Change back the warning message after the bug is solved.
atest_utils.colorful_print(
- 'WARNING: In order to allow uploading local test results to AnTS, it '
- 'is recommended you add the option --request-upload-result. This option'
- ' only needs to set once and takes effect until --disable-upload-result'
- ' is set.', constants.YELLOW)
+ 'WARNING: AnTS upload disabled by default due to upload slowly'
+ '(b/275113186). If you still want to upload test result to AnTS, '
+ 'please add the option --request-upload-result manually.',
+ constants.YELLOW)
return None
def _prepare_data(creds):
diff --git a/atest/logstorage/atest_gcp_utils_unittest.py b/atest/logstorage/atest_gcp_utils_unittest.py
index 89975bd..4cd02b3 100644
--- a/atest/logstorage/atest_gcp_utils_unittest.py
+++ b/atest/logstorage/atest_gcp_utils_unittest.py
@@ -23,9 +23,9 @@
from pathlib import Path
from unittest import mock
-import constants
+from atest import constants
-from logstorage import atest_gcp_utils
+from atest.logstorage import atest_gcp_utils
class AtestGcpUtilsUnittests(unittest.TestCase):
"""Unit tests for atest_gcp_utils.py"""
@@ -135,5 +135,7 @@
os.remove(not_upload_file)
atest_gcp_utils.fetch_credential(tmp_folder, dict())
- self.assertEqual(1, mock_get_credential_with_auth_flow.call_count)
- self.assertFalse(os.path.exists(not_upload_file))
+ # TODO(b/275113186): Change back to assertEqual 1 and assertFalse after
+ # switching back to not uploading by default.
+ self.assertEqual(0, mock_get_credential_with_auth_flow.call_count)
+ self.assertTrue(os.path.exists(not_upload_file))
diff --git a/atest/logstorage/logstorage_utils.py b/atest/logstorage/logstorage_utils.py
index 5f056f2..07b305b 100644
--- a/atest/logstorage/logstorage_utils.py
+++ b/atest/logstorage/logstorage_utils.py
@@ -51,8 +51,8 @@
except ImportError as e:
logging.debug('Import error due to: %s', e)
-import constants
-import metrics
+from atest import constants
+from atest import metrics
class BuildClient:
diff --git a/atest/metrics/__init__.py b/atest/metrics/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/atest/metrics/__init__.py
+++ /dev/null
diff --git a/atest/metrics/clearcut_client.py b/atest/metrics/clearcut_client.py
index 4987c17..199a111 100644
--- a/atest/metrics/clearcut_client.py
+++ b/atest/metrics/clearcut_client.py
@@ -24,6 +24,7 @@
"""
import logging
+import ssl
import threading
import time
@@ -39,7 +40,7 @@
from urllib2 import HTTPError
from urllib2 import URLError
-from proto import clientanalytics_pb2
+from atest.proto import clientanalytics_pb2
_CLEARCUT_PROD_URL = 'https://play.googleapis.com/log'
_DEFAULT_BUFFER_SIZE = 100 # Maximum number of events to be buffered.
@@ -150,6 +151,7 @@
self._send_to_clearcut(log_request.SerializeToString())
#pylint: disable=broad-except
+ #pylint: disable=protected-access
def _send_to_clearcut(self, data):
"""Sends a POST request with data as the body.
@@ -158,6 +160,7 @@
"""
request = Request(self._clearcut_url, data=data)
try:
+ ssl._create_default_https_context = ssl._create_unverified_context
response = urlopen(request)
msg = response.read()
logging.debug('LogRequest successfully sent to Clearcut.')
@@ -169,9 +172,9 @@
/ 1000 + time.time())
logging.debug('LogResponse: %s', log_response)
except HTTPError as e:
- logging.debug('Failed to push events to Clearcut. Error code: %d',
+ logging.warning('Failed to push events to Clearcut. Error code: %d',
e.code)
- except URLError:
- logging.debug('Failed to push events to Clearcut.')
+ except URLError as e:
+ logging.warning('Failed to push events to Clearcut. Reason: %s', e)
except Exception as e:
- logging.debug(e)
+ logging.warning(e)
diff --git a/atest/metrics/metrics.py b/atest/metrics/metrics.py
index f6446a6..c8962a9 100644
--- a/atest/metrics/metrics.py
+++ b/atest/metrics/metrics.py
@@ -16,9 +16,9 @@
Metrics class.
"""
-import constants
+from atest import constants
-from . import metrics_base
+from atest.metrics import metrics_base
class AtestStartEvent(metrics_base.MetricsBase):
"""
diff --git a/atest/metrics/metrics_base.py b/atest/metrics/metrics_base.py
index a058c8c..35ff961 100644
--- a/atest/metrics/metrics_base.py
+++ b/atest/metrics/metrics_base.py
@@ -25,14 +25,14 @@
import time
import uuid
-import asuite_metrics
-import constants
+from atest import asuite_metrics
+from atest import constants
-from proto import clientanalytics_pb2
-from proto import external_user_log_pb2
-from proto import internal_user_log_pb2
+from atest.proto import clientanalytics_pb2
+from atest.proto import external_user_log_pb2
+from atest.proto import internal_user_log_pb2
-from . import clearcut_client
+from atest.metrics import clearcut_client
INTERNAL_USER = 0
EXTERNAL_USER = 1
@@ -78,19 +78,10 @@
Returns:
INTERNAL_USER if user is internal, EXTERNAL_USER otherwise.
"""
- try:
- output = subprocess.check_output(
- ['git', 'config', '--get', 'user.email'], universal_newlines=True)
- if output and output.strip().endswith(constants.INTERNAL_EMAIL):
- return INTERNAL_USER
- except OSError:
- # OSError can be raised when running atest_unittests on a host
- # without git being set up.
- logging.debug('Unable to determine if this is an external run, git is '
- 'not found.')
- except subprocess.CalledProcessError:
- logging.debug('Unable to determine if this is an external run, email '
- 'is not found in git config.')
+ email = get_user_email()
+ if email.endswith(constants.INTERNAL_EMAIL):
+ return INTERNAL_USER
+
try:
hostname = socket.getfqdn()
if (hostname and
diff --git a/atest/metrics/metrics_utils.py b/atest/metrics/metrics_utils.py
index a43b8f6..a06c5e5 100644
--- a/atest/metrics/metrics_utils.py
+++ b/atest/metrics/metrics_utils.py
@@ -22,8 +22,16 @@
import time
import traceback
-from . import metrics
-from . import metrics_base
+from atest.metrics import metrics
+from atest.metrics import metrics_base
+
+CONTENT_LICENSES_URL = 'https://source.android.com/setup/start/licenses'
+CONTRIBUTOR_AGREEMENT_URL = {
+ 'INTERNAL': 'https://cla.developers.google.com/',
+ 'EXTERNAL': 'https://opensource.google.com/docs/cla/'
+}
+PRIVACY_POLICY_URL = 'https://policies.google.com/privacy'
+TERMS_SERVICE_URL = 'https://policies.google.com/terms'
def static_var(varname, value):
@@ -56,7 +64,7 @@
resolution.
Args:
- dur_time_sec: The time in seconds as a floating point number.
+ diff_time_sec: The time in seconds as a floating point number.
Returns:
A dict of Duration.
@@ -97,7 +105,7 @@
duration=convert_duration(time.time()-get_start_time()),
exit_code=exit_code,
stacktrace=stacktrace,
- logs=logs)
+ logs=str(logs))
# pylint: disable=no-member
if clearcut:
clearcut.flush_events()
@@ -126,3 +134,31 @@
test_references=test_references,
cwd=cwd,
os=operating_system)
+
+
+def print_data_collection_notice(colorful=True):
+ """Print the data collection notice."""
+ red = '31m'
+ green = '32m'
+ start = '\033[1;'
+ end = '\033[0m'
+ delimiter = '=' * 18
+ anonymous = ''
+ user_type = 'INTERNAL'
+ if metrics_base.get_user_type() == metrics_base.EXTERNAL_USER:
+ anonymous = ' anonymous'
+ user_type = 'EXTERNAL'
+ notice = (' We collect%s usage statistics in accordance with our Content '
+ 'Licenses (%s), Contributor License Agreement (%s), Privacy '
+ 'Policy (%s) and Terms of Service (%s).'
+ ) % (anonymous,
+ CONTENT_LICENSES_URL,
+ CONTRIBUTOR_AGREEMENT_URL[user_type],
+ PRIVACY_POLICY_URL,
+ TERMS_SERVICE_URL)
+ if colorful:
+ print(f'\n{delimiter}\n{start}{red}Notice:{end}')
+ print(f'{start}{green} {notice}{end}\n{delimiter}\n')
+ else:
+ print(f'\n{delimiter}\nNotice:')
+ print(f' {notice}\n{delimiter}\n')
diff --git a/atest/metrics/metrics_utils_unittest.py b/atest/metrics/metrics_utils_unittest.py
new file mode 100755
index 0000000..d73ae7d
--- /dev/null
+++ b/atest/metrics/metrics_utils_unittest.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python3
+#
+# Copyright 2022, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittests for metrics_utils."""
+
+# pylint: disable=invalid-name, line-too-long
+
+import sys
+import unittest
+
+from io import StringIO
+from unittest import mock
+
+from atest.metrics import metrics_utils
+
+class MetricsUtilsUnittests(unittest.TestCase):
+ """Unit tests for metrics_utils.py"""
+ def setUp(self) -> None:
+ self.maxDiff = None
+
+ @mock.patch('atest.metrics.metrics_base.get_user_type')
+ def test_print_data_collection_notice(self, mock_get_user_type):
+ """Test method print_data_collection_notice."""
+
+ # get_user_type return 1(external).
+ mock_get_user_type.return_value = 1
+ notice_str = ('\n==================\nNotice:\n'
+ ' We collect anonymous usage statistics '
+ 'in accordance with our '
+ 'Content Licenses (https://source.android.com/setup/start/licenses), '
+ 'Contributor License Agreement (https://opensource.google.com/docs/cla/), '
+ 'Privacy Policy (https://policies.google.com/privacy) and '
+ 'Terms of Service (https://policies.google.com/terms).'
+ '\n==================\n\n')
+ capture_output = StringIO()
+ sys.stdout = capture_output
+ metrics_utils.print_data_collection_notice(colorful=False)
+ sys.stdout = sys.__stdout__
+ self.assertEqual(capture_output.getvalue(), notice_str)
+
+ # get_user_type return 0(internal).
+ red = '31m'
+ green = '32m'
+ start = '\033[1;'
+ end = '\033[0m'
+ mock_get_user_type.return_value = 0
+ notice_str = (f'\n==================\n{start}{red}Notice:{end}\n'
+ f'{start}{green} We collect usage statistics '
+ f'in accordance with our '
+ f'Content Licenses (https://source.android.com/setup/start/licenses), '
+ f'Contributor License Agreement (https://cla.developers.google.com/), '
+ f'Privacy Policy (https://policies.google.com/privacy) and '
+ f'Terms of Service (https://policies.google.com/terms).{end}'
+ f'\n==================\n\n')
+ capture_output = StringIO()
+ sys.stdout = capture_output
+ metrics_utils.print_data_collection_notice()
+ sys.stdout = sys.__stdout__
+ self.assertEqual(capture_output.getvalue(), notice_str)
diff --git a/atest/module_info.py b/atest/module_info.py
index bd5ebd6..31dfd78 100644
--- a/atest/module_info.py
+++ b/atest/module_info.py
@@ -16,25 +16,26 @@
Module Info class used to hold cached module-info.json.
"""
-# pylint: disable=line-too-long
+# pylint: disable=line-too-long,too-many-lines
import json
import logging
import os
import pickle
+import re
import shutil
-import sys
import tempfile
import time
from pathlib import Path
-from typing import Any, Dict
+from typing import Any, Dict, List, Set
-import atest_utils
-import constants
+from atest import atest_utils
+from atest import constants
-from atest_enum import DetectType, ExitCode
-from metrics import metrics
+from atest.atest_enum import DetectType
+from atest.metrics import metrics
+
# JSON file generated by build system that lists all buildable targets.
_MODULE_INFO = 'module-info.json'
@@ -53,7 +54,12 @@
class ModuleInfo:
"""Class that offers fast/easy lookup for Module related details."""
- def __init__(self, force_build=False, module_file=None, index_dir=None):
+ def __init__(
+ self,
+ force_build=False,
+ module_file=None,
+ index_dir=None,
+ no_generate=False):
"""Initialize the ModuleInfo object.
Load up the module-info.json file and initialize the helper vars.
@@ -87,18 +93,27 @@
module_info file regardless if it's created or not.
module_file: String of path to file to load up. Used for testing.
index_dir: String of path to store testable module index and md5.
+ no_generate: Boolean to indicate if we should populate module info
+ from the soong artifacts; setting to true will
+ leave module info empty.
"""
+ # TODO(b/263199608): Refactor the ModuleInfo constructor.
+ # The module-info constructor does too much. We should never be doing
+ # real work in a constructor and should only use it to inject
+ # dependencies.
+
# force_build could be from "-m" or smart_build(build files change).
self.force_build = force_build
# update_merge_info flag will merge dep files only when any of them have
# changed even force_build == True.
self.update_merge_info = False
+ self.roboleaf_tests = {}
+
# Index and checksum files that will be used.
- if not index_dir:
- index_dir = Path(
- os.getenv(constants.ANDROID_HOST_OUT,
- tempfile.TemporaryDirectory().name)).joinpath('indexes')
- index_dir = Path(index_dir)
+ index_dir = (
+ Path(index_dir) if index_dir else
+ Path(os.getenv(constants.ANDROID_HOST_OUT)).joinpath('indexes')
+ )
if not index_dir.is_dir():
index_dir.mkdir(parents=True)
self.module_index = index_dir.joinpath(constants.MODULE_INDEX)
@@ -113,6 +128,11 @@
os.getenv(constants.ANDROID_PRODUCT_OUT, '')).joinpath(_MERGED_INFO)
self.mod_info_file_path = Path(module_file) if module_file else None
+
+ if no_generate:
+ self.name_to_module_info = {}
+ return
+
module_info_target, name_to_module_info = self._load_module_info_file(
module_file)
self.name_to_module_info = name_to_module_info
@@ -158,19 +178,8 @@
module_file_path = os.path.join(
os.environ.get(constants.ANDROID_PRODUCT_OUT), _MODULE_INFO)
module_info_target = module_file_path
- # Make sure module-info exist and could be load properly.
- if not atest_utils.is_valid_json_file(module_file_path) or force_build:
- logging.debug('Generating %s - this is required for '
- 'initial runs or forced rebuilds.', _MODULE_INFO)
- build_start = time.time()
- if not atest_utils.build([module_info_target],
- verbose=logging.getLogger().isEnabledFor(
- logging.DEBUG)):
- sys.exit(ExitCode.BUILD_FAILURE)
- build_duration = time.time() - build_start
- metrics.LocalDetectEvent(
- detect_type=DetectType.ONLY_BUILD_MODULE_INFO,
- result=int(build_duration))
+ if force_build:
+ atest_utils.build_module_info_target(module_info_target)
return module_info_target, module_file_path
def _load_module_info_file(self, module_file):
@@ -209,7 +218,8 @@
# module_info_target stays None.
module_info_target = None
file_path = module_file
- previous_checksum = self._get_module_info_checksums()
+ previous_checksum = atest_utils.load_json_safely(
+ self.module_info_checksum)
if not file_path:
module_info_target, file_path = self._discover_mod_file_and_target(
self.force_build)
@@ -217,38 +227,36 @@
# Even undergone a rebuild after _discover_mod_file_and_target(), merge
# atest_merged_dep.json only when module_deps_infos actually change so
# that Atest can decrease disk I/O and ensure data accuracy at all.
- module_deps_infos = [file_path, self.java_dep_path, self.cc_dep_path]
- self._save_module_info_checksum(module_deps_infos)
self.update_merge_info = self.need_update_merged_file(previous_checksum)
+ start = time.time()
if self.update_merge_info:
# Load the $ANDROID_PRODUCT_OUT/module-info.json for merging.
- with open(file_path) as module_info_json:
- mod_info = self._merge_build_system_infos(
- json.load(module_info_json))
+ module_info_json = atest_utils.load_json_safely(file_path)
+ if Path(file_path).name == _MODULE_INFO and not module_info_json:
+ # Rebuild module-info.json when it has invalid format. However,
+ # if the file_path doesn't end with module-info.json, it could
+ # be from unit tests and won't trigger rebuild.
+ atest_utils.build_module_info_target(module_info_target)
+ start = time.time()
+ module_info_json = atest_utils.load_json_safely(file_path)
+ mod_info = self._merge_build_system_infos(module_info_json)
+ duration = time.time() - start
+ logging.debug('Merging module info took %ss', duration)
+ metrics.LocalDetectEvent(
+ detect_type=DetectType.MODULE_MERGE_MS, result=int(duration*1000))
else:
# Load $ANDROID_PRODUCT_OUT/atest_merged_dep.json directly.
- with open(self.merged_dep_path) as merged_info_json:
+ with open(self.merged_dep_path, encoding='utf-8') as merged_info_json:
mod_info = json.load(merged_info_json)
+ duration = time.time() - start
+ logging.debug('Loading module info took %ss', duration)
+ metrics.LocalDetectEvent(
+ detect_type=DetectType.MODULE_LOAD_MS, result=int(duration*1000))
_add_missing_variant_modules(mod_info)
logging.debug('Loading %s as module-info.', self.merged_dep_path)
return module_info_target, mod_info
- def _get_module_info_checksums(self):
- """Load the module-info.md5 and return the content.
-
- Returns:
- A dict of filename and checksum.
- """
- if os.path.exists(self.module_info_checksum):
- with open(self.module_info_checksum) as cache:
- try:
- content = json.load(cache)
- return content
- except json.JSONDecodeError:
- pass
- return {}
-
- def _save_module_info_checksum(self, filenames):
+ def _save_module_info_checksum(self):
"""Dump the checksum of essential module info files.
* module-info.json
* module_bp_cc_deps.json
@@ -257,7 +265,10 @@
dirname = Path(self.module_info_checksum).parent
if not dirname.is_dir():
dirname.mkdir(parents=True)
- atest_utils.save_md5(filenames, self.module_info_checksum)
+ atest_utils.save_md5([
+ self.mod_info_file_path,
+ self.java_dep_path,
+ self.cc_dep_path], self.module_info_checksum)
@staticmethod
def _get_path_to_module_info(name_to_module_info):
@@ -293,6 +304,7 @@
logging.debug(r'Indexing testable modules... '
r'(This is required whenever module-info.json '
r'was rebuilt.)')
+ Path(self.module_index).parent.mkdir(parents=True, exist_ok=True)
with open(self.module_index, 'wb') as cache:
try:
pickle.dump(content, cache, protocol=2)
@@ -330,7 +342,13 @@
def is_module(self, name):
"""Return True if name is a module, False otherwise."""
- if self.get_module_info(name):
+ info = self.get_module_info(name)
+ # From aosp/2293302 it started merging all modules' dependency in bp
+ # even when the module is not exposed to make, and those modules cannot
+ # be treated as a build target using m. Only treat input name as module
+ # if it also has the module_name attribute which means it could be a
+ # build target for m.
+ if info and info.get(constants.MODULE_NAME):
return True
return False
@@ -367,10 +385,10 @@
Returns:
True if it exists in mod_info, False otherwise.
"""
- if mod_info:
- return suite in mod_info.get(
- constants.MODULE_COMPATIBILITY_SUITES, [])
- return []
+ if not isinstance(mod_info, dict):
+ return False
+ return suite in mod_info.get(
+ constants.MODULE_COMPATIBILITY_SUITES, [])
def get_testable_modules(self, suite=None):
"""Return the testable modules of the given suite name.
@@ -416,55 +434,57 @@
result=int(duration))
return modules
- def is_testable_module(self, mod_info):
+ def is_tradefed_testable_module(self, info: Dict[str, Any]) -> bool:
+ """Check whether the module is a Tradefed executable test."""
+ if not info:
+ return False
+ if not info.get(constants.MODULE_INSTALLED, []):
+ return False
+ return self.has_test_config(info)
+
+ def is_testable_module(self, info: Dict[str, Any]) -> bool:
"""Check if module is something we can test.
A module is testable if:
- - it's installed, or
+ - it's a tradefed testable module, or
- it's a robolectric module (or shares path with one).
Args:
- mod_info: Dict of module info to check.
+ info: Dict of module info to check.
Returns:
True if we can test this module, False otherwise.
"""
- if not mod_info:
+ if not info:
return False
- if mod_info.get(constants.MODULE_INSTALLED) and self.has_test_config(mod_info):
+ if self.is_tradefed_testable_module(info):
return True
- if self.is_robolectric_test(mod_info.get(constants.MODULE_NAME)):
+ if self.is_legacy_robolectric_test(info.get(constants.MODULE_NAME)):
return True
return False
- def has_test_config(self, mod_info):
+ def has_test_config(self, info: Dict[str, Any]) -> bool:
"""Validate if this module has a test config.
A module can have a test config in the following manner:
- - AndroidTest.xml at the module path.
- test_config be set in module-info.json.
- Auto-generated config via the auto_test_config key
in module-info.json.
Args:
- mod_info: Dict of module info to check.
+ info: Dict of module info to check.
Returns:
True if this module has a test config, False otherwise.
"""
- # Check if test_config in module-info is set.
- for test_config in mod_info.get(constants.MODULE_TEST_CONFIG, []):
- if os.path.isfile(os.path.join(self.root_dir, test_config)):
- return True
- # Check for AndroidTest.xml at the module path.
- for path in mod_info.get(constants.MODULE_PATH, []):
- if os.path.isfile(os.path.join(self.root_dir, path,
- constants.MODULE_CONFIG)):
- return True
- # Check if the module has an auto-generated config.
- return self.is_auto_gen_test_config(mod_info.get(constants.MODULE_NAME))
+ return bool(info.get(constants.MODULE_TEST_CONFIG, []) or
+ info.get('auto_test_config', []))
- def get_robolectric_test_name(self, module_name):
+ def is_legacy_robolectric_test(self, module_name: str) -> bool:
+ """Return whether the module_name is a legacy Robolectric test"""
+ return bool(self.get_robolectric_test_name(module_name))
+
+ def get_robolectric_test_name(self, module_name: str) -> str:
"""Returns runnable robolectric module name.
This method is for legacy robolectric tests and returns one of associated
@@ -482,16 +502,23 @@
String of the first-matched associated module that belongs to the
actual robolectric module, None if nothing has been found.
"""
- module_name_info = self.get_module_info(module_name)
- if not module_name_info:
- return None
- module_paths = module_name_info.get(constants.MODULE_PATH, [])
- if module_paths:
- for mod in self.get_module_names(module_paths[0]):
- mod_info = self.get_module_info(mod)
- if self.is_robolectric_module(mod_info):
- return mod
- return None
+ info = self.get_module_info(module_name) or {}
+ module_paths = info.get(constants.MODULE_PATH, [])
+ if not module_paths:
+ return ''
+ filtered_module_names = [
+ name
+ for name in self.get_module_names(module_paths[0])
+ if name.startswith("Run")
+ ]
+ return next(
+ (
+ name
+ for name in filtered_module_names
+ if self.is_legacy_robolectric_class(self.get_module_info(name))
+ ),
+ '',
+ )
def is_robolectric_test(self, module_name):
"""Check if the given module is a robolectric test.
@@ -524,8 +551,10 @@
To determine whether the test is a modern/legacy robolectric test:
1. Traverse all modules share the module path. If one of the
modules has a ROBOLECTRIC class, it is a robolectric test.
- 2. If found an Android.bp in that path, it's a modern one, otherwise
- it's a legacy test and will go to the build route.
+ 2. If 'robolectric-test' is in the compatibility_suites, it's a
+ modern one, otherwise it's a legacy test. This is accurate since
+ aosp/2308586 already set the test suite of `robolectric-test`
+ for all `modern` Robolectric tests in Soong.
Args:
module_name: String of module to check.
@@ -535,28 +564,110 @@
1: a modern robolectric test(defined in Android.bp)
2: a legacy robolectric test(defined in Android.mk)
"""
- not_a_robo_test = 0
- module_name_info = self.get_module_info(module_name)
- if not module_name_info:
- return not_a_robo_test
- mod_path = module_name_info.get(constants.MODULE_PATH, [])
- if mod_path:
- # Check1: If the associated modules are "ROBOLECTRIC".
- is_a_robotest = False
- modules_in_path = self.get_module_names(mod_path[0])
- for mod in modules_in_path:
- mod_info = self.get_module_info(mod)
- if self.is_robolectric_module(mod_info):
- is_a_robotest = True
- break
- if not is_a_robotest:
- return not_a_robo_test
- # Check 2: If found Android.bp in path, call it a modern test.
- bpfile = os.path.join(self.root_dir, mod_path[0], 'Android.bp')
- if os.path.isfile(bpfile):
- return constants.ROBOTYPE_MODERN
+ info = self.get_module_info(module_name)
+ if not info:
+ return 0
+ # Some Modern mode Robolectric tests have related modules compliant
+ # with the Legacy Robolectric test. In this case, the Modern mode
+ # Robolectric tests should take priority over Legacy mode.
+ if self.is_modern_robolectric_test(info):
+ return constants.ROBOTYPE_MODERN
+ if self.is_legacy_robolectric_test(module_name):
return constants.ROBOTYPE_LEGACY
- return not_a_robo_test
+ return 0
+
+ def get_instrumentation_target_apps(self, module_name: str) -> Dict:
+ """Return target APKs of an instrumentation test.
+
+ Returns:
+ A dict of target module and target APK(s). e.g.
+ {"FooService": {"/path/to/the/FooService.apk"}}
+ """
+ # 1. Determine the actual manifest filename from an Android.bp(if any)
+ manifest = self.get_filepath_from_module(module_name,
+ 'AndroidManifest.xml')
+ bpfile = self.get_filepath_from_module(module_name, 'Android.bp')
+ if bpfile.is_file():
+ bp_info = atest_utils.get_bp_content(bpfile, 'android_test')
+ if not bp_info or not bp_info.get(module_name):
+ return {}
+ manifest = self.get_filepath_from_module(
+ module_name,
+ bp_info.get(module_name).get('manifest'))
+ xml_info = atest_utils.get_manifest_info(manifest)
+ # 2. Translate package name to a module name.
+ package = xml_info.get('package')
+ target_package = xml_info.get('target_package')
+ # Ensure it's an instrumentation test (excluding self-instrumented)
+ if target_package and package != target_package:
+ logging.debug('Found %s an instrumentation test.', module_name)
+ metrics.LocalDetectEvent(
+ detect_type=DetectType.FOUND_INSTRUMENTATION_TEST, result=1)
+ target_module = self.get_target_module_by_pkg(
+ package=target_package,
+ search_from=manifest.parent)
+ if target_module:
+ return self.get_artifact_map(target_module)
+ return {}
+
+ # pylint: disable=anomalous-backslash-in-string
+ def get_target_module_by_pkg(self, package: str, search_from: Path) -> str:
+ """Translate package name to the target module name.
+
+ This method is dedicated to determine the target module by translating
+ a package name.
+
+ Phase 1: Find out possible manifest files among parent directories.
+ Phase 2. Look for the defined package fits the given name, and ensure
+ it is not a persistent app.
+ Phase 3: Translate the manifest path to possible modules. A valid module
+ must fulfill:
+ 1. The 'class' type must be ['APPS'].
+ 2. It is not a Robolectric test.
+
+ Returns:
+ A string of module name.
+ """
+ xmls = []
+ for pth in search_from.parents:
+ if pth == Path(self.root_dir):
+ break
+ for name in os.listdir(pth):
+ if pth.joinpath(name).is_file():
+ match = re.match('.*AndroidManifest.*\.xml$', name)
+ if match:
+ xmls.append(os.path.join(pth, name))
+ possible_modules = []
+ for xml in xmls:
+ rel_dir = str(Path(xml).relative_to(self.root_dir).parent)
+ logging.debug('Looking for package "%s" in %s...', package, xml)
+ xml_info = atest_utils.get_manifest_info(xml)
+ if xml_info.get('package') == package:
+ if xml_info.get('persistent'):
+ logging.debug('%s is a persistent app.', package)
+ continue
+ for _m in self.path_to_module_info.get(rel_dir):
+ possible_modules.append(_m)
+ if possible_modules:
+ for mod in possible_modules:
+ name = mod.get('module_name')
+ if (mod.get('class') == ['APPS'] and
+ not self.is_robolectric_test(name)):
+ return name
+ return ''
+
+ def get_artifact_map(self, module_name: str) -> Dict:
+ """Get the installed APK path of the given module."""
+ target_mod_info = self.get_module_info(module_name)
+ artifact_map = {}
+ if target_mod_info:
+ apks = set()
+ artifacts = target_mod_info.get('installed')
+ for artifact in artifacts:
+ if Path(artifact).suffix == '.apk':
+ apks.add(os.path.join(self.root_dir, artifact))
+ artifact_map.update({module_name: apks})
+ return artifact_map
def is_auto_gen_test_config(self, module_name):
"""Check if the test config file will be generated automatically.
@@ -573,22 +684,24 @@
return auto_test_config and auto_test_config[0]
return False
- def is_robolectric_module(self, mod_info):
- """Check if a module is a robolectric module.
+ def is_legacy_robolectric_class(self, info: Dict[str, Any]) -> bool:
+ """Check if the class is `ROBOLECTRIC`
This method is for legacy robolectric tests that the associated modules
contain:
'class': ['ROBOLECTRIC']
Args:
- mod_info: ModuleInfo to check.
+ info: ModuleInfo to check.
Returns:
- True if module is a robolectric module, False otherwise.
+ True if the attribute class in mod_info is ROBOLECTRIC, False
+ otherwise.
"""
- if mod_info:
- return (mod_info.get(constants.MODULE_CLASS, [None])[0] ==
- constants.MODULE_CLASS_ROBOLECTRIC)
+ if info:
+ module_classes = info.get(constants.MODULE_CLASS, [])
+ return (module_classes and
+ module_classes[0] == constants.MODULE_CLASS_ROBOLECTRIC)
return False
def is_native_test(self, module_name):
@@ -604,30 +717,36 @@
return constants.MODULE_CLASS_NATIVE_TESTS in mod_info.get(
constants.MODULE_CLASS, [])
- def has_mainline_modules(self, module_name, mainline_modules):
+ def has_mainline_modules(self,
+ module_name: str, mainline_binaries: List[str]) -> bool:
"""Check if the mainline modules are in module-info.
Args:
module_name: A string of the module name.
- mainline_modules: A list of mainline modules.
+ mainline_binaries: A list of mainline module binaries.
Returns:
- True if mainline_modules is in module-info, False otherwise.
+ True if mainline_binaries is in module-info, False otherwise.
"""
mod_info = self.get_module_info(module_name)
# Check 'test_mainline_modules' attribute of the module-info.json.
- if mainline_modules in mod_info.get(constants.MODULE_MAINLINE_MODULES,
- []):
- return True
+ mm_in_mf = mod_info.get(constants.MODULE_MAINLINE_MODULES, [])
+ ml_modules_set = set(mainline_binaries)
+ if mm_in_mf:
+ return contains_same_mainline_modules(
+ ml_modules_set, set(mm_in_mf))
for test_config in mod_info.get(constants.MODULE_TEST_CONFIG, []):
# Check the value of 'mainline-param' in the test config.
if not self.is_auto_gen_test_config(module_name):
- return mainline_modules in atest_utils.get_mainline_param(
- os.path.join(self.root_dir, test_config))
+ return contains_same_mainline_modules(
+ ml_modules_set,
+ atest_utils.get_mainline_param(
+ os.path.join(self.root_dir, test_config)))
# Unable to verify mainline modules in an auto-gen test config.
logging.debug('%s is associated with an auto-generated test config.',
module_name)
return True
+ return False
def _merge_build_system_infos(self, name_to_module_info,
java_bp_info_path=None, cc_bp_info_path=None):
@@ -644,22 +763,19 @@
Returns:
Dict of updated name_to_module_info.
"""
- start = time.time()
# Merge _JAVA_DEP_INFO
if not java_bp_info_path:
java_bp_info_path = self.java_dep_path
- if atest_utils.is_valid_json_file(java_bp_info_path):
- with open(java_bp_info_path) as json_file:
- java_bp_infos = json.load(json_file)
- logging.debug('Merging Java build info: %s', java_bp_info_path)
- name_to_module_info = self._merge_soong_info(
- name_to_module_info, java_bp_infos)
+ java_bp_infos = atest_utils.load_json_safely(java_bp_info_path)
+ if java_bp_infos:
+ logging.debug('Merging Java build info: %s', java_bp_info_path)
+ name_to_module_info = self._merge_soong_info(
+ name_to_module_info, java_bp_infos)
# Merge _CC_DEP_INFO
if not cc_bp_info_path:
cc_bp_info_path = self.cc_dep_path
- if atest_utils.is_valid_json_file(cc_bp_info_path):
- with open(cc_bp_info_path) as json_file:
- cc_bp_infos = json.load(json_file)
+ cc_bp_infos = atest_utils.load_json_safely(cc_bp_info_path)
+ if cc_bp_infos:
logging.debug('Merging CC build info: %s', cc_bp_info_path)
# CC's dep json format is different with java.
# Below is the example content:
@@ -679,20 +795,20 @@
name_to_module_info, cc_bp_infos.get('modules', {}))
# If $ANDROID_PRODUCT_OUT was not created in pyfakefs, simply return it
# without dumping atest_merged_dep.json in real.
+
+ # Adds the key into module info as a unique ID.
+ for key, info in name_to_module_info.items():
+ info[constants.MODULE_INFO_ID] = key
+
if not self.merged_dep_path.parent.is_dir():
return name_to_module_info
# b/178559543 saving merged module info in a temp file and copying it to
# atest_merged_dep.json can eliminate the possibility of accessing it
# concurrently and resulting in invalid JSON format.
- temp_file = tempfile.NamedTemporaryFile()
- with open(temp_file.name, 'w') as _temp:
- json.dump(name_to_module_info, _temp, indent=0)
- shutil.copy(temp_file.name, self.merged_dep_path)
- temp_file.close()
- duration = time.time() - start
- logging.debug('Merging module info took %ss', duration)
- metrics.LocalDetectEvent(
- detect_type=DetectType.MODULE_MERGE, result=int(duration))
+ with tempfile.NamedTemporaryFile() as temp_file:
+ with open(temp_file.name, 'w', encoding='utf-8') as _temp:
+ json.dump(name_to_module_info, _temp, indent=0)
+ shutil.copy(temp_file.name, self.merged_dep_path)
return name_to_module_info
def _merge_soong_info(self, name_to_module_info, mod_bp_infos):
@@ -705,21 +821,29 @@
Returns:
Dict of updated name_to_module_info.
"""
- merge_items = [constants.MODULE_DEPENDENCIES, constants.MODULE_SRCS]
+ merge_items = [constants.MODULE_DEPENDENCIES, constants.MODULE_SRCS,
+ constants.MODULE_LIBS, constants.MODULE_STATIC_LIBS,
+ constants.MODULE_STATIC_DEPS, constants.MODULE_PATH]
for module_name, dep_info in mod_bp_infos.items():
- if name_to_module_info.get(module_name, None):
- mod_info = name_to_module_info.get(module_name)
- for merge_item in merge_items:
- dep_info_values = dep_info.get(merge_item, [])
- mod_info_values = mod_info.get(merge_item, [])
- mod_info_values.extend(dep_info_values)
- mod_info_values.sort()
- # deduplicate values just in case.
- mod_info_values = list(dict.fromkeys(mod_info_values))
- name_to_module_info[
- module_name][merge_item] = mod_info_values
+ mod_info = name_to_module_info.setdefault(module_name, {})
+ for merge_item in merge_items:
+ dep_info_values = dep_info.get(merge_item, [])
+ mod_info_values = mod_info.get(merge_item, [])
+ mod_info_values.extend(dep_info_values)
+ mod_info_values.sort()
+ # deduplicate values just in case.
+ mod_info_values = list(dict.fromkeys(mod_info_values))
+ name_to_module_info[
+ module_name][merge_item] = mod_info_values
return name_to_module_info
+ def get_filepath_from_module(self, module_name: str, filename: str) -> Path:
+ """Return absolute path of the given module and filename."""
+ mod_path = self.get_paths(module_name)
+ if mod_path:
+ return Path(self.root_dir).joinpath(mod_path[0], filename)
+ return Path()
+
def get_module_dependency(self, module_name, depend_on=None):
"""Get the dependency sets for input module.
@@ -781,7 +905,11 @@
Returns:
True if one of the scienarios reaches, False otherwise.
"""
- return (checksum != self._get_module_info_checksums() or
+ current_checksum = {str(name): atest_utils.md5sum(name) for name in [
+ self.mod_info_file_path,
+ self.java_dep_path,
+ self.cc_dep_path]}
+ return (checksum != current_checksum or
not Path(self.merged_dep_path).is_file())
def is_unit_test(self, mod_info):
@@ -791,21 +919,39 @@
mod_info: ModuleInfo to check.
Returns:
- True if if input module is unit test, False otherwise.
+ True if input module is unit test, False otherwise.
"""
return mod_info.get(constants.MODULE_IS_UNIT_TEST, '') == 'true'
- def is_host_unit_test(self, mod_info):
+ def is_host_unit_test(self, info: Dict[str, Any]) -> bool:
"""Return True if input module is host unit test, False otherwise.
Args:
+ info: ModuleInfo to check.
+
+ Returns:
+ True if input module is host unit test, False otherwise.
+ """
+ return self.is_tradefed_testable_module(info) and \
+ self.is_suite_in_compatibility_suites('host-unit-tests', info)
+
+ def is_modern_robolectric_test(self, info: Dict[str, Any]) -> bool:
+ """Return whether 'robolectric-tests' is in 'compatibility_suites'."""
+ return self.is_tradefed_testable_module(info) and \
+ self.is_robolectric_test_suite(info)
+
+ def is_robolectric_test_suite(self, mod_info) -> bool:
+ """Return True if 'robolectric-tests' in the compatibility_suites.
+
+ Args:
mod_info: ModuleInfo to check.
Returns:
- True if if input module is host unit test, False otherwise.
+ True if the 'robolectric-tests' is in the compatibility_suites,
+ False otherwise.
"""
- return self.is_suite_in_compatibility_suites(
- 'host-unit-tests', mod_info)
+ return self.is_suite_in_compatibility_suites('robolectric-tests',
+ mod_info)
def is_device_driven_test(self, mod_info):
"""Return True if input module is device driven test, False otherwise.
@@ -814,10 +960,25 @@
mod_info: ModuleInfo to check.
Returns:
- True if if input module is device driven test, False otherwise.
+ True if input module is device driven test, False otherwise.
"""
- return self.is_testable_module(mod_info) and 'DEVICE' in mod_info.get(
- constants.MODULE_SUPPORTED_VARIANTS, [])
+ if self.is_robolectric_test_suite(mod_info):
+ return False
+
+ return self.is_tradefed_testable_module(mod_info) and \
+ 'DEVICE' in mod_info.get(constants.MODULE_SUPPORTED_VARIANTS, [])
+
+ def is_host_driven_test(self, mod_info):
+ """Return True if input module is host driven test, False otherwise.
+
+ Args:
+ mod_info: ModuleInfo to check.
+
+ Returns:
+ True if input module is host driven test, False otherwise.
+ """
+ return self.is_tradefed_testable_module(mod_info) and \
+ 'HOST' in mod_info.get(constants.MODULE_SUPPORTED_VARIANTS, [])
def _any_module(self, _: Module) -> bool:
return True
@@ -848,9 +1009,46 @@
modules.append(mod_name)
return modules
+ def get_modules_by_path_in_srcs(self, path: str) -> Set:
+ """Get the module names that the given path belongs to (in 'srcs').
+
+ Args:
+ path: Relative path to ANDROID_BUILD_TOP of a file.
+
+ Returns:
+ A set of strings for matched module names; an empty set if nothing is found.
+ """
+ modules = set()
+ for _, mod_info in self.name_to_module_info.items():
+ if str(path) in mod_info.get(constants.MODULE_SRCS, []):
+ modules.add(mod_info.get(constants.MODULE_NAME))
+ return modules
+
+ def get_modules_by_include_deps(
+ self, deps: Set[str],
+ testable_module_only: bool = False) -> Set[str]:
+ """Get the matched module names for the input dependencies.
+
+ Args:
+ deps: A set of strings for dependencies.
+ testable_module_only: Option to only include testable modules.
+
+ Returns:
+ A set of matched module names for the input dependencies.
+ """
+ modules = set()
+
+ for mod_name in (self.get_testable_modules() if testable_module_only
+ else self.name_to_module_info.keys()):
+ mod_info = self.get_module_info(mod_name)
+ if mod_info and deps.intersection(
+ set(mod_info.get(constants.MODULE_DEPENDENCIES, []))):
+ modules.add(mod_info.get(constants.MODULE_NAME))
+ return modules
+
def _add_missing_variant_modules(name_to_module_info: Dict[str, Module]):
- missing_modules = dict()
+ missing_modules = {}
# Android's build system automatically adds a suffix for some build module
# variants. For example, a module-info entry for a module originally named
@@ -858,11 +1056,25 @@
# not be able to find. We add such entries if not already present so they
# can be looked up using their declared module name.
for mod_name, mod_info in name_to_module_info.items():
- declared_module_name = mod_info.get(constants.MODULE_NAME)
- if declared_module_name == mod_name:
- continue
+ declared_module_name = mod_info.get(constants.MODULE_NAME, mod_name)
if declared_module_name in name_to_module_info:
continue
missing_modules.setdefault(declared_module_name, mod_info)
name_to_module_info.update(missing_modules)
+
+def contains_same_mainline_modules(mainline_modules: Set[str], module_lists: Set[str]):
+ """Check whether the mainline modules listed on the command
+ line are the same set as those in the test configs.
+
+ Args:
+ mainline_modules: A set of mainline module names from the triggered test.
+ module_lists: A set of concatenated mainline module strings from test configs.
+
+ Returns:
+ True if the set of mainline modules from the triggered test is in the test configs.
+ """
+ for module_string in module_lists:
+ if mainline_modules == set(module_string.split('+')):
+ return True
+ return False
diff --git a/atest/module_info_unittest.py b/atest/module_info_unittest.py
index 4a19557..7afe4ad 100755
--- a/atest/module_info_unittest.py
+++ b/atest/module_info_unittest.py
@@ -16,8 +16,10 @@
"""Unittests for module_info."""
+# pylint: disable=invalid-name
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
+# pylint: disable=too-many-lines
import os
import shutil
@@ -27,10 +29,13 @@
from pathlib import Path
from unittest import mock
-import constants
-import module_info
-import unittest_utils
-import unittest_constants as uc
+# pylint: disable=import-error
+from pyfakefs import fake_filesystem_unittest
+
+from atest import constants
+from atest import module_info
+from atest import unittest_utils
+from atest import unittest_constants as uc
JSON_FILE_PATH = os.path.join(uc.TEST_DATA_DIR, uc.JSON_FILE)
CC_DEP_PATH = os.path.join(uc.TEST_DATA_DIR, uc.CC_DEP_FILE)
@@ -69,12 +74,14 @@
constants.MODULE_CLASS: ['random_class']}
NAME_TO_MODULE_INFO = {'random_name' : MODULE_INFO}
# Mocking path allows str only, use os.path instead of Path.
-BUILD_TOP_DIR = tempfile.TemporaryDirectory().name
+with tempfile.TemporaryDirectory() as temp_dir:
+ BUILD_TOP_DIR = temp_dir
SOONG_OUT_DIR = os.path.join(BUILD_TOP_DIR, 'out/soong')
PRODUCT_OUT_DIR = os.path.join(BUILD_TOP_DIR, 'out/target/product/vsoc_x86_64')
HOST_OUT_DIR = os.path.join(BUILD_TOP_DIR, 'out/host/linux-x86')
-#pylint: disable=protected-access
+# TODO: (b/263199608) Suppress too-many-public-methods after refactoring.
+#pylint: disable=protected-access, too-many-public-methods
class ModuleInfoUnittests(unittest.TestCase):
"""Unit tests for module_info.py"""
@@ -94,11 +101,16 @@
if self.merged_dep_path.is_file():
os.remove(self.merged_dep_path)
+ # TODO: (b/264015241) Stop mocking build variables.
+ # TODO: (b/263199608) Re-write the test after refactoring module-info.py
+ @mock.patch.object(module_info.ModuleInfo, 'need_update_merged_file')
@mock.patch('json.load', return_value={})
@mock.patch('builtins.open', new_callable=mock.mock_open)
@mock.patch('os.path.isfile', return_value=True)
- def test_load_mode_info_file_out_dir_handling(self, _isfile, _open, _json):
+ def test_load_mode_info_file_out_dir_handling(self, _isfile, _open, _json,
+ _merge):
"""Test _load_module_info_file out dir handling."""
+ _merge.return_value = False
# Test out default out dir is used.
build_top = '/path/to/top'
default_out_dir = os.path.join(build_top, 'out/dir/here')
@@ -133,8 +145,6 @@
self.assertEqual(custom_abs_out_dir_mod_targ,
mod_info.module_info_target)
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
@mock.patch.object(module_info.ModuleInfo, '_load_module_info_file')
def test_get_path_to_module_info(self, mock_load_module):
"""Test that we correctly create the path to module info dict."""
@@ -156,41 +166,33 @@
self.assertDictEqual(path_to_mod_info,
mod_info._get_path_to_module_info(mod_info_dict))
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
def test_is_module(self):
"""Test that we get the module when it's properly loaded."""
# Load up the test json file and check that module is in it
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
self.assertTrue(mod_info.is_module(EXPECTED_MOD_TARGET))
self.assertFalse(mod_info.is_module(UNEXPECTED_MOD_TARGET))
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
def test_get_path(self):
"""Test that we get the module path when it's properly loaded."""
# Load up the test json file and check that module is in it
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
self.assertEqual(mod_info.get_paths(EXPECTED_MOD_TARGET),
EXPECTED_MOD_TARGET_PATH)
self.assertEqual(mod_info.get_paths(MOD_NO_PATH), [])
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
def test_get_module_names(self):
"""test that we get the module name properly."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
self.assertEqual(mod_info.get_module_names(EXPECTED_MOD_TARGET_PATH[0]),
[EXPECTED_MOD_TARGET])
unittest_utils.assert_strict_equal(
self, mod_info.get_module_names(PATH_TO_MULT_MODULES),
MULT_MOODULES_WITH_SHARED_PATH)
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
def test_path_to_mod_info(self):
"""test that we get the module name properly."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
module_list = []
for path_to_mod_info in mod_info.path_to_module_info[PATH_TO_MULT_MODULES_WITH_MULTI_ARCH]:
module_list.append(path_to_mod_info.get(constants.MODULE_NAME))
@@ -198,11 +200,9 @@
TESTABLE_MODULES_WITH_SHARED_PATH.sort()
self.assertEqual(module_list, TESTABLE_MODULES_WITH_SHARED_PATH)
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
def test_is_suite_in_compatibility_suites(self):
"""Test is_suite_in_compatibility_suites."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
info = {'compatibility_suites': []}
self.assertFalse(mod_info.is_suite_in_compatibility_suites("cts", info))
info2 = {'compatibility_suites': ["cts"]}
@@ -213,6 +213,8 @@
self.assertTrue(mod_info.is_suite_in_compatibility_suites("vts10", info3))
self.assertFalse(mod_info.is_suite_in_compatibility_suites("ats", info3))
+ # TODO: (b/264015241) Stop mocking build variables.
+ # TODO: (b/263199608) Re-write the test after refactoring module-info.py
@mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR,
constants.ANDROID_HOST_OUT:HOST_OUT_DIR})
@@ -222,7 +224,7 @@
"""Test get_testable_modules."""
# 1. No modules.idx yet, will run _get_testable_modules()
mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
- self.assertEqual(len(mod_info.get_testable_modules()), 28)
+ self.assertEqual(len(mod_info.get_testable_modules()), 29)
# 2. read modules.idx.
expected_modules = {'dep_test_module', 'MainModule2', 'test_dep_level_1_1'}
@@ -239,106 +241,10 @@
@mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
- @mock.patch.object(module_info.ModuleInfo, 'has_test_config')
- @mock.patch.object(module_info.ModuleInfo, 'is_robolectric_test')
- def test_is_testable_module(self, mock_is_robo_test, mock_has_test_config):
- """Test is_testable_module."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
- mock_is_robo_test.return_value = False
- mock_has_test_config.return_value = True
- installed_module_info = {constants.MODULE_INSTALLED:
- uc.DEFAULT_INSTALL_PATH}
- non_installed_module_info = {constants.MODULE_NAME: 'rand_name'}
- # Empty mod_info or a non-installed module.
- self.assertFalse(mod_info.is_testable_module(non_installed_module_info))
- self.assertFalse(mod_info.is_testable_module({}))
- # Testable Module or is a robo module for non-installed module.
- self.assertTrue(mod_info.is_testable_module(installed_module_info))
- mock_has_test_config.return_value = False
- self.assertFalse(mod_info.is_testable_module(installed_module_info))
- mock_is_robo_test.return_value = True
- self.assertTrue(mod_info.is_testable_module(non_installed_module_info))
-
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
- @mock.patch.object(module_info.ModuleInfo, 'is_auto_gen_test_config')
- def test_has_test_config(self, mock_is_auto_gen):
- """Test has_test_config."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
- info = {constants.MODULE_PATH:[uc.TEST_DATA_DIR]}
- mock_is_auto_gen.return_value = True
- # Validate we see the config when it's auto-generated.
- self.assertTrue(mod_info.has_test_config(info))
- self.assertTrue(mod_info.has_test_config({}))
- # Validate when actual config exists and there's no auto-generated config.
- mock_is_auto_gen.return_value = False
- info = {constants.MODULE_PATH:[uc.TEST_DATA_DIR]}
- self.assertTrue(mod_info.has_test_config(info))
- self.assertFalse(mod_info.has_test_config({}))
- # Validate the case mod_info MODULE_TEST_CONFIG be set
- info2 = {constants.MODULE_PATH:[uc.TEST_CONFIG_DATA_DIR],
- constants.MODULE_TEST_CONFIG:[os.path.join(
- uc.TEST_CONFIG_DATA_DIR, "a.xml.data")]}
- self.assertTrue(mod_info.has_test_config(info2))
-
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
- @mock.patch.object(module_info.ModuleInfo, 'get_module_names')
- def test_get_robolectric_test_name(self, mock_get_module_names):
- """Test get_robolectric_test_name."""
- # Happy path testing, make sure we get the run robo target.
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
- mod_info.name_to_module_info = MOD_NAME_INFO_DICT
- mod_info.path_to_module_info = MOD_PATH_INFO_DICT
- mock_get_module_names.return_value = [ASSOCIATED_ROBO_MODULE, ROBO_MODULE]
- self.assertEqual(mod_info.get_robolectric_test_name(
- ROBO_MODULE), ASSOCIATED_ROBO_MODULE)
- # Let's also make sure we don't return anything when we're not supposed
- # to.
- mock_get_module_names.return_value = [ROBO_MODULE]
- self.assertEqual(mod_info.get_robolectric_test_name(
- ROBO_MODULE), None)
-
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
- @mock.patch.object(module_info.ModuleInfo, 'is_robolectric_module')
- @mock.patch('os.path.isfile', return_value=False)
- @mock.patch.object(module_info.ModuleInfo, 'get_module_info')
- @mock.patch.object(module_info.ModuleInfo, 'get_module_names')
- def test_get_robolectric_type(self, mock_get_module_names, mock_get_module_info,
- mock_isfile, mock_is_robo_mod):
- """Test get_robolectric_type."""
- # Happy path testing, make sure we get the run robo target.
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
- mod_info.name_to_module_info = MOD_NAME_INFO_DICT
- mod_info.path_to_module_info = MOD_PATH_INFO_DICT
- mock_isfile.return_value = False
- mock_get_module_names.return_value = [ASSOCIATED_ROBO_MODULE, ROBO_MODULE]
- mock_get_module_info.return_value = ASSOCIATED_ROBO_MODULE_INFO
- # Test on an legacy associated robo module.
- self.assertEqual(
- mod_info.get_robolectric_type(ASSOCIATED_ROBO_MODULE), constants.ROBOTYPE_LEGACY)
- # Test on a legacy robo module.
- self.assertEqual(
- mod_info.get_robolectric_type(ROBO_MODULE), constants.ROBOTYPE_LEGACY)
- # Test on a modern robo module.
- mock_isfile.return_value = True
- self.assertEqual(
- mod_info.get_robolectric_type(ROBO_MODULE), constants.ROBOTYPE_MODERN)
- # Two situations that are not a robolectric test:
- # 1. Not is_robolectric_module:
- mock_is_robo_mod.return_value = False
- self.assertEqual(mod_info.get_robolectric_type(ROBO_MODULE), 0)
- # 2. The path in the mod_info is inexistent.
- mod_info.path_to_module_info = {'/inexist': ['Foo', 'RunFoo']}
- self.assertEqual(mod_info.get_robolectric_type(ROBO_MODULE), 0)
-
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
@mock.patch.object(module_info.ModuleInfo, 'get_robolectric_type')
def test_is_robolectric_test(self, mock_type):
"""Test is_robolectric_test."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
mock_type.return_value = constants.ROBOTYPE_MODERN
self.assertTrue(mod_info.is_robolectric_test(ROBO_MODULE))
mock_type.return_value = constants.ROBOTYPE_LEGACY
@@ -346,12 +252,10 @@
mock_type.return_value = 0
self.assertFalse(mod_info.is_robolectric_test(ROBO_MODULE))
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
@mock.patch.object(module_info.ModuleInfo, 'is_module')
def test_is_auto_gen_test_config(self, mock_is_module):
"""Test is_auto_gen_test_config correctly detects the module."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
mock_is_module.return_value = True
is_auto_test_config = {'auto_test_config': [True]}
is_not_auto_test_config = {'auto_test_config': [False]}
@@ -366,24 +270,9 @@
self.assertFalse(mod_info.is_auto_gen_test_config(MOD_NAME3))
self.assertFalse(mod_info.is_auto_gen_test_config(MOD_NAME4))
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
- def test_is_robolectric_module(self):
- """Test is_robolectric_module correctly detects the module."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
- is_robolectric_module = {'class': ['ROBOLECTRIC']}
- is_not_robolectric_module = {'class': ['OTHERS']}
- MOD_INFO_DICT[MOD_NAME1] = is_robolectric_module
- MOD_INFO_DICT[MOD_NAME2] = is_not_robolectric_module
- mod_info.name_to_module_info = MOD_INFO_DICT
- self.assertTrue(mod_info.is_robolectric_module(MOD_INFO_DICT[MOD_NAME1]))
- self.assertFalse(mod_info.is_robolectric_module(MOD_INFO_DICT[MOD_NAME2]))
-
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
def test_merge_build_system_infos(self):
"""Test _merge_build_system_infos."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
mod_info_1 = {constants.MODULE_NAME: 'module_1',
constants.MODULE_DEPENDENCIES: []}
name_to_mod_info = {'module_1' : mod_info_1}
@@ -394,11 +283,20 @@
name_to_mod_info['module_1'].get(constants.MODULE_DEPENDENCIES),
expect_deps)
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
+ def test_merge_build_system_infos_missing_keys(self):
+ """Test _merge_build_system_infos for keys missing from module-info.json."""
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
+ name_to_mod_info = mod_info._merge_build_system_infos(
+ {}, java_bp_info_path=self.java_dep_path)
+
+ expect_deps = ['test_dep_level_1_1']
+ self.assertEqual(
+ name_to_mod_info['not_in_module_info'].get(constants.MODULE_DEPENDENCIES),
+ expect_deps)
+
def test_merge_dependency_with_ori_dependency(self):
"""Test _merge_dependency."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
mod_info_1 = {constants.MODULE_NAME: 'module_1',
constants.MODULE_DEPENDENCIES: ['ori_dep_1']}
name_to_mod_info = {'module_1' : mod_info_1}
@@ -409,11 +307,76 @@
name_to_mod_info['module_1'].get(constants.MODULE_DEPENDENCIES),
expect_deps)
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
+ @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:uc.TEST_DATA_DIR,
constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
+ def test_get_instrumentation_target_apps(self):
+ mod_info = module_info.ModuleInfo(
+ module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
+ artifacts = {
+ 'AmSlam': {os.path.join(uc.TEST_DATA_DIR,
+ "out/target/product/generic/data/app/AmSlam/AmSlam.apk")}
+ }
+ # 1. If Android.bp is available, use `manifest` to determine the actual
+ # manifest.
+ bp_context = """android_test {
+ name: "AmSlamTests",
+ manifest: 'AndroidManifest.xml',
+ instrumentation_for: "AmSlam"
+ }"""
+ bp_file = os.path.join(uc.TEST_DATA_DIR, 'foo/bar/AmSlam/test/Android.bp')
+ with open(bp_file, 'w', encoding='utf-8') as cache:
+ cache.write(bp_context)
+ self.assertEqual(
+ mod_info.get_instrumentation_target_apps('AmSlamTests'), artifacts)
+ os.remove(bp_file)
+ # 2. If Android.bp is unavailable, search `AndroidManifest.xml`
+ # arbitrarily.
+ self.assertEqual(
+ mod_info.get_instrumentation_target_apps('AmSlamTests'), artifacts)
+
+ @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:uc.TEST_DATA_DIR,
+ constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
+ def test_get_target_module_by_pkg(self):
+ mod_info = module_info.ModuleInfo(
+ module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
+ self.assertEqual(
+ 'AmSlam',
+ mod_info.get_target_module_by_pkg(
+ package='c0m.andr0id.settingS',
+ search_from=Path(uc.TEST_DATA_DIR).joinpath('foo/bar/AmSlam/test')))
+
+ @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:uc.TEST_DATA_DIR,
+ constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
+ def test_get_artifact_map(self):
+ mod_info = module_info.ModuleInfo(
+ module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
+ artifacts = {
+ 'AmSlam': {os.path.join(uc.TEST_DATA_DIR,
+ 'out/target/product/generic/data/app/AmSlam/AmSlam.apk')}
+ }
+ self.assertEqual(mod_info.get_artifact_map('AmSlam'), artifacts)
+
+ @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:uc.TEST_DATA_DIR,
+ constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
+ def test_get_filepath_from_module(self):
+ """Test for get_filepath_from_module."""
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
+
+ expected_filepath = Path(uc.TEST_DATA_DIR).joinpath(
+ 'foo/bar/AmSlam', 'AndroidManifest.xml')
+ self.assertEqual(
+ mod_info.get_filepath_from_module('AmSlam', 'AndroidManifest.xml'),
+ expected_filepath)
+
+ expected_filepath = Path(uc.TEST_DATA_DIR).joinpath(
+ 'foo/bar/AmSlam/test', 'AndroidManifest.xml')
+ self.assertEqual(
+ mod_info.get_filepath_from_module('AmSlamTests', 'AndroidManifest.xml'),
+ expected_filepath)
+
def test_get_module_dependency(self):
"""Test get_module_dependency."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
expect_deps = {'test_dep_level_1_1', 'module_1', 'test_dep_level_1_2',
'test_dep_level_2_2', 'test_dep_level_2_1', 'module_2'}
mod_info._merge_build_system_infos(mod_info.name_to_module_info,
@@ -422,11 +385,9 @@
mod_info.get_module_dependency('dep_test_module'),
expect_deps)
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
def test_get_module_dependency_w_loop(self):
"""Test get_module_dependency with problem dep file."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
# Java dependency file with a endless loop define.
java_dep_file = os.path.join(uc.TEST_DATA_DIR,
'module_bp_java_loop_deps.json')
@@ -438,11 +399,9 @@
mod_info.get_module_dependency('dep_test_module'),
expect_deps)
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
def test_get_install_module_dependency(self):
"""Test get_install_module_dependency."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
expect_deps = {'module_1', 'test_dep_level_2_1'}
mod_info._merge_build_system_infos(mod_info.name_to_module_info,
java_bp_info_path=self.java_dep_path)
@@ -450,11 +409,9 @@
mod_info.get_install_module_dependency('dep_test_module'),
expect_deps)
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
def test_cc_merge_build_system_infos(self):
"""Test _merge_build_system_infos for cc."""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
mod_info_1 = {constants.MODULE_NAME: 'module_cc_1',
constants.MODULE_DEPENDENCIES: []}
name_to_mod_info = {'module_cc_1' : mod_info_1}
@@ -465,32 +422,31 @@
name_to_mod_info['module_cc_1'].get(constants.MODULE_DEPENDENCIES),
expect_deps)
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
def test_is_unit_test(self):
"""Test is_unit_test."""
module_name = 'myModule'
maininfo_with_unittest = {constants.MODULE_NAME: module_name,
constants.MODULE_IS_UNIT_TEST: 'true'}
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH,
+ index_dir=HOST_OUT_DIR)
self.assertTrue(mod_info.is_unit_test(maininfo_with_unittest))
-
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
def test_is_host_unit_test(self):
"""Test is_host_unit_test."""
module_name = 'myModule'
maininfo_with_host_unittest = {
constants.MODULE_NAME: module_name,
constants.MODULE_IS_UNIT_TEST: 'true',
- 'compatibility_suites': ['host-unit-tests']
+ 'compatibility_suites': ['host-unit-tests'],
+ constants.MODULE_INSTALLED: uc.DEFAULT_INSTALL_PATH,
+ 'auto_test_config': ['true']
}
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH,
+ index_dir=HOST_OUT_DIR)
+
self.assertTrue(mod_info.is_host_unit_test(maininfo_with_host_unittest))
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
def test_is_device_driven_test(self):
module_name = 'myModule'
maininfo_with_device_driven_test = {
@@ -500,21 +456,61 @@
constants.MODULE_INSTALLED: uc.DEFAULT_INSTALL_PATH,
'supported_variants': ['DEVICE']
}
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
self.assertTrue(mod_info.is_device_driven_test(maininfo_with_device_driven_test))
+ def test_not_device_driven_test_when_suite_is_robolectric_test(self):
+ module_name = 'myModule'
+ maininfo_with_device_driven_test = {
+ constants.MODULE_NAME: module_name,
+ constants.MODULE_TEST_CONFIG:[os.path.join(
+ uc.TEST_CONFIG_DATA_DIR, "a.xml.data")],
+ constants.MODULE_INSTALLED: uc.DEFAULT_INSTALL_PATH,
+ 'supported_variants': ['DEVICE'],
+ 'compatibility_suites': ['robolectric-tests'],
+ }
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
+
+ self.assertFalse(mod_info.is_device_driven_test(maininfo_with_device_driven_test))
+
+ def test_is_host_driven_test(self):
+ """Test is_host_driven_test."""
+ test_name = 'myModule'
+ expected_host_driven_info = {
+ constants.MODULE_NAME: test_name,
+ constants.MODULE_TEST_CONFIG:[os.path.join(
+ uc.TEST_CONFIG_DATA_DIR, "a.xml.data")],
+ constants.MODULE_INSTALLED: uc.DEFAULT_INSTALL_PATH,
+ 'supported_variants': ['HOST']
+ }
+ mod_info = create_module_info([
+ module(
+ name=test_name,
+ test_config=[os.path.join(uc.TEST_CONFIG_DATA_DIR,
+ "a.xml.data")],
+ installed=uc.DEFAULT_INSTALL_PATH,
+ supported_variants=['HOST']
+ )
+ ])
+
+ return_value = mod_info.is_host_driven_test(expected_host_driven_info)
+
+ self.assertTrue(return_value)
+
+ # TODO: (b/264015241) Stop mocking build variables.
+ # TODO: (b/263199608) Re-write the test after refactoring module-info.py
@mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:os.path.dirname(__file__),
constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
def test_has_mainline_modules(self):
"""Test has_mainline_modules."""
name1 = 'MainModule1'
- mainline_module1 = 'foo2.apk+foo3.apk'
+ mainline_module1 = ['foo2.apk', 'foo3.apk']
name2 = 'MainModule2'
- mainline_module2 = 'foo1.apex'
+ mainline_module2 = ['foo1.apex']
name3 = 'MainModule3'
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
# found in 'test_mainlne_modules' attribute.
self.assertTrue(mod_info.has_mainline_modules(name1, mainline_module1))
# found in the value of 'mainline-param' in test_config.
@@ -522,6 +518,8 @@
# cannot be found in both 'test_mainline_modules' and 'test_config'.
self.assertFalse(mod_info.has_mainline_modules(name3, mainline_module2))
+ # TODO: (b/264015241) Stop mocking build variables.
+ # TODO: (b/263199608) Re-write the test after refactoring module-info.py
@mock.patch.dict('os.environ',
{constants.ANDROID_BUILD_TOP:os.path.dirname(__file__),
constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
@@ -529,9 +527,564 @@
my_module_name = 'MyMultiArchTestModule'
multi_arch_json = os.path.join(uc.TEST_DATA_DIR,
'multi_arch_module-info.json')
- mod_info = module_info.ModuleInfo(module_file=multi_arch_json)
+ mod_info = module_info.ModuleInfo(module_file=multi_arch_json, index_dir=HOST_OUT_DIR)
self.assertIsNotNone(mod_info.get_module_info(my_module_name))
+ def test_get_modules_by_include_deps_w_testable_module_only_false(self):
+ module_1 = module(name='module_1',
+ dependencies=['dep1', 'dep2'],
+ )
+ module_2 = module(name='module_2',
+ dependencies=['dep1', 'dep3']
+ )
+ mod_info = create_module_info([module_1, module_2])
+
+ self.assertEqual({'module_1', 'module_2'},
+ mod_info.get_modules_by_include_deps(
+ {'dep1'}, testable_module_only=False))
+ self.assertEqual({'module_1'},
+ mod_info.get_modules_by_include_deps(
+ {'dep2'}, testable_module_only=False))
+ self.assertEqual({'module_2'},
+ mod_info.get_modules_by_include_deps(
+ {'dep3'}, testable_module_only=False))
+
+ @mock.patch.object(module_info.ModuleInfo, 'get_testable_modules')
+ def test_get_modules_by_include_deps_w_testable_module_only_true(
+ self, _testable_modules):
+ module_1 = module(name='module_1',
+ dependencies=['dep1', 'dep2'],
+ )
+ module_2 = module(name='module_2',
+ dependencies=['dep1', 'dep3']
+ )
+ mod_info = create_module_info([module_1, module_2])
+ _testable_modules.return_value = []
+
+ self.assertEqual(set(),
+ mod_info.get_modules_by_include_deps(
+ {'dep1'}, testable_module_only=True))
+
+ def test_get_modules_by_path_in_srcs_no_module_found(self):
+ module_1 = module(name='module_1',
+ srcs=['path/src1', 'path/src2'],
+ )
+ module_2 = module(name='module_2',
+ srcs=['path/src2', 'path/src3']
+ )
+ mod_info = create_module_info([module_1, module_2])
+
+ self.assertEqual(set(),
+ mod_info.get_modules_by_path_in_srcs('path/src4'))
+
+ def test_get_modules_by_path_in_srcs_one_module_found(self):
+ module_1 = module(name='module_1',
+ srcs=['path/src1', 'path/src2'],
+ )
+ module_2 = module(name='module_2',
+ srcs=['path/src2', 'path/src3']
+ )
+ mod_info = create_module_info([module_1, module_2])
+
+ self.assertEqual({'module_1'},
+ mod_info.get_modules_by_path_in_srcs('path/src1'))
+
+ def test_get_modules_by_path_in_srcs_multiple_module_found(self):
+ module_1 = module(name='module_1',
+ srcs=['path/src1', 'path/src2'],
+ )
+ module_2 = module(name='module_2',
+ srcs=['path/src2', 'path/src3']
+ )
+ mod_info = create_module_info([module_1, module_2])
+
+ self.assertEqual({'module_1', 'module_2'},
+ mod_info.get_modules_by_path_in_srcs('path/src2'))
+
+ def test_contains_same_mainline_modules(self):
+ mainline_modules = {'A.apex', 'B.apk'}
+ self.assertTrue(module_info.contains_same_mainline_modules(
+ mainline_modules,
+ {'B.apk+A.apex'}))
+ self.assertFalse(module_info.contains_same_mainline_modules(
+ mainline_modules,
+ {'B.apk+C.apex'}))
+
+
+class ModuleInfoTestFixture(fake_filesystem_unittest.TestCase):
+ """Fixture for ModuleInfo tests."""
+
+ def setUp(self):
+ self.setUpPyfakefs()
+
+ # pylint: disable=protected-access
+ def create_empty_module_info(self):
+ fake_temp_file_name = next(tempfile._get_candidate_names())
+ self.fs.create_file(fake_temp_file_name, contents='{}')
+ return module_info.ModuleInfo(module_file=fake_temp_file_name)
+
+ def create_module_info(self, modules=None):
+ mod_info = self.create_empty_module_info()
+ modules = modules or []
+
+ for m in modules:
+ mod_info.name_to_module_info[m['module_name']] = m
+ for path in m['path']:
+ if path in mod_info.path_to_module_info:
+ mod_info.path_to_module_info[path].append(m)
+ else:
+ mod_info.path_to_module_info[path] = [m]
+
+ return mod_info
+
+
+class HasTestConfigTest(ModuleInfoTestFixture):
+ """Tests has_test_config in various conditions."""
+
+ def test_return_true_if_test_config_is_not_empty(self):
+ test_module_info = module(test_config=['config_file'])
+ mod_info = self.create_module_info()
+
+ return_value = mod_info.has_test_config(test_module_info)
+
+ self.assertTrue(return_value)
+
+ def test_return_true_if_auto_test_config_is_not_empty(self):
+ test_module_info = module(auto_test_config=['no_empty'])
+ mod_info = self.create_module_info()
+
+ return_value = mod_info.has_test_config(test_module_info)
+
+ self.assertTrue(return_value)
+
+ def test_return_false_if_auto_test_config_and_test_config_empty(self):
+ test_module_info = module(test_config=[],
+ auto_test_config=[])
+ mod_info = self.create_module_info()
+
+ return_value = mod_info.has_test_config(test_module_info)
+
+ self.assertFalse(return_value)
+
+
+class ModuleInfoCompatibilitySuiteTest(ModuleInfoTestFixture):
+ """Tests the compatibility suite in the module info."""
+
+ def test_return_true_if_suite_in_test(self):
+ test_module_info = module(compatibility_suites=['test_suite'])
+ mod_info = self.create_module_info()
+
+ return_value = mod_info.is_suite_in_compatibility_suites(
+ 'test_suite', test_module_info)
+
+ self.assertTrue(return_value)
+
+ def test_return_false_if_suite_not_in_test(self):
+ test_module_info = module(compatibility_suites=['no_suite'])
+ mod_info = self.create_module_info()
+
+ return_value = mod_info.is_suite_in_compatibility_suites(
+ 'test_suite', test_module_info)
+
+ self.assertFalse(return_value)
+
+ def test_return_false_when_mod_info_is_empty(self):
+ test_module_info = None
+ mod_info = self.create_module_info()
+
+ return_value = mod_info.is_suite_in_compatibility_suites(
+ 'test_suite', test_module_info)
+
+ self.assertFalse(return_value)
+
+ def test_return_false_when_mod_info_is_not_a_dict(self):
+ test_module_info = ['no_a_dict']
+ mod_info = self.create_module_info()
+
+ return_value = mod_info.is_suite_in_compatibility_suites(
+ 'test_suite', test_module_info)
+
+ self.assertFalse(return_value)
+
+
+class RobolectricTestNameTest(ModuleInfoTestFixture):
+ """Tests the Robolectric test name in the module info."""
+
+ def test_return_empty_for_a_modern_robolectric_test(self):
+ module_name = 'hello_world_test'
+ mod_info = self.create_module_info(modules=[
+ modern_robolectric_test_module(name=f'{module_name}'),
+ ])
+
+ return_module = mod_info.get_robolectric_test_name(module_name)
+
+ self.assertEqual('', return_module)
+
+ def test_return_related_robolectric_run_module_name(self):
+ module_name = 'hello_world_test'
+ run_module_name = f'Run{module_name}'
+ module_path = 'robolectric_path'
+ mod_info = self.create_module_info(modules=[
+ test_module(name=f'{module_name}',
+ path=module_path),
+ robolectric_class_test_module(name=f'{run_module_name}',
+ path=module_path),
+ ])
+
+ return_module = mod_info.get_robolectric_test_name(module_name)
+
+ self.assertEqual(run_module_name, return_module)
+
+    def test_return_empty_when_no_related_robolectric_class_module(self):
+ module_name = 'hello_world_test'
+ run_module_name = f'Run{module_name}'
+ module_path = 'robolectric_path'
+ mod_info = self.create_module_info(modules=[
+ test_module(name=f'{module_name}',
+ path=module_path),
+ test_module(name=f'{run_module_name}',
+ path=module_path),
+ ])
+
+ return_module = mod_info.get_robolectric_test_name(module_name)
+
+ self.assertEqual('', return_module)
+
+ def test_return_empty_if_related_module_name_not_start_with_Run(self):
+ module_name = 'hello_world_test'
+ run_module_name = f'Not_Run{module_name}'
+ module_path = 'robolectric_path'
+ mod_info = self.create_module_info(modules=[
+ test_module(name=f'{module_name}',
+ path=module_path),
+ robolectric_class_test_module(name=f'{run_module_name}',
+ path=module_path),
+ ])
+
+ return_module = mod_info.get_robolectric_test_name(module_name)
+
+ self.assertEqual('', return_module)
+
+ def test_return_itself_for_a_robolectric_class_test_module(self):
+ module_name = 'Run_hello_world_test'
+ mod_info = self.create_module_info(modules=[
+ robolectric_class_test_module(name=f'{module_name}'),
+ ])
+
+ return_module = mod_info.get_robolectric_test_name(module_name)
+
+ self.assertEqual(module_name, return_module)
+
+ def test_return_empty_if_robolectric_class_module_not_start_with_Run(self):
+ module_name = 'hello_world_test'
+ mod_info = self.create_module_info(modules=[
+ robolectric_class_test_module(name=f'{module_name}'),
+ ])
+
+ return_module = mod_info.get_robolectric_test_name(module_name)
+
+ self.assertEqual('', return_module)
+
+    def test_return_empty_when_no_mod_info(self):
+ module_name = 'hello_world_test'
+ mod_info = self.create_module_info()
+
+ return_module = mod_info.get_robolectric_test_name(module_name)
+
+ self.assertEqual('', return_module)
+
+
+class RobolectricTestTypeTest(ModuleInfoTestFixture):
+ """Tests the Robolectric test type in the module info."""
+
+ def test_modern_robolectric_test_type(self):
+ module_name = 'hello_world_test'
+ mod_info = self.create_module_info(modules=[
+ modern_robolectric_test_module(name=f'{module_name}'),
+ ])
+
+ return_value = mod_info.get_robolectric_type(module_name)
+
+ self.assertEqual(return_value, constants.ROBOTYPE_MODERN)
+
+ def test_return_modern_if_compliant_with_modern_and_legacy(self):
+ module_name = 'hello_world_test'
+ module_path = 'robolectric_path'
+ run_module_name = f'Run{module_name}'
+ mod_info = self.create_module_info(modules=[
+ modern_robolectric_test_module(name=f'{module_name}',
+ path=module_path),
+ robolectric_class_test_module(name=f'{run_module_name}',
+ path=module_path),
+ ])
+
+ return_value = mod_info.get_robolectric_type(module_name)
+
+ self.assertEqual(return_value, constants.ROBOTYPE_MODERN)
+
+ def test_not_modern_robolectric_test_if_suite_is_not_robolectric(self):
+ module_name = 'hello_world_test'
+ mod_info = self.create_module_info(modules=[
+ test_module(name=f'{module_name}',
+ compatibility_suites='not_robolectric_tests'),
+ ])
+
+ return_value = mod_info.get_robolectric_type(module_name)
+
+ self.assertEqual(return_value, 0)
+
+ def test_legacy_robolectric_test_type(self):
+ module_name = 'hello_world_test'
+ run_module_name = f'Run{module_name}'
+ module_path = 'robolectric_path'
+ mod_info = self.create_module_info(modules=[
+ test_module(name=f'{module_name}',
+ path=module_path),
+ robolectric_class_test_module(name=f'{run_module_name}',
+ path=module_path),
+ ])
+
+ return_value = mod_info.get_robolectric_type(module_name)
+
+ self.assertEqual(return_value, constants.ROBOTYPE_LEGACY)
+
+ def test_robolectric_class_test_module(self):
+ module_name = 'Run_hello_world_test'
+ mod_info = self.create_module_info(modules=[
+ robolectric_class_test_module(name=f'{module_name}'),
+ ])
+
+ return_value = mod_info.get_robolectric_type(module_name)
+
+ self.assertEqual(return_value, constants.ROBOTYPE_LEGACY)
+
+ def test_not_robolectric_test_if_module_name_not_start_with_Run(self):
+ module_name = 'hello_world_test'
+ mod_info = self.create_module_info(modules=[
+ robolectric_class_test_module(name=f'{module_name}'),
+ ])
+
+ return_value = mod_info.get_robolectric_type(module_name)
+
+ self.assertEqual(return_value, 0)
+
+    def test_return_0_when_no_related_robolectric_class_module(self):
+ module_name = 'hello_world_test'
+ run_module_name = f'Run{module_name}'
+ module_path = 'robolectric_path'
+ mod_info = self.create_module_info(modules=[
+ test_module(name=f'{module_name}',
+ path=module_path),
+ test_module(name=f'{run_module_name}',
+ path=module_path),
+ ])
+
+ return_value = mod_info.get_robolectric_type(module_name)
+
+ self.assertEqual(return_value, 0)
+
+ def test_return_0_when_no_related_module_name_start_with_Run(self):
+ module_name = 'hello_world_test'
+ run_module_name = f'Not_Run{module_name}'
+ module_path = 'robolectric_path'
+ mod_info = self.create_module_info(modules=[
+ test_module(name=f'{module_name}',
+ path=module_path),
+ robolectric_class_test_module(name=f'{run_module_name}',
+ path=module_path),
+ ])
+
+ return_value = mod_info.get_robolectric_type(module_name)
+
+ self.assertEqual(return_value, 0)
+
+ def test_return_0_when_no_mod_info(self):
+ module_name = 'hello_world_test'
+ mod_info = self.create_module_info()
+
+ return_value = mod_info.get_robolectric_type(module_name)
+
+ self.assertEqual(return_value, 0)
+
+
+class IsLegacyRobolectricClassTest(ModuleInfoTestFixture):
+ """Tests is_legacy_robolectric_class in various conditions."""
+
+ def test_return_true_if_module_class_is_robolectric(self):
+ test_module_info = module(classes=[constants.MODULE_CLASS_ROBOLECTRIC])
+ mod_info = self.create_module_info()
+
+ return_value = mod_info.is_legacy_robolectric_class(test_module_info)
+
+ self.assertTrue(return_value)
+
+ def test_return_false_if_module_class_is_not_robolectric(self):
+ test_module_info = module(classes=['not_robolectric'])
+ mod_info = self.create_module_info()
+
+ return_value = mod_info.is_legacy_robolectric_class(test_module_info)
+
+ self.assertFalse(return_value)
+
+ def test_return_false_if_module_class_is_empty(self):
+ test_module_info = module(classes=[])
+ mod_info = self.create_module_info()
+
+ return_value = mod_info.is_legacy_robolectric_class(test_module_info)
+
+ self.assertFalse(return_value)
+
+
+class IsTestableModuleTest(ModuleInfoTestFixture):
+ """Tests is_testable_module in various conditions."""
+
+ def test_return_true_for_tradefed_testable_module(self):
+ info = test_module()
+ mod_info = self.create_module_info()
+
+ return_value = mod_info.is_testable_module(info)
+
+ self.assertTrue(return_value)
+
+ def test_return_true_for_modern_robolectric_test_module(self):
+ info = modern_robolectric_test_module()
+ mod_info = self.create_module_info()
+
+ return_value = mod_info.is_testable_module(info)
+
+ self.assertTrue(return_value)
+
+ def test_return_true_for_legacy_robolectric_test_module(self):
+ info = legacy_robolectric_test_module()
+ mod_info = self.create_module_info()
+
+ return_value = mod_info.is_testable_module(info)
+
+ self.assertTrue(return_value)
+
+ def test_return_false_for_non_tradefed_testable_module(self):
+ info = module(auto_test_config=[], test_config=[],
+ installed=['installed_path'])
+ mod_info = self.create_module_info()
+
+ return_value = mod_info.is_testable_module(info)
+
+ self.assertFalse(return_value)
+
+ def test_return_false_for_no_installed_path_module(self):
+ info = module(auto_test_config=['true'], installed=[])
+ mod_info = self.create_module_info()
+
+ return_value = mod_info.is_testable_module(info)
+
+ self.assertFalse(return_value)
+
+ def test_return_false_if_module_info_is_empty(self):
+ info = {}
+ mod_info = self.create_module_info()
+
+ return_value = mod_info.is_testable_module(info)
+
+ self.assertFalse(return_value)
+
+
+@mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP: '/'})
+def create_empty_module_info():
+ with fake_filesystem_unittest.Patcher() as patcher:
+ # pylint: disable=protected-access
+ fake_temp_file_name = next(tempfile._get_candidate_names())
+ patcher.fs.create_file(fake_temp_file_name, contents='{}')
+ return module_info.ModuleInfo(module_file=fake_temp_file_name)
+
+
+def create_module_info(modules=None):
+ mod_info = create_empty_module_info()
+ modules = modules or []
+
+ for m in modules:
+ mod_info.name_to_module_info[m['module_name']] = m
+
+ return mod_info
+
+
+def test_module(**kwargs):
+ kwargs.setdefault('name', 'hello_world_test')
+ return test(module(**kwargs))
+
+
+def modern_robolectric_test_module(**kwargs):
+ kwargs.setdefault('name', 'hello_world_test')
+ return test(robolectric_tests_suite(module(**kwargs)))
+
+
+def legacy_robolectric_test_module(**kwargs):
+ kwargs.setdefault('name', 'Run_hello_world_test')
+ return test(robolectric_tests_suite(module(**kwargs)))
+
+
+def robolectric_class_test_module(**kwargs):
+ kwargs.setdefault('name', 'hello_world_test')
+ return test(robolectric_class(module(**kwargs)))
+
+
+# pylint: disable=too-many-arguments, too-many-locals
+def module(
+ name=None,
+ path=None,
+ installed=None,
+ classes=None,
+ auto_test_config=None,
+ test_config=None,
+ shared_libs=None,
+ dependencies=None,
+ runtime_dependencies=None,
+ data=None,
+ data_dependencies=None,
+ compatibility_suites=None,
+ host_dependencies=None,
+ srcs=None,
+ supported_variants=None
+):
+ name = name or 'libhello'
+
+ m = {}
+
+ m['module_name'] = name
+ m['class'] = classes or ['ETC']
+ m['path'] = [path or '']
+ m['installed'] = installed or []
+ m['is_unit_test'] = 'false'
+ m['auto_test_config'] = auto_test_config or []
+ m['test_config'] = test_config or []
+ m['shared_libs'] = shared_libs or []
+ m['runtime_dependencies'] = runtime_dependencies or []
+ m['dependencies'] = dependencies or []
+ m['data'] = data or []
+ m['data_dependencies'] = data_dependencies or []
+ m['compatibility_suites'] = compatibility_suites or []
+ m['host_dependencies'] = host_dependencies or []
+ m['srcs'] = srcs or []
+ m['supported_variants'] = supported_variants or []
+ return m
+
+
+def test(info):
+ info['auto_test_config'] = ['true']
+ info['installed'] = ['installed_path']
+ return info
+
+
+def robolectric_class(info):
+ info['class'] = ['ROBOLECTRIC']
+ return info
+
+
+def robolectric_tests_suite(info):
+ info = test(info)
+ info.setdefault('compatibility_suites', []).append('robolectric-tests')
+ return info
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/atest/profiler.py b/atest/profiler.py
new file mode 100644
index 0000000..ac742bc
--- /dev/null
+++ b/atest/profiler.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python3
+# Copyright 2022, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Script of Atest Profiler."""
+
+import argparse
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+import zipfile
+
+# This is mostly a copy of Soong's stub_template_host.txt, but with changes to
+# run soong python executables via a profiler tool. This is just a hack, soong
+# should really have the ability to build profiled binaries directly.
+
+def main():
+ """Main method that runs python profilers."""
+ parser = argparse.ArgumentParser(
+ description='Runs a soong-built python binary under a profiler.')
+ parser.add_argument('profiler', choices = ["pyinstrument", "cProfile"],
+ help='The profiler to use')
+ parser.add_argument('profile_file', help="The output file of the profiler")
+ parser.add_argument(
+ 'executable',
+ help="The soong-built python binary (with embedded_launcher: false)")
+ args, args_for_executable = parser.parse_known_args()
+
+ if not os.path.isfile(args.executable):
+ sys.exit(f"{args.executable}: File not found")
+ os.makedirs(os.path.dirname(args.profile_file), exist_ok=True)
+
+ runfiles_path = tempfile.mkdtemp(prefix="Soong.python_")
+ try:
+ _zf = zipfile.ZipFile(args.executable)
+ _zf.extractall(runfiles_path)
+ _zf.close()
+
+ sys.exit(subprocess.call([
+ "python3",
+ "-m", args.profiler,
+ "-o", args.profile_file,
+ os.path.join(runfiles_path,
+ "__soong_entrypoint_redirector__.py")
+ ] + args_for_executable, close_fds=False))
+
+ finally:
+ shutil.rmtree(runfiles_path, ignore_errors=True)
+
+if __name__ == '__main__':
+ main()
diff --git a/atest/proto/__init__.py b/atest/proto/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/atest/proto/__init__.py
+++ /dev/null
diff --git a/atest/proto/clientanalytics_pb2.py b/atest/proto/clientanalytics_pb2.py
deleted file mode 100644
index b58dcc7..0000000
--- a/atest/proto/clientanalytics_pb2.py
+++ /dev/null
@@ -1,217 +0,0 @@
-# pylint: skip-file
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/clientanalytics.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
- name='proto/clientanalytics.proto',
- package='',
- syntax='proto2',
- serialized_pb=_b('\n\x1bproto/clientanalytics.proto\"y\n\nLogRequest\x12 \n\x0b\x63lient_info\x18\x01 \x01(\x0b\x32\x0b.ClientInfo\x12\x12\n\nlog_source\x18\x02 \x01(\x05\x12\x17\n\x0frequest_time_ms\x18\x04 \x01(\x03\x12\x1c\n\tlog_event\x18\x03 \x03(\x0b\x32\t.LogEvent\"!\n\nClientInfo\x12\x13\n\x0b\x63lient_type\x18\x01 \x01(\x05\"/\n\x0bLogResponse\x12 \n\x18next_request_wait_millis\x18\x01 \x01(\x03\";\n\x08LogEvent\x12\x15\n\revent_time_ms\x18\x01 \x01(\x03\x12\x18\n\x10source_extension\x18\x06 \x01(\x0c')
-)
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-
-
-
-_LOGREQUEST = _descriptor.Descriptor(
- name='LogRequest',
- full_name='LogRequest',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='client_info', full_name='LogRequest.client_info', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='log_source', full_name='LogRequest.log_source', index=1,
- number=2, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='request_time_ms', full_name='LogRequest.request_time_ms', index=2,
- number=4, type=3, cpp_type=2, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='log_event', full_name='LogRequest.log_event', index=3,
- number=3, type=11, cpp_type=10, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=31,
- serialized_end=152,
-)
-
-
-_CLIENTINFO = _descriptor.Descriptor(
- name='ClientInfo',
- full_name='ClientInfo',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='client_type', full_name='ClientInfo.client_type', index=0,
- number=1, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=154,
- serialized_end=187,
-)
-
-
-_LOGRESPONSE = _descriptor.Descriptor(
- name='LogResponse',
- full_name='LogResponse',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='next_request_wait_millis', full_name='LogResponse.next_request_wait_millis', index=0,
- number=1, type=3, cpp_type=2, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=189,
- serialized_end=236,
-)
-
-
-_LOGEVENT = _descriptor.Descriptor(
- name='LogEvent',
- full_name='LogEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='event_time_ms', full_name='LogEvent.event_time_ms', index=0,
- number=1, type=3, cpp_type=2, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='source_extension', full_name='LogEvent.source_extension', index=1,
- number=6, type=12, cpp_type=9, label=1,
- has_default_value=False, default_value=_b(""),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=238,
- serialized_end=297,
-)
-
-_LOGREQUEST.fields_by_name['client_info'].message_type = _CLIENTINFO
-_LOGREQUEST.fields_by_name['log_event'].message_type = _LOGEVENT
-DESCRIPTOR.message_types_by_name['LogRequest'] = _LOGREQUEST
-DESCRIPTOR.message_types_by_name['ClientInfo'] = _CLIENTINFO
-DESCRIPTOR.message_types_by_name['LogResponse'] = _LOGRESPONSE
-DESCRIPTOR.message_types_by_name['LogEvent'] = _LOGEVENT
-
-LogRequest = _reflection.GeneratedProtocolMessageType('LogRequest', (_message.Message,), dict(
- DESCRIPTOR = _LOGREQUEST,
- __module__ = 'proto.clientanalytics_pb2'
- # @@protoc_insertion_point(class_scope:LogRequest)
- ))
-_sym_db.RegisterMessage(LogRequest)
-
-ClientInfo = _reflection.GeneratedProtocolMessageType('ClientInfo', (_message.Message,), dict(
- DESCRIPTOR = _CLIENTINFO,
- __module__ = 'proto.clientanalytics_pb2'
- # @@protoc_insertion_point(class_scope:ClientInfo)
- ))
-_sym_db.RegisterMessage(ClientInfo)
-
-LogResponse = _reflection.GeneratedProtocolMessageType('LogResponse', (_message.Message,), dict(
- DESCRIPTOR = _LOGRESPONSE,
- __module__ = 'proto.clientanalytics_pb2'
- # @@protoc_insertion_point(class_scope:LogResponse)
- ))
-_sym_db.RegisterMessage(LogResponse)
-
-LogEvent = _reflection.GeneratedProtocolMessageType('LogEvent', (_message.Message,), dict(
- DESCRIPTOR = _LOGEVENT,
- __module__ = 'proto.clientanalytics_pb2'
- # @@protoc_insertion_point(class_scope:LogEvent)
- ))
-_sym_db.RegisterMessage(LogEvent)
-
-
-# @@protoc_insertion_point(module_scope)
diff --git a/atest/proto/common_pb2.py b/atest/proto/common_pb2.py
deleted file mode 100644
index 5b7bd2e..0000000
--- a/atest/proto/common_pb2.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# pylint: skip-file
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/common.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf.internal import enum_type_wrapper
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
- name='proto/common.proto',
- package='',
- syntax='proto2',
- serialized_pb=_b('\n\x12proto/common.proto\"*\n\x08\x44uration\x12\x0f\n\x07seconds\x18\x01 \x02(\x03\x12\r\n\x05nanos\x18\x02 \x02(\x05*$\n\x08UserType\x12\n\n\x06GOOGLE\x10\x00\x12\x0c\n\x08\x45XTERNAL\x10\x01')
-)
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-_USERTYPE = _descriptor.EnumDescriptor(
- name='UserType',
- full_name='UserType',
- filename=None,
- file=DESCRIPTOR,
- values=[
- _descriptor.EnumValueDescriptor(
- name='GOOGLE', index=0, number=0,
- options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='EXTERNAL', index=1, number=1,
- options=None,
- type=None),
- ],
- containing_type=None,
- options=None,
- serialized_start=66,
- serialized_end=102,
-)
-_sym_db.RegisterEnumDescriptor(_USERTYPE)
-
-UserType = enum_type_wrapper.EnumTypeWrapper(_USERTYPE)
-GOOGLE = 0
-EXTERNAL = 1
-
-
-
-_DURATION = _descriptor.Descriptor(
- name='Duration',
- full_name='Duration',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='seconds', full_name='Duration.seconds', index=0,
- number=1, type=3, cpp_type=2, label=2,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- _descriptor.FieldDescriptor(
- name='nanos', full_name='Duration.nanos', index=1,
- number=2, type=5, cpp_type=1, label=2,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- options=None),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=22,
- serialized_end=64,
-)
-
-DESCRIPTOR.message_types_by_name['Duration'] = _DURATION
-DESCRIPTOR.enum_types_by_name['UserType'] = _USERTYPE
-
-Duration = _reflection.GeneratedProtocolMessageType('Duration', (_message.Message,), dict(
- DESCRIPTOR = _DURATION,
- __module__ = 'proto.common_pb2'
- # @@protoc_insertion_point(class_scope:Duration)
- ))
-_sym_db.RegisterMessage(Duration)
-
-
-# @@protoc_insertion_point(module_scope)
diff --git a/atest/proto/external_user_log.proto b/atest/proto/external_user_log.proto
index 2e340e1..4332947 100644
--- a/atest/proto/external_user_log.proto
+++ b/atest/proto/external_user_log.proto
@@ -1,6 +1,6 @@
syntax = "proto2";
-import "proto/common.proto";
+import "atest/proto/common.proto";
option java_package = "com.android.asuite.clearcut";
diff --git a/atest/proto/external_user_log_pb2.py b/atest/proto/external_user_log_pb2.py
deleted file mode 100644
index 9ac9039..0000000
--- a/atest/proto/external_user_log_pb2.py
+++ /dev/null
@@ -1,505 +0,0 @@
-# -*- coding: utf-8 -*-
-# pylint: skip-file
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/external_user_log.proto
-
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-from proto import common_pb2 as proto_dot_common__pb2
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
- name='proto/external_user_log.proto',
- package='',
- syntax='proto2',
- serialized_options=b'\n\033com.android.asuite.clearcut',
- create_key=_descriptor._internal_create_key,
- serialized_pb=b'\n\x1dproto/external_user_log.proto\x1a\x12proto/common.proto\"\xa6\x08\n\x15\x41testLogEventExternal\x12\x10\n\x08user_key\x18\x01 \x01(\t\x12\x0e\n\x06run_id\x18\x02 \x01(\t\x12\x1c\n\tuser_type\x18\x03 \x01(\x0e\x32\t.UserType\x12\x11\n\ttool_name\x18\n \x01(\t\x12\x15\n\rsub_tool_name\x18\x0c \x01(\t\x12\x43\n\x11\x61test_start_event\x18\x04 \x01(\x0b\x32&.AtestLogEventExternal.AtestStartEventH\x00\x12\x41\n\x10\x61test_exit_event\x18\x05 \x01(\x0b\x32%.AtestLogEventExternal.AtestExitEventH\x00\x12L\n\x16\x66ind_test_finish_event\x18\x06 \x01(\x0b\x32*.AtestLogEventExternal.FindTestFinishEventH\x00\x12\x45\n\x12\x62uild_finish_event\x18\x07 \x01(\x0b\x32\'.AtestLogEventExternal.BuildFinishEventH\x00\x12G\n\x13runner_finish_event\x18\x08 \x01(\x0b\x32(.AtestLogEventExternal.RunnerFinishEventH\x00\x12L\n\x16run_tests_finish_event\x18\t \x01(\x0b\x32*.AtestLogEventExternal.RunTestsFinishEventH\x00\x12\x45\n\x12local_detect_event\x18\x0b \x01(\x0b\x32\'.AtestLogEventExternal.LocalDetectEventH\x00\x1a\x11\n\x0f\x41testStartEvent\x1a@\n\x0e\x41testExitEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x12\x11\n\texit_code\x18\x02 \x01(\x05\x1a\x43\n\x13\x46indTestFinishEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x12\x0f\n\x07success\x18\x02 \x01(\x08\x1a@\n\x10\x42uildFinishEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x12\x0f\n\x07success\x18\x02 \x01(\x08\x1aV\n\x11RunnerFinishEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x13\n\x0brunner_name\x18\x03 \x01(\t\x1a\x32\n\x13RunTestsFinishEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x1a\x37\n\x10LocalDetectEvent\x12\x13\n\x0b\x64\x65tect_type\x18\x01 \x01(\x05\x12\x0e\n\x06result\x18\x02 \x01(\x05\x42\x07\n\x05\x65ventB\x1d\n\x1b\x63om.android.asuite.clearcut'
- ,
- dependencies=[proto_dot_common__pb2.DESCRIPTOR,])
-
-
-
-
-_ATESTLOGEVENTEXTERNAL_ATESTSTARTEVENT = _descriptor.Descriptor(
- name='AtestStartEvent',
- full_name='AtestLogEventExternal.AtestStartEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=692,
- serialized_end=709,
-)
-
-_ATESTLOGEVENTEXTERNAL_ATESTEXITEVENT = _descriptor.Descriptor(
- name='AtestExitEvent',
- full_name='AtestLogEventExternal.AtestExitEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventExternal.AtestExitEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='exit_code', full_name='AtestLogEventExternal.AtestExitEvent.exit_code', index=1,
- number=2, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=711,
- serialized_end=775,
-)
-
-_ATESTLOGEVENTEXTERNAL_FINDTESTFINISHEVENT = _descriptor.Descriptor(
- name='FindTestFinishEvent',
- full_name='AtestLogEventExternal.FindTestFinishEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventExternal.FindTestFinishEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='success', full_name='AtestLogEventExternal.FindTestFinishEvent.success', index=1,
- number=2, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=777,
- serialized_end=844,
-)
-
-_ATESTLOGEVENTEXTERNAL_BUILDFINISHEVENT = _descriptor.Descriptor(
- name='BuildFinishEvent',
- full_name='AtestLogEventExternal.BuildFinishEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventExternal.BuildFinishEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='success', full_name='AtestLogEventExternal.BuildFinishEvent.success', index=1,
- number=2, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=846,
- serialized_end=910,
-)
-
-_ATESTLOGEVENTEXTERNAL_RUNNERFINISHEVENT = _descriptor.Descriptor(
- name='RunnerFinishEvent',
- full_name='AtestLogEventExternal.RunnerFinishEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventExternal.RunnerFinishEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='success', full_name='AtestLogEventExternal.RunnerFinishEvent.success', index=1,
- number=2, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='runner_name', full_name='AtestLogEventExternal.RunnerFinishEvent.runner_name', index=2,
- number=3, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=912,
- serialized_end=998,
-)
-
-_ATESTLOGEVENTEXTERNAL_RUNTESTSFINISHEVENT = _descriptor.Descriptor(
- name='RunTestsFinishEvent',
- full_name='AtestLogEventExternal.RunTestsFinishEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventExternal.RunTestsFinishEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1000,
- serialized_end=1050,
-)
-
-_ATESTLOGEVENTEXTERNAL_LOCALDETECTEVENT = _descriptor.Descriptor(
- name='LocalDetectEvent',
- full_name='AtestLogEventExternal.LocalDetectEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name='detect_type', full_name='AtestLogEventExternal.LocalDetectEvent.detect_type', index=0,
- number=1, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='result', full_name='AtestLogEventExternal.LocalDetectEvent.result', index=1,
- number=2, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1052,
- serialized_end=1107,
-)
-
-_ATESTLOGEVENTEXTERNAL = _descriptor.Descriptor(
- name='AtestLogEventExternal',
- full_name='AtestLogEventExternal',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name='user_key', full_name='AtestLogEventExternal.user_key', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='run_id', full_name='AtestLogEventExternal.run_id', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='user_type', full_name='AtestLogEventExternal.user_type', index=2,
- number=3, type=14, cpp_type=8, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='tool_name', full_name='AtestLogEventExternal.tool_name', index=3,
- number=10, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='sub_tool_name', full_name='AtestLogEventExternal.sub_tool_name', index=4,
- number=12, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='atest_start_event', full_name='AtestLogEventExternal.atest_start_event', index=5,
- number=4, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='atest_exit_event', full_name='AtestLogEventExternal.atest_exit_event', index=6,
- number=5, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='find_test_finish_event', full_name='AtestLogEventExternal.find_test_finish_event', index=7,
- number=6, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='build_finish_event', full_name='AtestLogEventExternal.build_finish_event', index=8,
- number=7, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='runner_finish_event', full_name='AtestLogEventExternal.runner_finish_event', index=9,
- number=8, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='run_tests_finish_event', full_name='AtestLogEventExternal.run_tests_finish_event', index=10,
- number=9, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='local_detect_event', full_name='AtestLogEventExternal.local_detect_event', index=11,
- number=11, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- ],
- extensions=[
- ],
- nested_types=[_ATESTLOGEVENTEXTERNAL_ATESTSTARTEVENT, _ATESTLOGEVENTEXTERNAL_ATESTEXITEVENT, _ATESTLOGEVENTEXTERNAL_FINDTESTFINISHEVENT, _ATESTLOGEVENTEXTERNAL_BUILDFINISHEVENT, _ATESTLOGEVENTEXTERNAL_RUNNERFINISHEVENT, _ATESTLOGEVENTEXTERNAL_RUNTESTSFINISHEVENT, _ATESTLOGEVENTEXTERNAL_LOCALDETECTEVENT, ],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- _descriptor.OneofDescriptor(
- name='event', full_name='AtestLogEventExternal.event',
- index=0, containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[]),
- ],
- serialized_start=54,
- serialized_end=1116,
-)
-
-_ATESTLOGEVENTEXTERNAL_ATESTSTARTEVENT.containing_type = _ATESTLOGEVENTEXTERNAL
-_ATESTLOGEVENTEXTERNAL_ATESTEXITEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTEXTERNAL_ATESTEXITEVENT.containing_type = _ATESTLOGEVENTEXTERNAL
-_ATESTLOGEVENTEXTERNAL_FINDTESTFINISHEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTEXTERNAL_FINDTESTFINISHEVENT.containing_type = _ATESTLOGEVENTEXTERNAL
-_ATESTLOGEVENTEXTERNAL_BUILDFINISHEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTEXTERNAL_BUILDFINISHEVENT.containing_type = _ATESTLOGEVENTEXTERNAL
-_ATESTLOGEVENTEXTERNAL_RUNNERFINISHEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTEXTERNAL_RUNNERFINISHEVENT.containing_type = _ATESTLOGEVENTEXTERNAL
-_ATESTLOGEVENTEXTERNAL_RUNTESTSFINISHEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTEXTERNAL_RUNTESTSFINISHEVENT.containing_type = _ATESTLOGEVENTEXTERNAL
-_ATESTLOGEVENTEXTERNAL_LOCALDETECTEVENT.containing_type = _ATESTLOGEVENTEXTERNAL
-_ATESTLOGEVENTEXTERNAL.fields_by_name['user_type'].enum_type = proto_dot_common__pb2._USERTYPE
-_ATESTLOGEVENTEXTERNAL.fields_by_name['atest_start_event'].message_type = _ATESTLOGEVENTEXTERNAL_ATESTSTARTEVENT
-_ATESTLOGEVENTEXTERNAL.fields_by_name['atest_exit_event'].message_type = _ATESTLOGEVENTEXTERNAL_ATESTEXITEVENT
-_ATESTLOGEVENTEXTERNAL.fields_by_name['find_test_finish_event'].message_type = _ATESTLOGEVENTEXTERNAL_FINDTESTFINISHEVENT
-_ATESTLOGEVENTEXTERNAL.fields_by_name['build_finish_event'].message_type = _ATESTLOGEVENTEXTERNAL_BUILDFINISHEVENT
-_ATESTLOGEVENTEXTERNAL.fields_by_name['runner_finish_event'].message_type = _ATESTLOGEVENTEXTERNAL_RUNNERFINISHEVENT
-_ATESTLOGEVENTEXTERNAL.fields_by_name['run_tests_finish_event'].message_type = _ATESTLOGEVENTEXTERNAL_RUNTESTSFINISHEVENT
-_ATESTLOGEVENTEXTERNAL.fields_by_name['local_detect_event'].message_type = _ATESTLOGEVENTEXTERNAL_LOCALDETECTEVENT
-_ATESTLOGEVENTEXTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTEXTERNAL.fields_by_name['atest_start_event'])
-_ATESTLOGEVENTEXTERNAL.fields_by_name['atest_start_event'].containing_oneof = _ATESTLOGEVENTEXTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTEXTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTEXTERNAL.fields_by_name['atest_exit_event'])
-_ATESTLOGEVENTEXTERNAL.fields_by_name['atest_exit_event'].containing_oneof = _ATESTLOGEVENTEXTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTEXTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTEXTERNAL.fields_by_name['find_test_finish_event'])
-_ATESTLOGEVENTEXTERNAL.fields_by_name['find_test_finish_event'].containing_oneof = _ATESTLOGEVENTEXTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTEXTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTEXTERNAL.fields_by_name['build_finish_event'])
-_ATESTLOGEVENTEXTERNAL.fields_by_name['build_finish_event'].containing_oneof = _ATESTLOGEVENTEXTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTEXTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTEXTERNAL.fields_by_name['runner_finish_event'])
-_ATESTLOGEVENTEXTERNAL.fields_by_name['runner_finish_event'].containing_oneof = _ATESTLOGEVENTEXTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTEXTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTEXTERNAL.fields_by_name['run_tests_finish_event'])
-_ATESTLOGEVENTEXTERNAL.fields_by_name['run_tests_finish_event'].containing_oneof = _ATESTLOGEVENTEXTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTEXTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTEXTERNAL.fields_by_name['local_detect_event'])
-_ATESTLOGEVENTEXTERNAL.fields_by_name['local_detect_event'].containing_oneof = _ATESTLOGEVENTEXTERNAL.oneofs_by_name['event']
-DESCRIPTOR.message_types_by_name['AtestLogEventExternal'] = _ATESTLOGEVENTEXTERNAL
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-AtestLogEventExternal = _reflection.GeneratedProtocolMessageType('AtestLogEventExternal', (_message.Message,), {
-
- 'AtestStartEvent' : _reflection.GeneratedProtocolMessageType('AtestStartEvent', (_message.Message,), {
- 'DESCRIPTOR' : _ATESTLOGEVENTEXTERNAL_ATESTSTARTEVENT,
- '__module__' : 'proto.external_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventExternal.AtestStartEvent)
- })
- ,
-
- 'AtestExitEvent' : _reflection.GeneratedProtocolMessageType('AtestExitEvent', (_message.Message,), {
- 'DESCRIPTOR' : _ATESTLOGEVENTEXTERNAL_ATESTEXITEVENT,
- '__module__' : 'proto.external_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventExternal.AtestExitEvent)
- })
- ,
-
- 'FindTestFinishEvent' : _reflection.GeneratedProtocolMessageType('FindTestFinishEvent', (_message.Message,), {
- 'DESCRIPTOR' : _ATESTLOGEVENTEXTERNAL_FINDTESTFINISHEVENT,
- '__module__' : 'proto.external_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventExternal.FindTestFinishEvent)
- })
- ,
-
- 'BuildFinishEvent' : _reflection.GeneratedProtocolMessageType('BuildFinishEvent', (_message.Message,), {
- 'DESCRIPTOR' : _ATESTLOGEVENTEXTERNAL_BUILDFINISHEVENT,
- '__module__' : 'proto.external_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventExternal.BuildFinishEvent)
- })
- ,
-
- 'RunnerFinishEvent' : _reflection.GeneratedProtocolMessageType('RunnerFinishEvent', (_message.Message,), {
- 'DESCRIPTOR' : _ATESTLOGEVENTEXTERNAL_RUNNERFINISHEVENT,
- '__module__' : 'proto.external_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventExternal.RunnerFinishEvent)
- })
- ,
-
- 'RunTestsFinishEvent' : _reflection.GeneratedProtocolMessageType('RunTestsFinishEvent', (_message.Message,), {
- 'DESCRIPTOR' : _ATESTLOGEVENTEXTERNAL_RUNTESTSFINISHEVENT,
- '__module__' : 'proto.external_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventExternal.RunTestsFinishEvent)
- })
- ,
-
- 'LocalDetectEvent' : _reflection.GeneratedProtocolMessageType('LocalDetectEvent', (_message.Message,), {
- 'DESCRIPTOR' : _ATESTLOGEVENTEXTERNAL_LOCALDETECTEVENT,
- '__module__' : 'proto.external_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventExternal.LocalDetectEvent)
- })
- ,
- 'DESCRIPTOR' : _ATESTLOGEVENTEXTERNAL,
- '__module__' : 'proto.external_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventExternal)
- })
-_sym_db.RegisterMessage(AtestLogEventExternal)
-_sym_db.RegisterMessage(AtestLogEventExternal.AtestStartEvent)
-_sym_db.RegisterMessage(AtestLogEventExternal.AtestExitEvent)
-_sym_db.RegisterMessage(AtestLogEventExternal.FindTestFinishEvent)
-_sym_db.RegisterMessage(AtestLogEventExternal.BuildFinishEvent)
-_sym_db.RegisterMessage(AtestLogEventExternal.RunnerFinishEvent)
-_sym_db.RegisterMessage(AtestLogEventExternal.RunTestsFinishEvent)
-_sym_db.RegisterMessage(AtestLogEventExternal.LocalDetectEvent)
-
-
-DESCRIPTOR._options = None
-# @@protoc_insertion_point(module_scope)
diff --git a/atest/proto/file_md5.proto b/atest/proto/file_md5.proto
new file mode 100644
index 0000000..b3be4ae
--- /dev/null
+++ b/atest/proto/file_md5.proto
@@ -0,0 +1,18 @@
+syntax = "proto3";
+
+enum RootType {
+ SRC_ROOT = 0;
+ RESOURCE_ROOT = 1;
+ ABS_PATH = 2;
+ PRODUCT_OUT = 3;
+}
+
+message FileChecksum {
+ RootType root_type = 1;
+ string rel_path = 2;
+ string md5sum = 3;
+}
+
+message FileChecksumList {
+ repeated FileChecksum file_checksums = 1;
+}
diff --git a/atest/proto/internal_user_log.proto b/atest/proto/internal_user_log.proto
index deb064a..3f935c5 100644
--- a/atest/proto/internal_user_log.proto
+++ b/atest/proto/internal_user_log.proto
@@ -1,6 +1,6 @@
syntax = "proto2";
-import "proto/common.proto";
+import "atest/proto/common.proto";
option java_package = "com.android.asuite.clearcut";
diff --git a/atest/proto/internal_user_log_pb2.py b/atest/proto/internal_user_log_pb2.py
deleted file mode 100644
index c27b0e1..0000000
--- a/atest/proto/internal_user_log_pb2.py
+++ /dev/null
@@ -1,637 +0,0 @@
-# -*- coding: utf-8 -*-
-# pylint: skip-file
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: proto/internal_user_log.proto
-
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-from proto import common_pb2 as proto_dot_common__pb2
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
- name='proto/internal_user_log.proto',
- package='',
- syntax='proto2',
- serialized_options=b'\n\033com.android.asuite.clearcut',
- create_key=_descriptor._internal_create_key,
- serialized_pb=b'\n\x1dproto/internal_user_log.proto\x1a\x12proto/common.proto\"\xdb\n\n\x15\x41testLogEventInternal\x12\x10\n\x08user_key\x18\x01 \x01(\t\x12\x0e\n\x06run_id\x18\x02 \x01(\t\x12\x1c\n\tuser_type\x18\x03 \x01(\x0e\x32\t.UserType\x12\x11\n\ttool_name\x18\n \x01(\t\x12\x15\n\rsub_tool_name\x18\x0c \x01(\t\x12\x43\n\x11\x61test_start_event\x18\x04 \x01(\x0b\x32&.AtestLogEventInternal.AtestStartEventH\x00\x12\x41\n\x10\x61test_exit_event\x18\x05 \x01(\x0b\x32%.AtestLogEventInternal.AtestExitEventH\x00\x12L\n\x16\x66ind_test_finish_event\x18\x06 \x01(\x0b\x32*.AtestLogEventInternal.FindTestFinishEventH\x00\x12\x45\n\x12\x62uild_finish_event\x18\x07 \x01(\x0b\x32\'.AtestLogEventInternal.BuildFinishEventH\x00\x12G\n\x13runner_finish_event\x18\x08 \x01(\x0b\x32(.AtestLogEventInternal.RunnerFinishEventH\x00\x12L\n\x16run_tests_finish_event\x18\t \x01(\x0b\x32*.AtestLogEventInternal.RunTestsFinishEventH\x00\x12\x45\n\x12local_detect_event\x18\x0b \x01(\x0b\x32\'.AtestLogEventInternal.LocalDetectEventH\x00\x1aY\n\x0f\x41testStartEvent\x12\x14\n\x0c\x63ommand_line\x18\x01 \x01(\t\x12\x17\n\x0ftest_references\x18\x02 \x03(\t\x12\x0b\n\x03\x63wd\x18\x03 \x01(\t\x12\n\n\x02os\x18\x04 \x01(\t\x1a\x62\n\x0e\x41testExitEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x12\x11\n\texit_code\x18\x02 \x01(\x05\x12\x12\n\nstacktrace\x18\x03 \x01(\t\x12\x0c\n\x04logs\x18\x04 \x01(\t\x1a\x84\x01\n\x13\x46indTestFinishEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x16\n\x0etest_reference\x18\x03 \x01(\t\x12\x14\n\x0ctest_finders\x18\x04 \x03(\t\x12\x11\n\ttest_info\x18\x05 \x01(\t\x1aQ\n\x10\x42uildFinishEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x0f\n\x07targets\x18\x03 \x03(\t\x1a\xcd\x01\n\x11RunnerFinishEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x12\x0f\n\x07success\x18\x02 
\x01(\x08\x12\x13\n\x0brunner_name\x18\x03 \x01(\t\x12;\n\x04test\x18\x04 \x03(\x0b\x32-.AtestLogEventInternal.RunnerFinishEvent.Test\x1a\x38\n\x04Test\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06result\x18\x02 \x01(\x05\x12\x12\n\nstacktrace\x18\x03 \x01(\t\x1a\x32\n\x13RunTestsFinishEvent\x12\x1b\n\x08\x64uration\x18\x01 \x01(\x0b\x32\t.Duration\x1a\x37\n\x10LocalDetectEvent\x12\x13\n\x0b\x64\x65tect_type\x18\x01 \x01(\x05\x12\x0e\n\x06result\x18\x02 \x01(\x05\x42\x07\n\x05\x65ventB\x1d\n\x1b\x63om.android.asuite.clearcut'
- ,
- dependencies=[proto_dot_common__pb2.DESCRIPTOR,])
-
-
-
-
-_ATESTLOGEVENTINTERNAL_ATESTSTARTEVENT = _descriptor.Descriptor(
- name='AtestStartEvent',
- full_name='AtestLogEventInternal.AtestStartEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name='command_line', full_name='AtestLogEventInternal.AtestStartEvent.command_line', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='test_references', full_name='AtestLogEventInternal.AtestStartEvent.test_references', index=1,
- number=2, type=9, cpp_type=9, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='cwd', full_name='AtestLogEventInternal.AtestStartEvent.cwd', index=2,
- number=3, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='os', full_name='AtestLogEventInternal.AtestStartEvent.os', index=3,
- number=4, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=692,
- serialized_end=781,
-)
-
-_ATESTLOGEVENTINTERNAL_ATESTEXITEVENT = _descriptor.Descriptor(
- name='AtestExitEvent',
- full_name='AtestLogEventInternal.AtestExitEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventInternal.AtestExitEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='exit_code', full_name='AtestLogEventInternal.AtestExitEvent.exit_code', index=1,
- number=2, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='stacktrace', full_name='AtestLogEventInternal.AtestExitEvent.stacktrace', index=2,
- number=3, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='logs', full_name='AtestLogEventInternal.AtestExitEvent.logs', index=3,
- number=4, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=783,
- serialized_end=881,
-)
-
-_ATESTLOGEVENTINTERNAL_FINDTESTFINISHEVENT = _descriptor.Descriptor(
- name='FindTestFinishEvent',
- full_name='AtestLogEventInternal.FindTestFinishEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventInternal.FindTestFinishEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='success', full_name='AtestLogEventInternal.FindTestFinishEvent.success', index=1,
- number=2, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='test_reference', full_name='AtestLogEventInternal.FindTestFinishEvent.test_reference', index=2,
- number=3, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='test_finders', full_name='AtestLogEventInternal.FindTestFinishEvent.test_finders', index=3,
- number=4, type=9, cpp_type=9, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='test_info', full_name='AtestLogEventInternal.FindTestFinishEvent.test_info', index=4,
- number=5, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=884,
- serialized_end=1016,
-)
-
-_ATESTLOGEVENTINTERNAL_BUILDFINISHEVENT = _descriptor.Descriptor(
- name='BuildFinishEvent',
- full_name='AtestLogEventInternal.BuildFinishEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventInternal.BuildFinishEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='success', full_name='AtestLogEventInternal.BuildFinishEvent.success', index=1,
- number=2, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='targets', full_name='AtestLogEventInternal.BuildFinishEvent.targets', index=2,
- number=3, type=9, cpp_type=9, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1018,
- serialized_end=1099,
-)
-
-_ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT_TEST = _descriptor.Descriptor(
- name='Test',
- full_name='AtestLogEventInternal.RunnerFinishEvent.Test',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name='name', full_name='AtestLogEventInternal.RunnerFinishEvent.Test.name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='result', full_name='AtestLogEventInternal.RunnerFinishEvent.Test.result', index=1,
- number=2, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='stacktrace', full_name='AtestLogEventInternal.RunnerFinishEvent.Test.stacktrace', index=2,
- number=3, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1251,
- serialized_end=1307,
-)
-
-_ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT = _descriptor.Descriptor(
- name='RunnerFinishEvent',
- full_name='AtestLogEventInternal.RunnerFinishEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventInternal.RunnerFinishEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='success', full_name='AtestLogEventInternal.RunnerFinishEvent.success', index=1,
- number=2, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='runner_name', full_name='AtestLogEventInternal.RunnerFinishEvent.runner_name', index=2,
- number=3, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='test', full_name='AtestLogEventInternal.RunnerFinishEvent.test', index=3,
- number=4, type=11, cpp_type=10, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- ],
- extensions=[
- ],
- nested_types=[_ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT_TEST, ],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1102,
- serialized_end=1307,
-)
-
-_ATESTLOGEVENTINTERNAL_RUNTESTSFINISHEVENT = _descriptor.Descriptor(
- name='RunTestsFinishEvent',
- full_name='AtestLogEventInternal.RunTestsFinishEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name='duration', full_name='AtestLogEventInternal.RunTestsFinishEvent.duration', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1309,
- serialized_end=1359,
-)
-
-_ATESTLOGEVENTINTERNAL_LOCALDETECTEVENT = _descriptor.Descriptor(
- name='LocalDetectEvent',
- full_name='AtestLogEventInternal.LocalDetectEvent',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name='detect_type', full_name='AtestLogEventInternal.LocalDetectEvent.detect_type', index=0,
- number=1, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='result', full_name='AtestLogEventInternal.LocalDetectEvent.result', index=1,
- number=2, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1361,
- serialized_end=1416,
-)
-
-_ATESTLOGEVENTINTERNAL = _descriptor.Descriptor(
- name='AtestLogEventInternal',
- full_name='AtestLogEventInternal',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name='user_key', full_name='AtestLogEventInternal.user_key', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='run_id', full_name='AtestLogEventInternal.run_id', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='user_type', full_name='AtestLogEventInternal.user_type', index=2,
- number=3, type=14, cpp_type=8, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='tool_name', full_name='AtestLogEventInternal.tool_name', index=3,
- number=10, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='sub_tool_name', full_name='AtestLogEventInternal.sub_tool_name', index=4,
- number=12, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='atest_start_event', full_name='AtestLogEventInternal.atest_start_event', index=5,
- number=4, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='atest_exit_event', full_name='AtestLogEventInternal.atest_exit_event', index=6,
- number=5, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='find_test_finish_event', full_name='AtestLogEventInternal.find_test_finish_event', index=7,
- number=6, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='build_finish_event', full_name='AtestLogEventInternal.build_finish_event', index=8,
- number=7, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='runner_finish_event', full_name='AtestLogEventInternal.runner_finish_event', index=9,
- number=8, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='run_tests_finish_event', full_name='AtestLogEventInternal.run_tests_finish_event', index=10,
- number=9, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- _descriptor.FieldDescriptor(
- name='local_detect_event', full_name='AtestLogEventInternal.local_detect_event', index=11,
- number=11, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
- ],
- extensions=[
- ],
- nested_types=[_ATESTLOGEVENTINTERNAL_ATESTSTARTEVENT, _ATESTLOGEVENTINTERNAL_ATESTEXITEVENT, _ATESTLOGEVENTINTERNAL_FINDTESTFINISHEVENT, _ATESTLOGEVENTINTERNAL_BUILDFINISHEVENT, _ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT, _ATESTLOGEVENTINTERNAL_RUNTESTSFINISHEVENT, _ATESTLOGEVENTINTERNAL_LOCALDETECTEVENT, ],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto2',
- extension_ranges=[],
- oneofs=[
- _descriptor.OneofDescriptor(
- name='event', full_name='AtestLogEventInternal.event',
- index=0, containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[]),
- ],
- serialized_start=54,
- serialized_end=1425,
-)
-
-_ATESTLOGEVENTINTERNAL_ATESTSTARTEVENT.containing_type = _ATESTLOGEVENTINTERNAL
-_ATESTLOGEVENTINTERNAL_ATESTEXITEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTINTERNAL_ATESTEXITEVENT.containing_type = _ATESTLOGEVENTINTERNAL
-_ATESTLOGEVENTINTERNAL_FINDTESTFINISHEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTINTERNAL_FINDTESTFINISHEVENT.containing_type = _ATESTLOGEVENTINTERNAL
-_ATESTLOGEVENTINTERNAL_BUILDFINISHEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTINTERNAL_BUILDFINISHEVENT.containing_type = _ATESTLOGEVENTINTERNAL
-_ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT_TEST.containing_type = _ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT
-_ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT.fields_by_name['test'].message_type = _ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT_TEST
-_ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT.containing_type = _ATESTLOGEVENTINTERNAL
-_ATESTLOGEVENTINTERNAL_RUNTESTSFINISHEVENT.fields_by_name['duration'].message_type = proto_dot_common__pb2._DURATION
-_ATESTLOGEVENTINTERNAL_RUNTESTSFINISHEVENT.containing_type = _ATESTLOGEVENTINTERNAL
-_ATESTLOGEVENTINTERNAL_LOCALDETECTEVENT.containing_type = _ATESTLOGEVENTINTERNAL
-_ATESTLOGEVENTINTERNAL.fields_by_name['user_type'].enum_type = proto_dot_common__pb2._USERTYPE
-_ATESTLOGEVENTINTERNAL.fields_by_name['atest_start_event'].message_type = _ATESTLOGEVENTINTERNAL_ATESTSTARTEVENT
-_ATESTLOGEVENTINTERNAL.fields_by_name['atest_exit_event'].message_type = _ATESTLOGEVENTINTERNAL_ATESTEXITEVENT
-_ATESTLOGEVENTINTERNAL.fields_by_name['find_test_finish_event'].message_type = _ATESTLOGEVENTINTERNAL_FINDTESTFINISHEVENT
-_ATESTLOGEVENTINTERNAL.fields_by_name['build_finish_event'].message_type = _ATESTLOGEVENTINTERNAL_BUILDFINISHEVENT
-_ATESTLOGEVENTINTERNAL.fields_by_name['runner_finish_event'].message_type = _ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT
-_ATESTLOGEVENTINTERNAL.fields_by_name['run_tests_finish_event'].message_type = _ATESTLOGEVENTINTERNAL_RUNTESTSFINISHEVENT
-_ATESTLOGEVENTINTERNAL.fields_by_name['local_detect_event'].message_type = _ATESTLOGEVENTINTERNAL_LOCALDETECTEVENT
-_ATESTLOGEVENTINTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTINTERNAL.fields_by_name['atest_start_event'])
-_ATESTLOGEVENTINTERNAL.fields_by_name['atest_start_event'].containing_oneof = _ATESTLOGEVENTINTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTINTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTINTERNAL.fields_by_name['atest_exit_event'])
-_ATESTLOGEVENTINTERNAL.fields_by_name['atest_exit_event'].containing_oneof = _ATESTLOGEVENTINTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTINTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTINTERNAL.fields_by_name['find_test_finish_event'])
-_ATESTLOGEVENTINTERNAL.fields_by_name['find_test_finish_event'].containing_oneof = _ATESTLOGEVENTINTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTINTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTINTERNAL.fields_by_name['build_finish_event'])
-_ATESTLOGEVENTINTERNAL.fields_by_name['build_finish_event'].containing_oneof = _ATESTLOGEVENTINTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTINTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTINTERNAL.fields_by_name['runner_finish_event'])
-_ATESTLOGEVENTINTERNAL.fields_by_name['runner_finish_event'].containing_oneof = _ATESTLOGEVENTINTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTINTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTINTERNAL.fields_by_name['run_tests_finish_event'])
-_ATESTLOGEVENTINTERNAL.fields_by_name['run_tests_finish_event'].containing_oneof = _ATESTLOGEVENTINTERNAL.oneofs_by_name['event']
-_ATESTLOGEVENTINTERNAL.oneofs_by_name['event'].fields.append(
- _ATESTLOGEVENTINTERNAL.fields_by_name['local_detect_event'])
-_ATESTLOGEVENTINTERNAL.fields_by_name['local_detect_event'].containing_oneof = _ATESTLOGEVENTINTERNAL.oneofs_by_name['event']
-DESCRIPTOR.message_types_by_name['AtestLogEventInternal'] = _ATESTLOGEVENTINTERNAL
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-AtestLogEventInternal = _reflection.GeneratedProtocolMessageType('AtestLogEventInternal', (_message.Message,), {
-
- 'AtestStartEvent' : _reflection.GeneratedProtocolMessageType('AtestStartEvent', (_message.Message,), {
- 'DESCRIPTOR' : _ATESTLOGEVENTINTERNAL_ATESTSTARTEVENT,
- '__module__' : 'proto.internal_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventInternal.AtestStartEvent)
- })
- ,
-
- 'AtestExitEvent' : _reflection.GeneratedProtocolMessageType('AtestExitEvent', (_message.Message,), {
- 'DESCRIPTOR' : _ATESTLOGEVENTINTERNAL_ATESTEXITEVENT,
- '__module__' : 'proto.internal_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventInternal.AtestExitEvent)
- })
- ,
-
- 'FindTestFinishEvent' : _reflection.GeneratedProtocolMessageType('FindTestFinishEvent', (_message.Message,), {
- 'DESCRIPTOR' : _ATESTLOGEVENTINTERNAL_FINDTESTFINISHEVENT,
- '__module__' : 'proto.internal_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventInternal.FindTestFinishEvent)
- })
- ,
-
- 'BuildFinishEvent' : _reflection.GeneratedProtocolMessageType('BuildFinishEvent', (_message.Message,), {
- 'DESCRIPTOR' : _ATESTLOGEVENTINTERNAL_BUILDFINISHEVENT,
- '__module__' : 'proto.internal_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventInternal.BuildFinishEvent)
- })
- ,
-
- 'RunnerFinishEvent' : _reflection.GeneratedProtocolMessageType('RunnerFinishEvent', (_message.Message,), {
-
- 'Test' : _reflection.GeneratedProtocolMessageType('Test', (_message.Message,), {
- 'DESCRIPTOR' : _ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT_TEST,
- '__module__' : 'proto.internal_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventInternal.RunnerFinishEvent.Test)
- })
- ,
- 'DESCRIPTOR' : _ATESTLOGEVENTINTERNAL_RUNNERFINISHEVENT,
- '__module__' : 'proto.internal_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventInternal.RunnerFinishEvent)
- })
- ,
-
- 'RunTestsFinishEvent' : _reflection.GeneratedProtocolMessageType('RunTestsFinishEvent', (_message.Message,), {
- 'DESCRIPTOR' : _ATESTLOGEVENTINTERNAL_RUNTESTSFINISHEVENT,
- '__module__' : 'proto.internal_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventInternal.RunTestsFinishEvent)
- })
- ,
-
- 'LocalDetectEvent' : _reflection.GeneratedProtocolMessageType('LocalDetectEvent', (_message.Message,), {
- 'DESCRIPTOR' : _ATESTLOGEVENTINTERNAL_LOCALDETECTEVENT,
- '__module__' : 'proto.internal_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventInternal.LocalDetectEvent)
- })
- ,
- 'DESCRIPTOR' : _ATESTLOGEVENTINTERNAL,
- '__module__' : 'proto.internal_user_log_pb2'
- # @@protoc_insertion_point(class_scope:AtestLogEventInternal)
- })
-_sym_db.RegisterMessage(AtestLogEventInternal)
-_sym_db.RegisterMessage(AtestLogEventInternal.AtestStartEvent)
-_sym_db.RegisterMessage(AtestLogEventInternal.AtestExitEvent)
-_sym_db.RegisterMessage(AtestLogEventInternal.FindTestFinishEvent)
-_sym_db.RegisterMessage(AtestLogEventInternal.BuildFinishEvent)
-_sym_db.RegisterMessage(AtestLogEventInternal.RunnerFinishEvent)
-_sym_db.RegisterMessage(AtestLogEventInternal.RunnerFinishEvent.Test)
-_sym_db.RegisterMessage(AtestLogEventInternal.RunTestsFinishEvent)
-_sym_db.RegisterMessage(AtestLogEventInternal.LocalDetectEvent)
-
-
-DESCRIPTOR._options = None
-# @@protoc_insertion_point(module_scope)
diff --git a/atest/res/config/template/atest_local_min.xml b/atest/res/config/template/atest_local_min.xml
index bf6550d..fc86404 100644
--- a/atest/res/config/template/atest_local_min.xml
+++ b/atest/res/config/template/atest_local_min.xml
@@ -25,6 +25,8 @@
<template-include name="log_saver" default="empty" />
+ <system_checker class="com.android.tradefed.suite.checker.DeviceBaselineChecker" />
+
<logger class="com.android.tradefed.log.FileLogger">
<option name="log-level" value="VERBOSE" />
<option name="log-level-display" value="VERBOSE" />
diff --git a/atest/result_reporter.py b/atest/result_reporter.py
index 5cbe696..00c13ac 100644
--- a/atest/result_reporter.py
+++ b/atest/result_reporter.py
@@ -72,12 +72,12 @@
from collections import OrderedDict
-import constants
-import atest_configs
-import atest_utils as au
+from atest import constants
+from atest import atest_configs
+from atest import atest_utils as au
-from atest_enum import ExitCode
-from test_runners import test_runner_base
+from atest.atest_enum import ExitCode
+from atest.test_runners import test_runner_base
UNSUPPORTED_FLAG = 'UNSUPPORTED_RUNNER'
FAILURE_FLAG = 'RUNNER_FAILURE'
@@ -373,11 +373,14 @@
tests_ret = ExitCode.SUCCESS
if not self.runners:
return tests_ret
- device_detail = (
- ' (Test executed with {} device(s).)'.format(self.device_count)
- ) if self.device_count else ''
- print('\n{}'.format(au.colorize('Summary{}'.format(device_detail),
- constants.CYAN)))
+ if not self.device_count:
+ device_detail = ''
+ elif self.device_count == 1:
+ device_detail = '(Test executed with 1 device.)'
+ else:
+ device_detail = f'(Test executed with {self.device_count} devices.)'
+ print('\n{}'.format(au.colorize(f'Summary {device_detail}',
+ constants.CYAN)))
print(au.delimiter('-', 7))
iterations = len(ITER_SUMMARY)
for iter_num, summary_list in ITER_SUMMARY.items():
@@ -390,7 +393,7 @@
for runner_name, groups in self.runners.items():
if groups == UNSUPPORTED_FLAG:
print(f'Pretty output does not support {runner_name}. '
- f'See raw output above.')
+ r'See raw output above.')
continue
if groups == FAILURE_FLAG:
tests_ret = ExitCode.TEST_FAILURE
@@ -400,11 +403,10 @@
for group_name, stats in groups.items():
name = group_name if group_name else runner_name
summary = self.process_summary(name, stats)
- if stats.failed > 0:
+ if stats.failed > 0 or stats.run_errors:
tests_ret = ExitCode.TEST_FAILURE
- if stats.run_errors:
- tests_ret = ExitCode.TEST_FAILURE
- failed_sum += 1 if not stats.failed else 0
+ if stats.run_errors:
+ failed_sum += 1 if not stats.failed else 0
if not ITER_SUMMARY:
print(summary)
self.run_stats.perf_info.print_perf_info()
@@ -573,7 +575,7 @@
error_label = au.colorize('(Completed With ERRORS)', constants.RED)
# Only extract host_log_content if test name is tradefed
# Import here to prevent circular-import error.
- from test_runners import atest_tf_test_runner
+ from atest.test_runners import atest_tf_test_runner
if name == atest_tf_test_runner.AtestTradefedTestRunner.NAME:
find_logs = au.find_files(self.log_path,
file_name=constants.TF_HOST_LOG)
@@ -677,7 +679,7 @@
print('%s (%s %s)' % (au.colorize(test.test_run_name,
constants.BLUE),
test.group_total,
- 'Test(s)'))
+ 'Test' if test.group_total == 1 else 'Tests'))
if test.status == test_runner_base.ERROR_STATUS:
print('RUNNER ERROR: %s\n' % test.details)
self.pre_test = test
@@ -705,10 +707,10 @@
print(': {} {}'.format(au.colorize(test.status, color),
test.test_time))
if test.status == test_runner_base.PASSED_STATUS:
- for key, data in test.additional_info.items():
+ for key, data in sorted(test.additional_info.items()):
if key not in BENCHMARK_EVENT_KEYS:
print('\t%s: %s' % (au.colorize(key, constants.BLUE),
data))
if test.status == test_runner_base.FAILED_STATUS:
- print('\nSTACKTRACE:\n%s' % test.details)
+ print(f'\nSTACKTRACE:\n{test.details}')
self.pre_test = test
diff --git a/atest/result_reporter_unittest.py b/atest/result_reporter_unittest.py
index a9aada8..e23ca85 100755
--- a/atest/result_reporter_unittest.py
+++ b/atest/result_reporter_unittest.py
@@ -24,10 +24,10 @@
from io import StringIO
from unittest import mock
-import atest_configs
-import result_reporter
+from atest import atest_configs
+from atest import result_reporter
-from test_runners import test_runner_base
+from atest.test_runners import test_runner_base
RESULT_PASSED_TEST = test_runner_base.TestResult(
diff --git a/atest/test_data/test_commands.json b/atest/test_data/test_commands.json
index 4283ad0..5e41c83 100644
--- a/atest/test_data/test_commands.json
+++ b/atest/test_data/test_commands.json
@@ -167,6 +167,7 @@
],
"HelloWorldTests": [
"--include-filter",
+"--include-filter",
"--log-level",
"--log-level-display",
"--logcat-on-failure",
@@ -179,6 +180,7 @@
"VERBOSE",
"VERBOSE",
"atest_tradefed.sh",
+"hallo-welt",
"log_saver=template/log/atest_log_saver",
"template/atest_local_min",
"test=atest"
@@ -339,10 +341,14 @@
],
"android.os.cts.CompanionDeviceManagerTest": [
"--atest-include-filter",
-"--include-filter",
+"--enable-parameterized-modules",
+"--exclude-module-parameters",
+"--exclude-module-parameters",
+"--exclude-module-parameters",
"--log-level",
"--log-level-display",
"--logcat-on-failure",
+"--module",
"--no-early-device-release",
"--no-enable-granular-attempts",
"--skip-loading-config-jar",
@@ -355,16 +361,23 @@
"VERBOSE",
"atest_tradefed.sh",
"com.android.tradefed.testtype.AndroidJUnitTest:exclude-annotation:android.platform.test.annotations.AppModeInstant",
+"instant_app",
"log_saver=template/log/atest_log_saver",
+"multi_abi",
+"secondary_user",
"template/atest_local_min",
"test=atest"
],
"android.os.cts.CompanionDeviceManagerTest#testIsDeviceAssociatedWithCompanionApproveWifiConnectionsPermission": [
"--atest-include-filter",
-"--include-filter",
+"--enable-parameterized-modules",
+"--exclude-module-parameters",
+"--exclude-module-parameters",
+"--exclude-module-parameters",
"--log-level",
"--log-level-display",
"--logcat-on-failure",
+"--module",
"--no-early-device-release",
"--no-enable-granular-attempts",
"--skip-loading-config-jar",
@@ -377,7 +390,10 @@
"VERBOSE",
"atest_tradefed.sh",
"com.android.tradefed.testtype.AndroidJUnitTest:exclude-annotation:android.platform.test.annotations.AppModeInstant",
+"instant_app",
"log_saver=template/log/atest_log_saver",
+"multi_abi",
+"secondary_user",
"template/atest_local_min",
"test=atest"
],
@@ -447,10 +463,14 @@
],
"cts/tests/tests/os/src/android/os/cts/CompanionDeviceManagerTest.kt#testIsDeviceAssociated": [
"--atest-include-filter",
-"--include-filter",
+"--enable-parameterized-modules",
+"--exclude-module-parameters",
+"--exclude-module-parameters",
+"--exclude-module-parameters",
"--log-level",
"--log-level-display",
"--logcat-on-failure",
+"--module",
"--no-early-device-release",
"--no-enable-granular-attempts",
"--skip-loading-config-jar",
@@ -463,7 +483,10 @@
"VERBOSE",
"atest_tradefed.sh",
"com.android.tradefed.testtype.AndroidJUnitTest:exclude-annotation:android.platform.test.annotations.AppModeInstant",
+"instant_app",
"log_saver=template/log/atest_log_saver",
+"multi_abi",
+"secondary_user",
"template/atest_local_min",
"test=atest"
],
@@ -639,5 +662,25 @@
"log_saver=template/log/atest_log_saver",
"template/atest_local_min",
"test=atest"
+],
+"pts-bot:PAN/GN/MISC/UUID/BV-01-C": [
+"--atest-include-filter",
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"log_saver=template/log/atest_log_saver",
+"pts-bot",
+"pts-bot:PAN/GN/MISC/UUID/BV-01-C",
+"template/atest_local_min",
+"test=atest"
]
}
\ No newline at end of file
diff --git a/atest/test_finder_handler.py b/atest/test_finder_handler.py
index c03e401..94f79d9 100644
--- a/atest/test_finder_handler.py
+++ b/atest/test_finder_handler.py
@@ -23,14 +23,16 @@
import inspect
import logging
-import atest_enum
-import constants
+from enum import unique, Enum
-from test_finders import cache_finder
-from test_finders import test_finder_base
-from test_finders import suite_plan_finder
-from test_finders import tf_integration_finder
-from test_finders import module_finder
+from atest import constants
+
+from atest.test_finders import cache_finder
+from atest.test_finders import test_finder_base
+from atest.test_finders import test_finder_utils
+from atest.test_finders import suite_plan_finder
+from atest.test_finders import tf_integration_finder
+from atest.test_finders import module_finder
# List of default test finder classes.
_TEST_FINDERS = {
@@ -40,57 +42,75 @@
cache_finder.CacheFinder,
}
-# Explanation of REFERENCE_TYPEs:
-# ----------------------------------
-# 0. MODULE: LOCAL_MODULE or LOCAL_PACKAGE_NAME value in Android.mk/Android.bp.
-# 1. MAINLINE_MODULE: module[mod1.apk+mod2.apex] pattern in TEST_MAPPING files.
-# 2. CLASS: Names which the same with a ClassName.java/kt file.
-# 3. QUALIFIED_CLASS: String like "a.b.c.ClassName".
-# 4. MODULE_CLASS: Combo of MODULE and CLASS as "module:class".
-# 5. PACKAGE: Package in java file. Same as file path to java file.
-# 6. MODULE_PACKAGE: Combo of MODULE and PACKAGE as "module:package".
-# 7. MODULE_FILE_PATH: File path to dir of tests or test itself.
-# 8. INTEGRATION_FILE_PATH: File path to config xml in one of the 4 integration
-# config directories.
-# 9. INTEGRATION: xml file name in one of the 4 integration config directories.
-# 10. SUITE: Value of the "run-suite-tag" in xml config file in 4 config dirs.
-# Same as value of "test-suite-tag" in AndroidTest.xml files.
-# 11. CC_CLASS: Test case in cc file.
-# 12. SUITE_PLAN: Suite name such as cts.
-# 13. SUITE_PLAN_FILE_PATH: File path to config xml in the suite config
-# directories.
-# 14. CACHE: A pseudo type that runs cache_finder without finding test in real.
-_REFERENCE_TYPE = atest_enum.AtestEnum(['MODULE', 'MAINLINE_MODULE',
- 'CLASS', 'QUALIFIED_CLASS',
- 'MODULE_CLASS', 'PACKAGE',
- 'MODULE_PACKAGE', 'MODULE_FILE_PATH',
- 'INTEGRATION_FILE_PATH', 'INTEGRATION',
- 'SUITE', 'CC_CLASS', 'SUITE_PLAN',
- 'SUITE_PLAN_FILE_PATH', 'CACHE',
- 'CONFIG'])
+@unique
+class FinderMethod(Enum):
+ """An enum object for test finders.
-_REF_TYPE_TO_FUNC_MAP = {
- _REFERENCE_TYPE.MODULE: module_finder.ModuleFinder.find_test_by_module_name,
- _REFERENCE_TYPE.MAINLINE_MODULE: module_finder.MainlineModuleFinder.find_test_by_module_name,
- _REFERENCE_TYPE.CLASS: module_finder.ModuleFinder.find_test_by_class_name,
- _REFERENCE_TYPE.MODULE_CLASS: module_finder.ModuleFinder.find_test_by_module_and_class,
- _REFERENCE_TYPE.QUALIFIED_CLASS: module_finder.ModuleFinder.find_test_by_class_name,
- _REFERENCE_TYPE.PACKAGE: module_finder.ModuleFinder.find_test_by_package_name,
- _REFERENCE_TYPE.MODULE_PACKAGE: module_finder.ModuleFinder.find_test_by_module_and_package,
- _REFERENCE_TYPE.MODULE_FILE_PATH: module_finder.ModuleFinder.find_test_by_path,
- _REFERENCE_TYPE.INTEGRATION_FILE_PATH:
- tf_integration_finder.TFIntegrationFinder.find_int_test_by_path,
- _REFERENCE_TYPE.INTEGRATION:
- tf_integration_finder.TFIntegrationFinder.find_test_by_integration_name,
- _REFERENCE_TYPE.CC_CLASS:
- module_finder.ModuleFinder.find_test_by_cc_class_name,
- _REFERENCE_TYPE.SUITE_PLAN:suite_plan_finder.SuitePlanFinder.find_test_by_suite_name,
- _REFERENCE_TYPE.SUITE_PLAN_FILE_PATH:
- suite_plan_finder.SuitePlanFinder.find_test_by_suite_path,
- _REFERENCE_TYPE.CACHE: cache_finder.CacheFinder.find_test_by_cache,
- _REFERENCE_TYPE.CONFIG: module_finder.ModuleFinder.find_test_by_config_name,
-}
+ Explanation of FinderMethod:
+ 0. MODULE: LOCAL_MODULE or LOCAL_PACKAGE_NAME value in Android.mk/Android.bp.
+ 1. MAINLINE_MODULE: module[mod1.apk+mod2.apex] pattern in TEST_MAPPING files.
+ 2. CLASS: Names which the same with a ClassName.java/kt file.
+ 3. QUALIFIED_CLASS: String like "a.b.c.ClassName".
+ 4. MODULE_CLASS: Combo of MODULE and CLASS as "module:class".
+ 5. PACKAGE: Package in java file. Same as file path to java file.
+ 6. MODULE_PACKAGE: Combo of MODULE and PACKAGE as "module:package".
+ 7. MODULE_FILE_PATH: File path to dir of tests or test itself.
+ 8. INTEGRATION_FILE_PATH: File path to config xml in one of the 4 integration
+ config directories.
+ 9. INTEGRATION: xml file name in one of the 4 integration config directories.
+ 10. SUITE: Value of the "run-suite-tag" in xml config file in 4 config dirs.
+ Same as value of "test-suite-tag" in AndroidTest.xml files.
+ 11. CC_CLASS: Test case in cc file.
+ 12. SUITE_PLAN: Suite name such as cts.
+ 13. SUITE_PLAN_FILE_PATH: File path to config xml in the suite config
+ directories.
+ 14. CACHE: A pseudo type that runs cache_finder without finding test in real.
+ 15: CONFIG: Find tests by the given AndroidTest.xml file path.
+ """
+ MODULE = ('MODULE',
+ module_finder.ModuleFinder.find_test_by_module_name)
+ MAINLINE_MODULE = (
+ 'MAINLINE_MODULE',
+ module_finder.MainlineModuleFinder.find_test_by_module_name)
+ CLASS = ('CLASS', module_finder.ModuleFinder.find_test_by_class_name)
+ MODULE_CLASS = (
+ 'MODULE_CLASS',
+ module_finder.ModuleFinder.find_test_by_module_and_class)
+ QUALIFIED_CLASS = (
+ 'QUALIFIED_CLASS', module_finder.ModuleFinder.find_test_by_class_name)
+ PACKAGE = ('PACKAGE', module_finder.ModuleFinder.find_test_by_package_name)
+ MODULE_PACKAGE = (
+ 'MODULE_PACKAGE',
+ module_finder.ModuleFinder.find_test_by_module_and_package)
+ MODULE_FILE_PATH = (
+ 'MODULE_FILE_PATH', module_finder.ModuleFinder.find_test_by_path)
+ INTEGRATION_FILE_PATH = (
+ 'INTEGRATION_FILE_PATH',
+ tf_integration_finder.TFIntegrationFinder.find_int_test_by_path)
+ INTEGRATION = (
+ 'INTEGRATION',
+ tf_integration_finder.TFIntegrationFinder.find_test_by_integration_name)
+ CC_CLASS = ('CC_CLASS',
+ module_finder.ModuleFinder.find_test_by_cc_class_name)
+ SUITE_PLAN = ('SUITE_PLAN',
+ suite_plan_finder.SuitePlanFinder.find_test_by_suite_name)
+ SUITE_PLAN_FILE_PATH = (
+ 'SUITE_PLAN_FILE_PATH',
+ suite_plan_finder.SuitePlanFinder.find_test_by_suite_path)
+ CACHE = ('CACHE', cache_finder.CacheFinder.find_test_by_cache)
+ CONFIG = ('CONFIG', module_finder.ModuleFinder.find_test_by_config_name)
+ def __init__(self, name, method):
+ self._name = name
+ self._method = method
+
+ def get_name(self):
+ """Return finder's name."""
+ return self._name
+
+ def get_method(self):
+ """Return finder's method."""
+ return self._method
def _get_finder_instance_dict(module_info):
"""Return dict of finder instances.
@@ -144,79 +164,81 @@
A list of possible REFERENCE_TYPEs (ints) for reference string.
"""
if ref.startswith('.') or '..' in ref:
- return [_REFERENCE_TYPE.CACHE,
- _REFERENCE_TYPE.MODULE_FILE_PATH,
- _REFERENCE_TYPE.INTEGRATION_FILE_PATH,
- _REFERENCE_TYPE.SUITE_PLAN_FILE_PATH]
+ return [FinderMethod.CACHE,
+ FinderMethod.MODULE_FILE_PATH,
+ FinderMethod.INTEGRATION_FILE_PATH,
+ FinderMethod.SUITE_PLAN_FILE_PATH]
if '/' in ref:
if ref.startswith('/'):
- return [_REFERENCE_TYPE.CACHE,
- _REFERENCE_TYPE.MODULE_FILE_PATH,
- _REFERENCE_TYPE.INTEGRATION_FILE_PATH,
- _REFERENCE_TYPE.SUITE_PLAN_FILE_PATH]
+ return [FinderMethod.CACHE,
+ FinderMethod.MODULE_FILE_PATH,
+ FinderMethod.INTEGRATION_FILE_PATH,
+ FinderMethod.SUITE_PLAN_FILE_PATH]
if ':' in ref:
- return [_REFERENCE_TYPE.CACHE,
- _REFERENCE_TYPE.MODULE_FILE_PATH,
- _REFERENCE_TYPE.INTEGRATION_FILE_PATH,
- _REFERENCE_TYPE.INTEGRATION,
- _REFERENCE_TYPE.SUITE_PLAN_FILE_PATH,
- _REFERENCE_TYPE.MODULE_CLASS]
- return [_REFERENCE_TYPE.CACHE,
- _REFERENCE_TYPE.MODULE_FILE_PATH,
- _REFERENCE_TYPE.INTEGRATION_FILE_PATH,
- _REFERENCE_TYPE.INTEGRATION,
- _REFERENCE_TYPE.SUITE_PLAN_FILE_PATH,
- _REFERENCE_TYPE.CC_CLASS,
+ return [FinderMethod.CACHE,
+ FinderMethod.MODULE_FILE_PATH,
+ FinderMethod.INTEGRATION_FILE_PATH,
+ FinderMethod.INTEGRATION,
+ FinderMethod.SUITE_PLAN_FILE_PATH,
+ FinderMethod.MODULE_CLASS]
+ return [FinderMethod.CACHE,
+ FinderMethod.MODULE_FILE_PATH,
+ FinderMethod.INTEGRATION_FILE_PATH,
+ FinderMethod.INTEGRATION,
+ FinderMethod.SUITE_PLAN_FILE_PATH,
+ FinderMethod.CC_CLASS,
# TODO: Uncomment in SUITE when it's supported
- # _REFERENCE_TYPE.SUITE
+ # FinderMethod.SUITE
]
if constants.TEST_WITH_MAINLINE_MODULES_RE.match(ref):
- return [_REFERENCE_TYPE.CACHE, _REFERENCE_TYPE.MAINLINE_MODULE]
+ return [FinderMethod.CACHE, FinderMethod.MAINLINE_MODULE]
if '.' in ref:
ref_end = ref.rsplit('.', 1)[-1]
ref_end_is_upper = ref_end[0].isupper()
- if ':' in ref:
+ # parse_test_reference() will return none empty dictionary if input test
+ # reference match $module:$package_class.
+ if test_finder_utils.parse_test_reference(ref):
if '.' in ref:
if ref_end_is_upper:
# Module:fully.qualified.Class or Integration:fully.q.Class
- return [_REFERENCE_TYPE.CACHE,
- _REFERENCE_TYPE.MODULE_CLASS,
- _REFERENCE_TYPE.INTEGRATION]
+ return [FinderMethod.CACHE,
+ FinderMethod.MODULE_CLASS,
+ FinderMethod.INTEGRATION]
# Module:some.package
- return [_REFERENCE_TYPE.CACHE, _REFERENCE_TYPE.MODULE_PACKAGE,
- _REFERENCE_TYPE.MODULE_CLASS]
+ return [FinderMethod.CACHE, FinderMethod.MODULE_PACKAGE,
+ FinderMethod.MODULE_CLASS]
# Module:Class or IntegrationName:Class
- return [_REFERENCE_TYPE.CACHE,
- _REFERENCE_TYPE.MODULE_CLASS,
- _REFERENCE_TYPE.INTEGRATION]
+ return [FinderMethod.CACHE,
+ FinderMethod.MODULE_CLASS,
+ FinderMethod.INTEGRATION]
if '.' in ref:
# The string of ref_end possibly includes specific mathods, e.g.
# foo.java#method, so let ref_end be the first part of splitting '#'.
if "#" in ref_end:
ref_end = ref_end.split('#')[0]
if ref_end in ('java', 'kt', 'bp', 'mk', 'cc', 'cpp'):
- return [_REFERENCE_TYPE.CACHE, _REFERENCE_TYPE.MODULE_FILE_PATH]
+ return [FinderMethod.CACHE, FinderMethod.MODULE_FILE_PATH]
if ref_end == 'xml':
- return [_REFERENCE_TYPE.CACHE,
- _REFERENCE_TYPE.INTEGRATION_FILE_PATH,
- _REFERENCE_TYPE.SUITE_PLAN_FILE_PATH]
+ return [FinderMethod.CACHE,
+ FinderMethod.INTEGRATION_FILE_PATH,
+ FinderMethod.SUITE_PLAN_FILE_PATH]
# (b/207327349) ref_end_is_upper does not guarantee a classname anymore.
- return [_REFERENCE_TYPE.MODULE,
- _REFERENCE_TYPE.CACHE,
- _REFERENCE_TYPE.QUALIFIED_CLASS,
- _REFERENCE_TYPE.PACKAGE]
+ return [FinderMethod.CACHE,
+ FinderMethod.MODULE,
+ FinderMethod.QUALIFIED_CLASS,
+ FinderMethod.PACKAGE]
# Note: We assume that if you're referencing a file in your cwd,
# that file must have a '.' in its name, i.e. foo.java, foo.xml.
# If this ever becomes not the case, then we need to include path below.
- return [_REFERENCE_TYPE.MODULE,
- _REFERENCE_TYPE.CACHE,
- _REFERENCE_TYPE.INTEGRATION,
+ return [FinderMethod.CACHE,
+ FinderMethod.MODULE,
+ FinderMethod.INTEGRATION,
# TODO: Uncomment in SUITE when it's supported
- # _REFERENCE_TYPE.SUITE,
- _REFERENCE_TYPE.CONFIG,
- _REFERENCE_TYPE.SUITE_PLAN,
- _REFERENCE_TYPE.CLASS,
- _REFERENCE_TYPE.CC_CLASS]
+ # FinderMethod.SUITE,
+ FinderMethod.CONFIG,
+ FinderMethod.SUITE_PLAN,
+ FinderMethod.CLASS,
+ FinderMethod.CC_CLASS]
def _get_registered_find_methods(module_info):
@@ -256,12 +278,12 @@
find_methods = []
finder_instance_dict = _get_finder_instance_dict(module_info)
test_ref_types = _get_test_reference_types(test)
- logging.debug('Resolved input to possible references: %s', [
- _REFERENCE_TYPE[t] for t in test_ref_types])
+ logging.debug('Resolved input to possible references: %s', ', '.join([
+ t.get_name() for t in test_ref_types]))
for test_ref_type in test_ref_types:
- find_method = _REF_TYPE_TO_FUNC_MAP[test_ref_type]
+ find_method = test_ref_type.get_method()
finder_instance = finder_instance_dict[inspect._findclass(find_method).NAME]
- finder_info = _REFERENCE_TYPE[test_ref_type]
+ finder_info = test_ref_type.get_name()
find_methods.append(test_finder_base.Finder(finder_instance,
find_method,
finder_info))
diff --git a/atest/test_finder_handler_unittest.py b/atest/test_finder_handler_unittest.py
index 70f4cbb..ba59f43 100755
--- a/atest/test_finder_handler_unittest.py
+++ b/atest/test_finder_handler_unittest.py
@@ -17,24 +17,21 @@
"""Unittests for test_finder_handler."""
# pylint: disable=line-too-long
-
+# pylint: disable=protected-access
import unittest
from unittest import mock
-import atest_error
-import test_finder_handler
+from atest import atest_error
+from atest import test_finder_handler
+from atest.test_finder_handler import FinderMethod as REF_TYPE
+from atest.test_finders import test_info
+from atest.test_finders import test_finder_base
-from test_finders import test_info
-from test_finders import test_finder_base
-
-#pylint: disable=protected-access
-REF_TYPE = test_finder_handler._REFERENCE_TYPE
_EXAMPLE_FINDER_A = 'EXAMPLE_A'
-#pylint: disable=no-self-use
@test_finder_base.find_method_register
class ExampleFinderA(test_finder_base.TestFinderBase):
"""Example finder class A."""
@@ -75,15 +72,15 @@
self.maxDiff = None
self.empty_mod_info = None
# We want to control the finders we return.
- mock.patch('test_finder_handler._get_test_finders',
+ mock.patch('atest.test_finder_handler._get_test_finders',
lambda: _TEST_FINDERS_PATCH).start()
# Since we're going to be comparing instance objects, we'll need to keep
# track of the objects so they align.
- mock.patch('test_finder_handler._get_finder_instance_dict',
+ mock.patch('atest.test_finder_handler._get_finder_instance_dict',
lambda x: _FINDER_INSTANCES).start()
# We want to mock out the default find methods to make sure we got all
# the methods we expect.
- mock.patch('test_finder_handler._get_default_find_methods',
+ mock.patch('atest.test_finder_handler._get_default_find_methods',
lambda x, y: [test_finder_base.Finder(
_FINDER_INSTANCES[_EXAMPLE_FINDER_A],
ExampleFinderA.unregistered_find_method_from_example_finder,
@@ -97,36 +94,36 @@
"""Test _get_test_reference_types parses reference types correctly."""
self.assertEqual(
test_finder_handler._get_test_reference_types('ModuleOrClassName'),
- [REF_TYPE.MODULE, REF_TYPE.CACHE, REF_TYPE.INTEGRATION,
+ [REF_TYPE.CACHE, REF_TYPE.MODULE, REF_TYPE.INTEGRATION,
REF_TYPE.CONFIG, REF_TYPE.SUITE_PLAN, REF_TYPE.CLASS,
REF_TYPE.CC_CLASS]
)
self.assertEqual(
test_finder_handler._get_test_reference_types('Module_or_Class_name'),
- [REF_TYPE.MODULE, REF_TYPE.CACHE, REF_TYPE.INTEGRATION,
+ [REF_TYPE.CACHE, REF_TYPE.MODULE, REF_TYPE.INTEGRATION,
REF_TYPE.CONFIG, REF_TYPE.SUITE_PLAN, REF_TYPE.CLASS,
REF_TYPE.CC_CLASS]
)
self.assertEqual(
test_finder_handler._get_test_reference_types('SuiteName'),
- [REF_TYPE.MODULE, REF_TYPE.CACHE, REF_TYPE.INTEGRATION,
+ [REF_TYPE.CACHE, REF_TYPE.MODULE, REF_TYPE.INTEGRATION,
REF_TYPE.CONFIG, REF_TYPE.SUITE_PLAN, REF_TYPE.CLASS,
REF_TYPE.CC_CLASS]
)
self.assertEqual(
test_finder_handler._get_test_reference_types('Suite-Name'),
- [REF_TYPE.MODULE, REF_TYPE.CACHE, REF_TYPE.INTEGRATION,
+ [REF_TYPE.CACHE, REF_TYPE.MODULE, REF_TYPE.INTEGRATION,
REF_TYPE.CONFIG, REF_TYPE.SUITE_PLAN, REF_TYPE.CLASS,
REF_TYPE.CC_CLASS]
)
self.assertEqual(
test_finder_handler._get_test_reference_types('some.package'),
- [REF_TYPE.MODULE, REF_TYPE.CACHE,
+ [REF_TYPE.CACHE, REF_TYPE.MODULE,
REF_TYPE.QUALIFIED_CLASS, REF_TYPE.PACKAGE]
)
self.assertEqual(
test_finder_handler._get_test_reference_types('fully.q.Class'),
- [REF_TYPE.MODULE, REF_TYPE.CACHE,
+ [REF_TYPE.CACHE, REF_TYPE.MODULE,
REF_TYPE.QUALIFIED_CLASS, REF_TYPE.PACKAGE]
)
self.assertEqual(
diff --git a/atest/test_finders/__init__.py b/atest/test_finders/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/atest/test_finders/__init__.py
+++ /dev/null
diff --git a/atest/test_finders/cache_finder.py b/atest/test_finders/cache_finder.py
index dfa3ca0..7ac2c28 100644
--- a/atest/test_finders/cache_finder.py
+++ b/atest/test_finders/cache_finder.py
@@ -18,11 +18,11 @@
import logging
-import atest_utils
-import constants
+from atest import atest_utils
+from atest import constants
-from test_finders import test_finder_base
-from test_finders import test_info
+from atest.test_finders import test_finder_base
+from atest.test_finders import test_info
class CacheFinder(test_finder_base.TestFinderBase):
"""Cache Finder class."""
diff --git a/atest/test_finders/cache_finder_unittest.py b/atest/test_finders/cache_finder_unittest.py
index 2e09560..6551f83 100755
--- a/atest/test_finders/cache_finder_unittest.py
+++ b/atest/test_finders/cache_finder_unittest.py
@@ -23,13 +23,13 @@
from unittest import mock
-import atest_utils
-import constants
-import module_info
-import unittest_constants as uc
+from atest import atest_utils
+from atest import constants
+from atest import module_info
+from atest import unittest_constants as uc
-from test_finders import cache_finder
-from test_finders import test_info
+from atest.test_finders import cache_finder
+from atest.test_finders import test_info
#pylint: disable=protected-access
diff --git a/atest/test_finders/example_finder.py b/atest/test_finders/example_finder.py
index db53e30..7af9185 100644
--- a/atest/test_finders/example_finder.py
+++ b/atest/test_finders/example_finder.py
@@ -16,9 +16,9 @@
Example Finder class.
"""
-from test_finders import test_info
-from test_finders import test_finder_base
-from test_runners import example_test_runner
+from atest.test_finders import test_info
+from atest.test_finders import test_finder_base
+from atest.test_runners import example_test_runner
@test_finder_base.find_method_register
diff --git a/atest/test_finders/module_finder.py b/atest/test_finders/module_finder.py
index 116cb14..0fd406d 100644
--- a/atest/test_finders/module_finder.py
+++ b/atest/test_finders/module_finder.py
@@ -22,21 +22,21 @@
import os
import time
-import atest_configs
-import atest_error
-import atest_utils
-import constants
+from typing import List
-from atest_enum import DetectType
-from metrics import metrics
-from test_finders import test_info
-from test_finders import test_finder_base
-from test_finders import test_finder_utils
-from test_runners import atest_tf_test_runner
-from test_runners import robolectric_test_runner
-from test_runners import vts_tf_test_runner
+from atest import atest_configs
+from atest import atest_error
+from atest import atest_utils
+from atest import constants
-_ANDROID_MK = 'Android.mk'
+from atest.atest_enum import DetectType
+from atest.metrics import metrics
+from atest.test_finders import test_info
+from atest.test_finders import test_finder_base
+from atest.test_finders import test_finder_utils
+from atest.test_runners import atest_tf_test_runner
+from atest.test_runners import robolectric_test_runner
+from atest.test_runners import vts_tf_test_runner
# These are suites in LOCAL_COMPATIBILITY_SUITE that aren't really suites so
# we can ignore them.
@@ -54,10 +54,11 @@
self.root_dir = os.environ.get(constants.ANDROID_BUILD_TOP)
self.module_info = module_info
- def _determine_testable_module(self, path, file_path=None):
+ def _determine_testable_module(self, path: str,
+ file_path: str = None) -> List:
"""Determine which module the user is trying to test.
- Returns the module to test. If there are multiple possibilities, will
+ Returns the modules to test. If there are multiple possibilities, will
ask the user. Otherwise will return the only module found.
Args:
@@ -72,12 +73,6 @@
testable_modules_no_srcs = []
for mod in self.module_info.get_module_names(path):
mod_info = self.module_info.get_module_info(mod)
- # Robolectric tests always exist in pairs of 2, one module to build
- # the test and another to run it. For now, we are assuming they are
- # isolated in their own folders and will return if we find one.
- if self.module_info.is_robolectric_test(mod):
- # return a list with one module name if it is robolectric.
- return [mod]
if self.module_info.is_testable_module(mod_info):
# If test module defined srcs, input file_path should be defined
# in the src list of module.
@@ -102,7 +97,7 @@
mod_info = self.module_info.get_module_info(module_name)
suites = []
if mod_info:
- suites = mod_info.get('compatibility_suites', [])
+ suites = mod_info.get(constants.MODULE_COMPATIBILITY_SUITES, [])
# Pull out all *ts (cts, tvts, etc) suites.
suites = [suite for suite in suites if suite not in _SUITES_TO_IGNORE]
return len(suites) == 1 and 'vts10' in suites
@@ -140,10 +135,11 @@
vts_xmls |= test_finder_utils.get_plans_from_vts_xml(xml_path)
for config_file in vts_xmls:
# Add in vts10 test build targets.
- test.build_targets |= test_finder_utils.get_targets_from_vts_xml(
- config_file, vts_out_dir, self.module_info)
- test.build_targets.add('vts-test-core')
- test.build_targets.add(test.test_name)
+ for target in test_finder_utils.get_targets_from_vts_xml(
+ config_file, vts_out_dir, self.module_info):
+ test.add_build_target(target)
+ test.add_build_target('vts-test-core')
+ test.add_build_target(test.test_name)
return test
def _update_legacy_robolectric_test_info(self, test):
@@ -166,6 +162,7 @@
test.test_name = self.module_info.get_robolectric_test_name(test.test_name)
return test
+ # pylint: disable=too-many-branches
def _process_test_info(self, test):
"""Process the test info and return some fields updated/changed.
@@ -193,12 +190,29 @@
if test.robo_type:
test.install_locations = {constants.DEVICELESS_TEST}
if test.robo_type == constants.ROBOTYPE_MODERN:
- test.build_targets.add(test.test_name)
+ test.add_build_target(test.test_name)
return test
if test.robo_type == constants.ROBOTYPE_LEGACY:
return self._update_legacy_robolectric_test_info(test)
rel_config = test.data[constants.TI_REL_CONFIG]
- test.build_targets = self._get_build_targets(module_name, rel_config)
+ for target in self._get_build_targets(module_name, rel_config):
+ test.add_build_target(target)
+ # (b/177626045) Probe target APK for running instrumentation tests to
+ # prevent RUNNER ERROR by adding target application(module) to the
+ # build_targets, and install these target apks before testing.
+ artifact_map = self.module_info.get_instrumentation_target_apps(
+ module_name)
+ if artifact_map:
+ logging.debug('Found %s an instrumentation test.', module_name)
+ for art in artifact_map.keys():
+ test.add_build_target(art)
+ logging.debug('Add %s to build targets...',
+ ', '.join(artifact_map.keys()))
+ test.artifacts = [apk for p in artifact_map.values() for apk in p]
+ logging.debug('Will install target APK: %s\n', test.artifacts)
+ metrics.LocalDetectEvent(
+ detect_type=DetectType.FOUND_TARGET_ARTIFACTS,
+ result=len(test.artifacts))
# For device side java test, it will use
# com.android.compatibility.testtype.DalvikTest as test runner in
# cts-dalvik-device-test-runner.jar
@@ -206,7 +220,7 @@
if constants.MODULE_CLASS_JAVA_LIBRARIES in test.module_class:
for dalvik_dep in test_finder_utils.DALVIK_TEST_DEPS:
if self.module_info.is_module(dalvik_dep):
- test.build_targets.add(dalvik_dep)
+ test.add_build_target(dalvik_dep)
# Update test name if the test belong to extra config which means it's
# test config name is not the same as module name. For extra config, it
# index will be greater or equal to 1.
@@ -246,11 +260,13 @@
for module_path in self.module_info.get_paths(module_name):
mod_dir = module_path.replace('/', '-')
targets.add(constants.MODULES_IN + mod_dir)
- # (b/156457698) Force add vts_kernel_tests as build target if our test
- # belong to REQUIRED_KERNEL_TEST_MODULES due to required_module option
- # not working for sh_test in soong.
- if module_name in constants.REQUIRED_KERNEL_TEST_MODULES:
- targets.add('vts_kernel_tests')
+ # (b/156457698) Force add vts_kernel_ltp_tests as build target if our
+ # test belongs to REQUIRED_LTP_TEST_MODULES due to required_module
+ # option not working for sh_test in soong. Ditto for kselftest.
+ if module_name in constants.REQUIRED_LTP_TEST_MODULES:
+ targets.add('vts_kernel_ltp_tests')
+ if module_name in constants.REQUIRED_KSELFTEST_TEST_MODULES:
+ targets.add('vts_kernel_kselftest_tests')
# (b/184567849) Force adding module_name as a build_target. This will
# allow excluding MODULES-IN-* and prevent from missing build targets.
if module_name and self.module_info.is_module(module_name):
@@ -642,21 +658,26 @@
Returns:
A list of populated TestInfo namedtuple if found, else None.
"""
- if ':' not in module_class:
+ parse_result = test_finder_utils.parse_test_reference(module_class)
+ if not parse_result:
return None
- module_name, class_name = module_class.split(':')
+ module_name = parse_result['module_name']
+ class_name = parse_result['pkg_class_name']
+ method_name = parse_result.get('method_name', '')
+ if method_name:
+ class_name = class_name + '#' + method_name
+
# module_infos is a list with at most 1 element.
module_infos = self.find_test_by_module_name(module_name)
module_info = module_infos[0] if module_infos else None
if not module_info:
return None
find_result = None
- # If the target module is NATIVE_TEST, search CC classes only.
- if not self.module_info.is_native_test(module_name):
- # Find by java class.
- find_result = self.find_test_by_class_name(
- class_name, module_info.test_name,
- module_info.data.get(constants.TI_REL_CONFIG))
+ # If the target module is JAVA or Python test, search class name.
+ find_result = self.find_test_by_class_name(
+ class_name, module_info.test_name,
+ module_info.data.get(constants.TI_REL_CONFIG),
+ self.module_info.is_native_test(module_name))
# kernel target test is also define as NATIVE_TEST in build system.
# TODO (b/157210083) Update find_test_by_kernel_class_name method to
# support gen_rule use case.
@@ -694,7 +715,7 @@
else:
search_dir = self.root_dir
package_paths = test_finder_utils.run_find_cmd(
- test_finder_utils.FIND_REFERENCE_TYPE.PACKAGE, search_dir, package)
+ test_finder_utils.TestReferenceType.PACKAGE, search_dir, package)
package_paths = package_paths if package_paths is not None else []
# Package path will be the full path to the dir represented by package.
if not package_paths:
@@ -721,7 +742,15 @@
Returns:
A list of populated TestInfo namedtuple if found, else None.
"""
- module_name, package = module_package.split(':')
+ parse_result = test_finder_utils.parse_test_reference(module_package)
+ if not parse_result:
+ return None
+ module_name = parse_result['module_name']
+ package = parse_result['pkg_class_name']
+ method = parse_result.get('method_name', '')
+ if method:
+ package = package + '#' + method
+
# module_infos is a list with at most 1 element.
module_infos = self.find_test_by_module_name(module_name)
module_info = module_infos[0] if module_infos else None
@@ -731,7 +760,7 @@
package, module_info.test_name,
module_info.data.get(constants.TI_REL_CONFIG))
- def find_test_by_path(self, rel_path):
+ def find_test_by_path(self, rel_path: str) -> List[test_info.TestInfo]:
"""Find the first test info matching the given path.
Strategy:
@@ -762,6 +791,30 @@
# Module/Class
rel_module_dir = test_finder_utils.find_parent_module_dir(
self.root_dir, dir_path, self.module_info)
+
+ # If the input file path does not belong to a module(by searching
+ # upwards to the build_top), check whether it belongs to the dependency
+ # of modules.
+ if not rel_module_dir:
+ testable_modules = self.module_info.get_modules_by_include_deps(
+ self.module_info.get_modules_by_path_in_srcs(rel_path),
+ testable_module_only=True)
+ if testable_modules:
+ test_filter = self._get_test_info_filter(
+ path, methods, rel_module_dir=rel_module_dir)
+ tinfos = []
+ for testable_module in testable_modules:
+ rel_config = os.path.join(
+ self.module_info.get_paths(
+ testable_module)[0], constants.MODULE_CONFIG)
+ tinfos.extend(
+ self._get_test_infos(
+ path, rel_config, testable_module, test_filter))
+ metrics.LocalDetectEvent(
+ detect_type=DetectType.FIND_TEST_IN_DEPS,
+ result=1)
+ return tinfos
+
if not rel_module_dir:
# Try to find unit-test for input path.
path = os.path.relpath(
@@ -915,7 +968,8 @@
return [tinfo]
return None
- def _is_comparted_src(self, path):
+ @staticmethod
+ def _is_comparted_src(path):
"""Check if the input path need to match srcs information in module.
If path is a folder or android build file, we don't need to compart
diff --git a/atest/test_finders/module_finder_unittest.py b/atest/test_finders/module_finder_unittest.py
index 5da65bd..0ee7c1c 100755
--- a/atest/test_finders/module_finder_unittest.py
+++ b/atest/test_finders/module_finder_unittest.py
@@ -16,43 +16,50 @@
"""Unittests for module_finder."""
+# pylint: disable=invalid-name
# pylint: disable=line-too-long
+# pylint: disable=missing-function-docstring
# pylint: disable=too-many-lines
# pylint: disable=unsubscriptable-object
import copy
import re
+import tempfile
import unittest
import os
+from pathlib import Path
from unittest import mock
-import atest_error
-import atest_configs
-import atest_utils
-import constants
-import module_info
-import unittest_constants as uc
-import unittest_utils
+# pylint: disable=import-error
+from pyfakefs import fake_filesystem_unittest
-from test_finders import module_finder
-from test_finders import test_finder_utils
-from test_finders import test_info
-from test_runners import atest_tf_test_runner as atf_tr
+from atest import atest_error
+from atest import atest_configs
+from atest import atest_utils
+from atest import constants
+from atest import module_info
+from atest import unittest_constants as uc
+from atest import unittest_utils
+
+from atest.test_finders import module_finder
+from atest.test_finders import test_finder_utils
+from atest.test_finders import test_info
+from atest.test_runners import atest_tf_test_runner as atf_tr
MODULE_CLASS = '%s:%s' % (uc.MODULE_NAME, uc.CLASS_NAME)
MODULE_PACKAGE = '%s:%s' % (uc.MODULE_NAME, uc.PACKAGE)
CC_MODULE_CLASS = '%s:%s' % (uc.CC_MODULE_NAME, uc.CC_CLASS_NAME)
KERNEL_TEST_CLASS = 'test_class_1'
KERNEL_TEST_CONFIG = 'KernelTest.xml.data'
-KERNEL_MODULE_CLASS = '%s:%s' % (constants.REQUIRED_KERNEL_TEST_MODULES[0],
+KERNEL_MODULE_CLASS = '%s:%s' % (constants.REQUIRED_LTP_TEST_MODULES[0],
KERNEL_TEST_CLASS)
KERNEL_CONFIG_FILE = os.path.join(uc.TEST_DATA_DIR, KERNEL_TEST_CONFIG)
KERNEL_CLASS_FILTER = test_info.TestFilter(KERNEL_TEST_CLASS, frozenset())
KERNEL_MODULE_CLASS_DATA = {constants.TI_REL_CONFIG: KERNEL_CONFIG_FILE,
constants.TI_FILTER: frozenset([KERNEL_CLASS_FILTER])}
KERNEL_MODULE_CLASS_INFO = test_info.TestInfo(
- constants.REQUIRED_KERNEL_TEST_MODULES[0],
+ constants.REQUIRED_LTP_TEST_MODULES[0],
atf_tr.AtestTradefedTestRunner.NAME,
uc.CLASS_BUILD_TARGETS, KERNEL_MODULE_CLASS_DATA)
FLAT_METHOD_INFO = test_info.TestInfo(
@@ -96,6 +103,170 @@
return uc.FIND_ONE
return None
+class ModuleFinderFindTestByModuleName(fake_filesystem_unittest.TestCase):
+ """Unit tests for module_finder.py"""
+
+ def setUp(self):
+ self.setUpPyfakefs()
+ self.build_top = Path('/')
+ self.product_out = self.build_top.joinpath('out/product')
+ self.product_out.mkdir(parents=True, exist_ok=True)
+ self.module_info_file = self.product_out.joinpath('atest_merged_dep.json')
+ self.fs.create_file(
+ self.module_info_file,
+ contents=('''
+ { "CtsJankDeviceTestCases": {
+ "class":["APPS"],
+ "path":["foo/bar/jank"],
+ "tags": ["optional"],
+ "installed": ["path/to/install/CtsJankDeviceTestCases.apk"],
+ "test_config": ["foo/bar/jank/AndroidTest.xml",
+ "foo/bar/jank/CtsJankDeviceTestCases2.xml"],
+ "module_name": "CtsJankDeviceTestCases" }
+ }''')
+ )
+
+ @mock.patch('builtins.input', return_value='1')
+ def test_find_test_by_module_name_w_multiple_config(self, _):
+ """Test find_test_by_module_name (test_config_select)"""
+ atest_configs.GLOBAL_ARGS = mock.Mock()
+ atest_configs.GLOBAL_ARGS.test_config_select = True
+ # The original test name will be updated to the config name when multiple
+ # configs were found.
+ expected_test_info = create_test_info(
+ test_name='CtsJankDeviceTestCases2',
+ raw_test_name='CtsJankDeviceTestCases',
+ test_runner='AtestTradefedTestRunner',
+ module_class=['APPS'],
+ build_targets={'MODULES-IN-foo-bar-jank', 'CtsJankDeviceTestCases'},
+ data={'rel_config': 'foo/bar/jank/CtsJankDeviceTestCases2.xml',
+ 'filter': frozenset()}
+ )
+ self.fs.create_file(
+ self.build_top.joinpath('foo/bar/jank/CtsJankDeviceTestCases2.xml'),
+ contents=('''
+ <target_preparer class="com.android.tradefed.targetprep.suite.SuiteApkInstaller">
+ <option name="test-file-name" value="CtsUiDeviceTestCases.apk" />
+ </target_preparer>
+ ''')
+ )
+
+ mod_info = module_info.ModuleInfo(module_file=self.module_info_file)
+ mod_finder = module_finder.ModuleFinder(module_info=mod_info)
+ t_infos = mod_finder.find_test_by_module_name('CtsJankDeviceTestCases')
+
+ self.assertEqual(len(t_infos), 1)
+ unittest_utils.assert_equal_testinfos(self,
+ t_infos[0], expected_test_info)
+
+ def test_find_test_by_module_name_w_multiple_config_all(self):
+ """Test find_test_by_module_name."""
+ atest_configs.GLOBAL_ARGS = mock.Mock()
+ atest_configs.GLOBAL_ARGS.test_config_select = False
+ expected_test_info = [
+ create_test_info(
+ test_name='CtsJankDeviceTestCases',
+ test_runner='AtestTradefedTestRunner',
+ module_class=['APPS'],
+ build_targets={'MODULES-IN-foo-bar-jank', 'CtsJankDeviceTestCases'},
+ data={'rel_config': 'foo/bar/jank/AndroidTest.xml',
+ 'filter': frozenset()}
+ ),
+ create_test_info(
+ test_name='CtsJankDeviceTestCases2',
+ raw_test_name='CtsJankDeviceTestCases',
+ test_runner='AtestTradefedTestRunner',
+ module_class=['APPS'],
+ build_targets={'MODULES-IN-foo-bar-jank', 'CtsJankDeviceTestCases'},
+ data={'rel_config': 'foo/bar/jank/CtsJankDeviceTestCases2.xml',
+ 'filter': frozenset()}
+ )]
+ self.fs.create_file(
+ self.build_top.joinpath('foo/bar/jank/AndroidTest.xml'),
+ contents=('''
+ <target_preparer class="com.android.tradefed.targetprep.suite.SuiteApkInstaller">
+ <option name="test-file-name" value="CtsUiDeviceTestCases.apk" />
+ </target_preparer>
+ ''')
+ )
+
+ mod_info = module_info.ModuleInfo(module_file=self.module_info_file)
+ mod_finder = module_finder.ModuleFinder(module_info=mod_info)
+ t_infos = mod_finder.find_test_by_module_name('CtsJankDeviceTestCases')
+
+ self.assertEqual(len(t_infos), 2)
+ unittest_utils.assert_equal_testinfos(self,
+ t_infos[0], expected_test_info[0])
+ unittest_utils.assert_equal_testinfos(self,
+ t_infos[1], expected_test_info[1])
+
+class ModuleFinderFindTestByPath(fake_filesystem_unittest.TestCase):
+ """Test cases that invoke find_test_by_path."""
+ def setUp(self):
+ self.setUpPyfakefs()
+
+ # pylint: disable=protected-access
+ def create_empty_module_info(self):
+ fake_temp_file_name = next(tempfile._get_candidate_names())
+ self.fs.create_file(fake_temp_file_name, contents='{}')
+ return module_info.ModuleInfo(module_file=fake_temp_file_name)
+
+ def create_module_info(self, modules=None):
+ mod_info = self.create_empty_module_info()
+ modules = modules or []
+
+ for m in modules:
+ mod_info.name_to_module_info[m['module_name']] = m
+ for path in m['path']:
+ if path in mod_info.path_to_module_info:
+ mod_info.path_to_module_info[path].append(m)
+ else:
+ mod_info.path_to_module_info[path] = [m]
+
+ return mod_info
+
+ # TODO: remove below mocks and hide unnecessary information.
+ @mock.patch.object(module_finder.ModuleFinder, '_get_test_info_filter')
+ @mock.patch.object(test_finder_utils, 'find_parent_module_dir',
+ return_value=None)
+ @mock.patch('os.path.exists')
+ #pylint: disable=unused-argument
+ def test_find_test_by_path_belong_to_dependencies(
+ self, _mock_exists, _mock_find_parent, _mock_test_filter):
+ """Test find_test_by_path if belong to test dependencies."""
+ test1 = module(name='test1',
+ classes=['class'],
+ dependencies=['lib1'],
+ installed=['install/test1'],
+ auto_test_config=[True])
+ test2 = module(name='test2',
+ classes=['class'],
+ dependencies=['lib2'],
+ installed=['install/test2'],
+ auto_test_config=[True])
+ lib1 = module(name='lib1',
+ srcs=['path/src1'])
+ lib2 = module(name='lib2',
+ srcs=['path/src2'])
+ mod_info = self.create_module_info(
+ [test1, test2, lib1, lib2])
+ mod_finder = module_finder.ModuleFinder(module_info=mod_info)
+ _mock_exists.return_value = True
+ test1_filter = test_info.TestFilter('test1Filter', frozenset())
+ _mock_test_filter.return_value = test1_filter
+
+ t_infos = mod_finder.find_test_by_path('path/src1')
+
+ unittest_utils.assert_equal_testinfos(
+ self,
+ test_info.TestInfo(
+ 'test1',
+ atf_tr.AtestTradefedTestRunner.NAME,
+ {'test1', 'MODULES-IN-'},
+ {constants.TI_FILTER: test1_filter,
+ constants.TI_REL_CONFIG: 'AndroidTest.xml'},
+ module_class=['class']),
+ t_infos[0])
#pylint: disable=protected-access
class ModuleFinderUnittests(unittest.TestCase):
@@ -125,6 +296,7 @@
def test_find_test_by_module_name(self, _get_targ):
"""Test find_test_by_module_name."""
self.mod_finder.module_info.is_robolectric_test.return_value = False
+ self.mod_finder.module_info.get_instrumentation_target_apps.return_value = {}
self.mod_finder.module_info.has_test_config.return_value = True
mod_info = {'installed': ['/path/to/install'],
'path': [uc.MODULE_DIR],
@@ -141,55 +313,6 @@
self.mod_finder.module_info.is_testable_module.return_value = False
self.assertIsNone(self.mod_finder.find_test_by_module_name('Not_Module'))
- @mock.patch('builtins.input', return_value='1')
- @mock.patch.object(module_finder.ModuleFinder, '_get_build_targets',
- return_value=copy.deepcopy(uc.MODULE_BUILD_TARGETS))
- def test_find_test_by_module_name_w_multiple_config(
- self, _get_targ, _mock_input):
- """Test find_test_by_module_name."""
- atest_configs.GLOBAL_ARGS = mock.Mock()
- atest_configs.GLOBAL_ARGS.test_config_select = True
- self.mod_finder.module_info.is_robolectric_test.return_value = False
- self.mod_finder.module_info.has_test_config.return_value = True
- mod_info = {'installed': ['/path/to/install'],
- 'path': [uc.MODULE_DIR],
- constants.MODULE_CLASS: [],
- constants.MODULE_COMPATIBILITY_SUITES: [],
- constants.MODULE_TEST_CONFIG: [
- uc.CONFIG_FILE,
- uc.EXTRA_CONFIG_FILE]}
- self.mod_finder.module_info.get_module_info.return_value = mod_info
- self.mod_finder.module_info.get_robolectric_type.return_value = 0
- t_infos = self.mod_finder.find_test_by_module_name(uc.MODULE_NAME)
- # Only select one test
- self.assertEqual(len(t_infos), 1)
- # The t_info should be the EXTRA_CONFIG_FILE one.
- unittest_utils.assert_equal_testinfos(
- self, t_infos[0], uc.MODULE_INFO_W_CONFIG)
-
- @mock.patch.object(module_finder.ModuleFinder, '_get_build_targets',
- return_value=copy.deepcopy(uc.MODULE_BUILD_TARGETS))
- def test_find_test_by_module_name_w_multiple_config_all(
- self, _get_targ,):
- """Test find_test_by_module_name."""
- atest_configs.GLOBAL_ARGS = mock.Mock()
- atest_configs.GLOBAL_ARGS.test_config_select = False
- self.mod_finder.module_info.is_robolectric_test.return_value = False
- self.mod_finder.module_info.has_test_config.return_value = True
- mod_info = {'installed': ['/path/to/install'],
- 'path': [uc.MODULE_DIR],
- constants.MODULE_CLASS: [],
- constants.MODULE_COMPATIBILITY_SUITES: [],
- constants.MODULE_TEST_CONFIG: [
- uc.CONFIG_FILE,
- uc.EXTRA_CONFIG_FILE]}
- self.mod_finder.module_info.get_module_info.return_value = mod_info
- self.mod_finder.module_info.get_robolectric_type.return_value = 0
- t_infos = self.mod_finder.find_test_by_module_name(uc.MODULE_NAME)
- unittest_utils.assert_equal_testinfos(self, t_infos[0], uc.MODULE_INFO)
- unittest_utils.assert_equal_testinfos(
- self, t_infos[1], uc.MODULE_INFO_W_CONFIG)
-
@mock.patch.object(test_finder_utils, 'find_host_unit_tests',
return_value=[])
@mock.patch.object(atest_utils, 'is_build_file', return_value=True)
@@ -215,6 +338,7 @@
mock_build.return_value = uc.CLASS_BUILD_TARGETS
self.mod_finder.module_info.is_auto_gen_test_config.return_value = False
self.mod_finder.module_info.is_robolectric_test.return_value = False
+ self.mod_finder.module_info.get_instrumentation_target_apps.return_value = {}
self.mod_finder.module_info.has_test_config.return_value = True
self.mod_finder.module_info.get_module_names.return_value = [uc.MODULE_NAME]
self.mod_finder.module_info.get_module_info.return_value = {
@@ -286,6 +410,7 @@
constants.MODULE_COMPATIBILITY_SUITES: []}
self.mod_finder.module_info.get_module_info.return_value = mod_info
self.mod_finder.module_info.get_robolectric_type.return_value = 0
+ self.mod_finder.module_info.get_instrumentation_target_apps.return_value = {}
t_infos = self.mod_finder.find_test_by_module_and_class(MODULE_CLASS)
unittest_utils.assert_equal_testinfos(self, t_infos[0], uc.CLASS_INFO)
# with method
@@ -334,6 +459,7 @@
'prefixes': set(),
'typed': False}}
self.mod_finder.module_info.get_robolectric_type.return_value = 0
+ self.mod_finder.module_info.get_instrumentation_target_apps.return_value = {}
t_infos = self.mod_finder.find_test_by_module_and_class(CC_MODULE_CLASS)
unittest_utils.assert_equal_testinfos(self, t_infos[0], uc.CC_MODULE_CLASS_INFO)
# with method
@@ -374,6 +500,7 @@
constants.MODULE_COMPATIBILITY_SUITES: []}
self.mod_finder.module_info.get_module_info.return_value = mod_info
self.mod_finder.module_info.get_robolectric_type.return_value = 0
+ self.mod_finder.module_info.get_instrumentation_target_apps.return_value = {}
t_infos = self.mod_finder.find_test_by_module_and_class(KERNEL_MODULE_CLASS)
unittest_utils.assert_equal_testinfos(self, t_infos[0], KERNEL_MODULE_CLASS_INFO)
@@ -401,6 +528,7 @@
constants.MODULE_COMPATIBILITY_SUITES: []
}
self.mod_finder.module_info.get_robolectric_type.return_value = 0
+ self.mod_finder.module_info.get_instrumentation_target_apps.return_value = {}
t_infos = self.mod_finder.find_test_by_package_name(uc.PACKAGE)
unittest_utils.assert_equal_testinfos(
self, t_infos[0],
@@ -439,6 +567,7 @@
constants.MODULE_CLASS: [],
constants.MODULE_COMPATIBILITY_SUITES: []}
self.mod_finder.module_info.get_module_info.return_value = mod_info
+ self.mod_finder.module_info.get_instrumentation_target_apps.return_value = {}
t_infos = self.mod_finder.find_test_by_module_and_package(MODULE_PACKAGE)
self.assertEqual(t_infos, None)
_isdir.return_value = True
@@ -463,6 +592,7 @@
self.mod_finder.module_info.get_module_info.return_value = mod_info
self.assertIsNone(self.mod_finder.find_test_by_module_and_package(bad_pkg))
+ # TODO: Move and rewite it to ModuleFinderFindTestByPath.
@mock.patch.object(test_finder_utils, 'find_host_unit_tests',
return_value=[])
@mock.patch.object(test_finder_utils, 'get_cc_class_info', return_value={})
@@ -491,6 +621,7 @@
"""Test find_test_by_path."""
self.mod_finder.module_info.is_robolectric_test.return_value = False
self.mod_finder.module_info.has_test_config.return_value = True
+ self.mod_finder.module_info.get_modules_by_include_deps.return_value = set()
mock_build.return_value = set()
# Check that we don't return anything with invalid test references.
mock_pathexists.return_value = False
@@ -513,6 +644,7 @@
class_path = '%s.kt' % uc.CLASS_NAME
mock_build.return_value = uc.CLASS_BUILD_TARGETS
self.mod_finder.module_info.get_robolectric_type.return_value = 0
+ self.mod_finder.module_info.get_instrumentation_target_apps.return_value = {}
t_infos = self.mod_finder.find_test_by_path(class_path)
unittest_utils.assert_equal_testinfos(
self, uc.CLASS_INFO, t_infos[0])
@@ -556,6 +688,7 @@
unittest_utils.assert_equal_testinfos(
self, uc.CC_PATH_INFO2, t_infos[0])
+ # TODO: Move and rewite it to ModuleFinderFindTestByPath.
@mock.patch.object(module_finder.ModuleFinder, '_get_build_targets',
return_value=copy.deepcopy(uc.MODULE_BUILD_TARGETS))
@mock.patch.object(module_finder.ModuleFinder, '_is_vts_module',
@@ -577,6 +710,7 @@
constants.MODULE_CLASS: [],
constants.MODULE_COMPATIBILITY_SUITES: []}
self.mod_finder.module_info.get_robolectric_type.return_value = 0
+ self.mod_finder.module_info.get_instrumentation_target_apps.return_value = {}
t_infos = self.mod_finder.find_test_by_path(class_dir)
unittest_utils.assert_equal_testinfos(
self, uc.PATH_INFO, t_infos[0])
@@ -627,6 +761,7 @@
constants.MODULE_CLASS: [],
constants.MODULE_COMPATIBILITY_SUITES: []}
self.mod_finder.module_info.get_robolectric_type.return_value = 0
+ self.mod_finder.module_info.get_instrumentation_target_apps.return_value = {}
_class_info.return_value = {'PFTest': {'methods': {'test1', 'test2'},
'prefixes': set(),
'typed': False}}
@@ -733,6 +868,7 @@
constants.MODULE_COMPATIBILITY_SUITES: []}
self.mod_finder.module_info.get_paths.return_value = [uc.TEST_DATA_CONFIG]
self.mod_finder.module_info.get_robolectric_type.return_value = 0
+ self.mod_finder.module_info.get_instrumentation_target_apps.return_value = {}
t_infos = self.mod_finder.find_test_by_class_name(
uc.FULL_CLASS_NAME, module_name=uc.MODULE_NAME,
rel_config=uc.CONFIG_FILE)
@@ -763,6 +899,7 @@
}
self.mod_finder.module_info.get_paths.return_value = [uc.TEST_DATA_CONFIG]
self.mod_finder.module_info.get_robolectric_type.return_value = 0
+ self.mod_finder.module_info.get_instrumentation_target_apps.return_value = {}
t_infos = self.mod_finder.find_test_by_package_name(
uc.PACKAGE, module_name=uc.MODULE_NAME, rel_config=uc.CONFIG_FILE)
unittest_utils.assert_equal_testinfos(
@@ -803,6 +940,7 @@
constants.MODULE_CLASS: [],
constants.MODULE_COMPATIBILITY_SUITES: []}
self.mod_finder.module_info.get_robolectric_type.return_value = 0
+ self.mod_finder.module_info.get_instrumentation_target_apps.return_value = {}
# Happy path testing.
mock_dir.return_value = uc.MODULE_DIR
class_path = '%s.java' % uc.CLASS_NAME
@@ -843,6 +981,7 @@
mock_build.return_value = uc.CLASS_BUILD_TARGETS
self.mod_finder.module_info.is_auto_gen_test_config.return_value = False
self.mod_finder.module_info.is_robolectric_test.return_value = False
+ self.mod_finder.module_info.get_instrumentation_target_apps.return_value = {}
self.mod_finder.module_info.has_test_config.return_value = True
self.mod_finder.module_info.get_module_names.return_value = [uc.MODULE_NAME]
self.mod_finder.module_info.get_module_info.return_value = {
@@ -934,6 +1073,7 @@
constants.MODULE_COMPATIBILITY_SUITES: [],
constants.MODULE_SRCS: [class_path]}
self.mod_finder.module_info.get_robolectric_type.return_value = 0
+ self.mod_finder.module_info.get_instrumentation_target_apps.return_value = {}
t_infos = self.mod_finder.find_test_by_path(class_path)
unittest_utils.assert_equal_testinfos(self, uc.CLASS_INFO, t_infos[0])
@@ -995,6 +1135,7 @@
'prefixes': founded_prefixes,
'typed': False}}
self.mod_finder.module_info.get_robolectric_type.return_value = 0
+ self.mod_finder.module_info.get_instrumentation_target_apps.return_value = {}
cc_path_data = {constants.TI_REL_CONFIG: uc.CC_CONFIG_FILE,
constants.TI_FILTER: frozenset(
{test_info.TestFilter(class_name='class1.*',
@@ -1035,14 +1176,52 @@
self.mod_finder.module_info.is_robolectric_test.return_value = False
self.mod_finder.module_info.is_auto_gen_test_config.return_value = True
self.mod_finder.module_info.get_robolectric_type.return_value = 0
+ self.mod_finder.module_info.get_instrumentation_target_apps.return_value = {}
self.mod_finder.module_info.get_module_info.return_value = mod_info
processed_info = self.mod_finder._process_test_info(
- copy.copy(uc.MODULE_INFO))
+ copy.deepcopy(uc.MODULE_INFO))
unittest_utils.assert_equal_testinfos(
self,
processed_info,
uc.MODULE_INFO_W_DALVIK)
+ # pylint: disable=unused-argument
+ @mock.patch.object(module_finder.ModuleFinder, '_get_build_targets')
+ @mock.patch.object(module_info.ModuleInfo, 'get_instrumentation_target_apps')
+ @mock.patch.object(module_info.ModuleInfo, 'get_robolectric_type')
+ @mock.patch.object(module_info.ModuleInfo, 'is_testable_module')
+ def test_process_test_info_with_instrumentation_target_apps(
+ self, testable, robotype, tapps, btargets):
+ """Test _process_test_info."""
+ testable.return_value = True
+ robotype.return_value = 0
+ target_module = 'AmSlam'
+ test_module = 'AmSlamTests'
+ artifact_path = '/out/somewhere/app/AmSlam.apk'
+ tapps.return_value = {target_module: {artifact_path}}
+ btargets.return_value = {target_module}
+ self.mod_finder.module_info.is_auto_gen_test_config.return_value = True
+ self.mod_finder.module_info.get_robolectric_type.return_value = 0
+ test1 = module(name=target_module,
+ classes=['APPS'],
+ path=['foo/bar/AmSlam'],
+ installed=[artifact_path])
+ test2 = module(name=test_module,
+ classes=['APPS'],
+ path=['foo/bar/AmSlam/test'],
+ installed=['/out/somewhere/app/AmSlamTests.apk'])
+ info = test_info.TestInfo(test_module,
+ atf_tr.AtestTradefedTestRunner.NAME,
+ set(),
+ {constants.TI_REL_CONFIG: uc.CONFIG_FILE,
+ constants.TI_FILTER: frozenset()})
+
+ self.mod_finder.module_info = create_module_info([test1, test2])
+ t_infos = self.mod_finder._process_test_info(info)
+
+ self.assertTrue(target_module in t_infos.build_targets)
+ self.assertEqual([artifact_path], t_infos.artifacts)
+
@mock.patch.object(test_finder_utils, 'get_annotated_methods')
def test_is_srcs_match_method_annotation_include_anno(
self, _mock_get_anno_methods):
@@ -1193,5 +1372,106 @@
self.mod_finder.find_test_by_class_name('my.test.class'),
None)
+
+def create_empty_module_info():
+ with fake_filesystem_unittest.Patcher() as patcher:
+ # pylint: disable=protected-access
+ fake_temp_file_name = next(tempfile._get_candidate_names())
+ patcher.fs.create_file(fake_temp_file_name, contents='{}')
+ return module_info.ModuleInfo(module_file=fake_temp_file_name)
+
+
+def create_module_info(modules=None):
+ mod_info = create_empty_module_info()
+ modules = modules or []
+
+ for m in modules:
+ mod_info.name_to_module_info[m['module_name']] = m
+
+ return mod_info
+
+
+# pylint: disable=too-many-arguments
+def module(
+ name=None,
+ path=None,
+ installed=None,
+ classes=None,
+ auto_test_config=None,
+ shared_libs=None,
+ dependencies=None,
+ runtime_dependencies=None,
+ data=None,
+ data_dependencies=None,
+ compatibility_suites=None,
+ host_dependencies=None,
+ srcs=None,
+):
+ name = name or 'libhello'
+
+ m = {}
+
+ m['module_name'] = name
+ m['class'] = classes
+ m['path'] = [path or '']
+ m['installed'] = installed or []
+ m['is_unit_test'] = 'false'
+ m['auto_test_config'] = auto_test_config or []
+ m['shared_libs'] = shared_libs or []
+ m['runtime_dependencies'] = runtime_dependencies or []
+ m['dependencies'] = dependencies or []
+ m['data'] = data or []
+ m['data_dependencies'] = data_dependencies or []
+ m['compatibility_suites'] = compatibility_suites or []
+ m['host_dependencies'] = host_dependencies or []
+ m['srcs'] = srcs or []
+ return m
+
+# pylint: disable=too-many-locals
+def create_test_info(**kwargs):
+ test_name = kwargs.pop('test_name')
+ test_runner = kwargs.pop('test_runner')
+ build_targets = kwargs.pop('build_targets')
+ data = kwargs.pop('data', None)
+ suite = kwargs.pop('suite', None)
+ module_class = kwargs.pop('module_class', None)
+ install_locations = kwargs.pop('install_locations', None)
+ test_finder = kwargs.pop('test_finder', '')
+ compatibility_suites = kwargs.pop('compatibility_suites', None)
+
+ t_info = test_info.TestInfo(
+ test_name=test_name,
+ test_runner=test_runner,
+ build_targets=build_targets,
+ data=data,
+ suite=suite,
+ module_class=module_class,
+ install_locations=install_locations,
+ test_finder=test_finder,
+ compatibility_suites=compatibility_suites
+ )
+ raw_test_name = kwargs.pop('raw_test_name', None)
+ if raw_test_name:
+ t_info.raw_test_name = raw_test_name
+ artifacts = kwargs.pop('artifacts', set())
+ if artifacts:
+ t_info.artifacts = artifacts
+ robo_type = kwargs.pop('robo_type', None)
+ if robo_type:
+ t_info.robo_type = robo_type
+ mainline_modules = kwargs.pop('mainline_modules', set())
+ if mainline_modules:
+ t_info._mainline_modules = mainline_modules
+ for keyword in ['from_test_mapping',
+ 'host',
+ 'aggregate_metrics_result']:
+ value = kwargs.pop(keyword, 'None')
+ if isinstance(value, bool):
+ setattr(t_info, keyword, value)
+ if kwargs:
+ assert f'Unknown keyword(s) for test_info: {kwargs.keys()}'
+ return t_info
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/atest/test_finders/suite_plan_finder.py b/atest/test_finders/suite_plan_finder.py
index 3f725a9..5e2fb16 100644
--- a/atest/test_finders/suite_plan_finder.py
+++ b/atest/test_finders/suite_plan_finder.py
@@ -20,12 +20,12 @@
import os
import re
-import constants
+from atest import constants
-from test_finders import test_finder_base
-from test_finders import test_finder_utils
-from test_finders import test_info
-from test_runners import suite_plan_test_runner
+from atest.test_finders import test_finder_base
+from atest.test_finders import test_finder_utils
+from atest.test_finders import test_info
+from atest.test_runners import suite_plan_test_runner
_SUITE_PLAN_NAME_RE = re.compile(r'^.*\/(?P<suite>.*)-tradefed\/res\/config\/'
r'(?P<suite_plan_name>.*).xml$')
diff --git a/atest/test_finders/suite_plan_finder_unittest.py b/atest/test_finders/suite_plan_finder_unittest.py
index 1c12731..0719753 100755
--- a/atest/test_finders/suite_plan_finder_unittest.py
+++ b/atest/test_finders/suite_plan_finder_unittest.py
@@ -23,13 +23,13 @@
from unittest import mock
-import unittest_constants as uc
-import unittest_utils
+from atest import unittest_constants as uc
+from atest import unittest_utils
-from test_finders import test_finder_utils
-from test_finders import test_info
-from test_finders import suite_plan_finder
-from test_runners import suite_plan_test_runner
+from atest.test_finders import test_finder_utils
+from atest.test_finders import test_info
+from atest.test_finders import suite_plan_finder
+from atest.test_runners import suite_plan_test_runner
# pylint: disable=protected-access
diff --git a/atest/test_finders/test_finder_utils.py b/atest/test_finders/test_finder_utils.py
index 5de7922..cfaaf3f 100644
--- a/atest/test_finders/test_finder_utils.py
+++ b/atest/test_finders/test_finder_utils.py
@@ -22,23 +22,26 @@
from __future__ import print_function
import logging
-import multiprocessing
import os
import pickle
import re
+import shutil
import subprocess
import tempfile
import time
import xml.etree.ElementTree as ET
-import atest_decorator
-import atest_error
-import atest_utils
-import constants
+from contextlib import contextmanager
+from enum import unique, Enum
+from pathlib import Path
+from typing import Any, Dict
-from atest_enum import AtestEnum, DetectType
-from metrics import metrics, metrics_utils
-from tools import atest_tools
+from atest import atest_error
+from atest import atest_utils
+from atest import constants
+
+from atest.atest_enum import ExitCode, DetectType
+from atest.metrics import metrics, metrics_utils
# Helps find apk files listed in a test config (AndroidTest.xml) file.
# Matches "filename.apk" in <option name="foo", value="filename.apk" />
@@ -96,55 +99,38 @@
# Kotlin: class A : B (...)
_PARENT_CLS_RE = re.compile(r'.*class\s+\w+\s+(?:extends|:)\s+'
r'(?P<parent>[\w\.]+)\s*(?:\{|\()')
+_CC_GREP_RE = r'^\s*(TYPED_TEST(_P)*|TEST(_F|_P)*)\s*\({1},'
-# Explanation of FIND_REFERENCE_TYPEs:
-# ----------------------------------
-# 0. CLASS: Name of a java/kotlin class, usually file is named the same
-# (HostTest lives in HostTest.java or HostTest.kt)
-# 1. QUALIFIED_CLASS: Like CLASS but also contains the package in front like
-# com.android.tradefed.testtype.HostTest.
-# 2. PACKAGE: Name of a java package.
-# 3. INTEGRATION: XML file name in one of the 4 integration config directories.
-# 4. CC_CLASS: Name of a cc class.
+@unique
+class TestReferenceType(Enum):
+ """An Enum class that stores the ways of finding a reference."""
+ # Name of a java/kotlin class, usually file is named the same
+ # (HostTest lives in HostTest.java or HostTest.kt)
+ CLASS = (
+ constants.CLASS_INDEX,
+ r"find {0} -type f| egrep '.*/{1}\.(kt|java)$' || true")
+ # Like CLASS but also contains the package in front like
+ # com.android.tradefed.testtype.HostTest.
+ QUALIFIED_CLASS = (
+ constants.QCLASS_INDEX,
+ r"find {0} -type f | egrep '.*{1}\.(kt|java)$' || true")
+ # Name of a Java package.
+ PACKAGE = (
+ constants.PACKAGE_INDEX,
+ r"find {0} -wholename '*{1}' -type d -print")
+ # XML file name in one of the 4 integration config directories.
+ INTEGRATION = (
+ constants.INT_INDEX,
+ r"find {0} -wholename '*/{1}\.xml' -print")
+ # Name of a cc/cpp class.
+ CC_CLASS = (
+ constants.CC_CLASS_INDEX,
+ (r"find {0} -type f -print | egrep -i '/*test.*\.(cc|cpp)$'"
+ f"| xargs -P0 egrep -sH '{_CC_GREP_RE}' || true"))
-FIND_REFERENCE_TYPE = AtestEnum(['CLASS',
- 'QUALIFIED_CLASS',
- 'PACKAGE',
- 'INTEGRATION',
- 'CC_CLASS'])
-# Get cpu count.
-_CPU_COUNT = 0 if os.uname()[0] == 'Linux' else multiprocessing.cpu_count()
-
-# Unix find commands for searching for test files based on test type input.
-# Note: Find (unlike grep) exits with status 0 if nothing found.
-FIND_CMDS = {
- FIND_REFERENCE_TYPE.CLASS: r"find {0} {1} -type f"
- r"| egrep '.*/{2}\.(kt|java)$' || true",
- FIND_REFERENCE_TYPE.QUALIFIED_CLASS: r"find {0} {1} -type f"
- r"| egrep '.*{2}\.(kt|java)$' || true",
- FIND_REFERENCE_TYPE.PACKAGE: r"find {0} {1} -wholename "
- r"'*{2}' -type d -print",
- FIND_REFERENCE_TYPE.INTEGRATION: r"find {0} {1} -wholename "
- r"'*{2}.xml' -print",
- # Searching a test among files where the absolute paths contain *test*.
- # If users complain atest couldn't find a CC_CLASS, ask them to follow the
- # convention that the filename or dirname must contain *test*, where *test*
- # is case-insensitive.
- FIND_REFERENCE_TYPE.CC_CLASS: r"find {0} {1} -type f -print"
- r"| egrep -i '/*test.*\.(cc|cpp)$'"
- r"| xargs -P" + str(_CPU_COUNT) + r" egrep -sH"
- r" '{}' ".format(constants.CC_GREP_KWRE) +
- " || true"
-}
-
-# Map ref_type with its index file.
-FIND_INDEXES = {
- FIND_REFERENCE_TYPE.CLASS: constants.CLASS_INDEX,
- FIND_REFERENCE_TYPE.QUALIFIED_CLASS: constants.QCLASS_INDEX,
- FIND_REFERENCE_TYPE.PACKAGE: constants.PACKAGE_INDEX,
- FIND_REFERENCE_TYPE.INTEGRATION: constants.INT_INDEX,
- FIND_REFERENCE_TYPE.CC_CLASS: constants.CC_CLASS_INDEX
-}
+ def __init__(self, index_file, find_command):
+ self.index_file = index_file
+ self.find_command = find_command
# XML parsing related constants.
_COMPATIBILITY_PACKAGE_PREFIX = "com.android.compatibility"
@@ -188,7 +174,6 @@
_VTS_BINARY_SRC_DELIM_RE = re.compile(r'.*::(?P<target>.*)$')
_VTS_OUT_DATA_APP_PATH = 'DATA/app'
-# pylint: disable=inconsistent-return-statements
def split_methods(user_input):
"""Split user input string into test reference and list of methods.
@@ -211,16 +196,38 @@
class1#method,class2#method
path1#method,path2#method
"""
+ error_msg = (
+ 'Too many "{}" characters in user input:\n\t{}\n'
+ 'Multiple classes should be separated by space, and methods belong to '
+ 'the same class should be separated by comma. Example syntaxes are:\n'
+ '\tclass1 class2#method1 class3#method2,method3\n'
+ '\tclass1#method class2#method')
+ if not '#' in user_input:
+ if ',' in user_input:
+ raise atest_error.MoreThanOneClassError(
+ error_msg.format(',', user_input))
+ return user_input, frozenset()
parts = user_input.split('#')
- if len(parts) == 1:
- return parts[0], frozenset()
- if len(parts) == 2:
- return parts[0], frozenset(parts[1].split(','))
- raise atest_error.TooManyMethodsError(
- 'Too many methods specified with # character in user input: %s.'
- '\n\nOnly one class#method combination supported per positional'
- ' argument. Multiple classes should be separated by spaces: '
- 'class#method class#method')
+ if len(parts) > 2:
+ raise atest_error.TooManyMethodsError(
+ error_msg.format('#', user_input))
+ # (b/260183137) Support parsing multiple parameters.
+ parsed_methods = []
+ brackets = ('[', ']')
+ for part in parts[1].split(','):
+ count = {part.count(p) for p in brackets}
+ # If brackets are in pair, the length of count should be 1.
+ if len(count) == 1:
+ parsed_methods.append(part)
+ else:
+ # The front part of the pair, e.g. 'method[1'
+ if re.compile(r'^[a-zA-Z0-9]+\[').match(part):
+ parsed_methods.append(part)
+ continue
+ # The rear part of the pair, e.g. '5]]', accumulate this part to
+ # the last index of parsed_method.
+ parsed_methods[-1] += f',{part}'
+ return parts[0], frozenset(parsed_methods)
# pylint: disable=inconsistent-return-statements
@@ -257,10 +264,14 @@
Returns:
Boolean: has cc class in test_path or not.
"""
- with open(test_path) as class_file:
+ with open_cc(test_path) as class_file:
content = class_file.read()
if re.findall(_CC_CLASS_METHOD_RE, content):
return True
+ if re.findall(_CC_PARAM_CLASS_RE, content):
+ return True
+ if re.findall(_TYPE_CC_CLASS_RE, content):
+ return True
return False
@@ -321,7 +332,7 @@
else:
parent_fqcn = package + '.' + parent_cls
parent_test_paths = run_find_cmd(
- FIND_REFERENCE_TYPE.QUALIFIED_CLASS,
+ TestReferenceType.QUALIFIED_CLASS,
os.environ.get(constants.ANDROID_BUILD_TOP),
parent_fqcn)
# Recursively search parent classes until the class is not found.
@@ -454,72 +465,11 @@
return list(mtests)
-@atest_decorator.static_var("cached_ignore_dirs", [])
-def _get_ignored_dirs():
- """Get ignore dirs in find command.
-
- Since we can't construct a single find cmd to find the target and
- filter-out the dir with .out-dir, .find-ignore and $OUT-DIR. We have
- to run the 1st find cmd to find these dirs. Then, we can use these
- results to generate the real find cmd.
-
- Return:
- A list of the ignore dirs.
- """
- out_dirs = _get_ignored_dirs.cached_ignore_dirs
- if not out_dirs:
- build_top = os.environ.get(constants.ANDROID_BUILD_TOP)
- find_out_dir_cmd = (r'find %s -maxdepth 2 '
- r'-type f \( -name ".out-dir" -o -name '
- r'".find-ignore" \)') % build_top
- out_files = subprocess.check_output(find_out_dir_cmd, shell=True)
- if isinstance(out_files, bytes):
- out_files = out_files.decode()
- # Get all dirs with .out-dir or .find-ignore
- if out_files:
- out_files = out_files.splitlines()
- for out_file in out_files:
- if out_file:
- out_dirs.append(os.path.dirname(out_file.strip()))
- # Get the out folder if user specified $OUT_DIR
- custom_out_dir = os.environ.get(constants.ANDROID_OUT_DIR)
- if custom_out_dir:
- user_out_dir = None
- if os.path.isabs(custom_out_dir):
- user_out_dir = custom_out_dir
- else:
- user_out_dir = os.path.join(build_top, custom_out_dir)
- # only ignore the out_dir when it under $ANDROID_BUILD_TOP
- if build_top in user_out_dir:
- if user_out_dir not in out_dirs:
- out_dirs.append(user_out_dir)
- _get_ignored_dirs.cached_ignore_dirs = out_dirs
- return out_dirs
-
-
-def _get_prune_cond_of_ignored_dirs():
- """Get the prune condition of ignore dirs.
-
- Generation a string of the prune condition in the find command.
- It will filter-out the dir with .out-dir, .find-ignore and $OUT-DIR.
- Because they are the out dirs, we don't have to find them.
-
- Return:
- A string of the prune condition of the ignore dirs.
- """
- out_dirs = _get_ignored_dirs()
- prune_cond = r'-type d \( -name ".*"'
- for out_dir in out_dirs:
- prune_cond += r' -o -path %s' % out_dir
- prune_cond += r' \) -prune -o'
- return prune_cond
-
-
def run_find_cmd(ref_type, search_dir, target, methods=None):
"""Find a path to a target given a search dir and a target name.
Args:
- ref_type: An AtestEnum of the reference type.
+ ref_type: An Enum of the reference type.
search_dir: A string of the dirpath to search in.
target: A string of what you're trying to find.
methods: A set of method names.
@@ -528,32 +478,29 @@
A list of the path to the target.
If the search_dir is inexistent, None will be returned.
"""
- # If module_info.json is outdated, finding in the search_dir can result in
- # raising exception. Return null immediately can guild users to run
- # --rebuild-module-info to resolve the problem.
if not os.path.isdir(search_dir):
logging.debug('\'%s\' does not exist!', search_dir)
return None
- ref_name = FIND_REFERENCE_TYPE[ref_type]
+ ref_name = ref_type.name
+ index_file = ref_type.index_file
start = time.time()
- if os.path.isfile(FIND_INDEXES[ref_type]):
+ if os.path.isfile(index_file):
_dict, out = {}, None
- with open(FIND_INDEXES[ref_type], 'rb') as index:
+ with open(index_file, 'rb') as index:
try:
_dict = pickle.load(index, encoding='utf-8')
except (TypeError, IOError, EOFError, pickle.UnpicklingError) as err:
logging.debug('Exception raised: %s', err)
metrics_utils.handle_exc_and_send_exit_event(
constants.ACCESS_CACHE_FAILURE)
- os.remove(FIND_INDEXES[ref_type])
+ os.remove(index_file)
if _dict.get(target):
out = [path for path in _dict.get(target) if search_dir in path]
logging.debug('Found %s in %s', target, out)
else:
- prune_cond = _get_prune_cond_of_ignored_dirs()
if '.' in target:
target = target.replace('.', '/')
- find_cmd = FIND_CMDS[ref_type].format(search_dir, prune_cond, target)
+ find_cmd = ref_type.find_command.format(search_dir, target)
logging.debug('Executing %s find cmd: %s', ref_name, find_cmd)
out = subprocess.check_output(find_cmd, shell=True)
if isinstance(out, bytes):
@@ -577,11 +524,11 @@
A list of the path to the java/cc file.
"""
if is_native_test:
- ref_type = FIND_REFERENCE_TYPE.CC_CLASS
+ ref_type = TestReferenceType.CC_CLASS
elif '.' in class_name:
- ref_type = FIND_REFERENCE_TYPE.QUALIFIED_CLASS
+ ref_type = TestReferenceType.QUALIFIED_CLASS
else:
- ref_type = FIND_REFERENCE_TYPE.CLASS
+ ref_type = TestReferenceType.CLASS
return run_find_cmd(ref_type, search_dir, class_name, methods)
@@ -635,7 +582,7 @@
return rel_dir
# Check module_info if auto_gen config or robo (non-config) here
for mod in module_info.path_to_module_info.get(rel_dir, []):
- if module_info.is_robolectric_module(mod):
+ if module_info.is_legacy_robolectric_class(mod):
return rel_dir
for test_config in mod.get(constants.MODULE_TEST_CONFIG, []):
# If the test config doesn's exist until it was auto-generated
@@ -664,6 +611,8 @@
Returns:
A set of build targets based on the signals found in the xml file.
"""
+ if not os.path.isfile(xml_file):
+ return set()
xml_root = ET.parse(xml_file).getroot()
return get_targets_from_xml_root(xml_root, module_info)
@@ -1012,7 +961,7 @@
test_files = []
for integration_dir in int_dirs:
abs_path = os.path.join(root_dir, integration_dir)
- test_paths = run_find_cmd(FIND_REFERENCE_TYPE.INTEGRATION, abs_path,
+ test_paths = run_find_cmd(TestReferenceType.INTEGRATION, abs_path,
name)
if test_paths:
test_files.extend(test_paths)
@@ -1176,6 +1125,29 @@
return set()
+@contextmanager
+def open_cc(filename: str):
+ """Open a cc/cpp file with comments trimmed."""
+ target_cc = filename
+ if shutil.which('gcc'):
+ tmp = tempfile.NamedTemporaryFile()
+ cmd = (f'gcc -fpreprocessed -dD -E {filename} > {tmp.name}')
+ strip_proc = subprocess.run(cmd, shell=True, check=True)
+ if strip_proc.returncode == ExitCode.SUCCESS:
+ target_cc = tmp.name
+ else:
+ logging.debug('Failed to strip comments in %s. Parsing '
+ 'class/method name may not be accurate.',
+ target_cc)
+ else:
+ logging.debug('Cannot find "gcc" and unable to trim comments.')
+ try:
+ cc_obj = open(target_cc, 'r')
+ yield cc_obj
+ finally:
+ cc_obj.close()
+
+
# pylint: disable=too-many-branches
def get_cc_class_info(test_path):
"""Get the class info of the given cc input test_path.
@@ -1201,22 +1173,8 @@
Returns:
A dict of class info.
"""
- # Strip comments prior to parsing class and method names if possible.
- _test_path = tempfile.NamedTemporaryFile()
- if atest_tools.has_command('gcc'):
- strip_comment_cmd = 'gcc -fpreprocessed -dD -E {} > {}'
- if subprocess.getoutput(
- strip_comment_cmd.format(test_path, _test_path.name)):
- logging.debug('Failed to strip comments in %s. Parsing class/method'
- 'names may not be accurate.', test_path)
- file_to_parse = test_path
- # If failed to strip comments, it will be empty.
- if os.stat(_test_path.name).st_size != 0:
- file_to_parse = _test_path.name
-
- # TODO: b/234531695 support reading header files as well.
- with open(file_to_parse) as class_file:
- logging.debug('Parsing: %s', test_path)
+ logging.debug('Parsing: %s', test_path)
+ with open_cc(test_path) as class_file:
content = class_file.read()
# ('TYPED_TEST', 'PrimeTableTest', 'ReturnsTrueForPrimes')
method_matches = re.findall(_CC_CLASS_METHOD_RE, content)
@@ -1382,11 +1340,11 @@
return None, None
-def need_aggregate_metrics_result(test_xml):
+def need_aggregate_metrics_result(test_xml: str) -> bool:
"""Check if input test config need aggregate metrics.
If the input test define metrics_collector, which means there's a need for
- atest to have the aggregate metrcis result.
+ atest to have the aggregate metrics result.
Args:
test_xml: A string of the path for the test xml.
@@ -1394,17 +1352,63 @@
Returns:
True if input test need to enable aggregate metrics result.
"""
- if os.path.isfile(test_xml):
+ # Due to (b/211640060) it may replace .xml with .config in the xml as
+ # workaround.
+ if not Path(test_xml).is_file():
+ if Path(test_xml).suffix == '.config':
+ test_xml = test_xml.rsplit('.', 1)[0] + '.xml'
+
+ if Path(test_xml).is_file():
xml_root = ET.parse(test_xml).getroot()
if xml_root.findall('.//metrics_collector'):
return True
- # Check if include other config
+ # Recursively check included configs in the same git repository.
+ git_dir = get_git_path(test_xml)
include_configs = xml_root.findall('.//include')
for include_config in include_configs:
name = include_config.attrib[_XML_NAME].strip()
- # Get the absolute path for the include config.
- include_path = os.path.join(
- str(test_xml).split(str(name).split('/')[0])[0], name)
- if need_aggregate_metrics_result(include_path):
- return True
+ # Get the absolute path for the included configs.
+ include_paths = search_integration_dirs(
+ os.path.splitext(name)[0], [git_dir])
+ for include_path in include_paths:
+ if need_aggregate_metrics_result(include_path):
+ return True
return False
+
+
+def get_git_path(file_path: str) -> str:
+ """Get the path of the git repository for the input file.
+
+ Args:
+ file_path: A string of the path to find the git path it belongs.
+
+ Returns:
+ The path of the git repository for the input file, return the path of
+ $ANDROID_BUILD_TOP if nothing find.
+ """
+ build_top = os.environ.get(constants.ANDROID_BUILD_TOP)
+ parent = Path(file_path).absolute().parent
+ while not parent.samefile('/') and not parent.samefile(build_top):
+ if parent.joinpath('.git').is_dir():
+ return parent.absolute()
+ parent = parent.parent
+ return build_top
+
+
+def parse_test_reference(test_ref: str) -> Dict[str, str]:
+ """Parse module, class/pkg, and method name from the given test reference.
+
+ The result will be a none empty dictionary only if input test reference
+ match $module:$pkg_class or $module:$pkg_class:$method.
+
+ Args:
+ test_ref: A string of the input test reference from command line.
+
+ Returns:
+ Dict includes module_name, pkg_class_name and method_name.
+ """
+ ref_match = re.match(
+ r'^(?P<module_name>[^:#]+):(?P<pkg_class_name>[^#]+)'
+ r'#?(?P<method_name>.*)$', test_ref)
+
+ return ref_match.groupdict(default=dict()) if ref_match else dict()
diff --git a/atest/test_finders/test_finder_utils_unittest.py b/atest/test_finders/test_finder_utils_unittest.py
index 35e4edb..3b4c9f8 100755
--- a/atest/test_finders/test_finder_utils_unittest.py
+++ b/atest/test_finders/test_finder_utils_unittest.py
@@ -26,14 +26,14 @@
from unittest import mock
-import atest_error
-import constants
-import module_info
-import unittest_constants as uc
-import unittest_utils
+from atest import atest_error
+from atest import constants
+from atest import module_info
+from atest import unittest_constants as uc
+from atest import unittest_utils
-from test_finders import test_finder_utils
-from test_finders import test_info
+from atest.test_finders import test_finder_utils
+from atest.test_finders import test_info
JSON_FILE_PATH = os.path.join(uc.TEST_DATA_DIR, uc.JSON_FILE)
CLASS_DIR = 'foo/bar/jank/src/android/jank/cts/ui'
@@ -96,8 +96,10 @@
DALVIK_XML_TARGETS = XML_TARGETS | test_finder_utils.DALVIK_TEST_DEPS
BUILD_TOP_DIR = tempfile.TemporaryDirectory().name
PRODUCT_OUT_DIR = os.path.join(BUILD_TOP_DIR, 'out/target/product/vsoc_x86_64')
+HOST_OUT_DIR = tempfile.NamedTemporaryFile().name
#pylint: disable=protected-access
+#pylint: disable=too-many-public-methods
#pylint: disable=unnecessary-comprehension
class TestFinderUtilsUnittests(unittest.TestCase):
"""Unit tests for test_finder_utils.py"""
@@ -128,6 +130,10 @@
self.assertRaises(
atest_error.TooManyMethodsError, test_finder_utils.split_methods,
'class.name#Method,class.name.2#method')
+ self.assertRaises(
+ atest_error.MoreThanOneClassError, test_finder_utils.split_methods,
+ 'class.name1,class.name2,class.name3'
+ )
# Path
unittest_utils.assert_strict_equal(
self,
@@ -137,6 +143,11 @@
self,
test_finder_utils.split_methods('foo/bar/class.java#Method'),
('foo/bar/class.java', {'Method'}))
+ # Multiple parameters
+ unittest_utils.assert_strict_equal(
+ self,
+ test_finder_utils.split_methods('Class.Name#method[1],method[2,[3,4]]'),
+ ('Class.Name', {'method[1]', 'method[2,[3,4]]'}))
@mock.patch.object(test_finder_utils, 'has_method_in_file',
return_value=False)
@@ -175,6 +186,7 @@
self.assertFalse(test_finder_utils.has_method_in_file(
test_path, frozenset(['testMethod'])))
+ # TODO: (b/263330492) Stop mocking build environment variables.
def test_has_method_in_kt_file(self):
"""Test has_method_in_file method with kt class path."""
test_path = os.path.join(uc.TEST_DATA_DIR, 'class_file_path_testing',
@@ -352,7 +364,7 @@
"""
abs_class_dir = '/%s' % CLASS_DIR
mock_module_info = mock.Mock(spec=module_info.ModuleInfo)
- mock_module_info.is_robolectric_module.return_value = True
+ mock_module_info.is_legacy_robolectric_class.return_value = True
rel_class_dir_path = os.path.relpath(abs_class_dir, uc.ROOT)
mock_module_info.path_to_module_info = {rel_class_dir_path: [{}]}
unittest_utils.assert_strict_equal(
@@ -420,70 +432,6 @@
mock_module_info),
VTS_XML_TARGETS)
- @mock.patch('subprocess.check_output')
- def test_get_ignored_dirs(self, _mock_check_output):
- """Test _get_ignored_dirs method."""
-
- # Clean cached value for test.
- test_finder_utils._get_ignored_dirs.cached_ignore_dirs = []
-
- build_top = '/a/b'
- _mock_check_output.return_value = ('/a/b/c/.find-ignore\n'
- '/a/b/out/.out-dir\n'
- '/a/b/d/.out-dir\n\n')
- # Case 1: $OUT_DIR = ''. No customized out dir.
- os_environ_mock = {constants.ANDROID_BUILD_TOP: build_top,
- constants.ANDROID_OUT_DIR: ''}
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- correct_ignore_dirs = ['/a/b/c', '/a/b/out', '/a/b/d']
- ignore_dirs = test_finder_utils._get_ignored_dirs()
- self.assertEqual(ignore_dirs, correct_ignore_dirs)
- # Case 2: $OUT_DIR = 'out2'
- test_finder_utils._get_ignored_dirs.cached_ignore_dirs = []
- os_environ_mock = {constants.ANDROID_BUILD_TOP: build_top,
- constants.ANDROID_OUT_DIR: 'out2'}
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- correct_ignore_dirs = ['/a/b/c', '/a/b/out', '/a/b/d', '/a/b/out2']
- ignore_dirs = test_finder_utils._get_ignored_dirs()
- self.assertEqual(ignore_dirs, correct_ignore_dirs)
- # Case 3: The $OUT_DIR is abs dir but not under $ANDROID_BUILD_TOP
- test_finder_utils._get_ignored_dirs.cached_ignore_dirs = []
- os_environ_mock = {constants.ANDROID_BUILD_TOP: build_top,
- constants.ANDROID_OUT_DIR: '/x/y/e/g'}
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- correct_ignore_dirs = ['/a/b/c', '/a/b/out', '/a/b/d']
- ignore_dirs = test_finder_utils._get_ignored_dirs()
- self.assertEqual(ignore_dirs, correct_ignore_dirs)
- # Case 4: The $OUT_DIR is abs dir and under $ANDROID_BUILD_TOP
- test_finder_utils._get_ignored_dirs.cached_ignore_dirs = []
- os_environ_mock = {constants.ANDROID_BUILD_TOP: build_top,
- constants.ANDROID_OUT_DIR: '/a/b/e/g'}
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- correct_ignore_dirs = ['/a/b/c', '/a/b/out', '/a/b/d', '/a/b/e/g']
- ignore_dirs = test_finder_utils._get_ignored_dirs()
- self.assertEqual(ignore_dirs, correct_ignore_dirs)
- # Case 5: There is a file of '.out-dir' under $OUT_DIR.
- test_finder_utils._get_ignored_dirs.cached_ignore_dirs = []
- os_environ_mock = {constants.ANDROID_BUILD_TOP: build_top,
- constants.ANDROID_OUT_DIR: 'out'}
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- correct_ignore_dirs = ['/a/b/c', '/a/b/out', '/a/b/d']
- ignore_dirs = test_finder_utils._get_ignored_dirs()
- self.assertEqual(ignore_dirs, correct_ignore_dirs)
- # Case 6: Testing cache. All of the changes are useless.
- _mock_check_output.return_value = ('/a/b/X/.find-ignore\n'
- '/a/b/YY/.out-dir\n'
- '/a/b/d/.out-dir\n\n')
- os_environ_mock = {constants.ANDROID_BUILD_TOP: build_top,
- constants.ANDROID_OUT_DIR: 'new'}
- with mock.patch.dict('os.environ', os_environ_mock, clear=True):
- cached_answer = ['/a/b/c', '/a/b/out', '/a/b/d']
- none_cached_answer = ['/a/b/X', '/a/b/YY', '/a/b/d', 'a/b/new']
- ignore_dirs = test_finder_utils._get_ignored_dirs()
- self.assertEqual(ignore_dirs, cached_answer)
- self.assertNotEqual(ignore_dirs, none_cached_answer)
-
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/'})
@mock.patch('builtins.input', return_value='0')
def test_search_integration_dirs(self, mock_input):
"""Test search_integration_dirs."""
@@ -519,7 +467,6 @@
del java_tmp_test_result[:]
mock_input.return_value = '0'
_mock_isfile = True
- test_finder_utils.FIND_INDEXES['CLASS'] = uc.CLASS_INDEX
java_class = os.path.join(uc.FIND_PATH, uc.FIND_PATH_TESTCASE_JAVA + '.java')
java_tmp_test_result.extend(test_finder_utils.find_class_file(uc.FIND_PATH,
uc.FIND_PATH_TESTCASE_JAVA))
@@ -547,7 +494,6 @@
del java_tmp_test_result[:]
mock_input.return_value = '0'
_mock_isfile = True
- test_finder_utils.FIND_INDEXES['QUALIFIED_CLASS'] = uc.QCLASS_INDEX
java_qualified_class = '{0}.{1}'.format(uc.FIND_PATH_FOLDER, uc.FIND_PATH_TESTCASE_JAVA)
java_tmp_test_result.extend(test_finder_utils.find_class_file(uc.FIND_PATH,
java_qualified_class))
@@ -577,7 +523,6 @@
del cc_tmp_test_result[:]
mock_input.return_value = '0'
_mock_isfile = True
- test_finder_utils.FIND_INDEXES['CC_CLASS'] = uc.CC_CLASS_INDEX
cpp_class = os.path.join(uc.FIND_PATH, uc.FIND_PATH_FILENAME_CC + '.cpp')
cc_tmp_test_result.extend(test_finder_utils.find_class_file(uc.FIND_PATH,
uc.FIND_PATH_TESTCASE_CC,
@@ -590,7 +535,6 @@
self.assertTrue(cpp_class in cc_tmp_test_result)
self.assertTrue(cc_class in cc_tmp_test_result)
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/'})
@mock.patch('builtins.input', return_value='0')
@mock.patch.object(test_finder_utils, 'get_dir_path_and_filename')
@mock.patch('os.path.exists', return_value=True)
@@ -652,8 +596,9 @@
self.assertEqual(test_finder_utils.get_levenshtein_distance(uc.MOD3, uc.FUZZY_MOD3,
dir_costs=(1, 2, 1)), 8)
- def test_is_parameterized_java_class(self):
- """Test is_parameterized_java_class method."""
+ @staticmethod
+ def test_is_parameterized_java_class():
+ """Test is_parameterized_java_class method. """
matched_contents = (['@RunWith(Parameterized.class)'],
[' @RunWith( Parameterized.class ) '],
['@RunWith(TestParameterInjector.class)'],
@@ -755,7 +700,8 @@
self.assertEqual(package_name,
test_finder_utils.get_package_name(target_kt))
- def get_paths_side_effect(self, module_name):
+ @staticmethod
+ def _get_paths_side_effect(module_name):
"""Mock return values for module_info.get_paths."""
if module_name == UNIT_TEST_MODULE_1:
return [IT_TEST_MATCHED_1_PATH]
@@ -765,8 +711,6 @@
return [UNIT_TEST_NOT_MATCHED_1_PATH]
return []
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
@mock.patch.object(module_info.ModuleInfo, 'get_all_host_unit_tests',
return_value=[UNIT_TEST_MODULE_1,
UNIT_TEST_MODULE_2,
@@ -774,8 +718,8 @@
@mock.patch.object(module_info.ModuleInfo, 'get_paths',)
def test_find_host_unit_tests(self, _get_paths, _mock_get_unit_tests):
"""Test find_host_unit_tests"""
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
- _get_paths.side_effect = self.get_paths_side_effect
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
+ _get_paths.side_effect = self._get_paths_side_effect
expect_unit_tests = [UNIT_TEST_MODULE_1, UNIT_TEST_MODULE_2]
self.assertEqual(
sorted(expect_unit_tests),
@@ -793,13 +737,11 @@
expect_methods.sort()
self.assertEqual(expect_methods, real_methods)
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
@mock.patch('os.path.isfile', side_effect=unittest_utils.isfile_side_effect)
def test_get_test_config_use_androidtestxml(self, _isfile):
"""Test get_test_config_and_srcs using default AndroidTest.xml"""
android_root = '/'
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
t_info = test_info.TestInfo(
'androidtest_config_module', 'mock_runner', build_targets=set())
expect_config = os.path.join(android_root, uc.ANDTEST_CONFIG_PATH,
@@ -807,13 +749,11 @@
result, _ = test_finder_utils.get_test_config_and_srcs(t_info, mod_info)
self.assertEqual(expect_config, result)
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
@mock.patch('os.path.isfile', side_effect=unittest_utils.isfile_side_effect)
def test_get_test_config_single_config(self, _isfile):
"""Test get_test_config_and_srcs manualy set it's config"""
android_root = '/'
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
t_info = test_info.TestInfo(
'single_config_module', 'mock_runner', build_targets=set())
expect_config = os.path.join(
@@ -821,13 +761,11 @@
result, _ = test_finder_utils.get_test_config_and_srcs(t_info, mod_info)
self.assertEqual(expect_config, result)
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
@mock.patch('os.path.isfile', side_effect=unittest_utils.isfile_side_effect)
def test_get_test_config_main_multiple_config(self, _isfile):
"""Test get_test_config_and_srcs which is the main module of multiple config"""
android_root = '/'
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
t_info = test_info.TestInfo(
'multiple_config_module', 'mock_runner', build_targets=set())
expect_config = os.path.join(
@@ -835,13 +773,11 @@
result, _ = test_finder_utils.get_test_config_and_srcs(t_info, mod_info)
self.assertEqual(expect_config, result)
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
@mock.patch('os.path.isfile', side_effect=unittest_utils.isfile_side_effect)
def test_get_test_config_subtest_in_multiple_config(self, _isfile):
"""Test get_test_config_and_srcs not the main module of multiple config"""
android_root = '/'
- mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+ mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
t_info = test_info.TestInfo(
'Multiple2', 'mock_runner', build_targets=set())
expect_config = os.path.join(
@@ -858,5 +794,110 @@
self.assertEqual(exist, False)
+ def test_parse_test_reference_input_module_class_method_match(self):
+ test_module = 'myModule'
+ test_class = 'myClass'
+ test_method = 'myTest::Method'
+ test_ref = f'{test_module}:{test_class}#{test_method}'
+
+ result = test_finder_utils.parse_test_reference(test_ref)
+
+ self.assertEqual(test_module, result['module_name'])
+ self.assertEqual(test_class, result['pkg_class_name'])
+ self.assertEqual(test_method, result['method_name'])
+
+ def test_parse_test_reference_input_module_class_match(self):
+ test_module = 'myModule'
+ test_class = 'myClass'
+ test_ref = f'{test_module}:{test_class}'
+
+ result = test_finder_utils.parse_test_reference(test_ref)
+
+ self.assertEqual(test_module, result['module_name'])
+ self.assertEqual(test_class, result['pkg_class_name'])
+ self.assertEqual('', result.get('method_name', ''))
+
+ def test_parse_test_reference_input_module_class_parameter_method_match(
+ self):
+ test_module = 'myModule'
+ test_class = 'myClass'
+ test_method = 'myTest::Method[0]'
+ test_ref = f'{test_module}:{test_class}#{test_method}'
+
+ result = test_finder_utils.parse_test_reference(test_ref)
+
+ self.assertEqual(test_module, result['module_name'])
+ self.assertEqual(test_class, result['pkg_class_name'])
+ self.assertEqual(test_method, result['method_name'])
+
+ def test_parse_test_reference_input_module_class_multiple_methods_match(
+ self):
+ test_module = 'myModule'
+ test_class = 'myClass'
+ test_method = 'myTest::Method[0],myTest::Method[1]'
+ test_ref = f'{test_module}:{test_class}#{test_method}'
+
+ result = test_finder_utils.parse_test_reference(test_ref)
+
+ self.assertEqual(test_module, result['module_name'])
+ self.assertEqual(test_class, result['pkg_class_name'])
+ self.assertEqual(test_method, result['method_name'])
+
+ def test_parse_test_reference_input_class_method_not_match(
+ self):
+ test_class = 'myClass'
+ test_method = 'myTest::Method'
+ test_ref = f'{test_class}#{test_method}'
+
+ result = test_finder_utils.parse_test_reference(test_ref)
+
+ self.assertEqual(result, dict())
+
+ def test_parse_test_reference_input_module_dashed_match(self):
+ test_module = 'my-module'
+ test_class = 'BR/EI/ZH'
+ test_ref = f'{test_module}:{test_class}'
+
+ result = test_finder_utils.parse_test_reference(test_ref)
+
+ self.assertEqual(test_module, result['module_name'])
+ self.assertEqual(test_class, result['pkg_class_name'])
+
+ def test_parse_test_reference_input_module_pkg_method_match(self):
+ test_module = 'myModule'
+ test_package = 'my.package'
+ test_method = 'myTest::Method'
+ test_ref = f'{test_module}:{test_package}#{test_method}'
+
+ result = test_finder_utils.parse_test_reference(test_ref)
+
+ self.assertEqual(test_module, result['module_name'])
+ self.assertEqual(test_package, result['pkg_class_name'])
+ self.assertEqual(test_method, result['method_name'])
+
+ def test_parse_test_reference_input_plan_class_match(self):
+ test_module = 'my/Module'
+ test_class = 'class'
+ test_ref = f'{test_module}:{test_class}'
+
+ result = test_finder_utils.parse_test_reference(test_ref)
+
+ self.assertEqual(test_module, result['module_name'])
+ self.assertEqual(test_class, result['pkg_class_name'])
+ self.assertEqual('', result.get('method_name', ''))
+
+ def test_parse_test_reference_input_module_parameter_class_and_method_match(
+ self):
+ test_module = 'myModule'
+ test_class = 'myClass/abc0'
+ test_method = 'myTest0/Method[0]'
+ test_ref = f'{test_module}:{test_class}#{test_method}'
+
+ result = test_finder_utils.parse_test_reference(test_ref)
+
+ self.assertEqual(test_module, result['module_name'])
+ self.assertEqual(test_class, result['pkg_class_name'])
+ self.assertEqual(test_method, result['method_name'])
+
if __name__ == '__main__':
unittest.main()
diff --git a/atest/test_finders/test_info.py b/atest/test_finders/test_info.py
index 4132bb8..74dc5c2 100644
--- a/atest/test_finders/test_info.py
+++ b/atest/test_finders/test_info.py
@@ -17,9 +17,9 @@
"""
from collections import namedtuple
+from typing import Set
-import constants
-
+from atest import constants
TestFilterBase = namedtuple('TestFilter', ['class_name', 'methods'])
@@ -28,10 +28,11 @@
"""Information needed to identify and run a test."""
# pylint: disable=too-many-arguments
+ # TODO: remove all arguments but only test_name, test_runner, build_targets,
+ # data and compatibility_suites.
def __init__(self, test_name, test_runner, build_targets, data=None,
suite=None, module_class=None, install_locations=None,
- test_finder='', compatibility_suites=None,
- mainline_modules=None, robo_type=None):
+ test_finder='', compatibility_suites=None):
"""Init for TestInfo.
Args:
@@ -48,20 +49,18 @@
compatibility_suites: A list of compatibility_suites. It's a
snippet of compatibility_suites in module_info. e.g.
["device-tests", "vts10"]
- mainline_modules: A string of mainline modules.
- e.g. 'some1.apk+some2.apex+some3.apks'
- robo_type: Integer of robolectric types.
- 0: Not robolectric test
- 1. Modern robolectric test(Tradefed Runner)
- 2: Legacy robolectric test(Robolectric Runner)
"""
self.test_name = test_name
+ self.raw_test_name = test_name
self.test_runner = test_runner
- self.build_targets = build_targets
self.data = data if data else {}
self.suite = suite
self.module_class = module_class if module_class else []
- self.robo_type = robo_type if robo_type else 0
+ # robolectric test types:
+ # 0: Not robolectric test
+ # 1. Modern robolectric test(Tradefed Runner)
+ # 2: Legacy robolectric test(Robolectric Runner)
+ self.robo_type = 0
self.install_locations = (install_locations if install_locations
else set())
# True if the TestInfo is built from a test configured in TEST_MAPPING.
@@ -74,21 +73,57 @@
if compatibility_suites else [])
# True if test need to generate aggregate metrics result.
self.aggregate_metrics_result = False
- self.mainline_modules = mainline_modules if mainline_modules else ""
+ self.artifacts = set()
+
+ self._build_targets = set(build_targets) if build_targets else set()
+ self._mainline_modules = set()
def __str__(self):
host_info = (' - runs on host without device required.' if self.host
else '')
return (f'test_name:{self.test_name} - '
+ f'raw_test_name:{self.raw_test_name} - '
f'test_runner:{self.test_runner} - '
- f'build_targets:{self.build_targets} - data:{self.data} - '
+ f'build_targets:{self._build_targets} - data:{self.data} - '
f'suite:{self.suite} - module_class:{self.module_class} - '
f'install_locations:{self.install_locations}{host_info} - '
f'test_finder:{self.test_finder} - '
f'compatibility_suites:{self.compatibility_suites} - '
- f'mainline_modules:{self.mainline_modules} - '
+ f'mainline_modules:{self._mainline_modules} - '
f'aggregate_metrics_result:{self.aggregate_metrics_result} - '
- f'robo_type:{self.robo_type}')
+ f'robo_type:{self.robo_type} - '
+ f'artifacts:{self.artifacts}')
+
+ @property
+ def build_targets(self) -> Set[str]:
+ """Gets all build targets of the test.
+
+        Gets all build targets of the test, including mainline
+        module build targets if it's a mainline test.
+ """
+ return frozenset(self._build_targets)
+
+ def add_build_target(self, target: str):
+        """Adds a build target to the test's build targets.
+
+        Args:
+            target: the name of a build target.
+ """
+ self._build_targets.add(target)
+
+ @property
+ def mainline_modules(self) -> Set[str]:
+ """Gets mainline module build targets."""
+ return frozenset(self._mainline_modules)
+
+ def add_mainline_module(self, module: str):
+        """Adds a mainline module to the mainline modules and build targets.
+
+ Args:
+ module: the build module name of a mainline module.
+ """
+ self._build_targets.add(module)
+ self._mainline_modules.add(module)
def get_supported_exec_mode(self):
"""Get the supported execution mode of the test.
diff --git a/atest/test_finders/test_info_unittest.py b/atest/test_finders/test_info_unittest.py
index 779c28c..43c3985 100755
--- a/atest/test_finders/test_info_unittest.py
+++ b/atest/test_finders/test_info_unittest.py
@@ -19,7 +19,7 @@
import unittest
-from test_finders import test_info
+from atest.test_finders import test_info
#pylint: disable=protected-access
diff --git a/atest/test_finders/tf_integration_finder.py b/atest/test_finders/tf_integration_finder.py
index d7b0998..ce5cc4b 100644
--- a/atest/test_finders/tf_integration_finder.py
+++ b/atest/test_finders/tf_integration_finder.py
@@ -27,13 +27,13 @@
from zipfile import ZipFile
-import atest_error
-import constants
+from atest import atest_error
+from atest import constants
-from test_finders import test_info
-from test_finders import test_finder_base
-from test_finders import test_finder_utils
-from test_runners import atest_tf_test_runner
+from atest.test_finders import test_info
+from atest.test_finders import test_finder_base
+from atest.test_finders import test_finder_utils
+from atest.test_runners import atest_tf_test_runner
# Find integration name based on file path of integration config xml file.
# Group matches "foo/bar" given "blah/res/config/foo/bar.xml from source code
@@ -157,7 +157,7 @@
for integration_dir in self.integration_dirs:
abs_path = os.path.join(self.root_dir, integration_dir)
found_test_files = test_finder_utils.run_find_cmd(
- test_finder_utils.FIND_REFERENCE_TYPE.INTEGRATION,
+ test_finder_utils.TestReferenceType.INTEGRATION,
abs_path, name)
if found_test_files:
test_files.extend(found_test_files)
@@ -173,8 +173,13 @@
A populated TestInfo namedtuple if test found, else None
"""
class_name = None
- if ':' in name:
- name, class_name = name.split(':')
+ parse_result = test_finder_utils.parse_test_reference(name)
+ if parse_result:
+ name = parse_result['module_name']
+ class_name = parse_result['pkg_class_name']
+ method = parse_result.get('method_name', '')
+ if method:
+ class_name = class_name + '#' + method
test_files = self._search_integration_dirs(name)
if not test_files:
# Check prebuilt jars if input name is in jars.
diff --git a/atest/test_finders/tf_integration_finder_unittest.py b/atest/test_finders/tf_integration_finder_unittest.py
index 3ac4577..eec7547 100755
--- a/atest/test_finders/tf_integration_finder_unittest.py
+++ b/atest/test_finders/tf_integration_finder_unittest.py
@@ -23,14 +23,14 @@
from unittest import mock
-import constants
-import unittest_constants as uc
-import unittest_utils
+from atest import constants
+from atest import unittest_constants as uc
+from atest import unittest_utils
-from test_finders import test_finder_utils
-from test_finders import test_info
-from test_finders import tf_integration_finder
-from test_runners import atest_tf_test_runner as atf_tr
+from atest.test_finders import test_finder_utils
+from atest.test_finders import test_info
+from atest.test_finders import tf_integration_finder
+from atest.test_runners import atest_tf_test_runner as atf_tr
INT_NAME_CLASS = uc.INT_NAME + ':' + uc.FULL_CLASS_NAME
@@ -99,7 +99,6 @@
self.assertEqual(
self.tf_finder.find_test_by_integration_name('NotIntName'), [])
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/'})
@mock.patch.object(tf_integration_finder.TFIntegrationFinder,
'_get_build_targets', return_value=set())
@mock.patch('os.path.realpath',
diff --git a/atest/test_mapping.py b/atest/test_mapping.py
index 9f60d92..d9773e0 100644
--- a/atest/test_mapping.py
+++ b/atest/test_mapping.py
@@ -20,8 +20,8 @@
import os
import re
-import atest_utils
-import constants
+from atest import atest_utils
+from atest import constants
TEST_MAPPING = 'TEST_MAPPING'
diff --git a/atest/test_mapping_unittest.py b/atest/test_mapping_unittest.py
index 848a052..e868fd5 100755
--- a/atest/test_mapping_unittest.py
+++ b/atest/test_mapping_unittest.py
@@ -22,8 +22,8 @@
from unittest import mock
-import test_mapping
-import unittest_constants as uc
+from atest import test_mapping
+from atest import unittest_constants as uc
class TestMappingUnittests(unittest.TestCase):
@@ -56,7 +56,7 @@
self.assertEqual(
'host can only have boolean value.', str(context.exception))
- @mock.patch("atest_utils.get_modified_files")
+ @mock.patch("atest.atest_utils.get_modified_files")
def test_is_match_file_patterns(self, mock_modified_files):
"""Test mathod is_match_file_patterns."""
test_mapping_file = ''
diff --git a/atest/test_plans/INTEGRATION_TESTS b/atest/test_plans/INTEGRATION_TESTS
index 630302f..3d2a736 100644
--- a/atest/test_plans/INTEGRATION_TESTS
+++ b/atest/test_plans/INTEGRATION_TESTS
@@ -24,6 +24,7 @@
CtsAnimationTestCases:AnimatorTest
CtsSampleDeviceTestCases:SampleDeviceTest#testSharedPreferences
CtsSampleDeviceTestCases:android.sample.cts.SampleDeviceReportLogTest
+pts-bot:PAN/GN/MISC/UUID/BV-01-C
###[Test Finder: QUALIFIED_CLASS, Test Runner:AtestTradefedTestRunner]###
diff --git a/atest/test_runner_handler.py b/atest/test_runner_handler.py
index 1087ecf..d569a08 100644
--- a/atest/test_runner_handler.py
+++ b/atest/test_runner_handler.py
@@ -25,20 +25,22 @@
from typing import Any, Dict, List
-import atest_error
-import bazel_mode
-import constants
-import module_info
-import result_reporter
+from atest import atest_error
+from atest import bazel_mode
+from atest import constants
+from atest import module_info
+from atest import result_reporter
+from atest import atest_utils
-from atest_enum import ExitCode
-from metrics import metrics
-from metrics import metrics_utils
-from test_finders import test_info
-from test_runners import atest_tf_test_runner
-from test_runners import robolectric_test_runner
-from test_runners import suite_plan_test_runner
-from test_runners import vts_tf_test_runner
+from atest.atest_enum import ExitCode
+from atest.metrics import metrics
+from atest.metrics import metrics_utils
+from atest.test_finders import test_info
+from atest.test_runners import atest_tf_test_runner
+from atest.test_runners import roboleaf_test_runner
+from atest.test_runners import robolectric_test_runner
+from atest.test_runners import suite_plan_test_runner
+from atest.test_runners import vts_tf_test_runner
_TEST_RUNNERS = {
atest_tf_test_runner.AtestTradefedTestRunner.NAME: atest_tf_test_runner.AtestTradefedTestRunner,
@@ -46,6 +48,7 @@
suite_plan_test_runner.SuitePlanTestRunner.NAME: suite_plan_test_runner.SuitePlanTestRunner,
vts_tf_test_runner.VtsTradefedTestRunner.NAME: vts_tf_test_runner.VtsTradefedTestRunner,
bazel_mode.BazelTestRunner.NAME: bazel_mode.BazelTestRunner,
+ roboleaf_test_runner.RoboleafTestRunner.NAME: roboleaf_test_runner.RoboleafTestRunner,
}
@@ -111,12 +114,12 @@
test_runner_build_req |= test_runner(
unused_result_dir,
mod_info=mod_info,
- test_infos=tests,
extra_args=extra_args or {},
- ).get_test_runner_build_reqs()
+ ).get_test_runner_build_reqs(tests)
return test_runner_build_req
+# pylint: disable=too-many-locals
def run_all_tests(results_dir, test_infos, extra_args, mod_info,
delay_print_summary=False):
"""Run the given tests.
@@ -155,13 +158,20 @@
reporter.runner_failure(test_runner.NAME, stacktrace)
tests_ret_code = ExitCode.TEST_FAILURE
is_success = False
+ run_time = metrics_utils.convert_duration(time.time() - test_start)
metrics.RunnerFinishEvent(
- duration=metrics_utils.convert_duration(time.time() - test_start),
+ duration=run_time,
success=is_success,
runner_name=test_runner.NAME,
test=[{'name': test_name,
'result': ret_code,
'stacktrace': stacktrace}])
+        # Tests that spend over 10 mins to finish will be stored in the
+        # shardable test file, and Atest will launch auto-sharding in
+        # subsequent runs.
+ for test in tests:
+ atest_utils.update_shardable_tests(test.test_name,
+ run_time.get('seconds', 0))
if delay_print_summary:
return tests_ret_code, reporter
return reporter.print_summary() or tests_ret_code, reporter
diff --git a/atest/test_runner_handler_unittest.py b/atest/test_runner_handler_unittest.py
index 4db8ef8..7c2430d 100755
--- a/atest/test_runner_handler_unittest.py
+++ b/atest/test_runner_handler_unittest.py
@@ -25,15 +25,14 @@
from unittest import mock
-import atest_error
-import constants
-import module_info
-import test_runner_handler
-import unittest_constants as uc
+from atest import atest_error
+from atest import module_info
+from atest import test_runner_handler
+from atest import unittest_constants as uc
-from metrics import metrics
-from test_finders import test_info
-from test_runners import test_runner_base as tr_base
+from atest.metrics import metrics
+from atest.test_finders import test_info
+from atest.test_runners import test_runner_base as tr_base
FAKE_TR_NAME_A = 'FakeTestRunnerA'
FAKE_TR_NAME_B = 'FakeTestRunnerB'
@@ -51,8 +50,7 @@
MODULE_INFO_B_AGAIN = test_info.TestInfo(MODULE_NAME_B_AGAIN, FAKE_TR_NAME_B,
set())
BAD_TESTINFO = test_info.TestInfo('bad_name', MISSING_TR_NAME, set())
-BUILD_TOP_DIR = tempfile.TemporaryDirectory().name
-PRODUCT_OUT_DIR = os.path.join(BUILD_TOP_DIR, 'out/target/product/vsoc_x86_64')
+
class FakeTestRunnerA(tr_base.TestRunnerBase):
"""Fake test runner A."""
@@ -66,7 +64,7 @@
def host_env_check(self):
pass
- def get_test_runner_build_reqs(self):
+ def get_test_runner_build_reqs(self, test_infos):
return FAKE_TR_A_REQS
def generate_run_commands(self, test_infos, extra_args, port=None):
@@ -81,7 +79,7 @@
def run_tests(self, test_infos, extra_args, reporter):
return 1
- def get_test_runner_build_reqs(self):
+ def get_test_runner_build_reqs(self, test_infos):
return FAKE_TR_B_REQS
@@ -94,7 +92,7 @@
}
def setUp(self):
- mock.patch('test_runner_handler._get_test_runners',
+ mock.patch('atest.test_runner_handler._get_test_runners',
return_value=self._TEST_RUNNERS).start()
def tearDown(self):
@@ -126,15 +124,14 @@
test_runner_handler.get_test_runner_reqs(empty_module_info,
test_infos))
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
- constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
@mock.patch.object(metrics, 'RunnerFinishEvent')
def test_run_all_tests(self, _mock_runner_finish):
"""Test that the return value as we expected."""
results_dir = ""
extra_args = {}
mod_info = module_info.ModuleInfo(
- module_file=os.path.join(uc.TEST_DATA_DIR, uc.JSON_FILE))
+ module_file=os.path.join(uc.TEST_DATA_DIR, uc.JSON_FILE),
+ index_dir=tempfile.NamedTemporaryFile().name)
# Tests both run_tests return 0
test_infos = [MODULE_INFO_A, MODULE_INFO_A_AGAIN]
self.assertEqual(
diff --git a/atest/test_runners/__init__.py b/atest/test_runners/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/atest/test_runners/__init__.py
+++ /dev/null
diff --git a/atest/test_runners/atest_tf_test_runner.py b/atest/test_runners/atest_tf_test_runner.py
index ed15b73..460f06c 100644
--- a/atest/test_runners/atest_tf_test_runner.py
+++ b/atest/test_runners/atest_tf_test_runner.py
@@ -29,23 +29,24 @@
from functools import partial
from pathlib import Path
-from typing import Any, List, Tuple
+from typing import Any, Dict, List, Set, Tuple
-import atest_configs
-import atest_error
-import atest_utils
-import constants
-import module_info
-import result_reporter
+from atest import atest_configs
+from atest import atest_error
+from atest import atest_utils
+from atest import constants
+from atest import module_info
+from atest import result_reporter
-from atest_enum import DetectType, ExitCode
-from logstorage import atest_gcp_utils
-from logstorage import logstorage_utils
-from metrics import metrics
-from test_finders import test_finder_utils
-from test_finders import test_info
-from test_runners import test_runner_base as trb
-from .event_handler import EventHandler
+from atest.atest_enum import DetectType, ExitCode
+from atest.coverage import coverage
+from atest.logstorage import atest_gcp_utils
+from atest.logstorage import logstorage_utils
+from atest.metrics import metrics
+from atest.test_finders import test_finder_utils
+from atest.test_finders import test_info
+from atest.test_runners import test_runner_base as trb
+from atest.test_runners.event_handler import EventHandler
POLL_FREQ_SECS = 10
SOCKET_HOST = '127.0.0.1'
@@ -79,6 +80,7 @@
'NO_DEVICE_ALLOCATED',
'WRONG_JAVA_VERSION']
+MAINLINE_LOCAL_DOC = 'go/mainline-local-build'
class TradeFedExitError(Exception):
"""Raised when TradeFed exists before test run has finished."""
@@ -123,24 +125,33 @@
super().__init__(results_dir, **kwargs)
self.module_info = mod_info
self.log_path = os.path.join(results_dir, LOG_FOLDER_NAME)
- if not os.path.exists(self.log_path):
- os.makedirs(self.log_path)
+ # (b/275537997) results_dir could be '' in test_runner_handler; only
+ # mkdir when it is invoked by run_tests.
+ if results_dir:
+ Path(self.log_path).mkdir(parents=True, exist_ok=True)
log_args = {'log_root_option_name': constants.LOG_ROOT_OPTION_NAME,
'log_ext_option': constants.LOG_SAVER_EXT_OPTION,
'log_path': self.log_path,
- 'proto_path': os.path.join(self.results_dir, constants.ATEST_TEST_RECORD_PROTO)}
- self.run_cmd_dict = {'env': self._get_ld_library_path(),
+ 'proto_path': os.path.join(
+ self.results_dir,
+ constants.ATEST_TEST_RECORD_PROTO)}
+ self.run_cmd_dict = {'env': '',
'exe': self.EXECUTABLE,
'template': self._TF_TEMPLATE,
'log_saver': constants.ATEST_TF_LOG_SAVER,
'tf_customize_template': '',
'args': '',
'log_args': self._LOG_ARGS.format(**log_args)}
+ if kwargs.get('extra_args', {}).get(constants.LD_LIBRARY_PATH, False):
+ self.run_cmd_dict.update({'env': self._get_ld_library_path()})
self.is_verbose = logging.getLogger().isEnabledFor(logging.DEBUG)
self.root_dir = os.environ.get(constants.ANDROID_BUILD_TOP)
- def _get_ld_library_path(self):
- """Get the extra environment setup string for running TF.
+ def _get_ld_library_path(self) -> str:
+ """Get the corresponding LD_LIBRARY_PATH string for running TF.
+
+        This method inserts $ANDROID_HOST_OUT/{lib,lib64} into LD_LIBRARY_PATH
+        and returns the updated LD_LIBRARY_PATH.
Returns:
Strings for the environment passed to TF. Currently only
@@ -153,11 +164,8 @@
# due to ATest by default only testing the main abi and even a 32bit
# only target the lib64 folder is actually not exist.
lib_dirs = ['lib64', 'lib']
- path = ''
- for lib in lib_dirs:
- lib_dir = os.path.join(out_dir, lib)
- path = path + lib_dir + ':'
- return 'LD_LIBRARY_PATH=%s' % path
+ path = ':'.join([os.path.join(out_dir, dir) for dir in lib_dirs])
+ return f'LD_LIBRARY_PATH={path}:{os.getenv("LD_LIBRARY_PATH", "")}'
def _try_set_gts_authentication_key(self):
"""Set GTS authentication key if it is available or exists.
@@ -340,9 +348,9 @@
inputs.pop().close()
if not reporter.all_test_results:
atest_utils.colorful_print(
- r'No test to run. Please check: '
- r'{} for detail.'.format(reporter.log_path),
- constants.RED, highlight=True)
+ r'No test to run. Test Logs have saved in '
+ f'{reporter.log_path}.',
+ constants.RED, constants.WHITE)
if not data_map:
metrics.LocalDetectEvent(
detect_type=DetectType.TF_EXIT_CODE,
@@ -459,13 +467,16 @@
root_dir = os.environ.get(constants.ANDROID_BUILD_TOP, '')
return os.path.commonprefix([output, root_dir]) != root_dir
- def get_test_runner_build_reqs(self):
+ def get_test_runner_build_reqs(self, test_infos: List[test_info.TestInfo]):
"""Return the build requirements.
+ Args:
+ test_infos: List of TestInfo.
+
Returns:
Set of build targets.
"""
- build_req = self._BUILD_REQ
+ build_req = self._BUILD_REQ.copy()
# Use different base build requirements if google-tf is around.
if self.module_info.is_module(constants.GTF_MODULE):
build_req = {constants.GTF_TARGET}
@@ -476,8 +487,34 @@
if self._is_missing_exec(executable):
if self.module_info.is_module(executable):
build_req.add(executable)
+
+        # Force rebuilding all jars under $ANDROID_HOST_OUT to prevent
+        # outdated host jars from breaking the test.
+ build_req |= self._get_host_framework_targets()
+
+ build_req |= trb.gather_build_targets(test_infos)
return build_req
+ def _get_host_framework_targets(self) -> Set[str]:
+ """Get the build targets for all the existing jars under host framework.
+
+ Returns:
+ A set of build target name under $(ANDROID_HOST_OUT)/framework.
+ """
+ host_targets = set()
+ if not self.module_info:
+ return host_targets
+
+ framework_host_dir = Path(
+ os.environ.get(constants.ANDROID_HOST_OUT)).joinpath('framework')
+ if framework_host_dir.is_dir():
+ jars = framework_host_dir.glob('*.jar')
+ for jar in jars:
+ if self.module_info.is_module(jar.stem):
+ host_targets.add(jar.stem)
+ logging.debug('Found exist host framework target:%s', host_targets)
+ return host_targets
+
def _parse_extra_args(self, test_infos, extra_args):
"""Convert the extra args into something tf can understand.
@@ -506,7 +543,9 @@
args_to_append.append(constants.TF_ENABLE_PARAMETERIZED_MODULES)
# If all the test config has config with auto enable parameter, force
# exclude those default parameters(ex: instant_app, secondary_user)
- if self._is_all_tests_parameter_auto_enabled(test_infos):
+ # TODO: (b/228433541) Remove the limitation after the root cause fixed.
+ if (len(test_infos) <= 1 and
+ self._is_all_tests_parameter_auto_enabled(test_infos)):
if constants.TF_ENABLE_PARAMETERIZED_MODULES not in args_to_append:
args_to_append.append(constants.TF_ENABLE_PARAMETERIZED_MODULES)
for exclude_parameter in constants.DEFAULT_EXCLUDE_PARAS:
@@ -573,6 +612,9 @@
% constants.DEVICE_SETUP_PREPARER)
for info in test_infos:
if constants.TEST_WITH_MAINLINE_MODULES_RE.match(info.test_name):
+ # TODO(b/253641058) Remove this once mainline module
+ # binaries are stored under testcase directory.
+ self._copy_mainline_module_binary(info.mainline_modules)
test_args.append(constants.TF_ENABLE_MAINLINE_PARAMETERIZED_MODULES)
break
# For detailed logs, set TF options log-level/log-level-display as
@@ -580,11 +622,16 @@
log_level = 'VERBOSE'
test_args.extend(['--log-level-display', log_level])
test_args.extend(['--log-level', log_level])
+
+ # TODO(b/275110259) Remove this once TF not going to get bugreport.
+ test_args.extend(['--skip-all-system-status-check=true'])
+
# Set no-early-device-release by default to speed up TF teardown time.
if not constants.TF_EARLY_DEVICE_RELEASE in extra_args:
test_args.extend(['--no-early-device-release'])
- args_to_add, args_not_supported = self._parse_extra_args(test_infos, extra_args)
+ args_to_add, args_not_supported = self._parse_extra_args(
+ test_infos, extra_args)
# If multiple devices in test config, automatically append
# --replicate-parent-setup and --multi-device-count
@@ -593,14 +640,16 @@
args_to_add.append('--replicate-parent-setup')
args_to_add.append('--multi-device-count')
args_to_add.append(str(device_count))
-
- # TODO(b/122889707) Remove this after finding the root cause.
- env_serial = os.environ.get(constants.ANDROID_SERIAL)
- # Use the env variable ANDROID_SERIAL if it's set by user but only when
- # the target tests are not deviceless tests.
- if env_serial and '--serial' not in args_to_add and '-n' not in args_to_add:
- args_to_add.append("--serial")
- args_to_add.append(env_serial)
+ os.environ.pop(constants.ANDROID_SERIAL, None)
+ else:
+ # TODO(b/122889707) Remove this after finding the root cause.
+ env_serial = os.environ.get(constants.ANDROID_SERIAL)
+ # Use the env variable ANDROID_SERIAL if it's set by user but only
+ # when the target tests are not deviceless tests.
+ if (env_serial and '--serial' not in args_to_add
+ and '-n' not in args_to_add):
+ args_to_add.append("--serial")
+ args_to_add.append(env_serial)
test_args.extend(args_to_add)
if args_not_supported:
@@ -747,7 +796,12 @@
# both --module and --include-filter to TF, only test by --module will
# be run. Make a check first, only use --module if all tests are all
# parameter auto enabled.
- use_module_arg = self._is_all_tests_parameter_auto_enabled(test_infos)
+ # Only auto-enable the parameter if there's only one test.
+ # TODO: (b/228433541) Remove the limitation after the root cause fixed.
+ use_module_arg = False
+ if len(test_infos) <= 1:
+ use_module_arg = self._is_all_tests_parameter_auto_enabled(
+ test_infos)
for info in test_infos:
# Integration test exists in TF's jar, so it must have the option
@@ -813,8 +867,10 @@
Returns: A string of tradefed template options.
"""
tf_templates = extra_args.get(constants.TF_TEMPLATE, [])
+ tf_template_keys = [i.split('=')[0] for i in tf_templates]
for info in test_infos:
- if info.aggregate_metrics_result:
+ if (info.aggregate_metrics_result
+ and 'metric_post_processor' not in tf_template_keys):
template_key = 'metric_post_processor'
template_value = (
'google/template/postprocessors/metric-file-aggregate')
@@ -906,6 +962,48 @@
if module_name and device_path:
atest_utils.copy_native_symbols(module_name, device_path)
+ # TODO(b/253641058) remove copying files once mainline module
+ # binaries are stored under testcase directory.
+ def _copy_mainline_module_binary(self, mainline_modules):
+ """Copies mainline module binaries to out/dist/mainline_modules_{arch}
+
+ Copies the mainline module binaries to the location that
+ MainlineModuleHandler in TF expects since there is no way to
+ explicitly tweak the search path.
+
+ Args:
+ mainline_modules: A list of mainline modules.
+ """
+ config = atest_utils.get_android_config()
+ arch = config.get('TARGET_ARCH')
+ dest_dir = atest_utils.DIST_OUT_DIR.joinpath(f'mainline_modules_{arch}')
+ dest_dir.mkdir(parents=True, exist_ok=True)
+
+ for module in mainline_modules:
+ target_module_info = self.module_info.get_module_info(module)
+ installed_paths = target_module_info[constants.MODULE_INSTALLED]
+
+ for installed_path in installed_paths:
+ if not re.search(atest_utils.MAINLINE_MODULES_EXT_RE, installed_path):
+ atest_utils.colorful_print(
+ '%s is not a apk or apex file. Did you run mainline '
+ 'local setup script? Please refer to %s' %
+ (installed_path, MAINLINE_LOCAL_DOC),
+ constants.YELLOW)
+ continue
+ file_name = Path(installed_path).name
+ dest_path = Path(dest_dir).joinpath(file_name)
+ if dest_path.exists():
+ atest_utils.colorful_print(
+ 'Replacing APEX in %s with %s' % (dest_path, installed_path),
+ constants.CYAN)
+ logging.debug(
+ 'deleting the old file: %s and copy a new binary',
+ dest_path)
+ dest_path.unlink()
+ shutil.copyfile(installed_path, dest_path)
+
+ break
def generate_annotation_filter_args(
arg_value: Any, mod_info: module_info.ModuleInfo,
@@ -939,9 +1037,11 @@
return annotation_filter_args
-def extra_args_to_tf_args(mod_info: module_info.ModuleInfo,
- test_infos: List[test_info.TestInfo],
- extra_args: trb.ARGS) -> Tuple[trb.ARGS, trb.ARGS]:
+def extra_args_to_tf_args(
+ mod_info: module_info.ModuleInfo,
+ test_infos: List[test_info.TestInfo],
+ extra_args: Dict[str, Any],
+) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Convert the extra args into atest_tf_test_runner supported args.
Args:
@@ -1027,6 +1127,9 @@
f'include-filter:{arg_value}',
'--test-arg',
'com.android.tradefed.testtype.GTest:native-test-flag:'
+ f'--gtest_filter={arg_value}',
+ '--test-arg',
+ 'com.android.tradefed.testtype.HostGTest:native-test-flag:'
f'--gtest_filter={arg_value}'
],
constants.TEST_TIMEOUT:
@@ -1043,7 +1146,8 @@
'--test-arg',
'com.android.tradefed.testtype.GTest:'
f'native-test-timeout:{arg_value}',
- ]
+ ],
+ constants.COVERAGE: coverage.tf_args,
})
for arg in extra_args:
@@ -1059,13 +1163,15 @@
constants.INVOCATION_ID,
constants.WORKUNIT_ID,
constants.REQUEST_UPLOAD_RESULT,
+ constants.DISABLE_UPLOAD_RESULT,
constants.LOCAL_BUILD_ID,
constants.BUILD_TARGET,
constants.ENABLE_DEVICE_PREPARER,
constants.DRY_RUN,
constants.VERIFY_ENV_VARIABLE,
constants.FLAKES_INFO,
- constants.DISABLE_UPLOAD_RESULT):
+ constants.LD_LIBRARY_PATH,
+ constants.DEVICE_ONLY):
continue
unsupported_args.append(arg)
return supported_args, unsupported_args
diff --git a/atest/test_runners/atest_tf_test_runner_unittest.py b/atest/test_runners/atest_tf_test_runner_unittest.py
index a6d3c88..26709c0 100755
--- a/atest/test_runners/atest_tf_test_runner_unittest.py
+++ b/atest/test_runners/atest_tf_test_runner_unittest.py
@@ -19,6 +19,7 @@
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=too-many-lines
+# pylint: disable=unused-argument
import os
import shlex
@@ -32,16 +33,16 @@
from pathlib import Path
from unittest import mock
-import atest_configs
-import atest_utils
-import constants
-import unittest_constants as uc
-import unittest_utils
+from atest import atest_configs
+from atest import atest_utils
+from atest import constants
+from atest import unittest_constants as uc
+from atest import unittest_utils
-from test_finders import test_finder_utils
-from test_finders import test_info
-from test_runners import event_handler
-from test_runners import atest_tf_test_runner as atf_tr
+from atest.test_finders import test_finder_utils
+from atest.test_finders import test_info
+from atest.test_runners import event_handler
+from atest.test_runners import atest_tf_test_runner as atf_tr
#pylint: disable=protected-access
#pylint: disable=invalid-name
@@ -49,14 +50,15 @@
METRICS_DIR_ARG = '--metrics-folder %s ' % METRICS_DIR
# TODO(147567606): Replace {serial} with {extra_args} for general extra
# arguments testing.
-RUN_CMD_ARGS = ('{metrics}--log-level-display VERBOSE --log-level VERBOSE'
+RUN_CMD_ARGS = ('{metrics}--log-level-display VERBOSE --log-level VERBOSE '
+ '--skip-all-system-status-check=true'
'{device_early_release}{serial}')
LOG_ARGS = atf_tr.AtestTradefedTestRunner._LOG_ARGS.format(
log_root_option_name=constants.LOG_ROOT_OPTION_NAME,
log_ext_option=constants.LOG_SAVER_EXT_OPTION,
log_path=os.path.join(uc.TEST_INFO_DIR, atf_tr.LOG_FOLDER_NAME),
proto_path=os.path.join(uc.TEST_INFO_DIR, constants.ATEST_TEST_RECORD_PROTO))
-RUN_ENV_STR = 'tf_env_var=test'
+RUN_ENV_STR = ''
RUN_CMD = atf_tr.AtestTradefedTestRunner._RUN_CMD.format(
env=RUN_ENV_STR,
exe=atf_tr.AtestTradefedTestRunner.EXECUTABLE,
@@ -192,7 +194,6 @@
#pylint: disable=arguments-differ
@mock.patch.object(atf_tr.AtestTradefedTestRunner, '_get_ld_library_path')
- @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/'})
def setUp(self, mock_get_ld_library_path):
mock_get_ld_library_path.return_value = RUN_ENV_STR
self.tr = atf_tr.AtestTradefedTestRunner(results_dir=uc.TEST_INFO_DIR)
@@ -405,7 +406,7 @@
@mock.patch('os.environ.get', return_value=None)
@mock.patch.object(atf_tr.AtestTradefedTestRunner,
'_generate_metrics_folder')
- @mock.patch('atest_utils.get_result_server_args')
+ @mock.patch('atest.atest_utils.get_result_server_args')
def test_generate_run_commands_without_serial_env(
self, mock_resultargs, mock_mertrics, _, _mock_all):
"""Test generate_run_command method."""
@@ -446,7 +447,7 @@
return_value=False)
@mock.patch('os.environ.get')
@mock.patch.object(atf_tr.AtestTradefedTestRunner, '_generate_metrics_folder')
- @mock.patch('atest_utils.get_result_server_args')
+ @mock.patch('atest.atest_utils.get_result_server_args')
def test_generate_run_commands_with_serial_env(
self, mock_resultargs, mock_mertrics, mock_env, _mock_all):
"""Test generate_run_command method."""
@@ -611,7 +612,7 @@
@mock.patch('os.environ.get', return_value=None)
@mock.patch.object(atf_tr.AtestTradefedTestRunner,
'_generate_metrics_folder')
- @mock.patch('atest_utils.get_result_server_args')
+ @mock.patch('atest.atest_utils.get_result_server_args')
def test_generate_run_commands_collect_tests_only(
self, mock_resultargs, mock_mertrics, _, _mock_is_all):
"""Test generate_run_command method."""
@@ -646,7 +647,7 @@
return_value=False)
@mock.patch('os.environ.get', return_value=None)
@mock.patch.object(atf_tr.AtestTradefedTestRunner, '_generate_metrics_folder')
- @mock.patch('atest_utils.get_result_server_args')
+ @mock.patch('atest.atest_utils.get_result_server_args')
def test_generate_run_commands_with_tf_template(
self, mock_resultargs, mock_mertrics, _, _mock_all):
"""Test generate_run_command method."""
@@ -695,7 +696,7 @@
return_value=False)
@mock.patch('os.environ.get', return_value=None)
@mock.patch.object(atf_tr.AtestTradefedTestRunner, '_generate_metrics_folder')
- @mock.patch('atest_utils.get_result_server_args')
+ @mock.patch('atest.atest_utils.get_result_server_args')
def test_generate_run_commands_with_tf_early_device_release(
self, mock_resultargs, mock_mertrics, _, _mock_all):
"""Test generate_run_command method."""
@@ -738,7 +739,7 @@
@mock.patch('os.environ.get', return_value=None)
@mock.patch.object(atf_tr.AtestTradefedTestRunner,
'_generate_metrics_folder')
- @mock.patch('atest_utils.get_result_server_args')
+ @mock.patch('atest.atest_utils.get_result_server_args')
def test_generate_run_commands_has_instant_app_config(
self, mock_resultargs, mock_mertrics, _, _mock_has_config,
_mock_is_all):
@@ -841,7 +842,7 @@
{constants.CUSTOM_ARGS: [constants.TF_MODULE_PARAMETER]})
self.assertTrue(constants.TF_ENABLE_PARAMETERIZED_MODULES in args)
- @mock.patch('atest_utils.get_prebuilt_sdk_tools_dir')
+ @mock.patch('atest.atest_utils.get_prebuilt_sdk_tools_dir')
@mock.patch.object(atf_tr.AtestTradefedTestRunner,
'_is_missing_exec', return_value=False)
def test_generate_env_vars_aapt_already_in_system_path(
@@ -856,7 +857,7 @@
str(prebuilt_sdk_dir) + ':' in env_vars.get('PATH', ''))
@mock.patch('os.path.exists', return_value=True)
- @mock.patch('atest_utils.get_prebuilt_sdk_tools_dir')
+ @mock.patch('atest.atest_utils.get_prebuilt_sdk_tools_dir')
@mock.patch.object(atf_tr.AtestTradefedTestRunner,
'_is_missing_exec', return_value=True)
def test_generate_env_vars_aapt_not_in_system_path(
@@ -875,7 +876,7 @@
@mock.patch('os.environ.get', return_value=None)
@mock.patch.object(
atf_tr.AtestTradefedTestRunner, '_generate_metrics_folder')
- @mock.patch('atest_utils.get_result_server_args')
+ @mock.patch('atest.atest_utils.get_result_server_args')
def test_generate_run_commands_for_aggregate_metric_result(
self, mock_resultargs, mock_mertrics, _mock_env, _mock_create, _mock_parse, _mock_handle_native):
"""Test generate_run_command method for test need aggregate metric."""
@@ -896,6 +897,38 @@
'metric_post_processor='
'google/template/postprocessors/metric-file-aggregate') > 0)
+ @mock.patch.object(atf_tr.AtestTradefedTestRunner, '_handle_native_tests')
+ @mock.patch.object(atf_tr.AtestTradefedTestRunner, '_parse_extra_args')
+ @mock.patch.object(atf_tr.AtestTradefedTestRunner, '_create_test_args')
+ @mock.patch('os.environ.get', return_value=None)
+ @mock.patch.object(
+ atf_tr.AtestTradefedTestRunner, '_generate_metrics_folder')
+ @mock.patch('atest.atest_utils.get_result_server_args')
+ def test_run_commands_for_aggregate_metric_result_with_manually_input(
+ self, mock_resultargs, mock_mertrics, _mock_env, _mock_create,
+ _mock_parse, _mock_handle_native):
+ """Test generate_run_command method for test need aggregate metric."""
+ mock_resultargs.return_value = []
+ mock_mertrics.return_value = ''
+ _mock_create.return_value = []
+ _mock_parse.return_value = [], []
+ test_info_with_aggregate_metrics = test_info.TestInfo(
+ test_name='perf_test', test_runner='test_runner',
+ build_targets=set())
+ test_info_with_aggregate_metrics.aggregate_metrics_result = True
+
+ run_cmd = self.tr.generate_run_commands(
+ [test_info_with_aggregate_metrics],
+ extra_args={constants.TF_TEMPLATE: ['metric_post_processor=a/b/c']})
+
+ self.assertTrue(
+ str(run_cmd).find(
+ 'metric_post_processor='
+ 'google/template/postprocessors/metric-file-aggregate') < 0)
+
+ self.assertTrue(
+ str(run_cmd).find('metric_post_processor=a/b/c') > 0)
+
@mock.patch.object(atf_tr.AtestTradefedTestRunner,
'_is_all_tests_parameter_auto_enabled',
return_value=False)
diff --git a/atest/test_runners/event_handler.py b/atest/test_runners/event_handler.py
index 77f04c1..56b2ed5 100644
--- a/atest/test_runners/event_handler.py
+++ b/atest/test_runners/event_handler.py
@@ -24,10 +24,10 @@
from collections import deque
from datetime import timedelta
-import atest_execution_info
-import result_reporter
+from atest import atest_execution_info
+from atest import result_reporter
-from test_runners import test_runner_base
+from atest.test_runners import test_runner_base
EVENT_NAMES = {'module_started': 'TEST_MODULE_STARTED',
diff --git a/atest/test_runners/event_handler_unittest.py b/atest/test_runners/event_handler_unittest.py
index f39d75e..45f38e1 100755
--- a/atest/test_runners/event_handler_unittest.py
+++ b/atest/test_runners/event_handler_unittest.py
@@ -23,9 +23,9 @@
from importlib import reload
from unittest import mock
-from test_runners import atest_tf_test_runner as atf_tr
-from test_runners import event_handler as e_h
-from test_runners import test_runner_base
+from atest.test_runners import atest_tf_test_runner as atf_tr
+from atest.test_runners import event_handler as e_h
+from atest.test_runners import test_runner_base
EVENTS_NORMAL = [
diff --git a/atest/test_runners/example_test_runner.py b/atest/test_runners/example_test_runner.py
index 8f2eada..f70a084 100644
--- a/atest/test_runners/example_test_runner.py
+++ b/atest/test_runners/example_test_runner.py
@@ -15,7 +15,10 @@
"""Example test runner class."""
-from test_runners import test_runner_base
+from typing import List
+
+from atest.test_finders import test_info
+from atest.test_runners import test_runner_base
class ExampleTestRunner(test_runner_base.TestRunnerBase):
@@ -36,7 +39,7 @@
"""
run_cmds = self.generate_run_commands(test_infos, extra_args)
for run_cmd in run_cmds:
- super(ExampleTestRunner, self).run(run_cmd)
+ super(ExampleTestRunner, self).run(run_cmd)
# pylint: disable=unnecessary-pass
# Please keep above disable flag to ensure host_env_check is overriden.
@@ -49,9 +52,12 @@
"""
pass
- def get_test_runner_build_reqs(self):
+ def get_test_runner_build_reqs(self, test_infos: List[test_info.TestInfo]):
"""Return the build requirements.
+ Args:
+ test_infos: List of TestInfo.
+
Returns:
Set of build targets.
"""
diff --git a/atest/test_runners/regression_test_runner.py b/atest/test_runners/regression_test_runner.py
index b71634f..c9db3e4 100644
--- a/atest/test_runners/regression_test_runner.py
+++ b/atest/test_runners/regression_test_runner.py
@@ -16,9 +16,12 @@
Regression Detection test runner class.
"""
-import constants
+from typing import List
-from test_runners import test_runner_base
+from atest import constants
+
+from atest.test_finders import test_info
+from atest.test_runners import test_runner_base
class RegressionTestRunner(test_runner_base.TestRunnerBase):
@@ -30,7 +33,7 @@
def __init__(self, results_dir):
"""Init stuff for base class."""
- super(RegressionTestRunner, self).__init__(results_dir)
+ super(RegressionTestRunner, self).__init__(results_dir)
self.run_cmd_dict = {'exe': self.EXECUTABLE,
'args': ''}
@@ -47,8 +50,8 @@
Return code of the process for running tests.
"""
run_cmds = self.generate_run_commands(test_infos, extra_args)
- proc = super(RegressionTestRunner, self).run(run_cmds[0],
- output_to_stdout=True)
+ proc = super(RegressionTestRunner, self).run(run_cmds[0],
+ output_to_stdout=True)
proc.wait()
return proc.returncode
@@ -63,9 +66,12 @@
"""
pass
- def get_test_runner_build_reqs(self):
+ def get_test_runner_build_reqs(self, test_infos: List[test_info.TestInfo]):
"""Return the build requirements.
+ Args:
+ test_infos: List of TestInfo.
+
Returns:
Set of build targets.
"""
diff --git a/atest/test_runners/roboleaf_test_runner.py b/atest/test_runners/roboleaf_test_runner.py
new file mode 100644
index 0000000..bef462a
--- /dev/null
+++ b/atest/test_runners/roboleaf_test_runner.py
@@ -0,0 +1,274 @@
+# Copyright 2023, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Test runner for Roboleaf mode.
+
+This runner is used to run the tests that have been fully converted to Bazel.
+"""
+
+import enum
+import shlex
+import os
+import logging
+import json
+import subprocess
+
+from typing import Any, Dict, List, Set
+
+from atest import atest_utils
+from atest import constants
+from atest import bazel_mode
+from atest import result_reporter
+
+from atest.atest_enum import ExitCode
+from atest.test_finders.test_info import TestInfo
+from atest.test_runners import test_runner_base
+from atest.tools.singleton import Singleton
+
+# Roboleaf maintains allow lists that identify which modules have been
+# fully converted to bazel. Users of atest can use
+# --roboleaf-mode=[PROD/STAGING/DEV] to filter by these allow lists.
+# PROD (default) is the only mode expected to be fully converted and passing.
+_ALLOW_LIST_PROD_PATH = ('/soong/soong_injection/allowlists/'
+ 'mixed_build_prod_allowlist.txt')
+_ALLOW_LIST_STAGING_PATH = ('/soong/soong_injection/allowlists/'
+ 'mixed_build_staging_allowlist.txt')
+_ROBOLEAF_MODULE_MAP_PATH = ('/soong/soong_injection/metrics/'
+ 'converted_modules_path_map.json')
+_ROBOLEAF_BUILD_CMD = 'build/soong/soong_ui.bash'
+
+
+@enum.unique
+class BazelBuildMode(enum.Enum):
+ """Represents different bp2build allow lists to use when running bazel (b)"""
+ OFF = 'off'
+ DEV = 'dev'
+ STAGING = 'staging'
+ PROD = 'prod'
+
+
+class RoboleafModuleMap(metaclass=Singleton):
+ """Roboleaf Module Map Singleton class."""
+
+ def __init__(self,
+ module_map_location: str = ''):
+ self._module_map = _generate_map(module_map_location)
+ self.modules_prod = _read_allow_list(_ALLOW_LIST_PROD_PATH)
+ self.modules_staging = _read_allow_list(_ALLOW_LIST_STAGING_PATH)
+
+ def get_map(self) -> Dict[str, str]:
+ """Return converted module map.
+
+ Returns:
+ A dictionary mapping test names to bazel paths for eligible tests,
+ for example { "test_a": "//platform/test_a" }.
+ """
+ return self._module_map
+
+def _generate_map(module_map_location: str = '') -> Dict[str, str]:
+ """Generate converted module map.
+
+ Args:
+ module_map_location: Path of the module_map_location to check.
+
+ Returns:
+ A dictionary mapping test names to bazel paths for eligible tests,
+ for example { "test_a": "//platform/test_a" }.
+ """
+ if not module_map_location:
+ module_map_location = (
+ atest_utils.get_build_out_dir() + _ROBOLEAF_MODULE_MAP_PATH)
+
+ # TODO(b/274161649): It is possible it could be stale on first run.
+ # Invoking m or b test will check/recreate this file. Bug here is
+ # to determine if we can check staleness without a large time penalty.
+ if not os.path.exists(module_map_location):
+ logging.warning('The roboleaf converted modules file: %s was not '
+ 'found.', module_map_location)
+ # Attempt to generate converted modules file.
+ try:
+ cmd = _generate_bp2build_command()
+ env_vars = os.environ.copy()
+ logging.info(
+ 'Running `bp2build` to generate converted modules file.'
+ '\n%s', ' '.join(cmd))
+ subprocess.check_call(cmd, env=env_vars)
+ except subprocess.CalledProcessError as e:
+ logging.error(e)
+ return {}
+
+ with open(module_map_location, 'r', encoding='utf8') as robo_map:
+ return json.load(robo_map)
+
+def _read_allow_list(allow_list_location: str = '') -> List[str]:
+ """Generate a list of modules based on an allow list file.
+ The expected file format is a text file that has a module name on each line.
+ Lines that start with '#' or '//' are considered comments and skipped.
+
+ Args:
+ allow_list_location: Path of the allow_list file to parse.
+
+ Returns:
+ A list of module names.
+ """
+
+ allow_list_location = (
+ atest_utils.get_build_out_dir() + allow_list_location)
+
+ if not os.path.exists(allow_list_location):
+ logging.error('The roboleaf allow list file: %s was not '
+ 'found.', allow_list_location)
+ return []
+ with open(allow_list_location, encoding='utf-8') as f:
+ allowed = []
+ for module_name in f.read().splitlines():
+ if module_name.startswith('#') or module_name.startswith('//'):
+ continue
+ allowed.append(module_name)
+ return allowed
+
+def _generate_bp2build_command() -> List[str]:
+ """Build command to run bp2build.
+
+ Returns:
+ A list of commands to run bp2build.
+ """
+ soong_ui = (
+ f'{os.environ.get(constants.ANDROID_BUILD_TOP, os.getcwd())}/'
+ f'{_ROBOLEAF_BUILD_CMD}')
+ return [soong_ui, '--make-mode', 'bp2build']
+
+
+class AbortRunException(Exception):
+ """Roboleaf Abort Run Exception Class."""
+
+
+class RoboleafTestRunner(test_runner_base.TestRunnerBase):
+ """Roboleaf Test Runner class."""
+ NAME = 'RoboleafTestRunner'
+ EXECUTABLE = 'b'
+
+ # pylint: disable=unused-argument
+ def generate_run_commands(self,
+ test_infos: Set[Any],
+ extra_args: Dict[str, Any],
+ port: int = None) -> List[str]:
+ """Generate a list of run commands from TestInfos.
+
+ Args:
+ test_infos: A set of TestInfo instances.
+ extra_args: A Dict of extra args to append.
+ port: Optional. An int of the port number to send events to.
+
+ Returns:
+ A list of run commands to run the tests.
+ """
+ target_patterns = ' '.join(
+ self.test_info_target_label(i) for i in test_infos)
+ bazel_args = bazel_mode.parse_args(test_infos, extra_args, None)
+ bazel_args.append('--config=android')
+ bazel_args.append(
+ '--//build/bazel/rules/tradefed:runmode=host_driven_test'
+ )
+ bazel_args_str = ' '.join(shlex.quote(arg) for arg in bazel_args)
+ command = f'{self.EXECUTABLE} test {target_patterns} {bazel_args_str}'
+ results = [command]
+ logging.info("Roboleaf test runner command:\n%s",
+ "\n".join(results))
+ return results
+
+ def test_info_target_label(self, test: TestInfo) -> str:
+ """ Get bazel path of test
+
+ Args:
+ test: An object of TestInfo.
+
+ Returns:
+ The bazel path of the test.
+ """
+ module_map = RoboleafModuleMap().get_map()
+ return f'{module_map[test.test_name]}:{test.test_name}'
+
+ def run_tests(self,
+ test_infos: List[TestInfo],
+ extra_args: Dict[str, Any],
+ reporter: result_reporter.ResultReporter) -> int:
+ """Run the list of test_infos.
+
+ Args:
+ test_infos: List of TestInfo.
+ extra_args: Dict of extra args to add to test run.
+ reporter: An instance of result_reporter.ResultReporter.
+ """
+ reporter.register_unsupported_runner(self.NAME)
+ ret_code = ExitCode.SUCCESS
+ try:
+ run_cmds = self.generate_run_commands(test_infos, extra_args)
+ except AbortRunException as e:
+ atest_utils.colorful_print(f'Stop running test(s): {e}',
+ constants.RED)
+ return ExitCode.ERROR
+ for run_cmd in run_cmds:
+ subproc = self.run(run_cmd, output_to_stdout=True)
+ ret_code |= self.wait_for_subprocess(subproc)
+ return ret_code
+
+ def get_test_runner_build_reqs(
+ self,
+ test_infos: List[TestInfo]) -> Set[str]:
+ return set()
+
+ def host_env_check(self) -> None:
+ """Check that host env has everything we need.
+
+ We actually can assume the host env is fine because we have the same
+ requirements that atest has. Update this to check for android env vars
+ if that changes.
+ """
+
+ def roboleaf_eligible_tests(
+ self,
+ mode: BazelBuildMode,
+ module_names: List[str]) -> Dict[str, TestInfo]:
+ """Filter the given module_names to only ones that are currently
+ fully converted with roboleaf (b test) and then filter further by the
+ given allow list specified in BazelBuildMode.
+
+ Args:
+ mode: A BazelBuildMode value to filter by allow list.
+ module_names: A list of module names to check for roboleaf support.
+
+ Returns:
+ A dictionary keyed by test name and value of Roboleaf TestInfo.
+ """
+ if not module_names:
+ return {}
+
+ mod_map = RoboleafModuleMap()
+ supported_modules = set(filter(
+ lambda m: m in mod_map.get_map(), module_names))
+
+
+ if mode == BazelBuildMode.PROD:
+ supported_modules = set(filter(
+ lambda m: m in supported_modules, mod_map.modules_prod))
+ elif mode == BazelBuildMode.STAGING:
+ supported_modules = set(filter(
+ lambda m: m in supported_modules, mod_map.modules_staging))
+
+ return {
+ module: TestInfo(module, RoboleafTestRunner.NAME, set())
+ for module in supported_modules
+ }
diff --git a/atest/test_runners/roboleaf_test_runner_unittest.py b/atest/test_runners/roboleaf_test_runner_unittest.py
new file mode 100644
index 0000000..75fd597
--- /dev/null
+++ b/atest/test_runners/roboleaf_test_runner_unittest.py
@@ -0,0 +1,267 @@
+#!/usr/bin/env python3
+#
+# Copyright 2023, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittests for roboleaf_test_runner."""
+
+import json
+import unittest
+import subprocess
+import logging
+
+from pathlib import Path
+from unittest import mock
+from pyfakefs import fake_filesystem_unittest
+
+from atest import atest_utils
+from atest import unittest_constants
+from atest.test_finders.test_info import TestInfo
+from atest.test_runners import roboleaf_test_runner
+from atest.test_runners.roboleaf_test_runner import RoboleafTestRunner
+from atest.test_runners.roboleaf_test_runner import RoboleafModuleMap
+
+# TODO(b/274706697): Refactor to remove disable=protected-access
+# pylint: disable=protected-access
+class RoboleafTestRunnerUnittests(fake_filesystem_unittest.TestCase):
+ """Unit tests for roboleaf_test_runner.py"""
+ def setUp(self):
+ self.test_runner = RoboleafTestRunner(results_dir='')
+ self.setUpPyfakefs()
+ out_dir = atest_utils.get_build_out_dir()
+ self.fs.create_file(
+ out_dir+roboleaf_test_runner._ROBOLEAF_MODULE_MAP_PATH,
+ contents="{}")
+ self.fs.create_file(
+ out_dir+roboleaf_test_runner._ALLOW_LIST_PROD_PATH,
+ contents="")
+ self.fs.create_file(
+ out_dir+roboleaf_test_runner._ALLOW_LIST_STAGING_PATH,
+ contents="")
+
+ def tearDown(self):
+ RoboleafModuleMap()._module_map = {}
+ mock.patch.stopall()
+
+ def test_read_allow_list(self):
+ """Test _read_allow_list method"""
+ self.fs.create_file(
+ atest_utils.get_build_out_dir()+"allow_list",
+ contents="""test1\ntest2\n#comment1\n//comment2""")
+
+ self.assertEqual(
+ roboleaf_test_runner._read_allow_list("allow_list"),
+ ['test1','test2'])
+
+ def test_roboleaf_eligible_tests_filtering(self):
+ """Test roboleaf_eligible_tests method when _module_map has entries"""
+ RoboleafModuleMap._instances = {}
+
+ self.setUpPyfakefs()
+ out_dir = atest_utils.get_build_out_dir()
+ self.fs.create_file(
+ out_dir+roboleaf_test_runner._ROBOLEAF_MODULE_MAP_PATH,
+ contents=json.dumps({
+ 'test1': "//a",
+ 'test2': "//a/b",
+ 'test3': "//a/b",
+ }))
+ self.fs.create_file(
+ out_dir+roboleaf_test_runner._ALLOW_LIST_STAGING_PATH,
+ contents="test1\ntest2")
+ self.fs.create_file(
+ out_dir+roboleaf_test_runner._ALLOW_LIST_PROD_PATH,
+ contents="test1")
+ module_names = [
+ 'test1',
+ 'test2',
+ 'test3',
+ 'test4',
+ ]
+
+ eligible_tests = self.test_runner.roboleaf_eligible_tests(
+ roboleaf_test_runner.BazelBuildMode.DEV,
+ module_names)
+
+ self.assertEqual(len(eligible_tests), 3)
+ self.assertEqual(eligible_tests["test1"].test_name, 'test1')
+ self.assertEqual(eligible_tests["test1"].test_runner,
+ RoboleafTestRunner.NAME)
+ self.assertEqual(eligible_tests["test2"].test_name, 'test2')
+ self.assertEqual(eligible_tests["test2"].test_runner,
+ RoboleafTestRunner.NAME)
+ self.assertEqual(eligible_tests["test3"].test_name, 'test3')
+ self.assertEqual(eligible_tests["test3"].test_runner,
+ RoboleafTestRunner.NAME)
+
+ eligible_tests = self.test_runner.roboleaf_eligible_tests(
+ roboleaf_test_runner.BazelBuildMode.STAGING,
+ module_names)
+
+ self.assertEqual(len(eligible_tests), 2)
+ self.assertEqual(eligible_tests["test1"].test_name, 'test1')
+ self.assertEqual(eligible_tests["test1"].test_runner,
+ RoboleafTestRunner.NAME)
+ self.assertEqual(eligible_tests["test2"].test_name, 'test2')
+ self.assertEqual(eligible_tests["test2"].test_runner,
+ RoboleafTestRunner.NAME)
+
+ eligible_tests = self.test_runner.roboleaf_eligible_tests(
+ roboleaf_test_runner.BazelBuildMode.PROD,
+ module_names)
+
+ self.assertEqual(len(eligible_tests), 1)
+ self.assertEqual(eligible_tests["test1"].test_name, 'test1')
+ self.assertEqual(eligible_tests["test1"].test_runner,
+ RoboleafTestRunner.NAME)
+
+ def test_roboleaf_eligible_tests_empty_map(self):
+ """Test roboleaf_eligible_tests method when _module_map is empty"""
+ module_names = [
+ 'test1',
+ 'test2',
+ ]
+ RoboleafModuleMap()._module_map = {}
+
+ eligible_tests = self.test_runner.roboleaf_eligible_tests(
+ roboleaf_test_runner.BazelBuildMode.DEV,
+ module_names)
+ self.assertEqual(eligible_tests, {})
+
+ def test_generate_bp2build_command(self):
+ """Test generate_bp2build method."""
+ cmd = roboleaf_test_runner._generate_bp2build_command()
+
+ self.assertTrue('build/soong/soong_ui.bash --make-mode bp2build' in
+ ' '.join(cmd))
+
+ def test_get_map(self):
+ """Test get_map method."""
+ data = {
+ "test1": "//platform/a",
+ "test2": "//platform/b"
+ }
+ RoboleafModuleMap()._module_map = data
+
+ self.assertEqual(RoboleafModuleMap().get_map(), data)
+
+ @mock.patch.object(subprocess, "check_call")
+ def test_generate_map(self, mock_subprocess):
+ """Test _generate_map method from file."""
+ module_map_location = Path(unittest_constants.TEST_DATA_DIR).joinpath(
+ "roboleaf_testing/converted_modules_path_map.json"
+ )
+ self.fs.create_file(
+ module_map_location,
+ contents=json.dumps({
+ "test1": "//platform/a",
+ "test2": "//platform/b"
+ }))
+
+ data = roboleaf_test_runner._generate_map(module_map_location)
+
+ # Expected to not call a subprocess with the roboleaf bp2build
+ # command since file already exists.
+ self.assertEqual(mock_subprocess.called, False)
+ self.assertEqual(data, {
+ "test1": "//platform/a",
+ "test2": "//platform/b"
+ })
+
+ @mock.patch('builtins.open', mock.mock_open(read_data=json.dumps(
+ {"test3": "//a/b"})))
+ @mock.patch.object(subprocess, "check_call")
+ def test_generate_map_with_command(self, mock_subprocess):
+ """Test that _generate_map runs the bp2build command"""
+ module_map_location = Path(unittest_constants.TEST_DATA_DIR).joinpath(
+ "roboleaf_testing/does_not_exist.json"
+ )
+
+ # Disable expected warning log message "converted modules file was not
+ # found." to reduce noise during tests.
+ logging.disable(logging.WARNING)
+ data = roboleaf_test_runner._generate_map(module_map_location)
+ logging.disable(logging.NOTSET)
+
+ self.assertEqual(mock_subprocess.called, True)
+ self.assertEqual(data, {"test3": "//a/b"})
+
+ def test_info_target_label(self):
+ """Test info_target_label method."""
+ RoboleafModuleMap()._module_map = {
+ "test1": "//a",
+ }
+
+ target_label = self.test_runner.test_info_target_label(
+ TestInfo(
+ "test1",
+ RoboleafTestRunner.NAME,
+ set()),
+ )
+
+ self.assertEqual(target_label, "//a:test1")
+
+ def test_generate_run_commands(self):
+ """Test generate_run_commands method."""
+ RoboleafModuleMap()._module_map = {
+ "test1": "//a",
+ "test2": "//b",
+ }
+ test_infos = (
+ TestInfo(
+ "test1",
+ RoboleafTestRunner.NAME,
+ set()),
+ TestInfo(
+ "test2",
+ RoboleafTestRunner.NAME,
+ set()),
+ )
+
+ cmds = self.test_runner.generate_run_commands(test_infos, extra_args={})
+
+ self.assertEqual(len(cmds), 1)
+ self.assertTrue('b test //a:test1 //b:test2' in cmds[0])
+
+ @mock.patch.object(RoboleafTestRunner, 'run')
+ def test_run_tests(self, mock_run):
+ """Test run_tests method."""
+ RoboleafModuleMap()._module_map = {
+ "test1": "//a",
+ "test2": "//b",
+ }
+ test_infos = (
+ TestInfo(
+ "test1",
+ RoboleafTestRunner.NAME,
+ set()),
+ TestInfo(
+ "test2",
+ RoboleafTestRunner.NAME,
+ set()),
+ )
+ extra_args = {}
+ mock_subproc = mock.Mock()
+ mock_run.return_value = mock_subproc
+ mock_subproc.returncode = 0
+ mock_reporter = mock.Mock()
+
+ result = self.test_runner.run_tests(
+ test_infos, extra_args, mock_reporter)
+
+ self.assertEqual(result, 0)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/atest/test_runners/robolectric_test_runner.py b/atest/test_runners/robolectric_test_runner.py
index 2f750af..2516607 100644
--- a/atest/test_runners/robolectric_test_runner.py
+++ b/atest/test_runners/robolectric_test_runner.py
@@ -30,13 +30,15 @@
from functools import partial
from pathlib import Path
+from typing import List
-import atest_utils
-import constants
+from atest import atest_utils
+from atest import constants
-from atest_enum import ExitCode
-from test_runners import test_runner_base
-from .event_handler import EventHandler
+from atest.atest_enum import ExitCode
+from atest.test_finders import test_info
+from atest.test_runners import test_runner_base
+from atest.test_runners.event_handler import EventHandler
POLL_FREQ_SECS = 0.1
# A pattern to match event like below
@@ -244,13 +246,18 @@
"""
pass
- def get_test_runner_build_reqs(self):
+ def get_test_runner_build_reqs(self, test_infos: List[test_info.TestInfo]):
"""Return the build requirements.
+ Args:
+ test_infos: List of TestInfo.
+
Returns:
Set of build targets.
"""
- return set()
+ build_targets = set()
+ build_targets |= test_runner_base.gather_build_targets(test_infos)
+ return build_targets
# pylint: disable=unused-argument
def generate_run_commands(self, test_infos, extra_args, port=None):
diff --git a/atest/test_runners/robolectric_test_runner_unittest.py b/atest/test_runners/robolectric_test_runner_unittest.py
index 0edd061..4e90699 100755
--- a/atest/test_runners/robolectric_test_runner_unittest.py
+++ b/atest/test_runners/robolectric_test_runner_unittest.py
@@ -19,16 +19,15 @@
# pylint: disable=line-too-long
import json
-import platform
import subprocess
import tempfile
import unittest
from unittest import mock
-from test_finders import test_info
-from test_runners import event_handler
-from test_runners import robolectric_test_runner
+from atest.test_finders import test_info
+from atest.test_runners import event_handler
+from atest.test_runners import robolectric_test_runner
# pylint: disable=protected-access
class RobolectricTestRunnerUnittests(unittest.TestCase):
@@ -67,15 +66,20 @@
"""Test _exec_with_robo_polling method."""
event_name = 'TEST_STARTED'
event_data = {'className':'SomeClass', 'testName':'SomeTestName'}
-
json_event_data = json.dumps(event_data)
- data = '%s %s\n\n' %(event_name, json_event_data)
- event_file = tempfile.NamedTemporaryFile(delete=True)
- subprocess.call("echo '%s' -n >> %s" %(data, event_file.name), shell=True)
- robo_proc = subprocess.Popen("sleep %s" %str(self.polling_time * 2), shell=True)
- self.suite_tr. _exec_with_robo_polling(event_file, robo_proc, mock_pe)
- calls = [mock.call.process_event(event_name, event_data)]
- mock_pe.assert_has_calls(calls)
+ data = f'{event_name} {json_event_data}\n\n'
+ with tempfile.NamedTemporaryFile() as event_file:
+ subprocess.run(f"echo '{data}' -n >> {event_file.name}",
+ shell=True, check=True)
+ robo_proc = subprocess.Popen(
+ f'sleep {str(self.polling_time * 2)}',
+ shell=True
+ )
+
+ self.suite_tr._exec_with_robo_polling(event_file, robo_proc, mock_pe)
+ calls = [mock.call.process_event(event_name, event_data)]
+
+ mock_pe.assert_has_calls(calls)
@mock.patch.object(event_handler.EventHandler, 'process_event')
def test_exec_with_robo_polling_with_partial_info(self, mock_pe):
@@ -83,21 +87,21 @@
event_name = 'TEST_STARTED'
event1 = '{"className":"SomeClass","test'
event2 = 'Name":"SomeTestName"}\n\n'
- data1 = '%s %s'%(event_name, event1)
+ data1 = f'{event_name} {event1}'
data2 = event2
- event_file = tempfile.NamedTemporaryFile(delete=True)
- subprocess.Popen("echo -n '%s' >> %s" %(data1, event_file.name), shell=True)
- robo_proc = subprocess.Popen("echo '%s' >> %s && sleep %s"
- %(data2,
- event_file.name,
- str(self.polling_time*5)),
- shell=True)
- self.suite_tr. _exec_with_robo_polling(event_file, robo_proc, mock_pe)
- calls = [mock.call.process_event(event_name,
- json.loads(event1 + event2))]
- # (b/147569951) subprocessing 'echo' behaves differently between
- # linux/darwin. Ensure it is not called in MacOS.
- if platform.system() == 'Linux':
+ with tempfile.NamedTemporaryFile() as event_file:
+ subprocess.run(f"echo -n '{data1}' >> {event_file.name}",
+ shell=True, check=True)
+ robo_proc = subprocess.Popen(
+ f"echo '{data2}' >> {event_file.name} && "
+ f"sleep {str(self.polling_time * 5)}",
+ shell=True
+ )
+
+ self.suite_tr._exec_with_robo_polling(event_file, robo_proc, mock_pe)
+ calls = [mock.call.process_event(event_name,
+ json.loads(event1 + event2))]
+
mock_pe.assert_has_calls(calls)
@mock.patch.object(event_handler.EventHandler, 'process_event')
diff --git a/atest/test_runners/suite_plan_test_runner.py b/atest/test_runners/suite_plan_test_runner.py
index adcf3b0..30b9bad 100644
--- a/atest/test_runners/suite_plan_test_runner.py
+++ b/atest/test_runners/suite_plan_test_runner.py
@@ -20,14 +20,17 @@
import logging
import os
-import atest_utils
-import constants
+from typing import List
-from atest_enum import ExitCode
-from logstorage import atest_gcp_utils
-from logstorage import logstorage_utils
-from metrics import metrics
-from test_runners import atest_tf_test_runner
+from atest import atest_utils
+from atest import constants
+
+from atest.atest_enum import ExitCode
+from atest.logstorage import atest_gcp_utils
+from atest.logstorage import logstorage_utils
+from atest.metrics import metrics
+from atest.test_finders import test_info
+from atest.test_runners import atest_tf_test_runner
class SuitePlanTestRunner(atest_tf_test_runner.AtestTradefedTestRunner):
"""Suite Plan Test Runner class."""
@@ -42,14 +45,17 @@
'test': '',
'args': ''}
- def get_test_runner_build_reqs(self):
+ def get_test_runner_build_reqs(self, test_infos: List[test_info.TestInfo]):
"""Return the build requirements.
+ Args:
+ test_infos: List of TestInfo.
+
Returns:
Set of build targets.
"""
build_req = set()
- build_req |= super().get_test_runner_build_reqs()
+ build_req |= super().get_test_runner_build_reqs(test_infos)
return build_req
def run_tests(self, test_infos, extra_args, reporter):
diff --git a/atest/test_runners/suite_plan_test_runner_unittest.py b/atest/test_runners/suite_plan_test_runner_unittest.py
index 951a0ea..618470a 100755
--- a/atest/test_runners/suite_plan_test_runner_unittest.py
+++ b/atest/test_runners/suite_plan_test_runner_unittest.py
@@ -22,13 +22,13 @@
from unittest import mock
-import unittest_constants as uc
-import unittest_utils
+from atest import unittest_constants as uc
+from atest import unittest_utils
-from logstorage import atest_gcp_utils
-from logstorage import logstorage_utils
-from test_finders import test_info
-from test_runners import suite_plan_test_runner
+from atest.logstorage import atest_gcp_utils
+from atest.logstorage import logstorage_utils
+from atest.test_finders import test_info
+from atest.test_runners import suite_plan_test_runner
# pylint: disable=protected-access
@@ -42,7 +42,7 @@
def tearDown(self):
mock.patch.stopall()
- @mock.patch('atest_utils.get_result_server_args')
+ @mock.patch('atest.atest_utils.get_result_server_args')
def test_generate_run_commands(self, mock_resultargs):
"""Test _generate_run_command method.
Strategy:
@@ -117,7 +117,7 @@
@mock.patch.object(logstorage_utils, 'BuildClient')
@mock.patch.object(atest_gcp_utils, 'do_upload_flow')
- @mock.patch('atest_utils.get_manifest_branch')
+ @mock.patch('atest.atest_utils.get_manifest_branch')
@mock.patch.object(logstorage_utils.BuildClient, 'update_invocation')
@mock.patch('subprocess.Popen')
@mock.patch.object(suite_plan_test_runner.SuitePlanTestRunner, 'run')
diff --git a/atest/test_runners/test_runner_base.py b/atest/test_runners/test_runner_base.py
index 19bc6e8..cfddfe0 100644
--- a/atest/test_runners/test_runner_base.py
+++ b/atest/test_runners/test_runner_base.py
@@ -28,11 +28,12 @@
import os
from collections import namedtuple
-from typing import Any, Dict
+from typing import Any, Dict, List, Set
-import atest_error
-import atest_utils
-import constants
+from atest import atest_error
+from atest import atest_utils
+from atest import constants
+from atest.test_finders import test_info
OLD_OUTPUT_ENV_VAR = 'ATEST_OLD_OUTPUT'
@@ -48,8 +49,6 @@
IGNORED_STATUS = 'IGNORED'
ERROR_STATUS = 'ERROR'
-ARGS = Dict[str, Any]
-
class TestRunnerBase:
"""Base Test Runner class."""
@@ -66,10 +65,10 @@
raise atest_error.NoTestRunnerExecutable('Class var EXECUTABLE is '
'not defined.')
if kwargs:
- for k, v in kwargs.items():
- if not 'test_infos' in k:
+ for key, value in kwargs.items():
+ if not 'test_infos' in key:
logging.debug('ignoring the following args: %s=%s',
- k, v)
+ key, value)
def run(self, cmd, output_to_stdout=False, env_vars=None):
"""Shell out and execute command.
@@ -188,7 +187,7 @@
"""Checks that host env has met requirements."""
raise NotImplementedError
- def get_test_runner_build_reqs(self):
+ def get_test_runner_build_reqs(self, test_infos: List[test_info.TestInfo]):
"""Returns a list of build targets required by the test runner."""
raise NotImplementedError
@@ -205,3 +204,21 @@
A list of run commands to run the tests.
"""
raise NotImplementedError
+
+
+def gather_build_targets(
+ test_infos: List[test_info.TestInfo]) -> Set[str]:
+ """Gets all build targets for the given tests.
+
+ Args:
+ test_infos: List of TestInfo.
+
+ Returns:
+ Set of build targets.
+ """
+ build_targets = set()
+
+ for t_info in test_infos:
+ build_targets |= t_info.build_targets
+
+ return build_targets
diff --git a/atest/test_runners/vts_tf_test_runner.py b/atest/test_runners/vts_tf_test_runner.py
index eaf48ea..a909b4d 100644
--- a/atest/test_runners/vts_tf_test_runner.py
+++ b/atest/test_runners/vts_tf_test_runner.py
@@ -19,11 +19,14 @@
import copy
import logging
-import atest_utils
-import constants
+from typing import List
-from atest_enum import ExitCode
-from test_runners import atest_tf_test_runner
+from atest import atest_utils
+from atest import constants
+
+from atest.atest_enum import ExitCode
+from atest.test_finders import test_info
+from atest.test_runners import atest_tf_test_runner
class VtsTradefedTestRunner(atest_tf_test_runner.AtestTradefedTestRunner):
"""TradeFed Test Runner class."""
@@ -42,14 +45,17 @@
'test': '',
'args': ''}
- def get_test_runner_build_reqs(self):
+ def get_test_runner_build_reqs(self, test_infos: List[test_info.TestInfo]):
"""Return the build requirements.
+ Args:
+ test_infos: List of TestInfo.
+
Returns:
Set of build targets.
"""
build_req = self._BUILD_REQ
- build_req |= super().get_test_runner_build_reqs()
+ build_req |= super().get_test_runner_build_reqs(test_infos)
return build_req
def run_tests(self, test_infos, extra_args, reporter):
diff --git a/atest/test_runners/vts_tf_test_runner_unittest.py b/atest/test_runners/vts_tf_test_runner_unittest.py
index ea935fc..438da53 100755
--- a/atest/test_runners/vts_tf_test_runner_unittest.py
+++ b/atest/test_runners/vts_tf_test_runner_unittest.py
@@ -22,9 +22,8 @@
from unittest import mock
-import unittest_constants as uc
-
-from test_runners import vts_tf_test_runner
+from atest import unittest_constants as uc
+from atest.test_runners import vts_tf_test_runner
# pylint: disable=protected-access
class VtsTradefedTestRunnerUnittests(unittest.TestCase):
diff --git a/atest/tf_proto/Android.bp b/atest/tf_proto/Android.bp
deleted file mode 100644
index 9c302dd..0000000
--- a/atest/tf_proto/Android.bp
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2020 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This is a copy of the proto from Tradefed at tools/tradefederation/core/proto
-package {
- default_applicable_licenses: ["Android-Apache-2.0"],
-}
-
-python_library_host {
- name: "tradefed-protos-py",
- pkg_path: "atest",
- srcs: ["*.proto"],
- visibility: [
- "//tools/asuite/atest",
- ],
- libs: [
- "libprotobuf-python",
- ],
- proto: {
- include_dirs: ["external/protobuf/src"],
- },
- version: {
- py2: {
- enabled: true,
- },
- py3: {
- enabled: true,
- },
- },
-}
diff --git a/atest/tf_proto/invocation_context.proto b/atest/tf_proto/invocation_context.proto
index bbb8545..fec7499 100644
--- a/atest/tf_proto/invocation_context.proto
+++ b/atest/tf_proto/invocation_context.proto
@@ -20,8 +20,8 @@
option java_package = "com.android.tradefed.invoker.proto";
option java_outer_classname = "InvocationContext";
-import public "tools/asuite/atest/tf_proto/configuration_description.proto";
-import public "tools/asuite/atest/tf_proto/build_info.proto";
+import public "atest/tf_proto/configuration_description.proto";
+import public "atest/tf_proto/build_info.proto";
// Representation of a Tradefed Invocation Context in proto.
message Context {
diff --git a/atest/tf_proto/test_record.proto b/atest/tf_proto/test_record.proto
index ae41ab4..c47de88 100644
--- a/atest/tf_proto/test_record.proto
+++ b/atest/tf_proto/test_record.proto
@@ -17,7 +17,7 @@
import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto";
-import "tools/asuite/atest/tf_proto/metric_measurement.proto";
+import "atest/tf_proto/metric_measurement.proto";
option java_package = "com.android.tradefed.result.proto";
option java_outer_classname = "TestRecordProto";
diff --git a/atest/tools/Android.bp b/atest/tools/Android.bp
deleted file mode 100644
index 1f33e92..0000000
--- a/atest/tools/Android.bp
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2021 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This is a copy of the proto from Tradefed at tools/tradefederation/core/proto
-package {
- default_applicable_licenses: ["Android-Apache-2.0"],
-}
-
-python_library_host {
- name: "metrics-protos",
- pkg_path: "tools",
- srcs: ["asuite/atest/tf_proto/*_pb2.py"],
- proto: {
- include_dirs: ["external/protobuf/src"],
- },
- version: {
- py2: {
- enabled: true,
- },
- py3: {
- enabled: true,
- },
- },
-}
diff --git a/atest/tools/__init__.py b/atest/tools/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/atest/tools/__init__.py
+++ /dev/null
diff --git a/atest/tools/asuite/atest/tf_proto/__init__.py b/atest/tools/asuite/atest/tf_proto/__init__.py
deleted file mode 100755
index e69de29..0000000
--- a/atest/tools/asuite/atest/tf_proto/__init__.py
+++ /dev/null
diff --git a/atest/tools/asuite/atest/tf_proto/metric_measurement_pb2.py b/atest/tools/asuite/atest/tf_proto/metric_measurement_pb2.py
deleted file mode 100644
index 9938102..0000000
--- a/atest/tools/asuite/atest/tf_proto/metric_measurement_pb2.py
+++ /dev/null
@@ -1,373 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: tools/asuite/atest/tf_proto/metric_measurement.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf.internal import enum_type_wrapper
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
- name='tools/asuite/atest/tf_proto/metric_measurement.proto',
- package='tradefed.metric',
- syntax='proto3',
- serialized_options=_b('\n\"com.android.tradefed.metrics.protoB\021MetricMeasurement'),
- serialized_pb=_b('\n4tools/asuite/atest/tf_proto/metric_measurement.proto\x12\x0ftradefed.metric\"\x8f\x02\n\x0cMeasurements\x12\x17\n\rsingle_string\x18\x01 \x01(\tH\x00\x12\x14\n\nsingle_int\x18\x02 \x01(\x03H\x00\x12\x17\n\rsingle_double\x18\x03 \x01(\x01H\x00\x12\x36\n\rstring_values\x18\x04 \x01(\x0b\x32\x1d.tradefed.metric.StringValuesH\x00\x12\x38\n\x0enumeric_values\x18\x05 \x01(\x0b\x32\x1e.tradefed.metric.NumericValuesH\x00\x12\x36\n\rdouble_values\x18\x06 \x01(\x0b\x32\x1d.tradefed.metric.DoubleValuesH\x00\x42\r\n\x0bmeasurement\"$\n\x0cStringValues\x12\x14\n\x0cstring_value\x18\x01 \x03(\t\"&\n\rNumericValues\x12\x15\n\rnumeric_value\x18\x01 \x03(\x03\"$\n\x0c\x44oubleValues\x12\x14\n\x0c\x64ouble_value\x18\x01 \x03(\x01\"\xa8\x01\n\x06Metric\x12\x33\n\x0cmeasurements\x18\x01 \x01(\x0b\x32\x1d.tradefed.metric.Measurements\x12\x0c\n\x04unit\x18\x02 \x01(\t\x12\x32\n\tdirection\x18\x03 \x01(\x0e\x32\x1f.tradefed.metric.Directionality\x12\'\n\x04type\x18\x04 \x01(\x0e\x32\x19.tradefed.metric.DataType*c\n\x0e\x44irectionality\x12\x1e\n\x1a\x44IRECTIONALITY_UNSPECIFIED\x10\x00\x12\r\n\tUP_BETTER\x10\x01\x12\x0f\n\x0b\x44OWN_BETTER\x10\x02\x12\x11\n\rCLOSER_BETTER\x10\x03*\"\n\x08\x44\x61taType\x12\x07\n\x03RAW\x10\x00\x12\r\n\tPROCESSED\x10\x01\x42\x37\n\"com.android.tradefed.metrics.protoB\x11MetricMeasurementb\x06proto3')
-)
-
-_DIRECTIONALITY = _descriptor.EnumDescriptor(
- name='Directionality',
- full_name='tradefed.metric.Directionality',
- filename=None,
- file=DESCRIPTOR,
- values=[
- _descriptor.EnumValueDescriptor(
- name='DIRECTIONALITY_UNSPECIFIED', index=0, number=0,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='UP_BETTER', index=1, number=1,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='DOWN_BETTER', index=2, number=2,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='CLOSER_BETTER', index=3, number=3,
- serialized_options=None,
- type=None),
- ],
- containing_type=None,
- serialized_options=None,
- serialized_start=634,
- serialized_end=733,
-)
-_sym_db.RegisterEnumDescriptor(_DIRECTIONALITY)
-
-Directionality = enum_type_wrapper.EnumTypeWrapper(_DIRECTIONALITY)
-_DATATYPE = _descriptor.EnumDescriptor(
- name='DataType',
- full_name='tradefed.metric.DataType',
- filename=None,
- file=DESCRIPTOR,
- values=[
- _descriptor.EnumValueDescriptor(
- name='RAW', index=0, number=0,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='PROCESSED', index=1, number=1,
- serialized_options=None,
- type=None),
- ],
- containing_type=None,
- serialized_options=None,
- serialized_start=735,
- serialized_end=769,
-)
-_sym_db.RegisterEnumDescriptor(_DATATYPE)
-
-DataType = enum_type_wrapper.EnumTypeWrapper(_DATATYPE)
-DIRECTIONALITY_UNSPECIFIED = 0
-UP_BETTER = 1
-DOWN_BETTER = 2
-CLOSER_BETTER = 3
-RAW = 0
-PROCESSED = 1
-
-
-
-_MEASUREMENTS = _descriptor.Descriptor(
- name='Measurements',
- full_name='tradefed.metric.Measurements',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='single_string', full_name='tradefed.metric.Measurements.single_string', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='single_int', full_name='tradefed.metric.Measurements.single_int', index=1,
- number=2, type=3, cpp_type=2, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='single_double', full_name='tradefed.metric.Measurements.single_double', index=2,
- number=3, type=1, cpp_type=5, label=1,
- has_default_value=False, default_value=float(0),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='string_values', full_name='tradefed.metric.Measurements.string_values', index=3,
- number=4, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='numeric_values', full_name='tradefed.metric.Measurements.numeric_values', index=4,
- number=5, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='double_values', full_name='tradefed.metric.Measurements.double_values', index=5,
- number=6, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- _descriptor.OneofDescriptor(
- name='measurement', full_name='tradefed.metric.Measurements.measurement',
- index=0, containing_type=None, fields=[]),
- ],
- serialized_start=74,
- serialized_end=345,
-)
-
-
-_STRINGVALUES = _descriptor.Descriptor(
- name='StringValues',
- full_name='tradefed.metric.StringValues',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='string_value', full_name='tradefed.metric.StringValues.string_value', index=0,
- number=1, type=9, cpp_type=9, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=347,
- serialized_end=383,
-)
-
-
-_NUMERICVALUES = _descriptor.Descriptor(
- name='NumericValues',
- full_name='tradefed.metric.NumericValues',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='numeric_value', full_name='tradefed.metric.NumericValues.numeric_value', index=0,
- number=1, type=3, cpp_type=2, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=385,
- serialized_end=423,
-)
-
-
-_DOUBLEVALUES = _descriptor.Descriptor(
- name='DoubleValues',
- full_name='tradefed.metric.DoubleValues',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='double_value', full_name='tradefed.metric.DoubleValues.double_value', index=0,
- number=1, type=1, cpp_type=5, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=425,
- serialized_end=461,
-)
-
-
-_METRIC = _descriptor.Descriptor(
- name='Metric',
- full_name='tradefed.metric.Metric',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='measurements', full_name='tradefed.metric.Metric.measurements', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='unit', full_name='tradefed.metric.Metric.unit', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='direction', full_name='tradefed.metric.Metric.direction', index=2,
- number=3, type=14, cpp_type=8, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='type', full_name='tradefed.metric.Metric.type', index=3,
- number=4, type=14, cpp_type=8, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=464,
- serialized_end=632,
-)
-
-_MEASUREMENTS.fields_by_name['string_values'].message_type = _STRINGVALUES
-_MEASUREMENTS.fields_by_name['numeric_values'].message_type = _NUMERICVALUES
-_MEASUREMENTS.fields_by_name['double_values'].message_type = _DOUBLEVALUES
-_MEASUREMENTS.oneofs_by_name['measurement'].fields.append(
- _MEASUREMENTS.fields_by_name['single_string'])
-_MEASUREMENTS.fields_by_name['single_string'].containing_oneof = _MEASUREMENTS.oneofs_by_name['measurement']
-_MEASUREMENTS.oneofs_by_name['measurement'].fields.append(
- _MEASUREMENTS.fields_by_name['single_int'])
-_MEASUREMENTS.fields_by_name['single_int'].containing_oneof = _MEASUREMENTS.oneofs_by_name['measurement']
-_MEASUREMENTS.oneofs_by_name['measurement'].fields.append(
- _MEASUREMENTS.fields_by_name['single_double'])
-_MEASUREMENTS.fields_by_name['single_double'].containing_oneof = _MEASUREMENTS.oneofs_by_name['measurement']
-_MEASUREMENTS.oneofs_by_name['measurement'].fields.append(
- _MEASUREMENTS.fields_by_name['string_values'])
-_MEASUREMENTS.fields_by_name['string_values'].containing_oneof = _MEASUREMENTS.oneofs_by_name['measurement']
-_MEASUREMENTS.oneofs_by_name['measurement'].fields.append(
- _MEASUREMENTS.fields_by_name['numeric_values'])
-_MEASUREMENTS.fields_by_name['numeric_values'].containing_oneof = _MEASUREMENTS.oneofs_by_name['measurement']
-_MEASUREMENTS.oneofs_by_name['measurement'].fields.append(
- _MEASUREMENTS.fields_by_name['double_values'])
-_MEASUREMENTS.fields_by_name['double_values'].containing_oneof = _MEASUREMENTS.oneofs_by_name['measurement']
-_METRIC.fields_by_name['measurements'].message_type = _MEASUREMENTS
-_METRIC.fields_by_name['direction'].enum_type = _DIRECTIONALITY
-_METRIC.fields_by_name['type'].enum_type = _DATATYPE
-DESCRIPTOR.message_types_by_name['Measurements'] = _MEASUREMENTS
-DESCRIPTOR.message_types_by_name['StringValues'] = _STRINGVALUES
-DESCRIPTOR.message_types_by_name['NumericValues'] = _NUMERICVALUES
-DESCRIPTOR.message_types_by_name['DoubleValues'] = _DOUBLEVALUES
-DESCRIPTOR.message_types_by_name['Metric'] = _METRIC
-DESCRIPTOR.enum_types_by_name['Directionality'] = _DIRECTIONALITY
-DESCRIPTOR.enum_types_by_name['DataType'] = _DATATYPE
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-Measurements = _reflection.GeneratedProtocolMessageType('Measurements', (_message.Message,), {
- 'DESCRIPTOR' : _MEASUREMENTS,
- '__module__' : 'tools.asuite.atest.tf_proto.metric_measurement_pb2'
- # @@protoc_insertion_point(class_scope:tradefed.metric.Measurements)
- })
-_sym_db.RegisterMessage(Measurements)
-
-StringValues = _reflection.GeneratedProtocolMessageType('StringValues', (_message.Message,), {
- 'DESCRIPTOR' : _STRINGVALUES,
- '__module__' : 'tools.asuite.atest.tf_proto.metric_measurement_pb2'
- # @@protoc_insertion_point(class_scope:tradefed.metric.StringValues)
- })
-_sym_db.RegisterMessage(StringValues)
-
-NumericValues = _reflection.GeneratedProtocolMessageType('NumericValues', (_message.Message,), {
- 'DESCRIPTOR' : _NUMERICVALUES,
- '__module__' : 'tools.asuite.atest.tf_proto.metric_measurement_pb2'
- # @@protoc_insertion_point(class_scope:tradefed.metric.NumericValues)
- })
-_sym_db.RegisterMessage(NumericValues)
-
-DoubleValues = _reflection.GeneratedProtocolMessageType('DoubleValues', (_message.Message,), {
- 'DESCRIPTOR' : _DOUBLEVALUES,
- '__module__' : 'tools.asuite.atest.tf_proto.metric_measurement_pb2'
- # @@protoc_insertion_point(class_scope:tradefed.metric.DoubleValues)
- })
-_sym_db.RegisterMessage(DoubleValues)
-
-Metric = _reflection.GeneratedProtocolMessageType('Metric', (_message.Message,), {
- 'DESCRIPTOR' : _METRIC,
- '__module__' : 'tools.asuite.atest.tf_proto.metric_measurement_pb2'
- # @@protoc_insertion_point(class_scope:tradefed.metric.Metric)
- })
-_sym_db.RegisterMessage(Metric)
-
-
-DESCRIPTOR._options = None
-# @@protoc_insertion_point(module_scope)
diff --git a/atest/tools/asuite/atest/tf_proto/test_record_pb2.py b/atest/tools/asuite/atest/tf_proto/test_record_pb2.py
deleted file mode 100644
index beb5cf1..0000000
--- a/atest/tools/asuite/atest/tf_proto/test_record_pb2.py
+++ /dev/null
@@ -1,556 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: tools/asuite/atest/tf_proto/test_record.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf.internal import enum_type_wrapper
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
-from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
-from tools.asuite.atest.tf_proto import metric_measurement_pb2 as tools_dot_asuite_dot_atest_dot_tf__proto_dot_metric__measurement__pb2
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
- name='tools/asuite/atest/tf_proto/test_record.proto',
- package='android_test_record',
- syntax='proto3',
- serialized_options=_b('\n!com.android.tradefed.result.protoB\017TestRecordProto'),
- serialized_pb=_b('\n-tools/asuite/atest/tf_proto/test_record.proto\x12\x13\x61ndroid_test_record\x1a\x19google/protobuf/any.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x34tools/asuite/atest/tf_proto/metric_measurement.proto\"\xae\x05\n\nTestRecord\x12\x16\n\x0etest_record_id\x18\x01 \x01(\t\x12\x1d\n\x15parent_test_record_id\x18\x02 \x01(\t\x12\x35\n\x08\x63hildren\x18\x03 \x03(\x0b\x32#.android_test_record.ChildReference\x12\x1d\n\x15num_expected_children\x18\x04 \x01(\x03\x12/\n\x06status\x18\x05 \x01(\x0e\x32\x1f.android_test_record.TestStatus\x12\x32\n\ndebug_info\x18\x06 \x01(\x0b\x32\x1e.android_test_record.DebugInfo\x12.\n\nstart_time\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x41\n\tartifacts\x18\t \x03(\x0b\x32..android_test_record.TestRecord.ArtifactsEntry\x12=\n\x07metrics\x18\n \x03(\x0b\x32,.android_test_record.TestRecord.MetricsEntry\x12)\n\x0b\x64\x65scription\x18\x0b \x01(\x0b\x32\x14.google.protobuf.Any\x12\x12\n\nattempt_id\x18\x0c \x01(\x03\x1a\x46\n\x0e\x41rtifactsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any:\x02\x38\x01\x1aG\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.tradefed.metric.Metric:\x02\x38\x01\"v\n\x0e\x43hildReference\x12\x18\n\x0etest_record_id\x18\x01 \x01(\tH\x00\x12=\n\x12inline_test_record\x18\x02 \x01(\x0b\x32\x1f.android_test_record.TestRecordH\x00\x42\x0b\n\treference\"\xb0\x01\n\tDebugInfo\x12\x15\n\rerror_message\x18\x01 \x01(\t\x12\r\n\x05trace\x18\x02 \x01(\t\x12:\n\x0e\x66\x61ilure_status\x18\x03 \x01(\x0e\x32\".android_test_record.FailureStatus\x12\x41\n\x12\x64\x65\x62ug_info_context\x18\x04 \x01(\x0b\x32%.android_test_record.DebugInfoContext\"\x96\x01\n\x10\x44\x65\x62ugInfoContext\x12\x1a\n\x12\x61\x63tion_in_progress\x18\x01 \x01(\t\x12\x1a\n\x12\x64\x65\x62ug_help_message\x18\n \x01(\t\x12\x12\n\nerror_type\x18\x14 
\x01(\t\x12\x12\n\nerror_name\x18\x1e \x01(\t\x12\x0e\n\x06origin\x18\x1f \x01(\t\x12\x12\n\nerror_code\x18 \x01(\x03*R\n\nTestStatus\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x08\n\x04PASS\x10\x01\x12\x08\n\x04\x46\x41IL\x10\x02\x12\x0b\n\x07IGNORED\x10\x03\x12\x16\n\x12\x41SSUMPTION_FAILURE\x10\x04*\xd4\x01\n\rFailureStatus\x12\t\n\x05UNSET\x10\x00\x12\x10\n\x0cTEST_FAILURE\x10\x01\x12\r\n\tTIMED_OUT\x10\x02\x12\r\n\tCANCELLED\x10\x03\x12\x11\n\rINFRA_FAILURE\x10\n\x12\x1d\n\x19SYSTEM_UNDER_TEST_CRASHED\x10\x14\x12\x10\n\x0cNOT_EXECUTED\x10\x1e\x12\x1a\n\x16LOST_SYSTEM_UNDER_TEST\x10#\x12\x14\n\x10\x44\x45PENDENCY_ISSUE\x10(\x12\x12\n\x0e\x43USTOMER_ISSUE\x10)B4\n!com.android.tradefed.result.protoB\x0fTestRecordProtob\x06proto3')
- ,
- dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,tools_dot_asuite_dot_atest_dot_tf__proto_dot_metric__measurement__pb2.DESCRIPTOR,])
-
-_TESTSTATUS = _descriptor.EnumDescriptor(
- name='TestStatus',
- full_name='android_test_record.TestStatus',
- filename=None,
- file=DESCRIPTOR,
- values=[
- _descriptor.EnumValueDescriptor(
- name='UNKNOWN', index=0, number=0,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='PASS', index=1, number=1,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='FAIL', index=2, number=2,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='IGNORED', index=3, number=3,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='ASSUMPTION_FAILURE', index=4, number=4,
- serialized_options=None,
- type=None),
- ],
- containing_type=None,
- serialized_options=None,
- serialized_start=1325,
- serialized_end=1407,
-)
-_sym_db.RegisterEnumDescriptor(_TESTSTATUS)
-
-TestStatus = enum_type_wrapper.EnumTypeWrapper(_TESTSTATUS)
-_FAILURESTATUS = _descriptor.EnumDescriptor(
- name='FailureStatus',
- full_name='android_test_record.FailureStatus',
- filename=None,
- file=DESCRIPTOR,
- values=[
- _descriptor.EnumValueDescriptor(
- name='UNSET', index=0, number=0,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='TEST_FAILURE', index=1, number=1,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='TIMED_OUT', index=2, number=2,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='CANCELLED', index=3, number=3,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='INFRA_FAILURE', index=4, number=10,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='SYSTEM_UNDER_TEST_CRASHED', index=5, number=20,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='NOT_EXECUTED', index=6, number=30,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='LOST_SYSTEM_UNDER_TEST', index=7, number=35,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='DEPENDENCY_ISSUE', index=8, number=40,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='CUSTOMER_ISSUE', index=9, number=41,
- serialized_options=None,
- type=None),
- ],
- containing_type=None,
- serialized_options=None,
- serialized_start=1410,
- serialized_end=1622,
-)
-_sym_db.RegisterEnumDescriptor(_FAILURESTATUS)
-
-FailureStatus = enum_type_wrapper.EnumTypeWrapper(_FAILURESTATUS)
-UNKNOWN = 0
-PASS = 1
-FAIL = 2
-IGNORED = 3
-ASSUMPTION_FAILURE = 4
-UNSET = 0
-TEST_FAILURE = 1
-TIMED_OUT = 2
-CANCELLED = 3
-INFRA_FAILURE = 10
-SYSTEM_UNDER_TEST_CRASHED = 20
-NOT_EXECUTED = 30
-LOST_SYSTEM_UNDER_TEST = 35
-DEPENDENCY_ISSUE = 40
-CUSTOMER_ISSUE = 41
-
-
-
-_TESTRECORD_ARTIFACTSENTRY = _descriptor.Descriptor(
- name='ArtifactsEntry',
- full_name='android_test_record.TestRecord.ArtifactsEntry',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='key', full_name='android_test_record.TestRecord.ArtifactsEntry.key', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='value', full_name='android_test_record.TestRecord.ArtifactsEntry.value', index=1,
- number=2, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=_b('8\001'),
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=728,
- serialized_end=798,
-)
-
-_TESTRECORD_METRICSENTRY = _descriptor.Descriptor(
- name='MetricsEntry',
- full_name='android_test_record.TestRecord.MetricsEntry',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='key', full_name='android_test_record.TestRecord.MetricsEntry.key', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='value', full_name='android_test_record.TestRecord.MetricsEntry.value', index=1,
- number=2, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=_b('8\001'),
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=800,
- serialized_end=871,
-)
-
-_TESTRECORD = _descriptor.Descriptor(
- name='TestRecord',
- full_name='android_test_record.TestRecord',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='test_record_id', full_name='android_test_record.TestRecord.test_record_id', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='parent_test_record_id', full_name='android_test_record.TestRecord.parent_test_record_id', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='children', full_name='android_test_record.TestRecord.children', index=2,
- number=3, type=11, cpp_type=10, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='num_expected_children', full_name='android_test_record.TestRecord.num_expected_children', index=3,
- number=4, type=3, cpp_type=2, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='status', full_name='android_test_record.TestRecord.status', index=4,
- number=5, type=14, cpp_type=8, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='debug_info', full_name='android_test_record.TestRecord.debug_info', index=5,
- number=6, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='start_time', full_name='android_test_record.TestRecord.start_time', index=6,
- number=7, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='end_time', full_name='android_test_record.TestRecord.end_time', index=7,
- number=8, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='artifacts', full_name='android_test_record.TestRecord.artifacts', index=8,
- number=9, type=11, cpp_type=10, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='metrics', full_name='android_test_record.TestRecord.metrics', index=9,
- number=10, type=11, cpp_type=10, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='description', full_name='android_test_record.TestRecord.description', index=10,
- number=11, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='attempt_id', full_name='android_test_record.TestRecord.attempt_id', index=11,
- number=12, type=3, cpp_type=2, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[_TESTRECORD_ARTIFACTSENTRY, _TESTRECORD_METRICSENTRY, ],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=185,
- serialized_end=871,
-)
-
-
-_CHILDREFERENCE = _descriptor.Descriptor(
- name='ChildReference',
- full_name='android_test_record.ChildReference',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='test_record_id', full_name='android_test_record.ChildReference.test_record_id', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='inline_test_record', full_name='android_test_record.ChildReference.inline_test_record', index=1,
- number=2, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- _descriptor.OneofDescriptor(
- name='reference', full_name='android_test_record.ChildReference.reference',
- index=0, containing_type=None, fields=[]),
- ],
- serialized_start=873,
- serialized_end=991,
-)
-
-
-_DEBUGINFO = _descriptor.Descriptor(
- name='DebugInfo',
- full_name='android_test_record.DebugInfo',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='error_message', full_name='android_test_record.DebugInfo.error_message', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='trace', full_name='android_test_record.DebugInfo.trace', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='failure_status', full_name='android_test_record.DebugInfo.failure_status', index=2,
- number=3, type=14, cpp_type=8, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='debug_info_context', full_name='android_test_record.DebugInfo.debug_info_context', index=3,
- number=4, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=994,
- serialized_end=1170,
-)
-
-
-_DEBUGINFOCONTEXT = _descriptor.Descriptor(
- name='DebugInfoContext',
- full_name='android_test_record.DebugInfoContext',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='action_in_progress', full_name='android_test_record.DebugInfoContext.action_in_progress', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='debug_help_message', full_name='android_test_record.DebugInfoContext.debug_help_message', index=1,
- number=10, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='error_type', full_name='android_test_record.DebugInfoContext.error_type', index=2,
- number=20, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='error_name', full_name='android_test_record.DebugInfoContext.error_name', index=3,
- number=30, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='origin', full_name='android_test_record.DebugInfoContext.origin', index=4,
- number=31, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=_b("").decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='error_code', full_name='android_test_record.DebugInfoContext.error_code', index=5,
- number=32, type=3, cpp_type=2, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1173,
- serialized_end=1323,
-)
-
-_TESTRECORD_ARTIFACTSENTRY.fields_by_name['value'].message_type = google_dot_protobuf_dot_any__pb2._ANY
-_TESTRECORD_ARTIFACTSENTRY.containing_type = _TESTRECORD
-_TESTRECORD_METRICSENTRY.fields_by_name['value'].message_type = tools_dot_asuite_dot_atest_dot_tf__proto_dot_metric__measurement__pb2._METRIC
-_TESTRECORD_METRICSENTRY.containing_type = _TESTRECORD
-_TESTRECORD.fields_by_name['children'].message_type = _CHILDREFERENCE
-_TESTRECORD.fields_by_name['status'].enum_type = _TESTSTATUS
-_TESTRECORD.fields_by_name['debug_info'].message_type = _DEBUGINFO
-_TESTRECORD.fields_by_name['start_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
-_TESTRECORD.fields_by_name['end_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
-_TESTRECORD.fields_by_name['artifacts'].message_type = _TESTRECORD_ARTIFACTSENTRY
-_TESTRECORD.fields_by_name['metrics'].message_type = _TESTRECORD_METRICSENTRY
-_TESTRECORD.fields_by_name['description'].message_type = google_dot_protobuf_dot_any__pb2._ANY
-_CHILDREFERENCE.fields_by_name['inline_test_record'].message_type = _TESTRECORD
-_CHILDREFERENCE.oneofs_by_name['reference'].fields.append(
- _CHILDREFERENCE.fields_by_name['test_record_id'])
-_CHILDREFERENCE.fields_by_name['test_record_id'].containing_oneof = _CHILDREFERENCE.oneofs_by_name['reference']
-_CHILDREFERENCE.oneofs_by_name['reference'].fields.append(
- _CHILDREFERENCE.fields_by_name['inline_test_record'])
-_CHILDREFERENCE.fields_by_name['inline_test_record'].containing_oneof = _CHILDREFERENCE.oneofs_by_name['reference']
-_DEBUGINFO.fields_by_name['failure_status'].enum_type = _FAILURESTATUS
-_DEBUGINFO.fields_by_name['debug_info_context'].message_type = _DEBUGINFOCONTEXT
-DESCRIPTOR.message_types_by_name['TestRecord'] = _TESTRECORD
-DESCRIPTOR.message_types_by_name['ChildReference'] = _CHILDREFERENCE
-DESCRIPTOR.message_types_by_name['DebugInfo'] = _DEBUGINFO
-DESCRIPTOR.message_types_by_name['DebugInfoContext'] = _DEBUGINFOCONTEXT
-DESCRIPTOR.enum_types_by_name['TestStatus'] = _TESTSTATUS
-DESCRIPTOR.enum_types_by_name['FailureStatus'] = _FAILURESTATUS
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-TestRecord = _reflection.GeneratedProtocolMessageType('TestRecord', (_message.Message,), {
-
- 'ArtifactsEntry' : _reflection.GeneratedProtocolMessageType('ArtifactsEntry', (_message.Message,), {
- 'DESCRIPTOR' : _TESTRECORD_ARTIFACTSENTRY,
- '__module__' : 'tools.asuite.atest.tf_proto.test_record_pb2'
- # @@protoc_insertion_point(class_scope:android_test_record.TestRecord.ArtifactsEntry)
- })
- ,
-
- 'MetricsEntry' : _reflection.GeneratedProtocolMessageType('MetricsEntry', (_message.Message,), {
- 'DESCRIPTOR' : _TESTRECORD_METRICSENTRY,
- '__module__' : 'tools.asuite.atest.tf_proto.test_record_pb2'
- # @@protoc_insertion_point(class_scope:android_test_record.TestRecord.MetricsEntry)
- })
- ,
- 'DESCRIPTOR' : _TESTRECORD,
- '__module__' : 'tools.asuite.atest.tf_proto.test_record_pb2'
- # @@protoc_insertion_point(class_scope:android_test_record.TestRecord)
- })
-_sym_db.RegisterMessage(TestRecord)
-_sym_db.RegisterMessage(TestRecord.ArtifactsEntry)
-_sym_db.RegisterMessage(TestRecord.MetricsEntry)
-
-ChildReference = _reflection.GeneratedProtocolMessageType('ChildReference', (_message.Message,), {
- 'DESCRIPTOR' : _CHILDREFERENCE,
- '__module__' : 'tools.asuite.atest.tf_proto.test_record_pb2'
- # @@protoc_insertion_point(class_scope:android_test_record.ChildReference)
- })
-_sym_db.RegisterMessage(ChildReference)
-
-DebugInfo = _reflection.GeneratedProtocolMessageType('DebugInfo', (_message.Message,), {
- 'DESCRIPTOR' : _DEBUGINFO,
- '__module__' : 'tools.asuite.atest.tf_proto.test_record_pb2'
- # @@protoc_insertion_point(class_scope:android_test_record.DebugInfo)
- })
-_sym_db.RegisterMessage(DebugInfo)
-
-DebugInfoContext = _reflection.GeneratedProtocolMessageType('DebugInfoContext', (_message.Message,), {
- 'DESCRIPTOR' : _DEBUGINFOCONTEXT,
- '__module__' : 'tools.asuite.atest.tf_proto.test_record_pb2'
- # @@protoc_insertion_point(class_scope:android_test_record.DebugInfoContext)
- })
-_sym_db.RegisterMessage(DebugInfoContext)
-
-
-DESCRIPTOR._options = None
-_TESTRECORD_ARTIFACTSENTRY._options = None
-_TESTRECORD_METRICSENTRY._options = None
-# @@protoc_insertion_point(module_scope)
diff --git a/atest/tools/atest_tools.py b/atest/tools/atest_tools.py
index cefbad4..d92bd3f 100755
--- a/atest/tools/atest_tools.py
+++ b/atest/tools/atest_tools.py
@@ -23,25 +23,25 @@
import logging
import os
import pickle
+import re
import shutil
import subprocess
import sys
+import tempfile
import time
-import atest_utils as au
-import constants
+from pathlib import Path
-from atest_enum import ExitCode
-from metrics import metrics_utils
+from atest import atest_utils as au
+from atest import constants
-MAC_UPDB_SRC = os.path.join(os.path.dirname(__file__), 'updatedb_darwin.sh')
-MAC_UPDB_DST = os.path.join(os.getenv(constants.ANDROID_HOST_OUT, ''), 'bin')
+from atest.atest_enum import DetectType, ExitCode
+from atest.metrics import metrics, metrics_utils
+
UPDATEDB = 'updatedb'
LOCATE = 'locate'
ACLOUD_DURATION = 'duration'
SEARCH_TOP = os.getenv(constants.ANDROID_BUILD_TOP, '')
-MACOSX = 'Darwin'
-OSNAME = os.uname()[0]
# When adding new index, remember to append constants to below tuple.
INDEXES = (constants.CC_CLASS_INDEX,
constants.CLASS_INDEX,
@@ -73,18 +73,8 @@
'.travis_scripts',
'.tx',
'.vscode']
-
-def _mkdir_when_inexists(dirname):
- if not os.path.isdir(dirname):
- os.makedirs(dirname)
-
-def _install_updatedb():
- """Install a customized updatedb for MacOS and ensure it is executable."""
- _mkdir_when_inexists(MAC_UPDB_DST)
- _mkdir_when_inexists(constants.INDEX_DIR)
- if OSNAME == MACOSX:
- shutil.copy2(MAC_UPDB_SRC, os.path.join(MAC_UPDB_DST, UPDATEDB))
- os.chmod(os.path.join(MAC_UPDB_DST, UPDATEDB), 0o0755)
+PRUNEPATHS = ['prebuilts']
+ACLOUD_REPORT_FILE_RE = re.compile(r'.*--report[_-]file(=|\s+)(?P<report_file>[\w/.]+)')
def _delete_indexes():
"""Delete all available index files."""
@@ -92,6 +82,7 @@
if os.path.isfile(index):
os.remove(index)
+
def get_report_file(results_dir, acloud_args):
"""Get the acloud report file path.
@@ -108,11 +99,12 @@
Returns:
A string path of acloud report file.
"""
- match = constants.ACLOUD_REPORT_FILE_RE.match(acloud_args)
+ match = ACLOUD_REPORT_FILE_RE.match(acloud_args)
if match:
return match.group('report_file')
return os.path.join(results_dir, 'acloud_status.json')
+
def has_command(cmd):
"""Detect if the command is available in PATH.
@@ -124,6 +116,7 @@
"""
return bool(shutil.which(cmd))
+
def run_updatedb(search_root=SEARCH_TOP, output_cache=constants.LOCATE_CACHE,
**kwargs):
"""Run updatedb and generate cache in $ANDROID_HOST_OUT/indexes/plocate.db
@@ -136,37 +129,41 @@
prunenames: A list of dirname that won't be cached(-n).
"""
prunenames = kwargs.pop('prunenames', ' '.join(PRUNENAMES))
- prunepaths = kwargs.pop('prunepaths', os.path.join(search_root, 'out'))
+ _prunepaths = [os.path.join(SEARCH_TOP, p) for p in PRUNEPATHS]
+ _prunepaths.append(str(au.get_build_out_dir()))
+ prunepaths = kwargs.pop('prunepaths', ' '.join(_prunepaths))
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
updatedb_cmd = [UPDATEDB, '-l0']
updatedb_cmd.append('-U%s' % search_root)
updatedb_cmd.append('-n%s' % prunenames)
updatedb_cmd.append('-o%s' % output_cache)
- if OSNAME == MACOSX:
- updatedb_cmd.append('-e%s' % prunepaths)
- else:
- # (b/206866627) /etc/updatedb.conf excludes /mnt from scanning on Linux.
- # Use --prunepaths to override the default configuration.
- updatedb_cmd.append('--prunepaths')
- updatedb_cmd.append(prunepaths)
- # Support scanning bind mounts as well.
- updatedb_cmd.extend(['--prune-bind-mounts', 'no'])
- try:
- _install_updatedb()
- except IOError as e:
- logging.error('Error installing updatedb: %s', e)
+ # (b/206866627) /etc/updatedb.conf excludes /mnt from scanning on Linux.
+ # Use --prunepaths to override the default configuration.
+ updatedb_cmd.append('--prunepaths')
+ updatedb_cmd.append(prunepaths)
+ # Support scanning bind mounts as well.
+ updatedb_cmd.extend(['--prune-bind-mounts', 'no'])
- if not has_command(UPDATEDB):
- return
logging.debug('Running updatedb... ')
try:
full_env_vars = os.environ.copy()
logging.debug('Executing: %s', updatedb_cmd)
- if subprocess.check_call(updatedb_cmd, env=full_env_vars) == 0:
- au.save_md5([constants.LOCATE_CACHE], constants.LOCATE_CACHE_MD5)
+ if not os.path.isdir(constants.INDEX_DIR):
+ os.makedirs(constants.INDEX_DIR)
+ subprocess.run(updatedb_cmd, env=full_env_vars, check=True)
except (KeyboardInterrupt, SystemExit):
logging.error('Process interrupted or failure.')
+ # Delete indexes when plocate.db is locked() or other CalledProcessError.
+ # (b/141588997)
+ except subprocess.CalledProcessError as err:
+ logging.error('Executing %s error.', UPDATEDB)
+ metrics_utils.handle_exc_and_send_exit_event(
+ constants.PLOCATEDB_LOCKED)
+ if err.output:
+ logging.error(err.output)
+ os.remove(output_cache)
+
def _dump_index(dump_file, output, output_re, key, value):
"""Dump indexed data with pickle.
@@ -185,8 +182,9 @@
'Boo': {'/path3/to/Boo.java'}
}
"""
+ temp_file = tempfile.NamedTemporaryFile()
_dict = {}
- with open(dump_file, 'wb') as cache_file:
+ with open(temp_file.name, 'wb') as cache_file:
if isinstance(output, bytes):
output = output.decode()
for entry in output.splitlines():
@@ -197,9 +195,12 @@
try:
pickle.dump(_dict, cache_file, protocol=2)
except IOError:
- os.remove(dump_file)
logging.error('Failed in dumping %s', dump_file)
+ shutil.copy(temp_file.name, dump_file)
+ temp_file.close()
+
+# pylint: disable=anomalous-backslash-in-string
def get_cc_result(locatedb=constants.LOCATE_CACHE, **kwargs):
"""Search all testable cc/cpp and grep TEST(), TEST_F() or TEST_P().
@@ -210,20 +211,17 @@
kwargs: (optional)
cc_class_index: A path string of the CC class index.
"""
- if OSNAME == MACOSX:
- # (b/204398677) suppress stderr when indexing target terminated.
- find_cmd = (r"locate -d {0} '*.cpp' '*.cc' | grep -i test "
- "| xargs egrep -sH '{1}' 2>/dev/null || true")
- else:
- find_cmd = (r"locate -d {0} / | egrep -i '/*.test.*\.(cc|cpp)$' "
- "| xargs egrep -sH '{1}' 2>/dev/null || true")
- find_cc_cmd = find_cmd.format(locatedb, constants.CC_GREP_RE)
+ find_cc_cmd = (
+ f"{LOCATE} -id{locatedb} --regex '/*.test.*\.(cc|cpp)$'"
+ f"| xargs egrep -sH '{constants.CC_GREP_RE}' 2>/dev/null || true")
logging.debug('Probing CC classes:\n %s', find_cc_cmd)
result = subprocess.check_output(find_cc_cmd, shell=True)
cc_class_index = kwargs.pop('cc_class_index', constants.CC_CLASS_INDEX)
au.run_multi_proc(func=_index_cc_classes, args=[result, cc_class_index])
+
+# pylint: disable=anomalous-backslash-in-string
def get_java_result(locatedb=constants.LOCATE_CACHE, **kwargs):
"""Search all testable java/kt and grep package.
@@ -237,12 +235,10 @@
package_index: A path string of the package index.
"""
package_grep_re = r'^\s*package\s+[a-z][[:alnum:]]+[^{]'
- if OSNAME == MACOSX:
- find_cmd = r"locate -d%s '*.java' '*.kt'|grep -i test" % locatedb
- else:
- find_cmd = r"locate -d%s / | egrep -i '/*.test.*\.(java|kt)$'" % locatedb
- # (b/204398677) suppress stderr when indexing target terminated.
- find_java_cmd = find_cmd + '| xargs egrep -sH \'%s\' 2>/dev/null|| true' % package_grep_re
+ find_java_cmd = (
+ f"{LOCATE} -id{locatedb} --regex '/*.test.*\.(java|kt)$' "
+ # (b/204398677) suppress stderr when indexing target terminated.
+ f"| xargs egrep -sH '{package_grep_re}' 2>/dev/null|| true")
logging.debug('Probing Java classes:\n %s', find_java_cmd)
result = subprocess.check_output(find_java_cmd, shell=True)
@@ -253,6 +249,7 @@
au.run_multi_proc(func=_index_qualified_classes, args=[result, qclass_index])
au.run_multi_proc(func=_index_packages, args=[result, package_index])
+
def _index_cc_classes(output, index):
"""Index CC classes.
@@ -271,6 +268,7 @@
output_re=constants.CC_OUTPUT_RE,
key='test_name', value='file_path')
+
def _index_java_classes(output, index):
"""Index Java classes.
The data structure is like:
@@ -288,6 +286,7 @@
output_re=constants.CLASS_OUTPUT_RE,
key='class', value='java_path')
+
def _index_packages(output, index):
"""Index Java packages.
The data structure is like:
@@ -305,6 +304,7 @@
output=output, output_re=constants.PACKAGE_OUTPUT_RE,
key='package', value='java_dir')
+
def _index_qualified_classes(output, index):
"""Index Fully Qualified Java Classes(FQCN).
The data structure is like:
@@ -318,8 +318,9 @@
index: A string path of the index file.
"""
logging.debug('indexing qualified classes.')
+ temp_file = tempfile.NamedTemporaryFile()
_dict = {}
- with open(index, 'wb') as cache_file:
+ with open(temp_file.name, 'wb') as cache_file:
if isinstance(output, bytes):
output = output.decode()
for entry in output.split('\n'):
@@ -331,48 +332,60 @@
pickle.dump(_dict, cache_file, protocol=2)
except (KeyboardInterrupt, SystemExit):
logging.error('Process interrupted or failure.')
- os.remove(index)
except IOError:
logging.error('Failed in dumping %s', index)
+ shutil.copy(temp_file.name, index)
+ temp_file.close()
+
def index_targets(output_cache=constants.LOCATE_CACHE):
"""The entrypoint of indexing targets.
Utilise plocate database to index reference types of CLASS, CC_CLASS,
- PACKAGE and QUALIFIED_CLASS. Testable module for tab completion is also
- generated in this method.
+ PACKAGE and QUALIFIED_CLASS.
+
+ (b/206886222) The checksum and file size of plocate.db may differ even the
+ src is not changed at all; therefore, it will skip indexing when both
+ conditions are fulfilled:
+ - not undergo `repo sync` before running atest.
+ - file numbers recorded in current and previous plocate.db are the same.
Args:
output_cache: A file path of the updatedb cache
(e.g. /path/to/plocate.db).
"""
- if not has_command(LOCATE):
- logging.debug('command %s is unavailable; skip indexing.', LOCATE)
+ unavailable_cmds = [
+ cmd for cmd in [UPDATEDB, LOCATE] if not has_command(cmd)]
+ if unavailable_cmds:
+ logging.debug('command %s is unavailable; skip indexing...',
+ ' '.join(unavailable_cmds))
return
- pre_md5sum = ""
- try:
- # Step 0: generate plocate database prior to indexing targets.
- if os.path.exists(constants.LOCATE_CACHE_MD5):
- pre_md5sum = au.md5sum(constants.LOCATE_CACHE_MD5)
- run_updatedb(SEARCH_TOP, output_cache)
- if pre_md5sum == au.md5sum(constants.LOCATE_CACHE_MD5):
- logging.debug('%s remains the same.', output_cache)
- return
- # Step 1: generate output string for indexing targets when needed.
- logging.debug('Indexing targets... ')
- au.run_multi_proc(func=get_java_result, args=[output_cache])
- au.run_multi_proc(func=get_cc_result, args=[output_cache])
- # Delete indexes when plocate.db is locked() or other CalledProcessError.
- # (b/141588997)
- except subprocess.CalledProcessError as err:
- logging.error('Executing %s error.', UPDATEDB)
- metrics_utils.handle_exc_and_send_exit_event(
- constants.PLOCATEDB_LOCKED)
- if err.output:
- logging.error(err.output)
- _delete_indexes()
-def acloud_create(report_file, args="", no_metrics_notice=True):
+ # Get the amount of indexed files.
+ get_num_cmd = f'{LOCATE} -d{output_cache} --count /'
+ ret, pre_number = subprocess.getstatusoutput(get_num_cmd)
+ if ret != 0:
+ logging.debug('Failed to run %s', get_num_cmd)
+ pre_number = sys.maxsize
+
+ run_updatedb(SEARCH_TOP, output_cache)
+ checksum_file = os.path.join(constants.INDEX_DIR, 'repo_sync.md5')
+ repo_syncd = not au.check_md5(checksum_file, missing_ok=False)
+ if repo_syncd:
+ repo_file = Path(SEARCH_TOP).joinpath(
+ '.repo/.repo_fetchtimes.json')
+ au.run_multi_proc(
+ func=au.save_md5,
+ args=[[repo_file], checksum_file])
+ if not repo_syncd and pre_number == subprocess.getoutput(get_num_cmd):
+ logging.debug('%s remains the same. Ignore indexing', output_cache)
+ return
+ logging.debug('Indexing targets... ')
+ au.run_multi_proc(func=get_java_result, args=[output_cache])
+ au.run_multi_proc(func=get_cc_result, args=[output_cache])
+
+
+def acloud_create(report_file, args, no_metrics_notice=True):
"""Method which runs acloud create with specified args in background.
Args:
@@ -381,8 +394,9 @@
no_metrics_notice: Boolean whether sending data to metrics or not.
"""
notice = constants.NO_METRICS_ARG if no_metrics_notice else ""
- match = constants.ACLOUD_REPORT_FILE_RE.match(args)
- report_file_arg = '--report-file={}'.format(report_file) if not match else ""
+ match = ACLOUD_REPORT_FILE_RE.match(args)
+ report_file_arg = f'--report-file={report_file}' if not match else ""
+
# (b/161759557) Assume yes for acloud create to streamline atest flow.
acloud_cmd = ('acloud create -y {ACLOUD_ARGS} '
'{REPORT_FILE_ARG} '
@@ -398,17 +412,48 @@
acloud_duration = time.time() - start
logging.info('"acloud create" process has completed.')
# Insert acloud create duration into the report file.
- if au.is_valid_json_file(report_file):
+ result = au.load_json_safely(report_file)
+ if result:
+ result[ACLOUD_DURATION] = acloud_duration
try:
- with open(report_file, 'r') as _rfile:
- result = json.load(_rfile)
- result[ACLOUD_DURATION] = acloud_duration
with open(report_file, 'w+') as _wfile:
_wfile.write(json.dumps(result))
except OSError as e:
- logging.error("Failed dumping duration to the report file: %s", str(e))
+ logging.error("Failed dumping duration to the report file: %s",
+ str(e))
-def probe_acloud_status(report_file):
+
+def acloud_create_validator(results_dir, args):
+ """Check lunch'd target before running 'acloud create'.
+
+ Args:
+ results_dir: A string of the results directory.
+ args: An argparse.Namespace object.
+
+ Returns:
+ If the target is valid:
+ A tuple of (multiprocessing.Process,
+ report_file path)
+ else:
+ A tuple of (None, None)
+ """
+ target = os.getenv('TARGET_PRODUCT')
+ if not re.match(r'^(aosp_|)cf_.*', target):
+ au.colorful_print(
+ f'{target} is not in cuttlefish family; will not create any AVD.',
+ constants.RED)
+ return None, None
+ if args.start_avd:
+ args.acloud_create = []
+ acloud_args = ' '.join(args.acloud_create)
+ report_file = get_report_file(results_dir, acloud_args)
+ acloud_proc = au.run_multi_proc(
+ func=acloud_create,
+ args=[report_file, acloud_args, args.no_metrics])
+ return acloud_proc, report_file
+
+
+def probe_acloud_status(report_file, find_build_duration):
"""Method which probes the 'acloud create' result status.
If the report file exists and the status is 'SUCCESS', then the creation is
@@ -416,6 +461,7 @@
Args:
report_file: A path string of acloud report file.
+ find_build_duration: A float of seconds.
Returns:
0: success.
@@ -423,8 +469,8 @@
9: invalid acloud create arguments.
"""
# 1. Created but the status is not 'SUCCESS'
- if os.path.exists(report_file):
- if not au.is_valid_json_file(report_file):
+ if Path(report_file).exists():
+ if not au.load_json_safely(report_file):
return ExitCode.AVD_CREATE_FAILURE
with open(report_file, 'r') as rfile:
result = json.load(rfile)
@@ -433,7 +479,25 @@
logging.info('acloud create successfully!')
# Always fetch the adb of the first created AVD.
adb_port = result.get('data').get('devices')[0].get('adb_port')
- os.environ[constants.ANDROID_SERIAL] = '127.0.0.1:{}'.format(adb_port)
+ is_remote_instance = result.get('command') == 'create_cf'
+ adb_ip = '127.0.0.1' if is_remote_instance else '0.0.0.0'
+ os.environ[constants.ANDROID_SERIAL] = f'{adb_ip}:{adb_port}'
+
+ acloud_duration = get_acloud_duration(report_file)
+ if find_build_duration - acloud_duration >= 0:
+ # find+build took longer, saved acloud create time.
+ logging.debug('Saved acloud create time: %ss.',
+ acloud_duration)
+ metrics.LocalDetectEvent(
+ detect_type=DetectType.ACLOUD_CREATE,
+ result=round(acloud_duration))
+ else:
+ # acloud create took longer, saved find+build time.
+ logging.debug('Saved Find and Build time: %ss.',
+ find_build_duration)
+ metrics.LocalDetectEvent(
+ detect_type=DetectType.FIND_BUILD,
+ result=round(find_build_duration))
return ExitCode.SUCCESS
au.colorful_print(
'acloud create failed. Please check\n{}\nfor detail'.format(
@@ -444,6 +508,7 @@
logging.error('Invalid acloud arguments found!')
return ExitCode.AVD_INVALID_ARGS
+
def get_acloud_duration(report_file):
"""Method which gets the duration of 'acloud create' from a report file.
@@ -453,10 +518,10 @@
Returns:
An float of seconds which acloud create takes.
"""
- if not au.is_valid_json_file(report_file):
+ content = au.load_json_safely(report_file)
+ if not content:
return 0
- with open(report_file, 'r') as rfile:
- return json.load(rfile).get(ACLOUD_DURATION, 0)
+ return content.get(ACLOUD_DURATION, 0)
if __name__ == '__main__':
diff --git a/atest/tools/atest_tools_unittest.py b/atest/tools/atest_tools_unittest.py
index bf0744d..4619c49 100755
--- a/atest/tools/atest_tools_unittest.py
+++ b/atest/tools/atest_tools_unittest.py
@@ -25,11 +25,12 @@
from unittest import mock
-import atest_utils as au
-import unittest_constants as uc
+from atest import atest_utils as au
+from atest import unittest_constants as uc
+from atest import constants
-from atest_enum import ExitCode
-from tools import atest_tools
+from atest.atest_enum import ExitCode
+from atest.tools import atest_tools
SEARCH_ROOT = uc.TEST_DATA_DIR
PRUNEPATH = uc.TEST_CONFIG_DATA_DIR
@@ -39,10 +40,11 @@
class AtestToolsUnittests(unittest.TestCase):
""""Unittest Class for atest_tools.py."""
- @mock.patch('constants.INDEX_DIR', uc.INDEX_DIR)
- @mock.patch('constants.LOCATE_CACHE_MD5', uc.LOCATE_CACHE_MD5)
- @mock.patch('constants.LOCATE_CACHE', uc.LOCATE_CACHE)
- @mock.patch('tools.atest_tools.SEARCH_TOP', uc.TEST_DATA_DIR)
+ # TODO: (b/265245404) Re-write test cases with AAA style.
+ # TODO: (b/242520851) constants.LOCATE_CACHE should be in literal.
+ @mock.patch('atest.constants.INDEX_DIR', uc.INDEX_DIR)
+ @mock.patch('atest.constants.LOCATE_CACHE', uc.LOCATE_CACHE)
+ @mock.patch('atest.tools.atest_tools.SEARCH_TOP', uc.TEST_DATA_DIR)
def test_index_targets(self):
"""Test method index_targets."""
if atest_tools.has_command(UPDATEDB) and atest_tools.has_command(LOCATE):
@@ -51,10 +53,11 @@
prunepaths=PRUNEPATH)
# test_config/ is excluded so that a.xml won't be found.
locate_cmd1 = [LOCATE, '-d', uc.LOCATE_CACHE, '/a.xml']
- # locate always return 0 when not found, therefore check null
- # return if nothing found.
- output = subprocess.check_output(locate_cmd1).decode()
- self.assertEqual(output, '')
+ # locate returns non-zero when target not found; therefore, use run
+ # method and assert stdout only.
+ result = subprocess.run(locate_cmd1, check=False,
+ capture_output=True)
+ self.assertEqual(result.stdout.decode(), '')
# module-info.json can be found in the search_root.
locate_cmd2 = [LOCATE, '-d', uc.LOCATE_CACHE, 'module-info.json']
@@ -128,16 +131,26 @@
def test_probe_acloud_status(self):
"""Test method prob_acloud_status."""
+ duration = 100
success = os.path.join(SEARCH_ROOT, 'acloud', 'create_success.json')
- self.assertEqual(atest_tools.probe_acloud_status(success),
+ self.assertEqual(atest_tools.probe_acloud_status(success, duration),
ExitCode.SUCCESS)
+ self.assertEqual(
+ os.environ[constants.ANDROID_SERIAL], '127.0.0.1:58167')
+
+ success_local_instance = os.path.join(
+ SEARCH_ROOT, 'acloud', 'create_success_local_instance.json')
+ self.assertEqual(atest_tools.probe_acloud_status(success_local_instance,
+ duration),
+ ExitCode.SUCCESS)
+ self.assertEqual(os.environ[constants.ANDROID_SERIAL], '0.0.0.0:6521')
failure = os.path.join(SEARCH_ROOT, 'acloud', 'create_failure.json')
- self.assertEqual(atest_tools.probe_acloud_status(failure),
+ self.assertEqual(atest_tools.probe_acloud_status(failure, duration),
ExitCode.AVD_CREATE_FAILURE)
inexistence = os.path.join(SEARCH_ROOT, 'acloud', 'inexistence.json')
- self.assertEqual(atest_tools.probe_acloud_status(inexistence),
+ self.assertEqual(atest_tools.probe_acloud_status(inexistence, duration),
ExitCode.AVD_INVALID_ARGS)
def test_get_acloud_duration(self):
diff --git a/atest/tools/singleton.py b/atest/tools/singleton.py
new file mode 100644
index 0000000..aace379
--- /dev/null
+++ b/atest/tools/singleton.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+# Copyright 2023 - The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+A metaclass for the singleton pattern.
+
+ Usage:
+ from atest.tools.singleton import Singleton
+
+ class AClass(BaseClass, metaclass=Singleton):
+ pass
+"""
+
+
+class Singleton(type):
+ """A singleton metaclass that returns the same instance when called."""
+ _instances = {}
+
+ def __call__(cls, *args, **kwds):
+ """Initialize a singleton instance."""
+ if cls not in cls._instances:
+ cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwds)
+ return cls._instances[cls]
diff --git a/atest/unittest_constants.py b/atest/unittest_constants.py
index 78807f3..7286b76 100644
--- a/atest/unittest_constants.py
+++ b/atest/unittest_constants.py
@@ -25,10 +25,10 @@
import os
-import constants
+from atest import constants
-from test_finders import test_info
-from test_runners import atest_tf_test_runner as atf_tr
+from atest.test_finders import test_info
+from atest.test_runners import atest_tf_test_runner as atf_tr
ROOT = '/'
MODULE_DIR = 'foo/bar/jank'
@@ -310,6 +310,7 @@
QCLASS_INDEX = os.path.join(INDEX_DIR, 'fqcn.idx')
CC_CLASS_INDEX = os.path.join(INDEX_DIR, 'cc_classes.idx')
PACKAGE_INDEX = os.path.join(INDEX_DIR, 'packages.idx')
+MODULE_INFO_MD5 = os.path.join(INDEX_DIR, 'module-info.md5')
# TF's log dir
TEST_INFO_DIR = '/tmp/atest_run_1510085893_pi_Nbi'
diff --git a/atest/unittest_data/acloud/create_success_local_instance.json b/atest/unittest_data/acloud/create_success_local_instance.json
new file mode 100644
index 0000000..ace5a8a
--- /dev/null
+++ b/atest/unittest_data/acloud/create_success_local_instance.json
@@ -0,0 +1,31 @@
+{
+ "command": "create",
+ "data": {
+ "devices": [
+ {
+ "adb_port": 6521,
+ "instance_name": "local-instance-1",
+ "ip": "0.0.0.0:6521",
+ "logs": [
+ {
+ "path": "/tmp/acloud_cvd_temp/local-instance-1/cuttlefish_runtime/instances/cvd-1/logs/launcher.log",
+ "type": "TEXT"
+ },
+ {
+ "path": "/tmp/acloud_cvd_temp/local-instance-1/cuttlefish_runtime/instances/cvd-1/logs/kernel.log",
+ "type": "KERNEL_LOG"
+ },
+ {
+ "path": "/tmp/acloud_cvd_temp/local-instance-1/cuttlefish_runtime/instances/cvd-1/logs/logcat",
+ "type": "LOGCAT"
+ }
+ ],
+ "webrtc_port": 8444
+ }
+ ]
+ },
+ "error_type": "",
+ "errors": [],
+ "status": "SUCCESS",
+ "duration": 75.8845522403717
+}
diff --git a/atest/unittest_data/cache_root/78ea54ef315f5613f7c11dd1a87f10c7.cache b/atest/unittest_data/cache_root/78ea54ef315f5613f7c11dd1a87f10c7.cache
index 3871882..65b1047 100644
--- a/atest/unittest_data/cache_root/78ea54ef315f5613f7c11dd1a87f10c7.cache
+++ b/atest/unittest_data/cache_root/78ea54ef315f5613f7c11dd1a87f10c7.cache
Binary files differ
diff --git a/atest/unittest_data/cache_root/README.md b/atest/unittest_data/cache_root/README.md
new file mode 100644
index 0000000..4730d86
--- /dev/null
+++ b/atest/unittest_data/cache_root/README.md
@@ -0,0 +1,7 @@
+These cache files are generated by atest. To regenerate them:
+
+78ea54ef315f5613f7c11dd1a87f10c7.cache -> `m atest && atest-dev -c --host hello_world_test`
+
+cd66f9f5ad63b42d0d77a9334de6bb73.cache -> Open test_finders/test_info.py, and in the constructor of TestInfo, add `self.some_new_property = "foo"`. Then run `m atest && atest-dev -c --host hello_world_test`. The result will be under the 78ea... name, but just rename it to the cd66... name.
+
+The new files will be in ~/.atest/info_cache/
diff --git a/atest/unittest_data/cache_root/cd66f9f5ad63b42d0d77a9334de6bb73.cache b/atest/unittest_data/cache_root/cd66f9f5ad63b42d0d77a9334de6bb73.cache
index 451a51e..954fa24 100644
--- a/atest/unittest_data/cache_root/cd66f9f5ad63b42d0d77a9334de6bb73.cache
+++ b/atest/unittest_data/cache_root/cd66f9f5ad63b42d0d77a9334de6bb73.cache
Binary files differ
diff --git a/atest/unittest_data/foo/bar/AmSlam/AndroidManifest.xml b/atest/unittest_data/foo/bar/AmSlam/AndroidManifest.xml
new file mode 100644
index 0000000..d347485
--- /dev/null
+++ b/atest/unittest_data/foo/bar/AmSlam/AndroidManifest.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="utf-8"?>
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:androidprv="http://schemas.android.com/apk/prv/res/android"
+ package="c0m.andr0id.settingS"
+ coreApp="true"
+ android:sharedUserId="android.uid.system">
+ <application>
+ <activity
+ android:name=".wifi.WifiPickerActivity"
+ android:exported="true">
+ <intent-filter android:priority="1">
+ <action android:name="android.net.wifi.PICK_WIFI_NETWORK" />
+ <category android:name="android.intent.category.DEFAULT" />
+ </intent-filter>
+ <meta-data android:name="com.android.settings.PRIMARY_PROFILE_CONTROLLED"
+ android:value="true" />
+ </activity>
+ </application>
+</manifest>
+
diff --git a/atest/unittest_data/foo/bar/AmSlam/test/AndroidManifest.xml b/atest/unittest_data/foo/bar/AmSlam/test/AndroidManifest.xml
new file mode 100644
index 0000000..003fcb7
--- /dev/null
+++ b/atest/unittest_data/foo/bar/AmSlam/test/AndroidManifest.xml
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="com.android.settings.tests.unit">
+
+ <instrumentation android:name="androidx.test.runner.AndroidJUnitRunner"
+ android:targetPackage="c0m.andr0id.settingS"
+ android:label="Settings Test Cases">
+ </instrumentation>
+
+</manifest>
+
diff --git a/atest/unittest_data/module-info.json b/atest/unittest_data/module-info.json
index 0187bbd..59e6e95 100644
--- a/atest/unittest_data/module-info.json
+++ b/atest/unittest_data/module-info.json
@@ -1,5 +1,6 @@
{
"AmSlam": { "class": ["APPS"], "path": ["foo/bar/AmSlam"], "tags": ["tests"], "installed": ["out/target/product/generic/data/app/AmSlam/AmSlam.apk"], "module_name": "AmSlam" },
+ "AmSlamTests": { "class": ["APPS"], "path": ["foo/bar/AmSlam/test"], "tags": ["tests"], "installed": ["out/target/product/generic/data/app/AmSlam/AmSlamTests.apk"], "module_name": "AmSlamTests" },
"CtsJankDeviceTestCases": { "class": ["APPS"], "path": ["foo/bar/jank"], "tags": ["optional"], "installed": ["out/target/product/generic/data/app/CtsJankDeviceTestCases/CtsJankDeviceTestCases.apk"], "module_name": "CtsJankDeviceTestCases" },
"CtsUiDeviceTestCases": { "class": ["APPS"], "path": ["tf/core/CtsUiDeviceTestCases"], "tags": ["optional"], "installed": ["out/target/product/generic/data/app/CtsUiDeviceTestCases/CtsUiDeviceTestCases.apk"], "module_name": "CtsJankDeviceTestCases" },
"VtsTarget": { "class": ["FAKE"], "path": ["foo/bar/jank"], "tags": ["optional"], "installed": ["out/target/product/generic/VtsTarget"], "module_name": "VtsTarget" },
diff --git a/atest/unittest_data/module_bp_java_deps.json b/atest/unittest_data/module_bp_java_deps.json
index 72b1839..fd379b9 100644
--- a/atest/unittest_data/module_bp_java_deps.json
+++ b/atest/unittest_data/module_bp_java_deps.json
@@ -25,5 +25,10 @@
"test_dep_level_2_1": {
},
"test_dep_level_2_2": {
+ },
+ "not_in_module_info": {
+ "dependencies": [
+ "test_dep_level_1_1"
+ ]
}
}
diff --git a/atest/unittest_utils.py b/atest/unittest_utils.py
index 453662e..62219bc 100644
--- a/atest/unittest_utils.py
+++ b/atest/unittest_utils.py
@@ -18,8 +18,8 @@
import os
-import constants
-import unittest_constants as uc
+from atest import constants
+from atest import unittest_constants as uc
def assert_strict_equal(test_class, first, second):
"""Check for strict equality and strict equality of nametuple elements.
diff --git a/atest_normal_mode_integration_test/Android.bp b/atest_normal_mode_integration_test/Android.bp
new file mode 100644
index 0000000..5e09895
--- /dev/null
+++ b/atest_normal_mode_integration_test/Android.bp
@@ -0,0 +1,61 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The below module creates a standalone zip that end-to-end tests can depend
+// on for running the suite. This is a workaround since we can't use csuite.zip
+// which is defined in an external Makefile that Soong can't depend on.
+//
+// Besides listing jars we know the launcher script depends on which is
+// brittle, this is a hack for several reasons. First, we're listing our
+// dependencies in the tools attribute when we should be using the 'srcs'
+// attribute. Second, we're accessing jars using a path relative to a known
+// artifact location instead of using the Soong 'location' feature.
+
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+python_test_host {
+ name: "atest_normal_mode_integration_test",
+ srcs: [
+ "atest_normal_mode_integration_test.py",
+ ],
+ test_config_template: "atest_normal_mode_integration_test.xml",
+ test_suites: [
+ "general-tests",
+ ],
+ test_options: {
+ unit_test: false,
+ },
+ data: ["INTEGRATION_TESTS",
+ ":atest_integration_fake_src",
+ ":atest-py3-prebuilt",]
+}
+
+genrule {
+ name: "atest_integration_fake_src",
+ tools: ["soong_zip"],
+ out: ["atest_integration_fake_src.zip"],
+ cmd:
+ // Create a android-src directory and copy the source code into it.
+ "mkdir $(genDir)/atest_integration_fake_src;" +
+ "cp -r tools/asuite/atest_normal_mode_integration_test/fake_android_src/ " +
+ " $(genDir)/atest_integration_fake_src;" +
+
+ // Pack the android-src directory and clean up the directory.
+ "$(location soong_zip) -o $(out) " +
+ " -C $(genDir)/atest_integration_fake_src " +
+ " -D $(genDir)/atest_integration_fake_src; " +
+ "rm -rf $(genDir)/atest_integration_fake_src",
+}
diff --git a/atest_normal_mode_integration_test/INTEGRATION_TESTS b/atest_normal_mode_integration_test/INTEGRATION_TESTS
new file mode 100644
index 0000000..69bbe62
--- /dev/null
+++ b/atest_normal_mode_integration_test/INTEGRATION_TESTS
@@ -0,0 +1,10 @@
+# TODO (b/121362882): Add deviceless tests when dry-run is ready.
+###[Test Finder: MODULE, Test Runner:AtestTradefedTestRunner]###
+###Purpose: Test with finder: MODULE and runner: AtestTradefedTestRunner###
+#HelloWorldTests
+hello_world_test
+
+
+###[Option verify]###
+--help
+
diff --git a/atest_normal_mode_integration_test/atest_normal_mode_integration_test.py b/atest_normal_mode_integration_test/atest_normal_mode_integration_test.py
new file mode 100755
index 0000000..f8ae02c
--- /dev/null
+++ b/atest_normal_mode_integration_test/atest_normal_mode_integration_test.py
@@ -0,0 +1,223 @@
+#!/usr/bin/env python3
+#
+# Copyright 2022, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ATest Integration Test Class.
+
+The purpose is to prevent potential side-effects from breaking ATest at the
+early stage while landing CLs with potential side-effects.
+
+It forks a subprocess with ATest commands to validate that it can pass all of
+the finding and running logic of the Python code, and wait for TF to exit properly.
+ - When running with ROBOLECTRIC tests, it runs without TF, and will exit
+ the subprocess with the message "All tests passed"
+ - If FAIL, it means something breaks ATest unexpectedly!
+"""
+
+from __future__ import print_function
+
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+import time
+import unittest
+import zipfile
+
+_TEST_RUN_DIR_PREFIX = 'atest_integration_tests_%s_'
+_LOG_FILE = 'integration_tests.log'
+_FAILED_LINE_LIMIT = 50
+_EXIT_TEST_FAILED = 1
+_EXIT_MISSING_ZIP = 2
+
+
+class IntegrationConstants:
+ """ATest Integration Class for constants definition."""
+ FAKE_SRC_ZIP = os.path.join(os.path.dirname(__file__),
+ 'atest_integration_fake_src.zip')
+ FAKE_SRC_ROOT = ''
+ INTEGRATION_TESTS = [
+ os.path.join(os.path.dirname(__file__), 'INTEGRATION_TESTS')]
+
+ def __init__(self):
+ pass
+
+
+class ATestIntegrationTest(unittest.TestCase):
+ """ATest Integration Test Class."""
+ NAME = 'ATestIntegrationTest'
+ EXECUTABLE = os.path.join(os.path.dirname(__file__), 'atest-py3')
+ OPTIONS = ' -cy --no-bazel-mode '
+ EXTRA_ENV = {}
+ _RUN_CMD = '{exe} {options} {test}'
+ _PASSED_CRITERIA = ['will be rescheduled', 'All tests passed']
+
+ def setUp(self):
+ """Set up stuff for testing."""
+ self.full_env_vars = os.environ.copy()
+ if self.EXTRA_ENV:
+ self.full_env_vars.update(self.EXTRA_ENV)
+ self.test_passed = False
+ self.log = []
+
+ def run_test(self, testcase):
+ """Create a subprocess to execute the test command.
+
+ Strategy:
+        Fork a subprocess, wait for TF to exit properly, and log the error
+ if the exit code isn't 0.
+
+ Args:
+ testcase: A string of testcase name.
+ """
+ run_cmd_dict = {'exe': self.EXECUTABLE, 'options': self.OPTIONS,
+ 'test': testcase}
+ run_command = self._RUN_CMD.format(**run_cmd_dict)
+ try:
+ subprocess.check_output(run_command,
+ cwd=self.full_env_vars['ANDROID_BUILD_TOP'],
+ stderr=subprocess.PIPE,
+ env=self.full_env_vars,
+ shell=True)
+ except subprocess.CalledProcessError as e:
+ self.log.append(e.output.decode())
+ return False
+ return True
+
+ def get_failed_log(self):
+ """Get a trimmed failed log.
+
+ Strategy:
+ In order not to show the unnecessary log such as build log,
+ it's better to get a trimmed failed log that contains the
+ most important information.
+
+ Returns:
+ A trimmed failed log.
+ """
+ failed_log = '\n'.join(filter(None, self.log[-_FAILED_LINE_LIMIT:]))
+ return failed_log
+
+
+def create_test_method(testcase, log_path):
+ """Create a test method according to the testcase.
+
+ Args:
+ testcase: A testcase name.
+ log_path: A file path for storing the test result.
+
+ Returns:
+ A created test method, and a test function name.
+ """
+ test_function_name = 'test_%s' % testcase.replace(' ', '_')
+
+ # pylint: disable=missing-docstring
+ def template_test_method(self):
+ self.test_passed = self.run_test(testcase)
+ with open(log_path, 'a', encoding='utf-8') as log_file:
+ log_file.write('\n'.join(self.log))
+ failed_message = f'Running command: {testcase} failed.\n'
+ failed_message += '' if self.test_passed else self.get_failed_log()
+ self.assertTrue(self.test_passed, failed_message)
+ return test_function_name, template_test_method
+
+
+def create_test_run_dir():
+ """Create the test run directory in tmp.
+
+ Returns:
+ A string of the directory path.
+ """
+ utc_epoch_time = int(time.time())
+ prefix = _TEST_RUN_DIR_PREFIX % utc_epoch_time
+ return tempfile.mkdtemp(prefix=prefix)
+
+
+def init_test_env():
+ """Initialize the environment to run the integration test."""
+ # Prepare test environment.
+ if not os.path.isfile(IntegrationConstants.FAKE_SRC_ZIP):
+ print(f'{IntegrationConstants.FAKE_SRC_ZIP} does not exist.')
+ sys.exit(_EXIT_MISSING_ZIP)
+
+ # Extract fake src tree and make soong_ui.bash as executable.
+ IntegrationConstants.FAKE_SRC_ROOT = tempfile.mkdtemp()
+ if os.path.exists(IntegrationConstants.FAKE_SRC_ROOT):
+ shutil.rmtree(IntegrationConstants.FAKE_SRC_ROOT)
+ os.mkdir(IntegrationConstants.FAKE_SRC_ROOT)
+ with zipfile.ZipFile(IntegrationConstants.FAKE_SRC_ZIP, 'r') as zip_ref:
+ print(f'Extract {IntegrationConstants.FAKE_SRC_ZIP} to '
+ f'{IntegrationConstants.FAKE_SRC_ROOT}')
+ zip_ref.extractall(IntegrationConstants.FAKE_SRC_ROOT)
+ IntegrationConstants.FAKE_SRC_ROOT = os.path.join(
+ IntegrationConstants.FAKE_SRC_ROOT, 'fake_android_src')
+ soong_ui = os.path.join(IntegrationConstants.FAKE_SRC_ROOT,
+ 'build/soong/soong_ui.bash')
+ os.chmod(soong_ui, 0o755)
+ os.chdir(IntegrationConstants.FAKE_SRC_ROOT)
+
+ # Copy atest-py3
+ dst = os.path.join(IntegrationConstants.FAKE_SRC_ROOT, 'atest')
+ shutil.copyfile(ATestIntegrationTest.EXECUTABLE, dst)
+ os.chmod(dst, 0o755)
+ ATestIntegrationTest.EXECUTABLE = dst
+
+ # Setup env
+ ATestIntegrationTest.EXTRA_ENV[
+ 'ANDROID_BUILD_TOP'] = IntegrationConstants.FAKE_SRC_ROOT
+ ATestIntegrationTest.EXTRA_ENV['OUT'] = os.path.join(
+ IntegrationConstants.FAKE_SRC_ROOT, 'out')
+ ATestIntegrationTest.EXTRA_ENV[
+ 'ANDROID_HOST_OUT'] = os.path.join(
+ IntegrationConstants.FAKE_SRC_ROOT, 'out/host')
+ ATestIntegrationTest.EXTRA_ENV[
+ 'ANDROID_PRODUCT_OUT'] = os.path.join(
+ IntegrationConstants.FAKE_SRC_ROOT, 'out/target/product/vsoc_x86_64')
+ ATestIntegrationTest.EXTRA_ENV[
+ 'ANDROID_TARGET_OUT_TESTCASES'] = os.path.join(
+ IntegrationConstants.FAKE_SRC_ROOT,
+ 'out/target/product/vsoc_x86_64/testcase')
+ ATestIntegrationTest.EXTRA_ENV['ANDROID_SERIAL'] = ''
+
+
+if __name__ == '__main__':
+ # Init test
+ init_test_env()
+
+ print(f'Running tests with {ATestIntegrationTest.EXECUTABLE}\n')
+ RESULT = None
+ try:
+ LOG_PATH = os.path.join(create_test_run_dir(), _LOG_FILE)
+ for TEST_PLANS in IntegrationConstants.INTEGRATION_TESTS:
+ with open(TEST_PLANS, encoding='utf-8') as test_plans:
+ for test in test_plans:
+                    # Skip the test when the line is empty or starts with '#'.
+ if not test.strip() or test.strip().startswith('#'):
+ continue
+ test_func_name, test_func = create_test_method(
+ test.strip(), LOG_PATH)
+ setattr(ATestIntegrationTest, test_func_name, test_func)
+ SUITE = unittest.TestLoader().loadTestsFromTestCase(
+ ATestIntegrationTest)
+ RESULT = unittest.TextTestRunner(verbosity=2).run(SUITE)
+ finally:
+ shutil.rmtree(IntegrationConstants.FAKE_SRC_ROOT)
+ if RESULT.failures:
+ print('Full test log is saved to %s' % LOG_PATH)
+ sys.exit(_EXIT_TEST_FAILED)
+ else:
+ os.remove(LOG_PATH)
diff --git a/atest-py2/atest_unittests.xml b/atest_normal_mode_integration_test/atest_normal_mode_integration_test.xml
similarity index 70%
rename from atest-py2/atest_unittests.xml
rename to atest_normal_mode_integration_test/atest_normal_mode_integration_test.xml
index 6649026..ea8ef0b 100644
--- a/atest-py2/atest_unittests.xml
+++ b/atest_normal_mode_integration_test/atest_normal_mode_integration_test.xml
@@ -1,20 +1,21 @@
<?xml version="1.0" encoding="utf-8"?>
-<!-- Copyright (C) 2018 The Android Open Source Project
+<!-- Copyright (C) 2022 The Android Open Source Project
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
+
http://www.apache.org/licenses/LICENSE-2.0
+
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
-<configuration description="Config to run atest unittests">
- <option name="test-suite-tag" value="atest_unittests" />
-
+<configuration>
<test class="com.android.tradefed.testtype.python.PythonBinaryHostTest" >
- <option name="par-file-name" value="atest-py2_unittests" />
- <option name="test-timeout" value="2m" />
+ <option name="par-file-name" value="{MODULE}"/>
+ <option name="test-timeout" value="420m" />
</test>
</configuration>
diff --git a/atest_normal_mode_integration_test/fake_android_src/build/soong/soong_ui.bash b/atest_normal_mode_integration_test/fake_android_src/build/soong/soong_ui.bash
new file mode 100755
index 0000000..1fb5213
--- /dev/null
+++ b/atest_normal_mode_integration_test/fake_android_src/build/soong/soong_ui.bash
@@ -0,0 +1,13 @@
+#!/bin/bash -eu
+
+# Generate module-info.json
+mkdir -p $ANDROID_PRODUCT_OUT/
+echo '{
+ "hello_world_test": { "class": ["NATIVE_TESTS"], "path": ["platform_testing/tests/example/native"], "tags": ["optional"], "installed": ["out/host/linux-x86/nativetest64/hello_world_test/hello_world_test", "out/target/product/vsoc_x86_64/data/nativetest64/hello_world_test/hello_world_test"], "compatibility_suites": ["general-tests"], "auto_test_config": [true], "module_name": "hello_world_test", "test_config": ["out/soong/.intermediates/platform_testing/tests/example/native/hello_world_test/android_x86_64_silvermont/hello_world_test.config"], "dependencies": ["libc", "libc++", "libc++demangle", "libclang_rt.builtins", "libdl", "libgtest", "libgtest_main", "libm"], "shared_libs": ["libc", "libc++", "libdl", "libm"], "system_shared_libs": ["libc", "libdl", "libm"], "srcs": [], "srcjars": [], "classes_jar": [], "test_mainline_modules": [], "is_unit_test": "", "data": [], "runtime_dependencies": [], "data_dependencies": [], "supported_variants": ["DEVICE", "HOST"]},
+ "hello_world_test_32": { "class": ["NATIVE_TESTS"], "path": ["platform_testing/tests/example/native"], "tags": ["optional"], "installed": ["out/host/linux-x86/nativetest/hello_world_test/hello_world_test", "out/target/product/vsoc_x86_64/data/nativetest/hello_world_test/hello_world_test"], "compatibility_suites": ["general-tests"], "auto_test_config": [true], "module_name": "hello_world_test", "test_config": ["out/soong/.intermediates/platform_testing/tests/example/native/hello_world_test/android_x86_silvermont/hello_world_test.config"], "dependencies": [], "shared_libs": ["libc", "libc++", "libdl", "libm"], "system_shared_libs": ["libc", "libdl", "libm"], "srcs": [], "srcjars": [], "classes_jar": [], "test_mainline_modules": [], "is_unit_test": "", "data": [], "runtime_dependencies": [], "data_dependencies": [], "supported_variants": ["DEVICE", "HOST"]}
+}' > $ANDROID_PRODUCT_OUT/module-info.json
+
+# Generate deps.json
+mkdir -p $OUT/soong/
+touch $OUT/soong/module_bp_cc_deps.json
+touch $OUT/soong/module_bp_java_deps.json
diff --git a/atest-py2/unittest_data/cc_path_testing/PathTesting.cpp b/atest_normal_mode_integration_test/fake_android_src/platform_testing/tests/example/native/HelloWorldTest.cpp
similarity index 91%
rename from atest-py2/unittest_data/cc_path_testing/PathTesting.cpp
rename to atest_normal_mode_integration_test/fake_android_src/platform_testing/tests/example/native/HelloWorldTest.cpp
index cf29370..25408d8 100644
--- a/atest-py2/unittest_data/cc_path_testing/PathTesting.cpp
+++ b/atest_normal_mode_integration_test/fake_android_src/platform_testing/tests/example/native/HelloWorldTest.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/atest_normal_mode_integration_test/fake_android_src/tools/asuite/atest/test_data/test_commands.json b/atest_normal_mode_integration_test/fake_android_src/tools/asuite/atest/test_data/test_commands.json
new file mode 100644
index 0000000..4283ad0
--- /dev/null
+++ b/atest_normal_mode_integration_test/fake_android_src/tools/asuite/atest/test_data/test_commands.json
@@ -0,0 +1,643 @@
+{
+"AnimatorTest": [
+"--atest-include-filter",
+"--enable-parameterized-modules",
+"--exclude-module-parameters",
+"--exclude-module-parameters",
+"--exclude-module-parameters",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--module",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"--test-arg",
+"CtsAnimationTestCases",
+"CtsAnimationTestCases:android.animation.cts.AnimatorTest",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"com.android.tradefed.testtype.AndroidJUnitTest:exclude-annotation:android.platform.test.annotations.AppModeInstant",
+"instant_app",
+"log_saver=template/log/atest_log_saver",
+"multi_abi",
+"secondary_user",
+"template/atest_local_min",
+"test=atest"
+],
+"CtsAnimationTestCases:AnimatorTest": [
+"--atest-include-filter",
+"--enable-parameterized-modules",
+"--exclude-module-parameters",
+"--exclude-module-parameters",
+"--exclude-module-parameters",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--module",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"--test-arg",
+"CtsAnimationTestCases",
+"CtsAnimationTestCases:android.animation.cts.AnimatorTest",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"com.android.tradefed.testtype.AndroidJUnitTest:exclude-annotation:android.platform.test.annotations.AppModeInstant",
+"instant_app",
+"log_saver=template/log/atest_log_saver",
+"multi_abi",
+"secondary_user",
+"template/atest_local_min",
+"test=atest"
+],
+"CtsSampleDeviceTestCases:SampleDeviceTest#testSharedPreferences": [
+"--atest-include-filter",
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"--test-arg",
+"CtsSampleDeviceTestCases",
+"CtsSampleDeviceTestCases:android.sample.cts.SampleDeviceTest#testSharedPreferences",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"com.android.tradefed.testtype.AndroidJUnitTest:exclude-annotation:android.platform.test.annotations.AppModeInstant",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"CtsSampleDeviceTestCases:android.sample.cts": [
+"--atest-include-filter",
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"--test-arg",
+"CtsSampleDeviceTestCases",
+"CtsSampleDeviceTestCases:android.sample.cts",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"com.android.tradefed.testtype.AndroidJUnitTest:exclude-annotation:android.platform.test.annotations.AppModeInstant",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"CtsSampleDeviceTestCases:android.sample.cts.SampleDeviceReportLogTest": [
+"--atest-include-filter",
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"--test-arg",
+"CtsSampleDeviceTestCases",
+"CtsSampleDeviceTestCases:android.sample.cts.SampleDeviceReportLogTest",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"com.android.tradefed.testtype.AndroidJUnitTest:exclude-annotation:android.platform.test.annotations.AppModeInstant",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"CtsAnimationTestCases CtsSampleDeviceTestCases": [
+"--include-filter",
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"--test-arg",
+"CtsAnimationTestCases",
+"CtsSampleDeviceTestCases",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"com.android.tradefed.testtype.AndroidJUnitTest:exclude-annotation:android.platform.test.annotations.AppModeInstant",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"CtsWindowManagerDeviceTestCases:android.server.wm.DisplayCutoutTests#testDisplayCutout_default": [
+"--atest-include-filter",
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"CtsWindowManagerDeviceTestCases",
+"CtsWindowManagerDeviceTestCases:android.server.wm.DisplayCutoutTests#testDisplayCutout_default*",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"HelloWorldTests": [
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"HelloWorldTests",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"MixedManagedProfileOwnerTest#testPasswordSufficientInitially": [
+"--atest-include-filter",
+"--enable-parameterized-modules",
+"--exclude-module-parameters",
+"--exclude-module-parameters",
+"--exclude-module-parameters",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--module",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"CtsDevicePolicyManagerTestCases",
+"CtsDevicePolicyManagerTestCases:com.android.cts.devicepolicy.MixedManagedProfileOwnerTest#testPasswordSufficientInitially",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"instant_app",
+"log_saver=template/log/atest_log_saver",
+"multi_abi",
+"secondary_user",
+"template/atest_local_min",
+"test=atest"
+],
+"PerInstance/CameraHidlTest#configureInjectionStreamsAvailableOutputs/0_internal_0": [
+"--atest-include-filter",
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"VERBOSE",
+"VERBOSE",
+"VtsHalCameraProviderV2_4TargetTest",
+"VtsHalCameraProviderV2_4TargetTest:PerInstance/CameraHidlTest.configureInjectionStreamsAvailableOutputs/0_internal_0",
+"atest_tradefed.sh",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"QuickAccessWalletRoboTests": [
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"QuickAccessWalletRoboTests",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"RpcWire": [
+"--atest-include-filter",
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"binderRpcWireProtocolTest",
+"binderRpcWireProtocolTest:RpcWire.*",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"RpcWire#CurrentVersion,ReleaseBranchHasFrozenRpcWireProtocol,IfNotExperimentalCodeHasNoExperimentalFeatures": [
+"--atest-include-filter",
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"binderRpcWireProtocolTest",
+"binderRpcWireProtocolTest:RpcWire.CurrentVersion:RpcWire.IfNotExperimentalCodeHasNoExperimentalFeatures:RpcWire.ReleaseBranchHasFrozenRpcWireProtocol",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"VtsHalCameraProviderV2_4TargetTest:PerInstance/CameraHidlTest#configureInjectionStreamsAvailableOutputs/0_internal_0": [
+"--atest-include-filter",
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"VERBOSE",
+"VERBOSE",
+"VtsHalCameraProviderV2_4TargetTest",
+"VtsHalCameraProviderV2_4TargetTest:PerInstance/CameraHidlTest.configureInjectionStreamsAvailableOutputs/0_internal_0",
+"atest_tradefed.sh",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"android.animation.cts": [
+"--atest-include-filter",
+"--enable-parameterized-modules",
+"--exclude-module-parameters",
+"--exclude-module-parameters",
+"--exclude-module-parameters",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--module",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"--test-arg",
+"CtsAnimationTestCases",
+"CtsAnimationTestCases:android.animation.cts",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"com.android.tradefed.testtype.AndroidJUnitTest:exclude-annotation:android.platform.test.annotations.AppModeInstant",
+"instant_app",
+"log_saver=template/log/atest_log_saver",
+"multi_abi",
+"secondary_user",
+"template/atest_local_min",
+"test=atest"
+],
+"android.os.cts.CompanionDeviceManagerTest": [
+"--atest-include-filter",
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"--test-arg",
+"CtsOsTestCases",
+"CtsOsTestCases:android.os.cts.CompanionDeviceManagerTest",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"com.android.tradefed.testtype.AndroidJUnitTest:exclude-annotation:android.platform.test.annotations.AppModeInstant",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"android.os.cts.CompanionDeviceManagerTest#testIsDeviceAssociatedWithCompanionApproveWifiConnectionsPermission": [
+"--atest-include-filter",
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"--test-arg",
+"CtsOsTestCases",
+"CtsOsTestCases:android.os.cts.CompanionDeviceManagerTest#testIsDeviceAssociatedWithCompanionApproveWifiConnectionsPermission",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"com.android.tradefed.testtype.AndroidJUnitTest:exclude-annotation:android.platform.test.annotations.AppModeInstant",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"android.sample.cts.SampleDeviceReportLogTest": [
+"--atest-include-filter",
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"--test-arg",
+"CtsSampleDeviceTestCases",
+"CtsSampleDeviceTestCases:android.sample.cts.SampleDeviceReportLogTest",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"com.android.tradefed.testtype.AndroidJUnitTest:exclude-annotation:android.platform.test.annotations.AppModeInstant",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"android.sample.cts.SampleDeviceTest#testSharedPreferences": [
+"--atest-include-filter",
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"--test-arg",
+"CtsSampleDeviceTestCases",
+"CtsSampleDeviceTestCases:android.sample.cts.SampleDeviceTest#testSharedPreferences",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"com.android.tradefed.testtype.AndroidJUnitTest:exclude-annotation:android.platform.test.annotations.AppModeInstant",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"cts/tests/framework/base/windowmanager/src/android/server/wm/DisplayCutoutTests.java#testDisplayCutout_default": [
+"--atest-include-filter",
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"CtsWindowManagerDeviceTestCases",
+"CtsWindowManagerDeviceTestCases:android.server.wm.DisplayCutoutTests#testDisplayCutout_default*",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"cts/tests/tests/os/src/android/os/cts/CompanionDeviceManagerTest.kt#testIsDeviceAssociated": [
+"--atest-include-filter",
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"--test-arg",
+"CtsOsTestCases",
+"CtsOsTestCases:android.os.cts.CompanionDeviceManagerTest#testIsDeviceAssociated",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"com.android.tradefed.testtype.AndroidJUnitTest:exclude-annotation:android.platform.test.annotations.AppModeInstant",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"hello_world_test": [
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"hello_world_test",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"native-benchmark": [
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--template:map",
+"--template:map",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"log_saver=template/log/atest_log_saver",
+"native-benchmark",
+"template/atest_local_min",
+"test=atest"
+],
+"packages/apps/QuickAccessWallet/tests/robolectric/src/com/android/systemui/plugin/globalactions/wallet/WalletPluginServiceTest.java": [
+"--atest-include-filter",
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"QuickAccessWalletRoboTests",
+"QuickAccessWalletRoboTests:com.android.systemui.plugin.globalactions.wallet.WalletPluginServiceTest",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"platform_testing/tests/example/native": [
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"hello_world_test",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"platform_testing/tests/example/native/Android.bp": [
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"hello_world_test",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"tools/tradefederation/core/res/config/native-benchmark.xml": [
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--template:map",
+"--template:map",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"log_saver=template/log/atest_log_saver",
+"native-benchmark",
+"template/atest_local_min",
+"test=atest"
+],
+"HOST=True QuickAccessWalletRoboTests": [
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--prioritize-host-config",
+"--skip-host-arch-check",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"-n",
+"QuickAccessWalletRoboTests",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"HOST=True packages/apps/QuickAccessWallet/tests/robolectric/src/com/android/systemui/plugin/globalactions/wallet/WalletPluginServiceTest.java": [
+"--atest-include-filter",
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--prioritize-host-config",
+"--skip-host-arch-check",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"-n",
+"QuickAccessWalletRoboTests",
+"QuickAccessWalletRoboTests:com.android.systemui.plugin.globalactions.wallet.WalletPluginServiceTest",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+],
+"CtsWifiAwareTestCases": [
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--multi-device-count",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--replicate-parent-setup",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"2",
+"CtsWifiAwareTestCases",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"log_saver=template/log/atest_log_saver",
+"template/atest_local_min",
+"test=atest"
+]
+}
\ No newline at end of file
diff --git a/atest_normal_mode_integration_test/fake_android_src/tools/asuite/atest/test_data/test_environ.json b/atest_normal_mode_integration_test/fake_android_src/tools/asuite/atest/test_data/test_environ.json
new file mode 100644
index 0000000..9042591
--- /dev/null
+++ b/atest_normal_mode_integration_test/fake_android_src/tools/asuite/atest/test_data/test_environ.json
@@ -0,0 +1,70 @@
+{
+"AnimatorTest": [
+],
+"QuickAccessWalletRoboTests": [
+],
+"CtsAnimationTestCases:AnimatorTest": [
+],
+"CtsSampleDeviceTestCases:SampleDeviceTest#testSharedPreferences": [
+],
+"CtsSampleDeviceTestCases:android.sample.cts": [
+],
+"CtsSampleDeviceTestCases:android.sample.cts.SampleDeviceReportLogTest": [
+],
+"CtsAnimationTestCases CtsSampleDeviceTestCases": [
+],
+"CtsWindowManagerDeviceTestCases:android.server.wm.DisplayCutoutTests#testDisplayCutout_default": [
+],
+"HelloWorldTests": [
+],
+"android.animation.cts": [
+],
+"cts/tests/framework/base/windowmanager/src/android/server/wm/DisplayCutoutTests.java#testDisplayCutout_default": [
+],
+"hello_world_test": [
+"GCE_METADATA_TIMEOUT=3",
+"ANDROID_JAVA_HOME"
+],
+"native-benchmark": [
+],
+"packages/apps/QuickAccessWallet/tests/robolectric/src/com/android/systemui/plugin/globalactions/wallet/WalletPluginServiceTest.java": [
+],
+"platform_testing/tests/example/native": [
+],
+"platform_testing/tests/example/native/Android.bp": [
+],
+"tools/tradefederation/core/res/config/native-benchmark.xml": [
+],
+"PacketFragmenterTest": [
+],
+"PacketFragmenterTest#test_no_fragment_necessary,test_ble_fragment_necessary": [
+],
+"VtsHalCameraProviderV2_4TargetTest:PerInstance/CameraHidlTest#startStopPreview/0_internal_0": [
+],
+"MixedManagedProfileOwnerTest#testPasswordSufficientInitially": [
+],
+"android.sample.cts.SampleDeviceReportLogTest": [
+],
+"android.sample.cts.SampleDeviceTest#testSharedPreferences": [
+],
+"com.android.server.wm.ScreenDecorWindowTests": [
+],
+"com.android.server.wm.ScreenDecorWindowTests#testMultipleDecors": [
+],
+"android.os.cts.CompanionDeviceManagerTest": [
+],
+"android.os.cts.CompanionDeviceManagerTest#testIsDeviceAssociatedWithCompanionApproveWifiConnectionsPermission": [
+],
+"cts/tests/tests/os/src/android/os/cts/CompanionDeviceManagerTest.kt#testIsDeviceAssociated": [
+],
+"RpcWire": [
+],
+"RpcWire#CurrentVersion,ReleaseBranchHasFrozenRpcWireProtocol,IfNotExperimentalCodeHasNoExperimentalFeatures": [
+],
+"VtsHalCameraProviderV2_4TargetTest:PerInstance/CameraHidlTest#configureInjectionStreamsAvailableOutputs/0_internal_0": [
+],
+"PerInstance/CameraHidlTest#configureInjectionStreamsAvailableOutputs/0_internal_0": [
+],
+"CtsWifiAwareTestCases": [
+]
+}
diff --git a/plugin_lib/Android.bp b/plugin_lib/Android.bp
index adddfd4..d0f187d 100644
--- a/plugin_lib/Android.bp
+++ b/plugin_lib/Android.bp
@@ -19,16 +19,6 @@
python_defaults {
name: "plugin_default",
pkg_path: "plugin_lib",
- version: {
- py2: {
- enabled: false,
- embedded_launcher: false,
- },
- py3: {
- enabled: true,
- embedded_launcher: false,
- },
- },
}
python_library_host {
diff --git a/plugin_lib/__init__.py b/plugin_lib/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/plugin_lib/__init__.py
+++ /dev/null
diff --git a/plugin_lib/deployment_unittest.py b/plugin_lib/deployment_unittest.py
index 116453a..3671d1b 100644
--- a/plugin_lib/deployment_unittest.py
+++ b/plugin_lib/deployment_unittest.py
@@ -22,7 +22,7 @@
import unittest
from unittest import mock
-from deployment import PluginDeployment
+from plugin_lib.deployment import PluginDeployment
# pylint: disable=protected-access
diff --git a/plugin_lib/plugin_run_unittests.py b/plugin_lib/plugin_run_unittests.py
index b88780a..4cff648 100644
--- a/plugin_lib/plugin_run_unittests.py
+++ b/plugin_lib/plugin_run_unittests.py
@@ -35,9 +35,10 @@
A list of strings (the testable module import path).
"""
testable_modules = []
- base_path = os.path.dirname(os.path.realpath(__file__))
+ package = os.path.dirname(os.path.realpath(__file__))
+ base_path = os.path.dirname(package)
- for dirpath, _, files in os.walk(base_path):
+ for dirpath, _, files in os.walk(package):
for _file in files:
if _file.endswith("_unittest.py"):
# Now transform it into a relative import path.
diff --git a/pylintrc b/pylintrc
index 1bbd71f..7a2299f 100644
--- a/pylintrc
+++ b/pylintrc
@@ -8,25 +8,24 @@
# More than 7 variables is reasonable in ModuleData class.
too-many-instance-attributes,
# Atest unittests requires below flags:
- no-self-use,
duplicate-code,
consider-using-f-string
[MASTER]
-init-hook='import sys, os; sys.path.append(os.getcwd() + '/atest')'
+init-hook='import sys, os; sys.path.append(os.getcwd())'
[BASIC]
-# Naming hint for method names.
-method-name-hint=(([a-z][a-z0-9_]{2,50})|(_[a-z0-9_]*))$
-
# Regular expression matching correct method names.
method-rgx=(([a-z][a-z0-9_]{2,50})|(_[a-z0-9_]*))$
# Good variable names which should always be accepted, separated by a comma.
good-names=e, f, i, j
+# Naming style for methods
+method-naming-style=snake_case
+
[DESIGN]
# Maximum number of arguments for function/method.