Snap for 10209341 from d174ec6488aa69b7add86d3c23c0e00cd768e847 to mainline-healthfitness-release

Change-Id: Ia01897fa9c019c773a9e6062f270943470666c76
diff --git a/aidegen/data/AndroidStyle_aidegen.xml b/aidegen/data/AndroidStyle_aidegen.xml
index 51bf5d1..0fcc8d9 100644
--- a/aidegen/data/AndroidStyle_aidegen.xml
+++ b/aidegen/data/AndroidStyle_aidegen.xml
@@ -18,6 +18,8 @@
         <emptyLine />
         <package name="com" withSubpackages="true" static="true" />
         <emptyLine />
+        <package name="dagger" withSubpackages="true" static="true" />
+        <emptyLine />
         <package name="gov" withSubpackages="true" static="true" />
         <emptyLine />
         <package name="junit" withSubpackages="true" static="true" />
@@ -26,6 +28,8 @@
         <emptyLine />
         <package name="org" withSubpackages="true" static="true" />
         <emptyLine />
+        <package name="robolectric" withSubpackages="true" static="true" />
+        <emptyLine />
         <package name="java" withSubpackages="true" static="true" />
         <emptyLine />
         <package name="javax" withSubpackages="true" static="true" />
@@ -44,6 +48,8 @@
         <emptyLine />
         <package name="com" withSubpackages="true" static="false" />
         <emptyLine />
+        <package name="dagger" withSubpackages="true" static="false" />
+        <emptyLine />
         <package name="gov" withSubpackages="true" static="false" />
         <emptyLine />
         <package name="junit" withSubpackages="true" static="false" />
@@ -52,6 +58,8 @@
         <emptyLine />
         <package name="org" withSubpackages="true" static="false" />
         <emptyLine />
+        <package name="robolectric" withSubpackages="true" static="false" />
+        <emptyLine />
         <package name="java" withSubpackages="true" static="false" />
         <emptyLine />
         <package name="javax" withSubpackages="true" static="false" />
@@ -105,6 +113,8 @@
           <emptyLine />
           <package name="com" withSubpackages="true" static="true" />
           <emptyLine />
+          <package name="dagger" withSubpackages="true" static="true" />
+          <emptyLine />
           <package name="gov" withSubpackages="true" static="true" />
           <emptyLine />
           <package name="junit" withSubpackages="true" static="true" />
@@ -113,6 +123,8 @@
           <emptyLine />
           <package name="org" withSubpackages="true" static="true" />
           <emptyLine />
+          <package name="robolectric" withSubpackages="true" static="true" />
+          <emptyLine />
           <package name="java" withSubpackages="true" static="true" />
           <emptyLine />
           <package name="javax" withSubpackages="true" static="true" />
@@ -131,6 +143,8 @@
           <emptyLine />
           <package name="com" withSubpackages="true" static="false" />
           <emptyLine />
+          <package name="dagger" withSubpackages="true" static="false" />
+          <emptyLine />
           <package name="gov" withSubpackages="true" static="false" />
           <emptyLine />
           <package name="junit" withSubpackages="true" static="false" />
@@ -139,6 +153,8 @@
           <emptyLine />
           <package name="org" withSubpackages="true" static="false" />
           <emptyLine />
+          <package name="robolectric" withSubpackages="true" static="false" />
+          <emptyLine />
           <package name="java" withSubpackages="true" static="false" />
           <emptyLine />
           <package name="javax" withSubpackages="true" static="false" />
@@ -373,4 +389,4 @@
       </arrangement>
     </codeStyleSettings>
   </code_scheme>
-</component>
\ No newline at end of file
+</component>
diff --git a/aidegen/lib/module_info_util.py b/aidegen/lib/module_info_util.py
index 8dd30d8..0e4fbee 100644
--- a/aidegen/lib/module_info_util.py
+++ b/aidegen/lib/module_info_util.py
@@ -79,7 +79,7 @@
     """Generate a merged dictionary.
 
     Linked functions:
-        _build_bp_info(module_info, project, verbose, skip_build)
+        _build_bp_info(module_info, project, skip_build)
         _get_soong_build_json_dict()
         _merge_dict(mk_dict, bp_dict)
 
@@ -97,14 +97,14 @@
     skip_build = config.is_skip_build
     main_project = projects[0] if projects else None
     _build_bp_info(
-        module_info, main_project, verbose, skip_build, env_on)
+        module_info, main_project, skip_build, env_on)
     json_path = common_util.get_blueprint_json_path(
         constant.BLUEPRINT_JAVA_JSONFILE_NAME)
     bp_dict = common_util.get_json_dict(json_path)
     return _merge_dict(module_info.name_to_module_info, bp_dict)
 
 
-def _build_bp_info(module_info, main_project=None, verbose=False,
+def _build_bp_info(module_info, main_project=None,
                    skip_build=False, env_on=_BUILD_BP_JSON_ENV_ON):
     """Make nothing to create module_bp_java_deps.json, module_bp_cc_deps.json.
 
@@ -115,7 +115,6 @@
     Args:
         module_info: A ModuleInfo instance contains data of module-info.json.
         main_project: A string of the main project name.
-        verbose: A boolean, if true displays full build output.
         skip_build: A boolean, if true, skip building if
                     get_blueprint_json_path(file_name) file exists, otherwise
                     build it.
@@ -144,7 +143,7 @@
     logging.warning(
         '\nGenerate files:\n %s by atest build method.', files)
     atest_utils.update_build_env(env_on)
-    build_with_on_cmd = atest_utils.build([_TARGET], verbose)
+    build_with_on_cmd = atest_utils.build([_TARGET])
 
     # For Android Rust projects, we need to create a symbolic link to the file
     # out/soong/rust-project.json to launch the rust projects in IDEs.
diff --git a/aidegen/lib/project_info.py b/aidegen/lib/project_info.py
index b71b72f..436baf2 100644
--- a/aidegen/lib/project_info.py
+++ b/aidegen/lib/project_info.py
@@ -607,9 +607,8 @@
     """
     build_cmd = ['-k', '-j']
     build_cmd.extend(list(targets))
-    verbose = True
     atest_utils.update_build_env(_BUILD_BP_JSON_ENV_ON)
-    if not atest_utils.build(build_cmd, verbose):
+    if not atest_utils.build(build_cmd):
         message = ('Build failed!\n{}\nAIDEGen will proceed but dependency '
                    'correctness is not guaranteed if not all targets being '
                    'built successfully.'.format('\n'.join(targets)))
diff --git a/atest/Android.bp b/atest/Android.bp
index 8b9b32b..24af31f 100644
--- a/atest/Android.bp
+++ b/atest/Android.bp
@@ -50,7 +50,6 @@
         "tradefed-protos-py",
         "py-google-api-python-client",
         "py-oauth2client",
-        "py-six",
     ],
     data: [
         "bazel/resources/**/*",
@@ -106,24 +105,16 @@
 }
 
 python_library_host {
-    name: "asuite_metrics",
-    pkg_path: "atest",
-    srcs: [
-        "asuite_metrics.py",
-    ],
-}
-
-python_library_host {
     name: "asuite_cc_client",
     pkg_path: "atest",
     srcs: [
         "atest_enum.py",
+        "asuite_metrics.py",
         "metrics/*.py",
         "coverage/*.py",
     ],
     libs: [
         "asuite_proto",
-        "asuite_metrics",
         "atest_module_info",
     ],
 }
diff --git a/atest/OWNERS b/atest/OWNERS
index d190768..738e735 100644
--- a/atest/OWNERS
+++ b/atest/OWNERS
@@ -3,3 +3,5 @@
 kevcheng@google.com
 yangbill@google.com
 kellyhung@google.com
+yikezh@google.com
+nelsonli@google.com
diff --git a/atest/asuite_lib_test/Android.bp b/atest/asuite_lib_test/Android.bp
index d0d8254..9669ada 100644
--- a/atest/asuite_lib_test/Android.bp
+++ b/atest/asuite_lib_test/Android.bp
@@ -16,28 +16,10 @@
 // also include asuite_metrics and other needed python files, in order to make sure asuite_metrics
 // tests result is accurate, separate them to two different test modules.
 
-// For testing asuite_metrics python2 libs
 package {
     default_applicable_licenses: ["Android-Apache-2.0"],
 }
 
-// For testing asuite_metrics python3 libs
-python_test_host {
-    name: "asuite_metrics_lib_tests",
-    main: "asuite_lib_run_tests.py",
-    pkg_path: "asuite_test",
-    srcs: [
-        "asuite_lib_run_tests.py",
-        "asuite_metrics_test.py",
-    ],
-    libs: [
-        "asuite_metrics",
-    ],
-    test_options: {
-        unit_test: true,
-    },
-}
-
 // For testing asuite_cc_client python3 libs
 python_test_host {
     name: "asuite_cc_lib_tests",
@@ -46,6 +28,7 @@
     srcs: [
         "asuite_lib_run_tests.py",
         "asuite_cc_client_test.py",
+        "asuite_metrics_test.py",
     ],
     libs: [
         "asuite_cc_client",
diff --git a/atest/asuite_metrics.py b/atest/asuite_metrics.py
index 0828152..49d8b85 100644
--- a/atest/asuite_metrics.py
+++ b/atest/asuite_metrics.py
@@ -27,6 +27,7 @@
     from urllib2 import Request
     from urllib2 import urlopen
 
+
 _JSON_HEADERS = {'Content-Type': 'application/json'}
 _METRICS_RESPONSE = 'done'
 _METRICS_TIMEOUT = 2 #seconds
@@ -70,17 +71,15 @@
 
 
 def _get_grouping_key():
-    """Get grouping key. Returns UUID.uuid4."""
+    """Get grouping key. Returns UUID.uuid5."""
     meta_file = os.path.join(os.path.expanduser('~'),
                              '.config', 'asuite', '.metadata')
-    if os.path.isfile(meta_file):
-        with open(meta_file) as f:
-            try:
-                return uuid.UUID(f.read(), version=4)
-            except ValueError:
-                logging.debug('malformed group_key in file, rewriting')
-    # Cache uuid to file. Raise exception if any file access error.
-    key = uuid.uuid4()
+    # (b/278503654) Treat non-human invocation as the same user when the email
+    # is null.
+    # Prevent circular import.
+    #pylint: disable=import-outside-toplevel
+    from atest.metrics import metrics_base
+    key = uuid.uuid5(uuid.NAMESPACE_DNS, metrics_base.get_user_email())
     dir_path = os.path.dirname(meta_file)
     if os.path.isfile(dir_path):
         os.remove(dir_path)
diff --git a/atest/atest_arg_parser.py b/atest/atest_arg_parser.py
index 750e20a..49b054a 100644
--- a/atest/atest_arg_parser.py
+++ b/atest/atest_arg_parser.py
@@ -18,7 +18,8 @@
 Atest Argument Parser class for atest.
 """
 
-# pylint: disable=line-too-long
+# TODO: (@jimtang) Unsuppress too-many-lines Pylint warning.
+# pylint: disable=line-too-long, too-many-lines
 
 import argparse
 import pydoc
@@ -26,6 +27,24 @@
 from atest import bazel_mode
 from atest import constants
 
+from atest.atest_utils import BuildOutputMode
+from atest.test_runners.roboleaf_test_runner import BazelBuildMode
+
+def output_mode_msg() -> str:
+    """Generate helper strings for BuildOutputMode."""
+    msg = []
+    for _, value in BuildOutputMode.__members__.items():
+        if value == BuildOutputMode.STREAMED:
+            msg.append(f'\t\t{BuildOutputMode.STREAMED.value}: '
+                       'full output like what "m" does. (default)')
+        elif value == BuildOutputMode.LOGGED:
+            msg.append(f'\t\t{BuildOutputMode.LOGGED.value}: '
+                       'print build output to a log file.')
+        else:
+            raise RuntimeError('Found unknown attribute!')
+    return '\n'.join(msg)
+
+
 # Constants used for AtestArgParser and EPILOG_TEMPLATE
 HELP_DESC = ('A command line tool that allows users to build, install, and run '
              'Android tests locally, greatly speeding test re-runs without '
@@ -45,6 +64,8 @@
 BAZEL_MODE = 'Run tests using Bazel.'
 BAZEL_ARG = ('Forward a flag to Bazel for tests executed with Bazel; '
              'see --bazel-mode.')
+BUILD_OUTPUT = (r'Specifies the desired build output mode. '
+                f'Valid values are:\n{output_mode_msg()}')
 CLEAR_CACHE = 'Wipe out the test_infos cache of the test and start a new search.'
 COLLECT_TESTS_ONLY = ('Collect a list test cases of the instrumentation tests '
                       'without testing them in real.')
@@ -81,6 +102,8 @@
 NO_ENABLE_ROOT = ('Do NOT restart adbd with root permission even the test config '
                   'has RootTargetPreparer.')
 NO_METRICS = 'Do not send metrics.'
+ROBOLEAF_MODE = ('Check if module has been listed in the ["prod", "staging", or'
+                 ' "dev"] roboleaf allowlists and invoke with b test.')
 REBUILD_MODULE_INFO = ('Forces a rebuild of the module-info.json file. '
                        'This may be necessary following a repo sync or '
                        'when writing a new test.')
@@ -196,6 +219,13 @@
                           action='store_true', help=REBUILD_MODULE_INFO)
         self.add_argument('--no-enable-root', help=NO_ENABLE_ROOT,
                           action='store_true')
+        self.add_argument('--roboleaf-mode',
+                          nargs='?',
+                          default=BazelBuildMode.OFF,
+                          const=BazelBuildMode.PROD,
+                          choices=BazelBuildMode,
+                          type=BazelBuildMode,
+                          help=ROBOLEAF_MODE)
         self.add_argument('--sharding', nargs='?', const=2,
                           type=_positive_int, default=0,
                           help=SHARDING)
@@ -246,6 +276,11 @@
         self.add_argument('-L', '--list-modules', help=LIST_MODULES)
         self.add_argument('-v', '--verbose', action='store_true', help=VERBOSE)
         self.add_argument('-V', '--version', action='store_true', help=VERSION)
+        self.add_argument('--build-output',
+                          default=BuildOutputMode.STREAMED,
+                          choices=BuildOutputMode,
+                          type=BuildOutputMode,
+                          help=BUILD_OUTPUT)
 
         # Options that switch on/off fuzzy searching.
         fgroup = self.add_mutually_exclusive_group()
@@ -417,11 +452,13 @@
         NO_CHECKING_DEVICE=NO_CHECKING_DEVICE,
         FUZZY_SEARCH=FUZZY_SEARCH,
         REBUILD_MODULE_INFO=REBUILD_MODULE_INFO,
+        ROBOLEAF_MODE=ROBOLEAF_MODE,
         REQUEST_UPLOAD_RESULT=REQUEST_UPLOAD_RESULT,
         RERUN_UNTIL_FAILURE=RERUN_UNTIL_FAILURE,
         RETRY_ANY_FAILURE=RETRY_ANY_FAILURE,
         SERIAL=SERIAL,
         SHARDING=SHARDING,
+        BUILD_OUTPUT=BUILD_OUTPUT,
         SMART_TESTING_LOCAL=SMART_TESTING_LOCAL,
         START_AVD=START_AVD,
         TEST=TEST,
@@ -518,6 +555,9 @@
         -m, --rebuild-module-info
             {REBUILD_MODULE_INFO}
 
+        --roboleaf-mode
+            {ROBOLEAF_MODE}
+
         --no-enable-root
             {NO_ENABLE_ROOT}
 
@@ -607,6 +647,8 @@
         -V, --version
             {VERSION}
 
+        --build-output
+            {BUILD_OUTPUT}
 
         [ Dry-Run and Caching ]
         --dry-run
diff --git a/atest/atest_enum.py b/atest/atest_enum.py
index 6c9d794..f60a17d 100644
--- a/atest/atest_enum.py
+++ b/atest/atest_enum.py
@@ -21,7 +21,7 @@
 @unique
 class DetectType(IntEnum):
     """An Enum class for local_detect_event."""
-    # Detect type for local_detect_event; next expansion: 29
+    # Detect type for local_detect_event; next expansion: 43
     BUG_DETECTED = 0
     ACLOUD_CREATE = 1
     FIND_BUILD = 2
@@ -58,6 +58,28 @@
     FOUND_TARGET_ARTIFACTS = 27
     FIND_TEST_IN_DEPS=28
     FULL_GENERATE_BAZEL_WORKSPACE_TIME = 29
+    # Below detect types are used for determine build conditions:
+    # 1. *_CLEAN_OUT: when out/ dir is empty or does not exist.
+    # 2. *_BPMK_CHANGE: when any Android.bp/Android.mk has changed.
+    # 3. *_ENV_CHANGE: when build-related variable has changed.
+    # 4. *_SRC_CHANGE: when source code has changed.
+    # 5. *_OTHER: none of above reasons that triggers renewal of ninja file.
+    # 6. *_INCREMENTAL: the build doesn't need to renew ninja file.
+    MODULE_INFO_CLEAN_OUT = 30
+    MODULE_INFO_BPMK_CHANGE = 31
+    MODULE_INFO_ENV_CHANGE = 32
+    MODULE_INFO_SRC_CHANGE = 33
+    MODULE_INFO_OTHER = 34
+    MODULE_INFO_INCREMENTAL = 35
+    BUILD_CLEAN_OUT = 36
+    BUILD_BPMK_CHANGE = 37
+    BUILD_ENV_CHANGE = 38
+    BUILD_SRC_CHANGE = 39
+    BUILD_OTHER = 40
+    BUILD_INCREMENTAL = 41
+    BUILD_TIME_PER_TARGET = 42
+    MODULE_INFO_GEN_NINJA = 43
+    BUILD_GEN_NINJA = 44
 
 @unique
 class ExitCode(IntEnum):
diff --git a/atest/atest_main.py b/atest/atest_main.py
index 095c7cb..277b9ac 100755
--- a/atest/atest_main.py
+++ b/atest/atest_main.py
@@ -38,6 +38,8 @@
 import time
 import platform
 
+from typing import Dict, List
+
 from dataclasses import dataclass
 from pathlib import Path
 
@@ -61,6 +63,8 @@
 from atest.metrics import metrics_utils
 from atest.test_finders import test_finder_utils
 from atest.test_runners import regression_test_runner
+from atest.test_runners import roboleaf_test_runner
+from atest.test_finders.test_info import TestInfo
 from atest.tools import atest_tools as at
 
 EXPECTED_VARS = frozenset([
@@ -896,6 +900,25 @@
         return False
     return True
 
+def _all_tests_are_bazel_buildable(
+    roboleaf_tests: Dict[str, TestInfo],
+    tests: List[str]) -> bool:
+    """Method that determines whether all tests have been fully converted to
+    bazel mode (roboleaf).
+
+    If all tests are fully converted, then indexing, generating mod-info, and
+    generating atest bazel workspace can be skipped since dependencies are
+    mapped already with `b`.
+
+    Args:
+        roboleaf_tests: A dictionary keyed by testname of roboleaf tests.
+        tests: A list of testnames.
+
+    Returns:
+        True when none of the above conditions were found.
+    """
+    return roboleaf_tests and set(tests) == set(roboleaf_tests)
+
 def perm_consistency_metrics(test_infos, mod_info, args):
     """collect inconsistency between preparer and device root permission.
 
@@ -917,6 +940,17 @@
         logging.debug('perm_consistency_metrics raised exception: %s', err)
         return
 
+
+def set_build_output_mode(mode: atest_utils.BuildOutputMode):
+    """Update environment variable dict accordingly to args.build_output."""
+    # Changing this variable does not retrigger builds.
+    atest_utils.update_build_env(
+        {'ANDROID_QUIET_BUILD': 'true',
+         #(b/271654778) Showing the reasons for the ninja file was regenerated.
+         'SOONG_UI_NINJA_ARGS': '-d explain',
+         'BUILD_OUTPUT_MODE': mode.value})
+
+
 def get_device_count_config(test_infos, mod_info):
     """Get the amount of desired devices from the test config.
 
@@ -938,27 +972,6 @@
     return max_count
 
 
-def _get_host_framework_targets(mod_info):
-    """Get the build target name for all the existing jars under host framework.
-
-    Args:
-        mod_info: ModuleInfo object.
-
-    Returns:
-        A set of build target name under $(ANDROID_HOST_OUT)/framework.
-    """
-    host_targets = set()
-    framework_host_dir = Path(
-        os.environ.get(constants.ANDROID_HOST_OUT)).joinpath('framework')
-    if framework_host_dir.is_dir():
-        jars = framework_host_dir.glob('*.jar')
-        for jar in jars:
-            if mod_info.is_module(jar.stem):
-                host_targets.add(jar.stem)
-        logging.debug('Found exist host framework target:%s', host_targets)
-    return host_targets
-
-
 def _is_auto_shard_test(test_infos):
     """Determine whether the given tests are in shardable test list.
 
@@ -994,6 +1007,7 @@
     # Sets coverage environment variables.
     if args.experimental_coverage:
         atest_utils.update_build_env(coverage.build_env_vars())
+    set_build_output_mode(args.build_output)
 
     _configure_logging(args.verbose)
     _validate_args(args)
@@ -1015,34 +1029,48 @@
         os.environ.get(constants.ANDROID_PRODUCT_OUT, ''))
     extra_args = get_extra_args(args)
     verify_env_variables = extra_args.get(constants.VERIFY_ENV_VARIABLE, False)
-    proc_idx = None
-    # Do not index targets while the users intend to dry-run tests.
-    if need_run_index_targets(args, extra_args):
-        proc_idx = atest_utils.run_multi_proc(at.index_targets)
-    smart_rebuild = need_rebuild_module_info(args)
 
-    mod_start = time.time()
-    mod_info = module_info.ModuleInfo(force_build=smart_rebuild)
-    mod_stop = time.time() - mod_start
-    metrics.LocalDetectEvent(detect_type=DetectType.MODULE_INFO_INIT_MS,
-                             result=int(mod_stop * 1000))
-    atest_utils.run_multi_proc(func=mod_info._save_module_info_checksum)
-    atest_utils.run_multi_proc(
-        func=atest_utils.generate_buildfiles_checksum,
-        args=[mod_info.module_index.parent])
+    # Gather roboleaf tests now to see if we can skip mod info generation.
+    mod_info = module_info.ModuleInfo(no_generate=True)
+    if args.roboleaf_mode != roboleaf_test_runner.BazelBuildMode.OFF:
+        mod_info.roboleaf_tests = roboleaf_test_runner.RoboleafTestRunner(
+            results_dir).roboleaf_eligible_tests(
+                args.roboleaf_mode,
+                args.tests)
+    all_tests_are_bazel_buildable = _all_tests_are_bazel_buildable(
+                                mod_info.roboleaf_tests,
+                                args.tests)
 
     # Run Test Mapping or coverage by no-bazel-mode.
     if atest_utils.is_test_mapping(args) or args.experimental_coverage:
         atest_utils.colorful_print('Not running using bazel-mode.', constants.YELLOW)
         args.bazel_mode = False
-    if args.bazel_mode:
-        start = time.time()
-        bazel_mode.generate_bazel_workspace(
-            mod_info,
-            enabled_features=set(args.bazel_mode_features or []))
-        metrics.LocalDetectEvent(
-            detect_type=DetectType.BAZEL_WORKSPACE_GENERATE_TIME,
-            result=int(time.time() - start))
+
+    proc_idx = None
+    if not all_tests_are_bazel_buildable:
+        # Do not index targets while the users intend to dry-run tests.
+        if need_run_index_targets(args, extra_args):
+            proc_idx = atest_utils.run_multi_proc(at.index_targets)
+        smart_rebuild = need_rebuild_module_info(args)
+
+        mod_start = time.time()
+        mod_info = module_info.ModuleInfo(force_build=smart_rebuild)
+        mod_stop = time.time() - mod_start
+        metrics.LocalDetectEvent(detect_type=DetectType.MODULE_INFO_INIT_MS,
+                                 result=int(mod_stop * 1000))
+        atest_utils.run_multi_proc(func=mod_info._save_module_info_checksum)
+        atest_utils.run_multi_proc(
+            func=atest_utils.generate_buildfiles_checksum,
+            args=[mod_info.module_index.parent])
+
+        if args.bazel_mode:
+            start = time.time()
+            bazel_mode.generate_bazel_workspace(
+                mod_info,
+                enabled_features=set(args.bazel_mode_features or []))
+            metrics.LocalDetectEvent(
+                detect_type=DetectType.BAZEL_WORKSPACE_GENERATE_TIME,
+                result=int(time.time() - start))
 
     translator = cli_translator.CLITranslator(
         mod_info=mod_info,
@@ -1053,7 +1081,6 @@
     if args.list_modules:
         _print_testable_modules(mod_info, args.list_modules)
         return ExitCode.SUCCESS
-    build_targets = set()
     test_infos = set()
     dry_run_args = (args.update_cmd_mapping, args.verify_cmd_mapping,
                     args.dry_run, args.generate_runner_cmd)
@@ -1064,7 +1091,7 @@
         if proc_idx and not atest_utils.has_index_files():
             proc_idx.join()
         find_start = time.time()
-        build_targets, test_infos = translator.translate(args)
+        test_infos = translator.translate(args)
         given_amount  = len(args.serial) if args.serial else 0
         required_amount = get_device_count_config(test_infos, mod_info)
         args.device_count_config = required_amount
@@ -1078,9 +1105,7 @@
                     f'but {given_amount} were given.',
                     constants.RED)
                 return 0
-        # Remove MODULE-IN-* from build targets by default.
-        if not args.use_modules_in:
-            build_targets = _exclude_modules_in_targets(build_targets)
+
         find_duration = time.time() - find_start
         if not test_infos:
             return ExitCode.TEST_NOT_FOUND
@@ -1118,8 +1143,13 @@
 
     if args.info:
         return _print_test_info(mod_info, test_infos)
-    build_targets |= test_runner_handler.get_test_runner_reqs(
+
+    build_targets = test_runner_handler.get_test_runner_reqs(
         mod_info, test_infos, extra_args=extra_args)
+    # Remove MODULE-IN-* from build targets by default.
+    if not args.use_modules_in:
+        build_targets = _exclude_modules_in_targets(build_targets)
+
     if any(dry_run_args):
         if not verify_env_variables:
             return _dry_run_validator(args, results_dir, extra_args, test_infos,
@@ -1132,7 +1162,7 @@
             return 0
     if args.detect_regression:
         build_targets |= (regression_test_runner.RegressionTestRunner('')
-                          .get_test_runner_build_reqs())
+                          .get_test_runner_build_reqs([]))
 
     steps = parse_steps(args)
     if build_targets and steps.has_build():
@@ -1142,16 +1172,18 @@
         # Add module-info.json target to the list of build targets to keep the
         # file up to date.
         build_targets.add(mod_info.module_info_target)
-        # Force rebuilt all jars under $ANDROID_HOST_OUT to prevent old version
-        # host jars break the test.
-        build_targets |= _get_host_framework_targets(mod_info)
+
         build_start = time.time()
-        success = atest_utils.build(build_targets, verbose=args.verbose)
+        success = atest_utils.build(build_targets)
         build_duration = time.time() - build_start
         metrics.BuildFinishEvent(
             duration=metrics_utils.convert_duration(build_duration),
             success=success,
             targets=build_targets)
+        metrics.LocalDetectEvent(
+            detect_type=DetectType.BUILD_TIME_PER_TARGET,
+            result=int(build_duration/len(build_targets))
+        )
         rebuild_module_info = DetectType.NOT_REBUILD_MODULE_INFO
         if is_clean:
             rebuild_module_info = DetectType.CLEAN_BUILD
@@ -1265,4 +1297,23 @@
                 result=DETECTOR.caught_result)
             if result_file:
                 print("Run 'atest --history' to review test result history.")
+
+    # Only asking internal google user to do this survey.
+    if metrics_base.get_user_type() == metrics_base.INTERNAL_USER:
+        # The bazel_mode value will only be false if user apply --no-bazel-mode.
+        if not atest_configs.GLOBAL_ARGS.bazel_mode:
+            MESSAGE = ('\nDear `--no-bazel-mode` users,\n'
+                         'We are conducting a survey to understand why you are '
+                         'still using `--no-bazel-mode`. The survey should '
+                         'take less than 3 minutes and your responses will be '
+                         'kept confidential and will only be used to improve '
+                         'our understanding of the situation. Please click on '
+                         'the link below to begin the survey:\n\n'
+                         'http://go/atest-no-bazel-survey\n\n'
+                         'Thanks for your time and feedback.\n\n'
+                         'Sincerely,\n'
+                         'The ATest Team')
+
+            print(atest_utils.colorize(MESSAGE, constants.BLACK, bp_color=constants.CYAN))
+
     sys.exit(EXIT_CODE)
diff --git a/atest/atest_main_unittest.py b/atest/atest_main_unittest.py
index 020c602..49c1223 100755
--- a/atest/atest_main_unittest.py
+++ b/atest/atest_main_unittest.py
@@ -27,8 +27,8 @@
 from importlib import reload
 from io import StringIO
 from unittest import mock
+from pyfakefs import fake_filesystem_unittest
 
-# pylint: disable=wrong-import-order
 from atest import atest_main
 from atest import atest_utils
 from atest import constants
@@ -37,6 +37,12 @@
 from atest.metrics import metrics_utils
 from atest.test_finders import test_info
 
+GREEN= '\x1b[1;32m'
+CYAN = '\x1b[1;36m'
+MAGENTA = '\x1b[1;35m'
+END = '\x1b[0m'
+
+
 #pylint: disable=protected-access
 class AtestUnittests(unittest.TestCase):
     """Unit tests for atest_main.py"""
@@ -95,135 +101,6 @@
                     atest_main._has_valid_test_mapping_args(parsed_args),
                     'Failed to validate: %s' % args)
 
-    @mock.patch.object(module_info.ModuleInfo, '_merge_build_system_infos')
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/'})
-    @mock.patch('json.load', return_value={})
-    @mock.patch('builtins.open', new_callable=mock.mock_open)
-    @mock.patch('os.path.isfile', return_value=True)
-    @mock.patch('atest.atest_utils._has_colors', return_value=True)
-    @mock.patch.object(module_info.ModuleInfo, 'get_module_info',)
-    def test_print_module_info_from_module_name(self, mock_get_module_info,
-                                                _mock_has_colors, _isfile,
-                                                _open, _json, _merge):
-        """Test _print_module_info_from_module_name method."""
-        mod_one_name = 'mod1'
-        mod_one_path = ['src/path/mod1']
-        mod_one_installed = ['installed/path/mod1']
-        mod_one_suites = ['device_test_mod1', 'native_test_mod1']
-        mod_one = {constants.MODULE_NAME: mod_one_name,
-                   constants.MODULE_PATH: mod_one_path,
-                   constants.MODULE_INSTALLED: mod_one_installed,
-                   constants.MODULE_COMPATIBILITY_SUITES: mod_one_suites}
-
-        # Case 1: The testing_module('mod_one') can be found in module_info.
-        mock_get_module_info.return_value = mod_one
-        capture_output = StringIO()
-        sys.stdout = capture_output
-        mod_info = module_info.ModuleInfo(module_file='/somewhere/module-info')
-        # Check return value = True, since 'mod_one' can be found.
-        self.assertTrue(
-            atest_main._print_module_info_from_module_name(mod_info, mod_one_name))
-        # Assign sys.stdout back to default.
-        sys.stdout = sys.__stdout__
-        correct_output = ('\x1b[1;32mmod1\x1b[0m\n'
-                          '\x1b[1;36m\tCompatibility suite\x1b[0m\n'
-                          '\t\tdevice_test_mod1\n'
-                          '\t\tnative_test_mod1\n'
-                          '\x1b[1;36m\tSource code path\x1b[0m\n'
-                          '\t\tsrc/path/mod1\n'
-                          '\x1b[1;36m\tInstalled path\x1b[0m\n'
-                          '\t\tinstalled/path/mod1\n')
-        # Check the function correctly printed module_info in color to stdout
-        self.assertEqual(capture_output.getvalue(), correct_output)
-
-        # Case 2: The testing_module('mod_one') can NOT be found in module_info.
-        mock_get_module_info.return_value = None
-        capture_output = StringIO()
-        sys.stdout = capture_output
-        # Check return value = False, since 'mod_one' can NOT be found.
-        self.assertFalse(
-            atest_main._print_module_info_from_module_name(mod_info, mod_one_name))
-        # Assign sys.stdout back to default.
-        sys.stdout = sys.__stdout__
-        null_output = ''
-        # Check if no module_info, then nothing printed to screen.
-        self.assertEqual(capture_output.getvalue(), null_output)
-
-    @mock.patch.object(module_info.ModuleInfo, '_merge_build_system_infos')
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/'})
-    @mock.patch('json.load', return_value={})
-    @mock.patch('builtins.open', new_callable=mock.mock_open)
-    @mock.patch('os.path.isfile', return_value=True)
-    @mock.patch('atest.atest_utils._has_colors', return_value=True)
-    @mock.patch.object(module_info.ModuleInfo, 'get_module_info',)
-    def test_print_test_info(self, mock_get_module_info, _mock_has_colors,
-                             _isfile, _open, _json, _merge):
-        """Test _print_test_info method."""
-        mod_one_name = 'mod1'
-        mod_one = {constants.MODULE_NAME: mod_one_name,
-                   constants.MODULE_PATH: ['path/mod1'],
-                   constants.MODULE_INSTALLED: ['installed/mod1'],
-                   constants.MODULE_COMPATIBILITY_SUITES: ['suite_mod1']}
-        mod_two_name = 'mod2'
-        mod_two = {constants.MODULE_NAME: mod_two_name,
-                   constants.MODULE_PATH: ['path/mod2'],
-                   constants.MODULE_INSTALLED: ['installed/mod2'],
-                   constants.MODULE_COMPATIBILITY_SUITES: ['suite_mod2']}
-        mod_three_name = 'mod3'
-        mod_three = {constants.MODULE_NAME: mod_two_name,
-                     constants.MODULE_PATH: ['path/mod3'],
-                     constants.MODULE_INSTALLED: ['installed/mod3'],
-                     constants.MODULE_COMPATIBILITY_SUITES: ['suite_mod3']}
-        test_name = mod_one_name
-        build_targets = set([mod_one_name, mod_two_name, mod_three_name])
-        t_info = test_info.TestInfo(test_name, 'mock_runner', build_targets)
-        test_infos = set([t_info])
-
-        # The _print_test_info() will print the module_info of the test_info's
-        # test_name first. Then, print its related build targets. If the build
-        # target be printed before(e.g. build_target == test_info's test_name),
-        # it will skip it and print the next build_target.
-        # Since the build_targets of test_info are mod_one, mod_two, and
-        # mod_three, it will print mod_one first, then mod_two, and mod_three.
-        #
-        # _print_test_info() calls _print_module_info_from_module_name() to
-        # print the module_info. And _print_module_info_from_module_name()
-        # calls get_module_info() to get the module_info. So we can mock
-        # get_module_info() to achieve that.
-        mock_get_module_info.side_effect = [mod_one, mod_two, mod_three]
-
-        capture_output = StringIO()
-        sys.stdout = capture_output
-        mod_info = module_info.ModuleInfo(module_file='/somewhere/module-info')
-        atest_main._print_test_info(mod_info, test_infos)
-        # Assign sys.stdout back to default.
-        sys.stdout = sys.__stdout__
-        correct_output = ('\x1b[1;32mmod1\x1b[0m\n'
-                          '\x1b[1;36m\tCompatibility suite\x1b[0m\n'
-                          '\t\tsuite_mod1\n'
-                          '\x1b[1;36m\tSource code path\x1b[0m\n'
-                          '\t\tpath/mod1\n'
-                          '\x1b[1;36m\tInstalled path\x1b[0m\n'
-                          '\t\tinstalled/mod1\n'
-                          '\x1b[1;35m\tRelated build targets\x1b[0m\n'
-                          '\t\tmod1, mod2, mod3\n'
-                          '\x1b[1;32mmod2\x1b[0m\n'
-                          '\x1b[1;36m\tCompatibility suite\x1b[0m\n'
-                          '\t\tsuite_mod2\n'
-                          '\x1b[1;36m\tSource code path\x1b[0m\n'
-                          '\t\tpath/mod2\n'
-                          '\x1b[1;36m\tInstalled path\x1b[0m\n'
-                          '\t\tinstalled/mod2\n'
-                          '\x1b[1;32mmod3\x1b[0m\n'
-                          '\x1b[1;36m\tCompatibility suite\x1b[0m\n'
-                          '\t\tsuite_mod3\n'
-                          '\x1b[1;36m\tSource code path\x1b[0m\n'
-                          '\t\tpath/mod3\n'
-                          '\x1b[1;36m\tInstalled path\x1b[0m\n'
-                          '\t\tinstalled/mod3\n'
-                          '\x1b[1;37m\x1b[0m\n')
-        self.assertEqual(capture_output.getvalue(), correct_output)
-
     @mock.patch.object(atest_utils, 'get_adb_devices')
     @mock.patch.object(metrics_utils, 'send_exit_event')
     def test_validate_exec_mode(self, _send_exit, _devs):
@@ -301,5 +178,173 @@
         self.assertTrue(date_time)
 
 
+# pylint: disable=missing-function-docstring
+class AtestUnittestFixture(fake_filesystem_unittest.TestCase):
+    """Fixture providing fake module-info and test-info helpers for tests."""
+
+    def setUp(self):
+        self.setUpPyfakefs()
+
+    # pylint: disable=protected-access
+    def create_empty_module_info(self):
+        fake_temp_file_name = next(tempfile._get_candidate_names())
+        self.fs.create_file(fake_temp_file_name, contents='{}')
+        return module_info.ModuleInfo(module_file=fake_temp_file_name)
+
+    def create_module_info(self, modules=None):
+        mod_info = self.create_empty_module_info()
+        modules = modules or []
+
+        for m in modules:
+            mod_info.name_to_module_info[m['module_name']] = m
+
+        return mod_info
+
+    def create_test_info(
+            self,
+            test_name='hello_world_test',
+            test_runner='AtestTradefedRunner',
+            build_targets=None):
+        """Create a test_info.TestInfo object."""
+        if not build_targets:
+            build_targets = set()
+        return test_info.TestInfo(test_name, test_runner, build_targets)
+
+
+class PrintModuleInfoTest(AtestUnittestFixture):
+    """Test conditions for _print_module_info."""
+
+    def tearDown(self):
+        sys.stdout = sys.__stdout__
+
+    @mock.patch('atest.atest_utils._has_colors', return_value=True)
+    def test_print_module_info_from_module_name(self, _):
+        """Test _print_module_info_from_module_name method."""
+        mod_info = self.create_module_info(
+            [module(
+                name='mod1',
+                path=['src/path/mod1'],
+                installed=['installed/path/mod1'],
+                compatibility_suites=['device_test_mod1', 'native_test_mod1']
+            )]
+        )
+        correct_output = (f'{GREEN}mod1{END}\n'
+                          f'{CYAN}\tCompatibility suite{END}\n'
+                          '\t\tdevice_test_mod1\n'
+                          '\t\tnative_test_mod1\n'
+                          f'{CYAN}\tSource code path{END}\n'
+                          '\t\t[\'src/path/mod1\']\n'
+                          f'{CYAN}\tInstalled path{END}\n'
+                          '\t\tinstalled/path/mod1\n')
+        capture_output = StringIO()
+        sys.stdout = capture_output
+
+        atest_main._print_module_info_from_module_name(mod_info, 'mod1')
+
+        # Check the function correctly printed module_info in color to stdout
+        self.assertEqual(correct_output, capture_output.getvalue())
+
+    @mock.patch('atest.atest_utils._has_colors', return_value=True)
+    def test_print_test_info(self, _):
+        """Test _print_test_info method."""
+        modules = []
+        for index in {1, 2, 3}:
+            modules.append(
+                module(
+                    name=f'mod{index}',
+                    path=[f'path/mod{index}'],
+                    installed=[f'installed/mod{index}'],
+                    compatibility_suites=[f'suite_mod{index}']
+                )
+            )
+        mod_info = self.create_module_info(modules)
+        test_infos = {
+            self.create_test_info(
+                test_name='mod1',
+                test_runner='mock_runner',
+                build_targets={'mod1', 'mod2', 'mod3'},
+            ),
+        }
+        correct_output = (f'{GREEN}mod1{END}\n'
+                          f'{CYAN}\tCompatibility suite{END}\n'
+                          '\t\tsuite_mod1\n'
+                          f'{CYAN}\tSource code path{END}\n'
+                          '\t\t[\'path/mod1\']\n'
+                          f'{CYAN}\tInstalled path{END}\n'
+                          '\t\tinstalled/mod1\n'
+                          f'{MAGENTA}\tRelated build targets{END}\n'
+                          '\t\tmod1, mod2, mod3\n'
+                          f'{GREEN}mod2{END}\n'
+                          f'{CYAN}\tCompatibility suite{END}\n'
+                          '\t\tsuite_mod2\n'
+                          f'{CYAN}\tSource code path{END}\n'
+                          '\t\t[\'path/mod2\']\n'
+                          f'{CYAN}\tInstalled path{END}\n'
+                          '\t\tinstalled/mod2\n'
+                          f'{GREEN}mod3{END}\n'
+                          f'{CYAN}\tCompatibility suite{END}\n'
+                          '\t\tsuite_mod3\n'
+                          f'{CYAN}\tSource code path{END}\n'
+                          '\t\t[\'path/mod3\']\n'
+                          f'{CYAN}\tInstalled path{END}\n'
+                          '\t\tinstalled/mod3\n'
+                          f'\x1b[1;37m{END}\n')
+        capture_output = StringIO()
+        sys.stdout = capture_output
+
+        # The _print_test_info() will print the module_info of the test_info's
+        # test_name first. Then, print its related build targets. If the build
+        # target was printed before (e.g. build_target == test_info's test_name),
+        # it will skip it and print the next build_target.
+        # Since the build_targets of test_info are mod_one, mod_two, and
+        # mod_three, it will print mod_one first, then mod_two, and mod_three.
+        #
+        # _print_test_info() calls _print_module_info_from_module_name() to
+        # print each module's info, which is looked up in mod_info. The fake
+        # module-info objects created above therefore supply the data; no
+        # mocking of get_module_info() is required here.
+        atest_main._print_test_info(mod_info, test_infos)
+
+        self.assertEqual(correct_output, capture_output.getvalue())
+
+
+# pylint: disable=too-many-arguments
+def module(
+    name=None,
+    path=None,
+    installed=None,
+    classes=None,
+    auto_test_config=None,
+    test_config=None,
+    shared_libs=None,
+    dependencies=None,
+    runtime_dependencies=None,
+    data=None,
+    data_dependencies=None,
+    compatibility_suites=None,
+    host_dependencies=None,
+    srcs=None,
+):
+    name = name or 'libhello'
+
+    m = {}
+
+    m['module_name'] = name
+    m['class'] = classes
+    m['path'] = [path or '']
+    m['installed'] = installed or []
+    m['is_unit_test'] = 'false'
+    m['auto_test_config'] = auto_test_config or []
+    m['test_config'] = test_config or []
+    m['shared_libs'] = shared_libs or []
+    m['runtime_dependencies'] = runtime_dependencies or []
+    m['dependencies'] = dependencies or []
+    m['data'] = data or []
+    m['data_dependencies'] = data_dependencies or []
+    m['compatibility_suites'] = compatibility_suites or []
+    m['host_dependencies'] = host_dependencies or []
+    m['srcs'] = srcs or []
+    return m
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/atest/atest_run_unittests.py b/atest/atest_run_unittests.py
index 919b9f8..45a46c0 100755
--- a/atest/atest_run_unittests.py
+++ b/atest/atest_run_unittests.py
@@ -22,11 +22,21 @@
 import unittest
 
 from importlib import import_module
-
+from unittest import mock
 
 # Setup logging to be silent so unittests can pass through TF.
 logging.disable(logging.ERROR)
 
+ENV = {
+    'ANDROID_BUILD_TOP': '/',
+    'ANDROID_PRODUCT_OUT': '/out/prod',
+    'ANDROID_TARGET_OUT_TESTCASES': '/out/prod/tcases',
+    'ANDROID_HOST_OUT': '/out/host',
+    'ANDROID_HOST_OUT_TESTCASES': '/out/host/tcases',
+    'TARGET_PRODUCT': 'aosp_cf_x86_64',
+    'TARGET_BUILD_VARIANT': 'userdebug',
+}
+
 def get_test_modules():
     """Returns a list of testable modules.
 
@@ -73,7 +83,8 @@
 
 if __name__ == '__main__':
     print(sys.version_info)
-    result = run_test_modules(get_test_modules())
-    if not result.wasSuccessful():
-        sys.exit(not result.wasSuccessful())
-    sys.exit(0)
+    with mock.patch.dict('os.environ', ENV):
+        result = run_test_modules(get_test_modules())
+        if not result.wasSuccessful():
+            sys.exit(not result.wasSuccessful())
+        sys.exit(0)
diff --git a/atest/atest_tradefed.sh b/atest/atest_tradefed.sh
index a3d23e3..ba61a99 100755
--- a/atest/atest_tradefed.sh
+++ b/atest/atest_tradefed.sh
@@ -72,7 +72,7 @@
 fi
 
 # Note: must leave $RDBG_FLAG and $TRADEFED_OPTS unquoted so that they go away when unset
-START_FEATURE_SERVER=1 ${TF_JAVA} $RDBG_FLAG \
+LOCAL_MODE=1 START_FEATURE_SERVER=1 ${TF_JAVA} $RDBG_FLAG \
     -XX:+HeapDumpOnOutOfMemoryError \
     -XX:-OmitStackTraceInFastThrow \
     $TRADEFED_OPTS \
diff --git a/atest/atest_utils.py b/atest/atest_utils.py
index 99231b1..914d8f5 100644
--- a/atest/atest_utils.py
+++ b/atest/atest_utils.py
@@ -22,6 +22,7 @@
 
 from __future__ import print_function
 
+import enum
 import datetime
 import fnmatch
 import hashlib
@@ -41,9 +42,10 @@
 import urllib
 import zipfile
 
+from dataclasses import dataclass
 from multiprocessing import Process
 from pathlib import Path
-from typing import Any, Dict
+from typing import Any, Dict, List, Set
 
 import xml.etree.ElementTree as ET
 
@@ -111,6 +113,31 @@
 _BUILD_ENV = {}
 
 
+@dataclass
+class BuildEnvProfiler:
+    """Represents the conditions before and after triggering a build."""
+    ninja_file: Path
+    ninja_file_mtime: float
+    variable_file: Path
+    variable_file_md5: str
+    clean_out: bool
+    build_files_integrity: bool
+
+
+@enum.unique
+class BuildOutputMode(enum.Enum):
+    "Represents the different ways to display build output."
+    STREAMED = 'streamed'
+    LOGGED = 'logged'
+
+    def __init__(self, arg_name: str):
+        self._description = arg_name
+
+    # pylint: disable=missing-function-docstring
+    def description(self):
+        return self._description
+
+
 def get_build_cmd(dump=False):
     """Compose build command with no-absolute path and flag "--make-mode".
 
@@ -224,7 +251,7 @@
         raise subprocess.CalledProcessError(proc.returncode, cmd, output)
 
 
-def get_build_out_dir():
+def get_build_out_dir() -> str:
     """Get android build out directory.
 
     The order of the rules are:
@@ -269,13 +296,12 @@
     global _BUILD_ENV
     _BUILD_ENV.update(env)
 
-def build(build_targets, verbose=False):
+
+def build(build_targets: Set[str]):
     """Shell out and invoke run_build_cmd to make build_targets.
 
     Args:
         build_targets: A set of strings of build targets to make.
-        verbose: Optional arg. If True output is streamed to the console.
-                 If False, only the last line of the build output is outputted.
 
     Returns:
         Boolean of whether build command was successful, True if nothing to
@@ -288,34 +314,43 @@
     # pylint: disable=global-statement
     global _BUILD_ENV
     full_env_vars = os.environ.copy()
-    full_env_vars.update(_BUILD_ENV)
+    update_build_env(full_env_vars)
     print('\n%s\n%s' % (
         colorize("Building Dependencies...", constants.CYAN),
                  ', '.join(build_targets)))
     logging.debug('Building Dependencies: %s', ' '.join(build_targets))
     cmd = get_build_cmd() + list(build_targets)
-    return _run_build_cmd(cmd, verbose, full_env_vars)
+    return _run_build_cmd(cmd, _BUILD_ENV)
 
-def _run_build_cmd(cmd, verbose=False, env_vars=None):
+
+def _run_build_cmd(cmd: List[str], env_vars: Dict[str, str]):
     """The main process of building targets.
 
     Args:
         cmd: A list of soong command.
-        verbose: Optional arg. If True output is streamed to the console.
-                 If False, only the last line of the build output is outputted.
-        env_vars: Optional arg. Dict of env vars to set during build.
-
+        env_vars: Dict of environment variables used for build.
     Returns:
         Boolean of whether build command was successful, True if nothing to
         build.
     """
     logging.debug('Executing command: %s', cmd)
+    build_profiler = _build_env_profiling()
     try:
-        if verbose:
+        if env_vars.get('BUILD_OUTPUT_MODE') == BuildOutputMode.STREAMED.value:
+            print()
             subprocess.check_call(cmd, stderr=subprocess.STDOUT, env=env_vars)
         else:
-            # TODO: Save output to a log file.
+            # Note that piping stdout forces Soong to switch to 'dumb terminal
+            # mode' which only prints completed actions. This gives users the
+            # impression that actions are taking longer than they really are.
+            # See b/233044822 for more details.
+            log_path = Path(get_build_out_dir()).joinpath('verbose.log.gz')
+            print('\n(Build log may not reflect actual status in simple output'
+                  ' mode; check {} for detail after build finishes.)'.format(
+                    colorize(f'{log_path}', constants.CYAN)
+                  ), end='')
             _run_limited_output(cmd, env_vars=env_vars)
+        _send_build_condition_metrics(build_profiler, cmd)
         logging.info('Build successful')
         return True
     except subprocess.CalledProcessError as err:
@@ -519,6 +554,9 @@
     """
     _cmd = ' '.join(cmd_list).split()
     for cmd in _cmd:
+        if cmd.startswith('--skip-all-system-status-check'):
+            _cmd.remove(cmd)
+            continue
         if cmd.startswith('--atest-log-file-path'):
             _cmd.remove(cmd)
             continue
@@ -1087,7 +1125,11 @@
         try:
             xml_root = ET.parse(xml).getroot()
         except (IOError, OSError, ET.ParseError):
-            logging.warning('%s could not be read.', xml)
+            # TODO(b/274989179) Change back to logging.warning once warnings
+            # are no longer treated as test failures. Alternatively, fix the
+            # test_get_manifest_branch unit test by returning None when
+            # portal_xml or default_xml does not exist.
+            logging.info('%s could not be read.', xml)
             return ''
         default_tags = xml_root.findall('./default')
         if default_tags:
@@ -1102,7 +1144,11 @@
         try:
             xml_root = ET.parse(xml).getroot()
         except (IOError, OSError, ET.ParseError):
-            logging.warning('%s could not be read.', xml)
+            # TODO(b/274989179) Change back to logging.warning once warnings
+            # are no longer treated as test failures. Alternatively, fix the
+            # test_get_manifest_branch unit test by returning None when
+            # portal_xml or default_xml does not exist.
+            logging.info('%s could not be read.', xml)
             return Path()
         include_tags = xml_root.findall('./include')
         if include_tags:
@@ -1143,8 +1189,7 @@
     module_info_path = Path(product_out).joinpath('module-info.json')
     if module_info_path.is_file():
         os.remove(module_info_path)
-    if not build([module_info_target],
-                  verbose=logging.getLogger().isEnabledFor(logging.DEBUG)):
+    if not build([module_info_target]):
         sys.exit(ExitCode.BUILD_FAILURE)
     build_duration = time.time() - build_start
     metrics.LocalDetectEvent(
@@ -1928,6 +1973,97 @@
     # 2. Ensure the consistency of all build files.
     return check_md5(constants.BUILDFILES_MD5, missing_ok=False)
 
+
+def _build_env_profiling() -> BuildEnvProfiler:
+    """Determine the status profile before build.
+
+    The BuildEnvProfiler object can help us determine whether a build is:
+        1. clean build. (empty out/ dir)
+        2. Build files Integrity (Android.bp/Android.mk changes).
+        3. Environment variables consistency.
+        4. New Ninja file generated. (mtime of soong/build.ninja)
+
+    Returns:
+        the BuildEnvProfiler object.
+    """
+    out_dir = Path(get_build_out_dir())
+    ninja_file = out_dir.joinpath('soong/build.ninja')
+    mtime = ninja_file.stat().st_mtime if ninja_file.is_file() else 0
+    variables_file = out_dir.joinpath('soong/soong.environment.used.build')
+
+    return BuildEnvProfiler(
+        ninja_file=ninja_file,
+        ninja_file_mtime=mtime,
+        variable_file=variables_file,
+        variable_file_md5=md5sum(variables_file),
+        clean_out=not ninja_file.exists(),
+        build_files_integrity=build_files_integrity_is_ok()
+    )
+
+
+def _send_build_condition_metrics(
+        build_profile: BuildEnvProfiler, cmd: List[str]):
+    """Send build conditions by comparing build env profilers."""
+
+    # When building module-info.json only, 'module-info.json' will be
+    # the last element.
+    m_mod_info_only = 'module-info.json' in cmd.pop()
+
+    def ninja_file_is_changed(env_profiler: BuildEnvProfiler) -> bool:
+        """Determine whether the ninja file has been regenerated."""
+        if not env_profiler.ninja_file.is_file():
+            return True
+        return (env_profiler.ninja_file.stat().st_mtime !=
+                env_profiler.ninja_file_mtime)
+
+    def env_var_is_changed(env_profiler: BuildEnvProfiler) -> bool:
+        """Determine whether soong-related variables had changed."""
+        return (md5sum(env_profiler.variable_file) !=
+                env_profiler.variable_file_md5)
+
+    def send_data(detect_type):
+        """A simple wrapper of metrics.LocalDetectEvent."""
+        metrics.LocalDetectEvent(detect_type=detect_type, result=1)
+
+    # Determine the correct detect type before profiling.
+    # (build module-info.json or build dependencies.)
+    clean_out = (DetectType.MODULE_INFO_CLEAN_OUT
+                 if m_mod_info_only else DetectType.BUILD_CLEAN_OUT)
+    ninja_generation = (DetectType.MODULE_INFO_GEN_NINJA
+                        if m_mod_info_only else DetectType.BUILD_GEN_NINJA)
+    bpmk_change = (DetectType.MODULE_INFO_BPMK_CHANGE
+                   if m_mod_info_only else DetectType.BUILD_BPMK_CHANGE)
+    env_change = (DetectType.MODULE_INFO_ENV_CHANGE
+                  if m_mod_info_only else DetectType.BUILD_ENV_CHANGE)
+    src_change = (DetectType.MODULE_INFO_SRC_CHANGE
+                  if m_mod_info_only else DetectType.BUILD_SRC_CHANGE)
+    other = (DetectType.MODULE_INFO_OTHER
+             if m_mod_info_only else DetectType.BUILD_OTHER)
+    incremental = (DetectType.MODULE_INFO_INCREMENTAL
+                  if m_mod_info_only else DetectType.BUILD_INCREMENTAL)
+
+    if build_profile.clean_out:
+        send_data(clean_out)
+    else:
+        send_data(incremental)
+
+    if ninja_file_is_changed(build_profile):
+        send_data(ninja_generation)
+
+    other_condition = True
+    if not build_profile.build_files_integrity:
+        send_data(bpmk_change)
+        other_condition = False
+    if env_var_is_changed(build_profile):
+        send_data(env_change)
+        other_condition = False
+    if bool(get_modified_files(os.getcwd())):
+        send_data(src_change)
+        other_condition = False
+    if other_condition:
+        send_data(other)
+
+
 def get_local_auto_shardable_tests():
     """Get the auto shardable test names in shardable file.
 
diff --git a/atest/atest_utils_unittest.py b/atest/atest_utils_unittest.py
index f3346ca..f284948 100755
--- a/atest/atest_utils_unittest.py
+++ b/atest/atest_utils_unittest.py
@@ -343,6 +343,7 @@
         build_top = '/home/a/b/c'
         rel_path = 'd/e'
         mock_cwd.return_value = os.path.join(build_top, rel_path)
+        # TODO: (b/264015241) Stop mocking build variables.
         os_environ_mock = {constants.ANDROID_BUILD_TOP: build_top}
         with mock.patch.dict('os.environ', os_environ_mock, clear=True):
             expected_cmd = ['../../build/soong/soong_ui.bash', '--make-mode']
diff --git a/atest/bazel/reporter/javatests/com/android/tradefed/result/BazelExitCodeResultReporterTest.java b/atest/bazel/reporter/javatests/com/android/tradefed/result/BazelExitCodeResultReporterTest.java
index 59ef2e9..5537aeb 100644
--- a/atest/bazel/reporter/javatests/com/android/tradefed/result/BazelExitCodeResultReporterTest.java
+++ b/atest/bazel/reporter/javatests/com/android/tradefed/result/BazelExitCodeResultReporterTest.java
@@ -54,7 +54,7 @@
         reporter.invocationStarted(DEFAULT_CONTEXT);
         reporter.invocationEnded(1);
 
-        assertFileContentsEquals("4", exitCodeFile);
+        assertFileContentsEquals("0", exitCodeFile);
     }
 
     @Test
diff --git a/atest/bazel/reporter/src/com/android/tradefed/result/BazelExitCodeResultReporter.java b/atest/bazel/reporter/src/com/android/tradefed/result/BazelExitCodeResultReporter.java
index 840818e..cbf9621 100644
--- a/atest/bazel/reporter/src/com/android/tradefed/result/BazelExitCodeResultReporter.java
+++ b/atest/bazel/reporter/src/com/android/tradefed/result/BazelExitCodeResultReporter.java
@@ -49,7 +49,6 @@
 
     private boolean mHasRunFailures;
     private boolean mHasTestFailures;
-    private int mTestCount = 0;
 
     @VisibleForTesting
     BazelExitCodeResultReporter(FileSystem fs) {
@@ -61,21 +60,6 @@
     }
 
     @Override
-    public void testRunStarted(String name, int numTests) {
-        testRunStarted(name, numTests, 0);
-    }
-
-    @Override
-    public void testRunStarted(String name, int numTests, int attemptNumber) {
-        testRunStarted(name, numTests, attemptNumber, System.currentTimeMillis());
-    }
-
-    @Override
-    public void testRunStarted(String name, int numTests, int attemptNumber, long startTime) {
-        mTestCount += numTests;
-    }
-
-    @Override
     public void testRunFailed(String errorMessage) {
         mHasRunFailures = true;
     }
@@ -127,18 +111,12 @@
             return ExitCode.TESTS_FAILED;
         }
 
-        // Return NO_TESTS_FOUND only when there are no run failures.
-        if (mTestCount == 0) {
-            return ExitCode.NO_TESTS_FOUND;
-        }
-
         return ExitCode.SUCCESS;
     }
 
     private enum ExitCode {
         SUCCESS(0),
         TESTS_FAILED(3),
-        NO_TESTS_FOUND(4),
         RUN_FAILURE(6);
 
         private final int value;
diff --git a/atest/bazel/resources/WORKSPACE b/atest/bazel/resources/WORKSPACE
index 996c058..55e72ed 100644
--- a/atest/bazel/resources/WORKSPACE
+++ b/atest/bazel/resources/WORKSPACE
@@ -1,3 +1,12 @@
 register_toolchains(
-  "//prebuilts/build-tools:py_toolchain"
+    "//prebuilts/build-tools:py_toolchain",
+    "//prebuilts/jdk/jdk17:runtime_toolchain_definition",
+)
+
+# `device_infra` repository provides rules needed to start cuttlefish devices
+# remotely. This repository is loaded when Bazel needs a target from it,
+# otherwise won't load.
+local_repository(
+    name = "device_infra",
+    path = "vendor/google/tools/atest/device_infra",
 )
diff --git a/atest/bazel/resources/bazelrc b/atest/bazel/resources/bazelrc
index 061771f..b052d94 100644
--- a/atest/bazel/resources/bazelrc
+++ b/atest/bazel/resources/bazelrc
@@ -9,21 +9,15 @@
 # rate.
 build --incompatible_strict_action_env
 
+# Use the JDK defined by local_java_runtime in //prebuilts/jdk/jdk<VERSION>
+build --java_runtime_version=jdk17
+
 # Depending on how many machines are in the remote execution instance, setting
 # this higher can make builds faster by allowing more jobs to run in parallel.
 # Setting it too high can result in jobs that timeout, however, while waiting
 # for a remote machine to execute them.
 build:remote --jobs=200
 
-# Set the Java used in remote environment.
-build:remote --action_env=REMOTE_JAVA_HOME=/usr/lib/jvm/11.29.3-ca-jdk11.0.2/reduced
-
-# Set a host platform specifying the Docker container image used by the RBE
-# instance.
-# See https://docs.bazel.build/versions/master/platforms.html for more about
-# platforms.
-build:remote --host_platform=//bazel/configs/rbe/config:platform
-
 # Enable the remote cache so that action results can be shared across machines,
 # developers, and workspaces.
 build:remote --remote_cache=grpcs://remotebuildexecution.googleapis.com
diff --git a/atest/bazel/resources/device_def/BUILD.bazel b/atest/bazel/resources/device_def/BUILD.bazel
new file mode 100644
index 0000000..3926fc5
--- /dev/null
+++ b/atest/bazel/resources/device_def/BUILD.bazel
@@ -0,0 +1,21 @@
+load("//bazel/rules:soong_prebuilt.bzl", "soong_prebuilt")
+load("//bazel/rules/device:cuttlefish_device.bzl", "cuttlefish_device")
+load("@device_infra//remote_device:download_cvd_artifact.bzl", "build_id", "download_cvd_artifact")
+
+package(default_visibility = ["//visibility:public"])
+
+build_id(
+    name = "cvd_build_id",
+    build_setting_default = "",
+)
+
+download_cvd_artifact(
+    name = "cvd_artifacts",
+    build_id = ":cvd_build_id",
+)
+
+cuttlefish_device(
+    name = "cf_x86_64_phone",
+    out = "android_cuttlefish.sh",
+    build_files = ":cvd_artifacts",
+)
diff --git a/atest/bazel/resources/format_as_soong_module_name.cquery b/atest/bazel/resources/format_as_soong_module_name.cquery
index 401d31c..7d784b5 100644
--- a/atest/bazel/resources/format_as_soong_module_name.cquery
+++ b/atest/bazel/resources/format_as_soong_module_name.cquery
@@ -6,5 +6,5 @@
     soong_prebuilt_info = p.get(
         "//bazel/rules:soong_prebuilt.bzl%SoongPrebuiltInfo")
     if soong_prebuilt_info:
-        return soong_prebuilt_info.module_name
+        return "%s:%s" % (soong_prebuilt_info.module_name, soong_prebuilt_info.platform_flavor)
     return ""
diff --git a/atest/bazel/resources/rules/BUILD.bazel b/atest/bazel/resources/rules/BUILD.bazel
index 9d8900f..00cbb2b 100644
--- a/atest/bazel/resources/rules/BUILD.bazel
+++ b/atest/bazel/resources/rules/BUILD.bazel
@@ -1,5 +1,6 @@
 load("//bazel/rules:common_settings.bzl", "string_flag")
 load("//bazel/rules:common_settings.bzl", "string_list_flag")
+load("//bazel/rules/device:single_local_device.bzl", "local_device")
 
 package(default_visibility = ["//visibility:public"])
 
@@ -8,6 +9,16 @@
     build_setting_default = "",
 )
 
+local_device(
+    name = "local_device",
+    out = "single_local_device.sh",
+)
+
+label_flag(
+    name = "target_device",
+    build_setting_default = ":local_device",
+)
+
 string_list_flag(
     name = "extra_tradefed_result_reporters",
     build_setting_default = [],
@@ -23,4 +34,7 @@
     flag_values = {":platform_flavor": "host"},
 )
 
-exports_files(["tradefed_test.sh.template"])
+exports_files([
+    "tradefed_test.sh.template",
+    "device_test.sh.template",
+])
diff --git a/atest/bazel/resources/rules/device-provider/single_local_device.sh b/atest/bazel/resources/rules/device-provider/single_local_device.sh
deleted file mode 100644
index e645e37..0000000
--- a/atest/bazel/resources/rules/device-provider/single_local_device.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-TEST_EXECUTABLE=$1
-$TEST_EXECUTABLE
\ No newline at end of file
diff --git a/atest/bazel/resources/rules/device/BUILD.bazel b/atest/bazel/resources/rules/device/BUILD.bazel
new file mode 100644
index 0000000..3c25c29
--- /dev/null
+++ b/atest/bazel/resources/rules/device/BUILD.bazel
@@ -0,0 +1,6 @@
+package(default_visibility = ["//visibility:public"])
+
+exports_files([
+    "create_cuttlefish.sh.template",
+    "single_local_device.sh",
+])
diff --git a/atest/bazel/resources/rules/device/create_cuttlefish.sh.template b/atest/bazel/resources/rules/device/create_cuttlefish.sh.template
new file mode 100644
index 0000000..c90b3ae
--- /dev/null
+++ b/atest/bazel/resources/rules/device/create_cuttlefish.sh.template
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+DEVICE_IMAGE_PATH="{img_path}"
+DEVICE_IMAGE_DIR=$(dirname "$DEVICE_IMAGE_PATH")
+CVD_HOST_PACKAGE_PATH="{cvd_host_package_path}"
+
+PATH_ADDITIONS="{path_additions}"
+TEST_EXECUTABLE="$1"
+shift
+
+LOCAL_TOOL="$(dirname "$CVD_HOST_PACKAGE_PATH")"
+
+user="$(whoami)"
+
+su - << EOF
+export PATH="${LOCAL_TOOL}:${PATH_ADDITIONS}:${PATH}"
+/usr/sbin/service rsyslog restart
+/etc/init.d/cuttlefish-common start
+/usr/sbin/usermod -aG kvm "${USER}"
+
+pushd "${LOCAL_TOOL}"
+tar xvf "${CVD_HOST_PACKAGE_PATH}"
+popd
+
+pushd "${DEVICE_IMAGE_DIR}"
+unzip -o "${DEVICE_IMAGE_PATH}"
+popd
+
+HOME="${LOCAL_TOOL}" "${LOCAL_TOOL}"/bin/launch_cvd \
+  -daemon \
+  -config=phone \
+  -system_image_dir "${DEVICE_IMAGE_DIR}" \
+  -undefok=report_anonymous_usage_stats,config \
+  -report_anonymous_usage_stats=y \
+  -instance_dir=/tmp/cvd \
+  -guest_enforce_security=false
+adb connect localhost:6520
+exit
+EOF
+
+"${TEST_EXECUTABLE}" "$@"
\ No newline at end of file
diff --git a/atest/bazel/resources/rules/device/cuttlefish_device.bzl b/atest/bazel/resources/rules/device/cuttlefish_device.bzl
new file mode 100644
index 0000000..2432e82
--- /dev/null
+++ b/atest/bazel/resources/rules/device/cuttlefish_device.bzl
@@ -0,0 +1,82 @@
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Rule used to generate a Cuttlefish device environment.
+
+This rule creates a device environment rule to run tests on a Cuttlefish Android
+Virtual Device. Test targets that run in this environment will start a new
+dedicated virtual device for each execution.
+
+Device properties such as the image used can be configured via an attribute.
+"""
+
+load("//bazel/rules:platform_transitions.bzl", "host_transition")
+load("//bazel/rules:device_test.bzl", "DeviceEnvironment")
+load("@device_infra//remote_device:download_cvd_artifact.bzl", "ImageProvider")
+load(
+    "//:constants.bzl",
+    "adb_label",
+)
+
+_BAZEL_WORK_DIR = "${TEST_SRCDIR}/${TEST_WORKSPACE}/"
+
+def _cuttlefish_device_impl(ctx):
+    path_additions = [_BAZEL_WORK_DIR + ctx.file._adb.dirname]
+    image_file = ctx.attr.build_files[ImageProvider].image
+    cvd_host_file = ctx.attr.build_files[ImageProvider].cvd_host_package
+    ctx.actions.expand_template(
+        template = ctx.file._create_script_template,
+        output = ctx.outputs.out,
+        is_executable = True,
+        substitutions = {
+            "{img_path}": _BAZEL_WORK_DIR + image_file.short_path,
+            "{cvd_host_package_path}": _BAZEL_WORK_DIR + cvd_host_file.short_path,
+            "{path_additions}": ":".join(path_additions),
+        },
+    )
+
+    return DeviceEnvironment(
+        runner = depset([ctx.outputs.out]),
+        data = ctx.runfiles(files = [
+            cvd_host_file,
+            ctx.outputs.out,
+            image_file,
+        ]),
+    )
+
+cuttlefish_device = rule(
+    attrs = {
+        "build_files": attr.label(
+            providers = [ImageProvider],
+            mandatory = True,
+        ),
+        "out": attr.output(mandatory = True),
+        "_create_script_template": attr.label(
+            default = "//bazel/rules/device:create_cuttlefish.sh.template",
+            allow_single_file = True,
+        ),
+        # This attribute is required to use Starlark transitions. It allows
+        # allowlisting usage of this rule. For more information, see
+        # https://docs.bazel.build/versions/master/skylark/config.html#user-defined-transitions
+        "_allowlist_function_transition": attr.label(
+            default = "@bazel_tools//tools/allowlists/function_transition_allowlist",
+        ),
+        "_adb": attr.label(
+            default = adb_label,
+            allow_single_file = True,
+            cfg = host_transition,
+        ),
+    },
+    implementation = _cuttlefish_device_impl,
+)
diff --git a/atest/bazel/resources/rules/device-provider/single_local_device.bzl b/atest/bazel/resources/rules/device/single_local_device.bzl
similarity index 88%
rename from atest/bazel/resources/rules/device-provider/single_local_device.bzl
rename to atest/bazel/resources/rules/device/single_local_device.bzl
index 1002787..552d1e6 100644
--- a/atest/bazel/resources/rules/device-provider/single_local_device.bzl
+++ b/atest/bazel/resources/rules/device/single_local_device.bzl
@@ -15,7 +15,7 @@
 local_device = rule(
     attrs = {
         "_source_script": attr.label(
-            default = "//bazel/rules/device-provider:single_local_device.sh",
+            default = "//bazel/rules/device:single_local_device.sh",
             allow_single_file = True,
         ),
         "out": attr.output(mandatory = True),
diff --git a/atest/bazel/resources/rules/device/single_local_device.sh b/atest/bazel/resources/rules/device/single_local_device.sh
new file mode 100644
index 0000000..c0083c9
--- /dev/null
+++ b/atest/bazel/resources/rules/device/single_local_device.sh
@@ -0,0 +1,3 @@
+TEST_EXECUTABLE="$1"
+shift
+"${TEST_EXECUTABLE}" "$@"
\ No newline at end of file
diff --git a/atest/bazel/resources/rules/soong_prebuilt.bzl b/atest/bazel/resources/rules/soong_prebuilt.bzl
index 749ea4b..d2cd475 100644
--- a/atest/bazel/resources/rules/soong_prebuilt.bzl
+++ b/atest/bazel/resources/rules/soong_prebuilt.bzl
@@ -20,6 +20,7 @@
 """
 
 load("//bazel/rules:platform_transitions.bzl", "device_transition")
+load("//bazel/rules:common_settings.bzl", "BuildSettingInfo")
 
 SoongPrebuiltInfo = provider(
     doc = "Info about a prebuilt Soong build module",
@@ -29,6 +30,7 @@
         # outputs.
         "transitive_runtime_outputs": "Files required in the runtime environment",
         "transitive_test_files": "Files of test modules",
+        "platform_flavor": "The platform flavor that this target will be built on",
     },
 )
 
@@ -89,6 +91,7 @@
     return [
         _make_soong_prebuilt_info(
             ctx.attr.module_name,
+            ctx.attr._platform_flavor[BuildSettingInfo].value,
             files = files,
             runtime_deps = ctx.attr.runtime_deps,
             static_deps = ctx.attr.static_deps,
@@ -118,6 +121,7 @@
             cfg = device_transition,
         ),
         "suites": attr.string_list(),
+        "_platform_flavor": attr.label(default = "//bazel/rules:platform_flavor"),
         # This attribute is required to use Starlark transitions. It allows
         # allowlisting usage of this rule. For more information, see
         # https://docs.bazel.build/versions/master/skylark/config.html#user-defined-transitions
@@ -138,6 +142,7 @@
     return [
         _make_soong_prebuilt_info(
             ctx.attr.module_name,
+            ctx.attr._platform_flavor[BuildSettingInfo].value,
             runtime_deps = ctx.attr.runtime_deps,
         ),
         DefaultInfo(
@@ -149,6 +154,7 @@
     attrs = {
         "module_name": attr.string(),
         "runtime_deps": attr.label_list(),
+        "_platform_flavor": attr.label(default = "//bazel/rules:platform_flavor"),
     },
     implementation = _soong_uninstalled_prebuilt_impl,
     doc = "A rule for targets with no runtime outputs",
@@ -156,6 +162,7 @@
 
 def _make_soong_prebuilt_info(
         module_name,
+        platform_flavor,
         files = [],
         runtime_deps = [],
         static_deps = [],
@@ -196,6 +203,7 @@
     ])
     return SoongPrebuiltInfo(
         module_name = module_name,
+        platform_flavor = platform_flavor,
         transitive_runtime_outputs = depset(files, transitive = transitive_runtime_outputs),
         transitive_test_files = depset(
             # Note that `suites` is never empty for test files. This because
diff --git a/atest/bazel/resources/rules/tradefed_test.bzl b/atest/bazel/resources/rules/tradefed_test.bzl
index cafc8a6..f38d4b3 100644
--- a/atest/bazel/resources/rules/tradefed_test.bzl
+++ b/atest/bazel/resources/rules/tradefed_test.bzl
@@ -20,6 +20,7 @@
 load("//bazel/rules:common_settings.bzl", "BuildSettingInfo")
 load(
     "//:constants.bzl",
+    "aapt2_label",
     "aapt_label",
     "adb_label",
     "atest_script_help_sh_label",
@@ -31,10 +32,19 @@
     "tradefed_test_framework_label",
     "vts_core_tradefed_harness_label",
 )
+load("//bazel/rules:device_test.bzl", "device_test")
+
+TradefedTestInfo = provider(
+    doc = "Info about a Tradefed test module",
+    fields = {
+        "module_name": "Name of the original Tradefed test module",
+    },
+)
 
 _BAZEL_WORK_DIR = "${TEST_SRCDIR}/${TEST_WORKSPACE}/"
 _PY_TOOLCHAIN = "@bazel_tools//tools/python:toolchain_type"
-_TOOLCHAINS = [_PY_TOOLCHAIN]
+_JAVA_TOOLCHAIN = "@bazel_tools//tools/jdk:runtime_toolchain_type"
+_TOOLCHAINS = [_PY_TOOLCHAIN, _JAVA_TOOLCHAIN]
 
 _TRADEFED_TEST_ATTRIBUTES = {
     "module_name": attr.string(),
@@ -182,6 +192,7 @@
 def _tradefed_device_test_impl(ctx):
     tradefed_deps = []
     tradefed_deps.extend(ctx.attr._aapt)
+    tradefed_deps.extend(ctx.attr._aapt2)
     tradefed_deps.extend(ctx.attr.tradefed_deps)
 
     test_device_deps = []
@@ -199,6 +210,7 @@
         test_host_deps = test_host_deps,
         path_additions = [
             _BAZEL_WORK_DIR + ctx.file._aapt.dirname,
+            _BAZEL_WORK_DIR + ctx.file._aapt2.dirname,
         ],
     )
 
@@ -224,6 +236,12 @@
                 cfg = host_transition,
                 aspects = [soong_prebuilt_tradefed_test_aspect],
             ),
+            "_aapt2": attr.label(
+                default = aapt2_label,
+                allow_single_file = True,
+                cfg = host_transition,
+                aspects = [soong_prebuilt_tradefed_test_aspect],
+            ),
         },
     ),
     test = True,
@@ -232,12 +250,23 @@
     doc = "A rule used to run device tests using Tradefed",
 )
 
-def tradefed_device_driven_test(test, tradefed_deps = [], suites = [], **attrs):
+def tradefed_device_driven_test(
+        name,
+        test,
+        tradefed_deps = [],
+        suites = [],
+        **attrs):
+    tradefed_test_name = "tradefed_test_%s" % name
     _tradefed_device_test(
+        name = tradefed_test_name,
         device_test = test,
         tradefed_deps = _get_tradefed_deps(suites, tradefed_deps),
         **attrs
     )
+    device_test(
+        name = name,
+        test = tradefed_test_name,
+    )
 
 def tradefed_host_driven_device_test(test, tradefed_deps = [], suites = [], **attrs):
     _tradefed_device_test(
@@ -283,8 +312,9 @@
     )
 
     py_paths, py_runfiles = _configure_python_toolchain(ctx)
-    path_additions = path_additions + py_paths
-    tradefed_runfiles = tradefed_runfiles.merge(py_runfiles)
+    java_paths, java_runfiles, java_home = _configure_java_toolchain(ctx)
+    path_additions = path_additions + java_paths + py_paths
+    tradefed_runfiles = tradefed_runfiles.merge_all([py_runfiles, java_runfiles])
 
     tradefed_test_dir = "%s_tradefed_test_dir" % ctx.label.name
     tradefed_test_files = []
@@ -314,17 +344,23 @@
             "{path_additions}": ":".join(path_additions),
             "{additional_tradefed_options}": " ".join(tradefed_options),
             "{result_reporters_config_file}": _abspath(result_reporters_config_file),
+            "{java_home}": java_home,
         },
     )
 
-    return [DefaultInfo(
-        executable = script,
-        runfiles = tradefed_runfiles.merge_all([
-            test_host_runfiles,
-            test_device_runfiles,
-            ctx.runfiles(tradefed_test_files),
-        ] + [ctx.runfiles(d.files.to_list()) for d in data]),
-    )]
+    return [
+        DefaultInfo(
+            executable = script,
+            runfiles = tradefed_runfiles.merge_all([
+                test_host_runfiles,
+                test_device_runfiles,
+                ctx.runfiles(tradefed_test_files),
+            ] + [ctx.runfiles(d.files.to_list()) for d in data]),
+        ),
+        TradefedTestInfo(
+            module_name = ctx.attr.module_name,
+        ),
+    ]
 
 def _get_tradefed_deps(suites, tradefed_deps = []):
     suite_to_deps = {
@@ -379,6 +415,12 @@
 
     ctx.actions.write(config_file, "\n".join(config_lines))
 
+def _configure_java_toolchain(ctx):
+    java_runtime = ctx.toolchains[_JAVA_TOOLCHAIN].java_runtime
+    java_home_path = _BAZEL_WORK_DIR + java_runtime.java_home
+    java_runfiles = ctx.runfiles(transitive_files = java_runtime.files)
+    return ([java_home_path + "/bin"], java_runfiles, java_home_path)
+
 def _configure_python_toolchain(ctx):
     py_toolchain_info = ctx.toolchains[_PY_TOOLCHAIN]
     py2_interpreter = py_toolchain_info.py2_runtime.interpreter
diff --git a/atest/bazel/resources/rules/tradefed_test.sh.template b/atest/bazel/resources/rules/tradefed_test.sh.template
index 397c280..8b15c93 100644
--- a/atest/bazel/resources/rules/tradefed_test.sh.template
+++ b/atest/bazel/resources/rules/tradefed_test.sh.template
@@ -10,6 +10,7 @@
 PATH_ADDITIONS="{path_additions}"
 TRADEFED_CLASSPATH="{tradefed_classpath}"
 RESULT_REPORTERS_CONFIG_FILE="{result_reporters_config_file}"
+ATEST_JAVA_HOME="{atest_java_home}"
 read -a ADDITIONAL_TRADEFED_OPTIONS <<< "{additional_tradefed_options}"
 
 # Export variables expected by the Atest launcher script.
@@ -17,13 +18,7 @@
 export TF_PATH="${TRADEFED_CLASSPATH}"
 export PATH="${PATH_ADDITIONS}:${PATH}"
 export ATEST_HELPER="${ATEST_HELPER}"
-
-# Prepend the REMOTE_JAVA_HOME environment variable to the path to ensure
-# that all Java invocations throughout the test execution flow use the same
-# version.
-if [ ! -z "${REMOTE_JAVA_HOME}" ]; then
-  export PATH="${REMOTE_JAVA_HOME}/bin:${PATH}"
-fi
+export JAVA_HOME="${ATEST_JAVA_HOME}"
 
 exit_code_file="$(mktemp /tmp/tf-exec-XXXXXXXXXX)"
 
@@ -42,7 +37,9 @@
     --bazel-exit-code-result-reporter:file=${exit_code_file} \
     --bazel-xml-result-reporter:file=${XML_OUTPUT_FILE} \
     --proto-output-file="${TEST_UNDECLARED_OUTPUTS_DIR}/proto-results" \
+    --use-delimited-api=true \
     --log-file-path="${TEST_UNDECLARED_OUTPUTS_DIR}" \
+    --compress-files=false \
     "$@"
 
 # Use the TF exit code if it terminates abnormally.
diff --git a/atest/bazel/runner/Android.bp b/atest/bazel/runner/Android.bp
index fdf5eb5..cc940da 100644
--- a/atest/bazel/runner/Android.bp
+++ b/atest/bazel/runner/Android.bp
@@ -38,10 +38,12 @@
         ],
         canonical_path_from_root: false,
     },
+    // Shade bundled dependencies (see jarjar-rules.txt) to avoid classpath conflicts
+    jarjar_rules: "jarjar-rules.txt",
 }
 
 java_genrule_host {
-    name: "bazel-test-suite",
+    name: "empty-bazel-test-suite",
     cmd: "BAZEL_SUITE_DIR=$(genDir)/android-bazel-suite && " +
         "mkdir \"$${BAZEL_SUITE_DIR}\" && " +
         "mkdir \"$${BAZEL_SUITE_DIR}\"/tools && " +
@@ -50,8 +52,8 @@
         "cp $(location :compatibility-host-util) \"$${BAZEL_SUITE_DIR}\"/tools && " +
         "cp $(location :compatibility-tradefed) \"$${BAZEL_SUITE_DIR}\"/tools && " +
         "cp $(location :bazel-test-runner) \"$${BAZEL_SUITE_DIR}\"/testcases && " +
-        "$(location soong_zip) -o $(out) -d -C $(genDir) -D \"$${BAZEL_SUITE_DIR}\"",
-    out: ["bazel-test-suite.zip"],
+        "$(location soong_zip) -o $(out) -d -C $(genDir) -D \"$${BAZEL_SUITE_DIR}\" -sha256",
+    out: ["empty-bazel-test-suite.zip"],
     srcs: [
         ":tradefed",
         ":bazel-test-runner",
@@ -62,7 +64,7 @@
         "soong_zip",
     ],
     dist: {
-        targets: ["bazel-test-suite"],
+        targets: ["empty-bazel-test-suite"],
     },
 }
 
diff --git a/atest/bazel/runner/config/config/format_module_name_to_test_target.cquery b/atest/bazel/runner/config/config/format_module_name_to_test_target.cquery
new file mode 100644
index 0000000..1cc8ec0
--- /dev/null
+++ b/atest/bazel/runner/config/config/format_module_name_to_test_target.cquery
@@ -0,0 +1,14 @@
+def format(target):
+    """Return a pair of 'module_name target_label' for the given tradefed test target, '' otherwise."""
+    p = providers(target)
+    if not p:
+        return ""
+    tradefed_test_info = p.get(
+        "//bazel/rules:tradefed_test.bzl%TradefedTestInfo")
+    if tradefed_test_info:
+        # Use space as a delimiter as Bazel labels can contain many special characters
+        # in their target names. See: https://bazel.build/concepts/labels#target-names
+        return "%s %s" % (tradefed_test_info.module_name, target.label)
+    else:
+        return ""
+    return ""
diff --git a/atest/bazel/runner/jarjar-rules.txt b/atest/bazel/runner/jarjar-rules.txt
new file mode 100644
index 0000000..de5ffab
--- /dev/null
+++ b/atest/bazel/runner/jarjar-rules.txt
@@ -0,0 +1 @@
+rule com.google.protobuf.** com.android.tradefed.internal.protobuf.@1
\ No newline at end of file
diff --git a/atest/bazel/runner/src/com/android/tradefed/testtype/bazel/BazelTest.java b/atest/bazel/runner/src/com/android/tradefed/testtype/bazel/BazelTest.java
index 99738ed..1df0abc 100644
--- a/atest/bazel/runner/src/com/android/tradefed/testtype/bazel/BazelTest.java
+++ b/atest/bazel/runner/src/com/android/tradefed/testtype/bazel/BazelTest.java
@@ -20,6 +20,8 @@
 import com.android.tradefed.config.OptionClass;
 import com.android.tradefed.device.DeviceNotAvailableException;
 import com.android.tradefed.invoker.TestInformation;
+import com.android.tradefed.invoker.tracing.CloseableTraceScope;
+import com.android.tradefed.invoker.tracing.TracePropagatingExecutorService;
 import com.android.tradefed.log.ITestLogger;
 import com.android.tradefed.log.LogUtil.CLog;
 import com.android.tradefed.result.FailureDescription;
@@ -35,47 +37,58 @@
 import com.android.tradefed.result.proto.TestRecordProto.TestRecord;
 import com.android.tradefed.testtype.IRemoteTest;
 import com.android.tradefed.util.ZipUtil;
-import com.android.tradefed.util.ZipUtil2;
 import com.android.tradefed.util.proto.TestRecordProtoUtil;
 
-import com.google.common.base.Throwables;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import com.google.common.collect.SetMultimap;
 import com.google.common.io.CharStreams;
 import com.google.common.io.MoreFiles;
+import com.google.common.io.Resources;
 import com.google.devtools.build.lib.buildeventstream.BuildEventStreamProtos;
 import com.google.protobuf.Any;
 import com.google.protobuf.InvalidProtocolBufferException;
 
 import java.io.File;
 import java.io.IOException;
-import java.io.InputStreamReader;
+import java.io.FileOutputStream;
 import java.lang.ProcessBuilder.Redirect;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
-import java.nio.file.StandardCopyOption;
 import java.time.Duration;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 import java.util.Map.Entry;
-import java.util.concurrent.Future;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executors;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
 import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
 import java.util.zip.ZipFile;
 
 /** Test runner for executing Bazel tests. */
 @OptionClass(alias = "bazel-test")
 public final class BazelTest implements IRemoteTest {
 
-    public static final String QUERY_TARGETS = "query_targets";
+    public static final String QUERY_ALL_TARGETS = "query_all_targets";
+    public static final String QUERY_MAP_MODULES_TO_TARGETS = "query_map_modules_to_targets";
     public static final String RUN_TESTS = "run_tests";
 
+    // Add method excludes to TF's global filters since Bazel doesn't support target-specific
+    // arguments. See https://github.com/bazelbuild/rules_go/issues/2784.
+    // TODO(b/274787592): Integrate with Bazel's test filtering to filter specific test cases.
+    public static final String GLOBAL_EXCLUDE_FILTER_TEMPLATE =
+            "--test_arg=--global-filters:exclude-filter=%s";
+
     private static final Duration BAZEL_QUERY_TIMEOUT = Duration.ofMinutes(5);
     private static final String TEST_NAME = BazelTest.class.getName();
     // Bazel internally calls the test output archive file "test.outputs__outputs.zip", the double
@@ -85,21 +98,29 @@
 
     private final List<Path> mTemporaryPaths = new ArrayList<>();
     private final List<Path> mLogFiles = new ArrayList<>();
+    private final Properties mProperties;
     private final ProcessStarter mProcessStarter;
     private final Path mTemporaryDirectory;
     private final ExecutorService mExecutor;
 
     private Path mRunTemporaryDirectory;
 
+    private enum FilterType {
+        MODULE,
+        TEST_CASE
+    };
+
     @Option(
             name = "bazel-test-command-timeout",
             description = "Timeout for running the Bazel test.")
     private Duration mBazelCommandTimeout = Duration.ofHours(1L);
 
     @Option(
-            name = "bazel-workspace-archive",
-            description = "Location of the Bazel workspace archive.")
-    private File mBazelWorkspaceArchive;
+            name = "bazel-test-suite-root-dir",
+            description =
+                    "Name of the environment variable set by CtsTestLauncher indicating the"
+                            + " location of the root bazel-test-suite dir.")
+    private String mSuiteRootDirEnvVar = "BAZEL_SUITE_ROOT";
 
     @Option(
             name = "bazel-startup-options",
@@ -107,36 +128,36 @@
     private final List<String> mBazelStartupOptions = new ArrayList<>();
 
     @Option(
-            name = "bazel-test-target-patterns",
-            description =
-                    "Target labels for test targets to run, default is to query workspace archive"
-                            + " for all tests and run those.")
-    private final List<String> mTestTargetPatterns = new ArrayList<>();
-
-    @Option(
             name = "bazel-test-extra-args",
             description = "List of extra arguments to be passed to Bazel")
     private final List<String> mBazelTestExtraArgs = new ArrayList<>();
 
     @Option(
-            name = "extra-tradefed-jars",
-            description = "List of jars to add to Tradefed's classpath.")
-    private final List<File> mExtraTradefedJars = new ArrayList<>();
-
-    @Option(
             name = "bazel-max-idle-timout",
             description = "Max idle timeout in seconds for bazel commands.")
     private Duration mBazelMaxIdleTimeout = Duration.ofSeconds(5L);
 
+    @Option(name = "exclude-filter", description = "Test modules to exclude when running tests.")
+    private final List<String> mExcludeTargets = new ArrayList<>();
+
+    @Option(name = "include-filter", description = "Test modules to include when running tests.")
+    private final List<String> mIncludeTargets = new ArrayList<>();
+
+    @Option(
+            name = "report-cached-test-results",
+            description = "Whether or not to report cached test results.")
+    private boolean mReportCachedTestResults = true;
+
     public BazelTest() {
-        this(new DefaultProcessStarter(), Paths.get(System.getProperty("java.io.tmpdir")));
+        this(new DefaultProcessStarter(), System.getProperties());
     }
 
     @VisibleForTesting
-    BazelTest(ProcessStarter processStarter, Path tmpDir) {
+    BazelTest(ProcessStarter processStarter, Properties properties) {
         mProcessStarter = processStarter;
-        mTemporaryDirectory = tmpDir;
-        mExecutor = Executors.newFixedThreadPool(1);
+        mExecutor = TracePropagatingExecutorService.create(Executors.newCachedThreadPool());
+        mProperties = properties;
+        mTemporaryDirectory = Paths.get(properties.getProperty("java.io.tmpdir"));
     }
 
     @Override
@@ -175,9 +196,9 @@
             List<FailureDescription> runFailures)
             throws IOException, InterruptedException {
 
-        Path workspaceDirectory = extractWorkspace();
+        Path workspaceDirectory = resolveWorkspacePath();
 
-        List<String> testTargets = listTestTargets(workspaceDirectory);
+        Collection<String> testTargets = listTestTargets(workspaceDirectory);
         if (testTargets.isEmpty()) {
             throw new AbortRunException(
                     "No targets found, aborting",
@@ -189,33 +210,15 @@
 
         Process bazelTestProcess =
                 startTests(testInfo, listener, testTargets, workspaceDirectory, bepFile);
-        Future<?> testResult;
-        try (BepFileTailer tailer = BepFileTailer.create(bepFile)) {
-            testResult =
-                    mExecutor.submit(
-                            () -> {
-                                try {
-                                    waitForProcess(
-                                            bazelTestProcess, RUN_TESTS, mBazelCommandTimeout);
-                                } catch (InterruptedException e) {
-                                    Thread.currentThread().interrupt();
-                                    throw new AbortRunException(
-                                            "Bazel Test process interrupted",
-                                            FailureStatus.TEST_FAILURE,
-                                            TestErrorIdentifier.TEST_ABORTED);
-                                } finally {
-                                    tailer.stop();
-                                }
-                            });
 
+        try (BepFileTailer tailer = BepFileTailer.create(bepFile)) {
+            bazelTestProcess.onExit().thenRun(() -> tailer.stop());
             reportTestResults(listener, testInfo, runFailures, tailer);
         }
 
-        try {
-            testResult.get();
-        } catch (ExecutionException e) {
-            Throwables.throwIfUnchecked(e.getCause());
-        }
+        // Note that if Bazel exits without writing the 'last' BEP message marker we won't get to
+        // here since the above reporting code throws.
+        waitForProcess(bazelTestProcess, RUN_TESTS);
     }
 
     void reportTestResults(
@@ -225,9 +228,20 @@
             BepFileTailer tailer)
             throws InterruptedException, IOException {
 
+        try (CloseableTraceScope ignored = new CloseableTraceScope("reportTestResults")) {
+            reportTestResultsNoTrace(listener, testInfo, runFailures, tailer);
+        }
+    }
+
+    void reportTestResultsNoTrace(
+            ITestInvocationListener listener,
+            TestInformation testInfo,
+            List<FailureDescription> runFailures,
+            BepFileTailer tailer)
+            throws InterruptedException, IOException {
+
         ProtoResultParser resultParser =
                 new ProtoResultParser(listener, testInfo.getContext(), false, "tf-test-process-");
-        resultParser.setQuiet(false);
 
         BuildEventStreamProtos.BuildEvent event;
         while ((event = tailer.nextEvent()) != null) {
@@ -235,6 +249,14 @@
                 return;
             }
 
+            if (!event.hasTestResult()) {
+                continue;
+            }
+
+            if (!mReportCachedTestResults && isTestResultCached(event.getTestResult())) {
+                continue;
+            }
+
             try {
                 reportEventsInTestOutputsArchive(event.getTestResult(), resultParser);
             } catch (IOException | InterruptedException | URISyntaxException e) {
@@ -250,48 +272,24 @@
                 TestErrorIdentifier.OUTPUT_PARSER_ERROR);
     }
 
-    private Path extractWorkspace() throws IOException {
-        Path outputDirectory = createTemporaryDirectory("atest-bazel-workspace");
-        try {
-            ZipUtil2.extractZip(mBazelWorkspaceArchive, outputDirectory.toFile());
-        } catch (IOException e) {
-            AbortRunException extractException =
-                    new AbortRunException(
-                            String.format("Archive extraction failed: %s", e.getMessage()),
-                            FailureStatus.DEPENDENCY_ISSUE,
-                            TestErrorIdentifier.TEST_ABORTED);
-            extractException.initCause(e);
-            throw extractException;
-        }
-
-        // TODO(b/233885171): Remove resolve once workspace archive is updated.
-        Path workspaceDirectory = outputDirectory.resolve("out/atest_bazel_workspace");
-
-        // TODO(b/230764993): Switch to using this flag once implemented.
-        if (!mExtraTradefedJars.isEmpty()) {
-            copyExtraTradefedJars(workspaceDirectory);
-        }
-
-        return workspaceDirectory;
+    private static boolean isTestResultCached(BuildEventStreamProtos.TestResult result) {
+        return result.getCachedLocally() || result.getExecutionInfo().getCachedRemotely();
     }
 
     private ProcessBuilder createBazelCommand(Path workspaceDirectory, String tmpDirPrefix)
             throws IOException {
 
-        Path javaTmpDir = createTemporaryDirectory(String.format("%s-java-tmp-out", tmpDirPrefix));
-        Path bazelTmpDir =
-                createTemporaryDirectory(String.format("%s-bazel-tmp-out", tmpDirPrefix));
+        Path javaTmpDir = createTemporaryDirectory("%s-java-tmp-out".formatted(tmpDirPrefix));
+        Path bazelTmpDir = createTemporaryDirectory("%s-bazel-tmp-out".formatted(tmpDirPrefix));
 
         List<String> command = new ArrayList<>();
 
         command.add(workspaceDirectory.resolve("bazel.sh").toAbsolutePath().toString());
         command.add(
-                String.format(
-                        "--host_jvm_args=-Djava.io.tmpdir=%s",
-                        javaTmpDir.toAbsolutePath().toString()));
-        command.add(
-                String.format("--output_user_root=%s", bazelTmpDir.toAbsolutePath().toString()));
-        command.add(String.format("--max_idle_secs=%d", mBazelMaxIdleTimeout.toSeconds()));
+                "--host_jvm_args=-Djava.io.tmpdir=%s"
+                        .formatted(javaTmpDir.toAbsolutePath().toString()));
+        command.add("--output_user_root=%s".formatted(bazelTmpDir.toAbsolutePath().toString()));
+        command.add("--max_idle_secs=%d".formatted(mBazelMaxIdleTimeout.toSeconds()));
 
         ProcessBuilder builder = new ProcessBuilder(command);
 
@@ -300,35 +298,154 @@
         return builder;
     }
 
-    private List<String> listTestTargets(Path workspaceDirectory)
+    private Collection<String> listTestTargets(Path workspaceDirectory)
             throws IOException, InterruptedException {
 
-        if (!mTestTargetPatterns.isEmpty()) {
-            return mTestTargetPatterns;
+        try (CloseableTraceScope ignored = new CloseableTraceScope("listTestTargets")) {
+            return listTestTargetsNoTrace(workspaceDirectory);
+        }
+    }
+
+    private Collection<String> listTestTargetsNoTrace(Path workspaceDirectory)
+            throws IOException, InterruptedException {
+
+        // We need to query all test targets first in a separate Bazel query call since 'cquery
+        // tests(...)' doesn't work in the Atest Bazel workspace.
+        List<String> allTestTargets = queryAllTestTargets(workspaceDirectory);
+        CLog.i("Found %d test targets in workspace", allTestTargets.size());
+
+        Map<String, String> moduleToTarget =
+                queryModulesToTestTargets(workspaceDirectory, allTestTargets);
+
+        Set<String> moduleExcludes = groupTargetsByType(mExcludeTargets).get(FilterType.MODULE);
+        Set<String> moduleIncludes = groupTargetsByType(mIncludeTargets).get(FilterType.MODULE);
+
+        if (!moduleIncludes.isEmpty() && !moduleExcludes.isEmpty()) {
+            throw new AbortRunException(
+                    "Invalid options: cannot set both module-level include filters and module-level"
+                            + " exclude filters.",
+                    FailureStatus.DEPENDENCY_ISSUE,
+                    TestErrorIdentifier.TEST_ABORTED);
         }
 
-        Path logFile = createLogFile(String.format("%s-log", QUERY_TARGETS));
+        if (!moduleIncludes.isEmpty()) {
+            return Maps.filterKeys(moduleToTarget, s -> moduleIncludes.contains(s)).values();
+        }
 
-        ProcessBuilder builder = createBazelCommand(workspaceDirectory, QUERY_TARGETS);
+        if (!moduleExcludes.isEmpty()) {
+            return Maps.filterKeys(moduleToTarget, s -> !moduleExcludes.contains(s)).values();
+        }
+
+        return moduleToTarget.values();
+    }
+
+    private List<String> queryAllTestTargets(Path workspaceDirectory)
+            throws IOException, InterruptedException {
+
+        Path logFile = createLogFile("%s-log".formatted(QUERY_ALL_TARGETS));
+
+        ProcessBuilder builder = createBazelCommand(workspaceDirectory, QUERY_ALL_TARGETS);
 
         builder.command().add("query");
         builder.command().add("tests(...)");
         builder.redirectError(Redirect.appendTo(logFile.toFile()));
 
-        Process process = startAndWaitForProcess(QUERY_TARGETS, builder, BAZEL_QUERY_TIMEOUT);
+        Process queryProcess = startProcess(QUERY_ALL_TARGETS, builder, BAZEL_QUERY_TIMEOUT);
+        List<String> queryLines = readProcessLines(queryProcess);
 
-        return CharStreams.readLines(new InputStreamReader(process.getInputStream()));
+        waitForProcess(queryProcess, QUERY_ALL_TARGETS);
+
+        return queryLines;
+    }
+
+    private Map<String, String> queryModulesToTestTargets(
+            Path workspaceDirectory, List<String> allTestTargets)
+            throws IOException, InterruptedException {
+
+        Path cqueryTestTargetsFile = createTemporaryFile("test_targets");
+        Files.write(cqueryTestTargetsFile, String.join("+", allTestTargets).getBytes());
+
+        Path cqueryFormatFile = createTemporaryFile("format_module_name_to_test_target");
+        try (FileOutputStream os = new FileOutputStream(cqueryFormatFile.toFile())) {
+            Resources.copy(
+                    Resources.getResource("config/format_module_name_to_test_target.cquery"), os);
+        }
+
+        Path logFile = createLogFile("%s-log".formatted(QUERY_MAP_MODULES_TO_TARGETS));
+        ProcessBuilder builder =
+                createBazelCommand(workspaceDirectory, QUERY_MAP_MODULES_TO_TARGETS);
+
+        builder.command().add("cquery");
+        builder.command().add("--query_file=%s".formatted(cqueryTestTargetsFile.toAbsolutePath()));
+        builder.command().add("--output=starlark");
+        builder.command().add("--starlark:file=%s".formatted(cqueryFormatFile.toAbsolutePath()));
+        builder.redirectError(Redirect.appendTo(logFile.toFile()));
+
+        Process process = startProcess(QUERY_MAP_MODULES_TO_TARGETS, builder, BAZEL_QUERY_TIMEOUT);
+
+        List<String> queryLines = readProcessLines(process);
+
+        waitForProcess(process, QUERY_MAP_MODULES_TO_TARGETS);
+
+        return parseModulesToTargets(queryLines);
+    }
+
+    private List<String> readProcessLines(Process process) throws IOException {
+        return CharStreams.readLines(process.inputReader());
+    }
+
+    private Map<String, String> parseModulesToTargets(Collection<String> lines) {
+        Map<String, String> moduleToTarget = new HashMap<>();
+        StringBuilder errorMessage = new StringBuilder();
+        for (String line : lines) {
+            // Query output format is: "module_name //bazel/test:target" if a test target is a
+            // TF test, "" otherwise, so only count proper targets.
+            if (line.isEmpty()) {
+                continue;
+            }
+
+            String[] splitLine = line.split(" ");
+
+            if (splitLine.length != 2) {
+                throw new AbortRunException(
+                        String.format(
+                                "Unrecognized output from %s command: %s",
+                                QUERY_MAP_MODULES_TO_TARGETS, line),
+                        FailureStatus.DEPENDENCY_ISSUE,
+                        TestErrorIdentifier.TEST_ABORTED);
+            }
+
+            String moduleName = splitLine[0];
+            String targetName = splitLine[1];
+
+            String duplicateEntry;
+            if ((duplicateEntry = moduleToTarget.get(moduleName)) != null) {
+                errorMessage.append(
+                        "Multiple test targets found for module %s: %s, %s\n"
+                                .formatted(moduleName, duplicateEntry, targetName));
+            }
+
+            moduleToTarget.put(moduleName, targetName);
+        }
+
+        if (errorMessage.length() != 0) {
+            throw new AbortRunException(
+                    errorMessage.toString(),
+                    FailureStatus.DEPENDENCY_ISSUE,
+                    TestErrorIdentifier.TEST_ABORTED);
+        }
+        return ImmutableMap.copyOf(moduleToTarget);
     }
 
     private Process startTests(
             TestInformation testInfo,
             ITestInvocationListener listener,
-            List<String> testTargets,
+            Collection<String> testTargets,
             Path workspaceDirectory,
             Path bepFile)
             throws IOException {
 
-        Path logFile = createLogFile(String.format("%s-log", RUN_TESTS));
+        Path logFile = createLogFile("%s-log".formatted(RUN_TESTS));
 
         ProcessBuilder builder = createBazelCommand(workspaceDirectory, RUN_TESTS);
 
@@ -336,59 +453,116 @@
         builder.command().add("test");
         builder.command().addAll(testTargets);
 
-        builder.command()
-                .add(String.format("--build_event_binary_file=%s", bepFile.toAbsolutePath()));
+        builder.command().add("--build_event_binary_file=%s".formatted(bepFile.toAbsolutePath()));
 
         builder.command().addAll(mBazelTestExtraArgs);
+
+        Set<String> testFilters = groupTargetsByType(mExcludeTargets).get(FilterType.TEST_CASE);
+        for (String test : testFilters) {
+            builder.command().add(GLOBAL_EXCLUDE_FILTER_TEMPLATE.formatted(test));
+        }
         builder.redirectErrorStream(true);
         builder.redirectOutput(Redirect.appendTo(logFile.toFile()));
 
-        return startProcess(RUN_TESTS, builder);
+        return startProcess(RUN_TESTS, builder, mBazelCommandTimeout);
+    }
+
+    private static SetMultimap<FilterType, String> groupTargetsByType(List<String> targets) {
+        Map<FilterType, List<String>> groupedMap =
+                targets.stream()
+                        .collect(
+                                Collectors.groupingBy(
+                                        s ->
+                                                s.contains(" ")
+                                                        ? FilterType.TEST_CASE
+                                                        : FilterType.MODULE));
+
+        SetMultimap<FilterType, String> groupedMultiMap = HashMultimap.create();
+        for (Entry<FilterType, List<String>> entry : groupedMap.entrySet()) {
+            groupedMultiMap.putAll(entry.getKey(), entry.getValue());
+        }
+
+        return groupedMultiMap;
     }
 
     private Process startAndWaitForProcess(
             String processTag, ProcessBuilder builder, Duration processTimeout)
             throws InterruptedException, IOException {
 
-        Process process = startProcess(processTag, builder);
+        Process process = startProcess(processTag, builder, processTimeout);
+        waitForProcess(process, processTag);
+        return process;
+    }
 
-        waitForProcess(process, processTag, processTimeout);
+    private Process startProcess(String processTag, ProcessBuilder builder, Duration timeout)
+            throws IOException {
+
+        CLog.i("Running command for %s: %s", processTag, new ProcessDebugString(builder));
+        String traceTag = "Process:" + processTag;
+        Process process = mProcessStarter.start(processTag, builder);
+
+        // We wait for the process in a separate thread so that we can trace its execution time.
+        // An alternative would be to start/stop tracing with explicit calls, but those would
+        // have to run on the same thread, as required by the tracing facility.
+        mExecutor.submit(
+                () -> {
+                    try (CloseableTraceScope unused = new CloseableTraceScope(traceTag)) {
+                        if (waitForProcessUninterruptibly(process, timeout)) {
+                            return;
+                        }
+
+                        CLog.e("%s command timed out and is being destroyed", processTag);
+                        process.destroy();
+
+                        // Give the process a grace period to properly shut down before forcibly
+                        // terminating it. We _could_ deduct this time from the total timeout but
+                        // it's overkill.
+                        if (!waitForProcessUninterruptibly(process, Duration.ofSeconds(5))) {
+                            CLog.w(
+                                    "%s command did not terminate normally after the grace period"
+                                            + " and is being forcibly destroyed",
+                                    processTag);
+                            process.destroyForcibly();
+                        }
+
+                        // We wait for the process since it may take some time to terminate
+                        // and would otherwise skew the trace results.
+                        waitForProcessUninterruptibly(process);
+                        CLog.i("%s command timed out and was destroyed", processTag);
+                    }
+                });
 
         return process;
     }
 
-    private Process startProcess(String processTag, ProcessBuilder builder) throws IOException {
+    private void waitForProcess(Process process, String processTag) throws InterruptedException {
 
-        CLog.i("Running command for %s: %s", processTag, new ProcessDebugString(builder));
-
-        return mProcessStarter.start(processTag, builder);
-    }
-
-    private void waitForProcess(Process process, String processTag, Duration processTimeout)
-            throws InterruptedException {
-        if (!process.waitFor(processTimeout.toMillis(), TimeUnit.MILLISECONDS)) {
-            process.destroy();
-            throw new AbortRunException(
-                    String.format("%s command timed out", processTag),
-                    FailureStatus.TIMED_OUT,
-                    TestErrorIdentifier.TEST_ABORTED);
+        if (process.waitFor() == 0) {
+            return;
         }
 
-        if (process.exitValue() != 0) {
-            throw new AbortRunException(
-                    String.format(
-                            "%s command failed. Exit code: %d", processTag, process.exitValue()),
-                    FailureStatus.DEPENDENCY_ISSUE,
-                    TestErrorIdentifier.TEST_ABORTED);
-        }
+        throw new AbortRunException(
+                String.format("%s command failed. Exit code: %d", processTag, process.exitValue()),
+                FailureStatus.DEPENDENCY_ISSUE,
+                TestErrorIdentifier.TEST_ABORTED);
     }
 
-
     private void reportEventsInTestOutputsArchive(
             BuildEventStreamProtos.TestResult result, ProtoResultParser resultParser)
             throws IOException, InvalidProtocolBufferException, InterruptedException,
                     URISyntaxException {
 
+        try (CloseableTraceScope ignored =
+                new CloseableTraceScope("reportEventsInTestOutputsArchive")) {
+            reportEventsInTestOutputsArchiveNoTrace(result, resultParser);
+        }
+    }
+
+    private void reportEventsInTestOutputsArchiveNoTrace(
+            BuildEventStreamProtos.TestResult result, ProtoResultParser resultParser)
+            throws IOException, InvalidProtocolBufferException, InterruptedException,
+                    URISyntaxException {
+
         BuildEventStreamProtos.File outputsFile =
                 result.getTestActionOutputList().stream()
                         .filter(file -> file.getName().equals(TEST_UNDECLARED_OUTPUTS_ARCHIVE_NAME))
@@ -407,15 +581,15 @@
             TestRecord record = TestRecordProtoUtil.readFromFile(protoResult);
 
             TestRecord.Builder recordBuilder = record.toBuilder();
-            recursivelyUpdateArtifactsRootPath(recordBuilder, outputFilesDir);
-            moveRootRecordArtifactsToFirstChild(recordBuilder);
+            //recursivelyUpdateArtifactsRootPath(recordBuilder, outputFilesDir);
+            //moveRootRecordArtifactsToFirstChild(recordBuilder);
             resultParser.processFinalizedProto(recordBuilder.build());
         } finally {
             MoreFiles.deleteRecursively(outputFilesDir);
         }
     }
 
-    private void recursivelyUpdateArtifactsRootPath(TestRecord.Builder recordBuilder, Path newRoot)
+    /*private void recursivelyUpdateArtifactsRootPath(TestRecord.Builder recordBuilder, Path newRoot)
             throws InvalidProtocolBufferException {
 
         Map<String, Any> updatedMap = new HashMap<>();
@@ -436,7 +610,7 @@
         for (ChildReference.Builder childBuilder : recordBuilder.getChildrenBuilderList()) {
             recursivelyUpdateArtifactsRootPath(childBuilder.getInlineTestRecordBuilder(), newRoot);
         }
-    }
+    }*/
 
     private Path findRelativeArtifactPath(Path originalPath) {
         // The log files are stored under
@@ -464,7 +638,7 @@
         return relativePath;
     }
 
-    private void moveRootRecordArtifactsToFirstChild(TestRecord.Builder recordBuilder) {
+    /*private void moveRootRecordArtifactsToFirstChild(TestRecord.Builder recordBuilder) {
         if (recordBuilder.getChildrenCount() == 0) {
             return;
         }
@@ -476,7 +650,7 @@
         }
 
         recordBuilder.clearArtifacts();
-    }
+    }*/
 
     private void reportRunFailures(
             List<FailureDescription> runFailures, ITestInvocationListener listener) {
@@ -500,6 +674,19 @@
                         .setErrorIdentifier(reportedFailure.getErrorIdentifier()));
     }
 
+    private Path resolveWorkspacePath() {
+        String suiteRootPath = mProperties.getProperty(mSuiteRootDirEnvVar);
+        if (suiteRootPath == null || suiteRootPath.isEmpty()) {
+            throw new AbortRunException(
+                    "Bazel Test Suite root directory not set, aborting",
+                    FailureStatus.DEPENDENCY_ISSUE,
+                    TestErrorIdentifier.TEST_ABORTED);
+        }
+
+        // TODO(b/233885171): Remove resolve once workspace archive is updated.
+        return Paths.get(suiteRootPath).resolve("android-bazel-suite/out/atest_bazel_workspace");
+    }
+
     private void addTestLogs(ITestLogger logger) {
         for (Path logFile : mLogFiles) {
             try (FileInputStreamSource source = new FileInputStreamSource(logFile.toFile(), true)) {
@@ -516,17 +703,6 @@
         }
     }
 
-    private void copyExtraTradefedJars(Path workspaceDirectory) throws IOException {
-        for (File jar : mExtraTradefedJars) {
-            Files.copy(
-                    jar.toPath(),
-                    workspaceDirectory
-                            .resolve("tools/tradefederation/core/tradefed/host/framework")
-                            .resolve(jar.getName()),
-                    StandardCopyOption.REPLACE_EXISTING);
-        }
-    }
-
     interface ProcessStarter {
         Process start(String processTag, ProcessBuilder builder) throws IOException;
     }
@@ -566,6 +742,45 @@
                 .setFailureStatus(FailureStatus.INFRA_FAILURE);
     }
 
+    private static boolean waitForProcessUninterruptibly(Process process, Duration timeout) {
+        long remainingNanos = timeout.toNanos();
+        long end = System.nanoTime() + remainingNanos;
+        boolean interrupted = false;
+
+        try {
+            while (true) {
+                try {
+                    return process.waitFor(remainingNanos, TimeUnit.NANOSECONDS);
+                } catch (InterruptedException e) {
+                    interrupted = true;
+                    remainingNanos = end - System.nanoTime();
+                }
+            }
+        } finally {
+            if (interrupted) {
+                Thread.currentThread().interrupt();
+            }
+        }
+    }
+
+    private static int waitForProcessUninterruptibly(Process process) {
+        boolean interrupted = false;
+
+        try {
+            while (true) {
+                try {
+                    return process.waitFor();
+                } catch (InterruptedException e) {
+                    interrupted = true;
+                }
+            }
+        } finally {
+            if (interrupted) {
+                Thread.currentThread().interrupt();
+            }
+        }
+    }
+
     private static final class AbortRunException extends RuntimeException {
         private final FailureDescription mFailureDescription;
 
diff --git a/atest/bazel/runner/src/com/android/tradefed/testtype/bazel/BepFileTailer.java b/atest/bazel/runner/src/com/android/tradefed/testtype/bazel/BepFileTailer.java
index ca87a2c..96c12fc 100644
--- a/atest/bazel/runner/src/com/android/tradefed/testtype/bazel/BepFileTailer.java
+++ b/atest/bazel/runner/src/com/android/tradefed/testtype/bazel/BepFileTailer.java
@@ -42,22 +42,26 @@
 
     public BuildEvent nextEvent() throws InterruptedException, IOException {
         while (true) {
+            boolean stop = mStop;
+
+            // Mark the current position in the input stream.
             mIn.mark(Integer.MAX_VALUE);
 
             try {
                 BuildEvent event = BuildEvent.parseDelimitedFrom(mIn);
 
-                // When event is null we hit EOF, wait for an event to be written and try again.
+                // A null event means we hit EOF; wait for an event to be written and try again.
                 if (event != null) {
                     return event;
                 }
-                if (mStop) {
+                if (stop) {
                     return null;
                 }
             } catch (InvalidProtocolBufferException e) {
-                if (mStop) {
+                if (stop) {
                     throw e;
                 }
+                // Partial read. Restore the old position in the input stream.
                 mIn.reset();
             }
             Thread.sleep(BEP_PARSE_SLEEP_TIME.toMillis());
diff --git a/atest/bazel/runner/tests/src/com/android/tradefed/testtype/bazel/BazelTestTest.java b/atest/bazel/runner/tests/src/com/android/tradefed/testtype/bazel/BazelTestTest.java
index d175b46..b30da9a 100644
--- a/atest/bazel/runner/tests/src/com/android/tradefed/testtype/bazel/BazelTestTest.java
+++ b/atest/bazel/runner/tests/src/com/android/tradefed/testtype/bazel/BazelTestTest.java
@@ -25,6 +25,7 @@
 import static org.mockito.Mockito.contains;
 import static org.mockito.Mockito.inOrder;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
@@ -33,29 +34,28 @@
 import com.android.tradefed.invoker.InvocationContext;
 import com.android.tradefed.invoker.TestInformation;
 import com.android.tradefed.log.LogUtil.CLog;
-import com.android.tradefed.result.error.ErrorIdentifier;
-import com.android.tradefed.result.error.TestErrorIdentifier;
 import com.android.tradefed.result.FailureDescription;
 import com.android.tradefed.result.ILogSaverListener;
 import com.android.tradefed.result.LogDataType;
 import com.android.tradefed.result.LogFile;
+import com.android.tradefed.result.TestDescription;
+import com.android.tradefed.result.error.ErrorIdentifier;
+import com.android.tradefed.result.error.TestErrorIdentifier;
 import com.android.tradefed.result.proto.FileProtoResultReporter;
 import com.android.tradefed.result.proto.TestRecordProto.FailureStatus;
-import com.android.tradefed.result.TestDescription;
 import com.android.tradefed.util.ZipUtil;
 
 import com.google.common.base.Splitter;
-import com.google.common.collect.ImmutableMap;
 import com.google.common.io.MoreFiles;
 import com.google.common.util.concurrent.Uninterruptibles;
 import com.google.devtools.build.lib.buildeventstream.BuildEventStreamProtos;
 
 import org.junit.Before;
 import org.junit.Rule;
+import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 import org.junit.runner.RunWith;
 import org.junit.runners.JUnit4;
-import org.junit.Test;
 import org.mockito.ArgumentMatcher;
 import org.mockito.InOrder;
 
@@ -72,13 +72,14 @@
 import java.time.Duration;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.TimeUnit;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.function.Function;
+import java.util.Properties;
 import java.util.Random;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
@@ -88,12 +89,10 @@
     private ILogSaverListener mMockListener;
     private TestInformation mTestInfo;
     private Path mBazelTempPath;
-    private Map<String, String> mEnvironment;
-    private Path mWorkspaceArchive;
 
     private static final String BAZEL_TEST_TARGETS_OPTION = "bazel-test-target-patterns";
-    private static final String BAZEL_WORKSPACE_ARCHIVE_OPTION = "bazel-workspace-archive";
     private static final String BEP_FILE_OPTION_NAME = "--build_event_binary_file";
+    private static final String REPORT_CACHED_TEST_RESULTS_OPTION = "report-cached-test-results";
     private static final long RANDOM_SEED = 1234567890L;
 
     @Rule public final TemporaryFolder tempDir = new TemporaryFolder();
@@ -106,11 +105,6 @@
         mTestInfo = TestInformation.newBuilder().setInvocationContext(context).build();
         mBazelTempPath =
                 Files.createDirectory(tempDir.getRoot().toPath().resolve("bazel_temp_dir"));
-        mEnvironment = ImmutableMap.of("PATH", "/phony/path");
-        Path bazelArchive =
-                Files.createDirectory(tempDir.getRoot().toPath().resolve("atest_bazel_workspace"));
-        mWorkspaceArchive = tempDir.getRoot().toPath().resolve("atest_bazel_workspace.zip");
-        ZipUtil.createZip(bazelArchive.toFile(), mWorkspaceArchive.toFile());
     }
 
     @Test
@@ -139,12 +133,20 @@
         bazelTest.run(mTestInfo, mMockListener);
 
         verify(mMockListener)
-                .testLog(contains(String.format("%s-log", BazelTest.QUERY_TARGETS)), any(), any());
+                .testLog(
+                        contains(String.format("%s-log", BazelTest.QUERY_ALL_TARGETS)),
+                        any(),
+                        any());
+        verify(mMockListener)
+                .testLog(
+                        contains(String.format("%s-log", BazelTest.QUERY_MAP_MODULES_TO_TARGETS)),
+                        any(),
+                        any());
         verify(mMockListener)
                 .testLog(contains(String.format("%s-log", BazelTest.RUN_TESTS)), any(), any());
     }
 
-    @Test
+    /*@Test
     public void runSucceeds_testLogsReportedUnderModule() throws Exception {
         BazelTest bazelTest = newBazelTest();
 
@@ -157,7 +159,7 @@
         inOrder.verify(mMockListener)
                 .testLog(eq("tf-test-process-invocation-log"), eq(LogDataType.XML), any());
         inOrder.verify(mMockListener).testModuleEnded();
-    }
+    }*/
 
     @Test
     public void malformedProtoResults_runFails() throws Exception {
@@ -228,10 +230,14 @@
 
     @Test
     public void targetsNotSet_testsAllTargets() throws Exception {
-        String targetName = "customTestTarget";
         List<String> command = new ArrayList<>();
         FakeProcessStarter processStarter = newFakeProcessStarter();
-        processStarter.put(BazelTest.QUERY_TARGETS, newPassingProcessWithStdout(targetName));
+        processStarter.put(
+                BazelTest.QUERY_ALL_TARGETS,
+                newPassingProcessWithStdout("//bazel/target:default_target_host"));
+        processStarter.put(
+                BazelTest.QUERY_MAP_MODULES_TO_TARGETS,
+                newPassingProcessWithStdout("default_target //bazel/target:default_target_host"));
         processStarter.put(
                 BazelTest.RUN_TESTS,
                 builder -> {
@@ -242,16 +248,36 @@
 
         bazelTest.run(mTestInfo, mMockListener);
 
-        assertThat(command).contains(targetName);
+        assertThat(command).contains("//bazel/target:default_target_host");
     }
 
     @Test
-    public void archiveExtractionFails_runAborted() throws Exception {
-        BazelTest bazelTest = new BazelTest(newFakeProcessStarter(), mBazelTempPath);
-        OptionSetter setter = new OptionSetter(bazelTest);
-        setter.setOptionValue(
-                BAZEL_WORKSPACE_ARCHIVE_OPTION,
-                new File("non_existent_workspace.zip").getAbsolutePath());
+    public void archiveRootPathNotSet_runAborted() throws Exception {
+        Properties properties = bazelTestProperties();
+        properties.remove("BAZEL_SUITE_ROOT");
+        BazelTest bazelTest = newBazelTestWithProperties(properties);
+
+        bazelTest.run(mTestInfo, mMockListener);
+
+        verify(mMockListener).testRunFailed(hasFailureStatus(FailureStatus.DEPENDENCY_ISSUE));
+    }
+
+    @Test
+    public void archiveRootPathEmptyString_runAborted() throws Exception {
+        Properties properties = bazelTestProperties();
+        properties.put("BAZEL_SUITE_ROOT", "");
+        BazelTest bazelTest = newBazelTestWithProperties(properties);
+
+        bazelTest.run(mTestInfo, mMockListener);
+
+        verify(mMockListener).testRunFailed(hasFailureStatus(FailureStatus.DEPENDENCY_ISSUE));
+    }
+
+    @Test
+    public void bazelQueryAllTargetsFails_runAborted() throws Exception {
+        FakeProcessStarter processStarter = newFakeProcessStarter();
+        processStarter.put(BazelTest.QUERY_ALL_TARGETS, newFailingProcess());
+        BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
 
         bazelTest.run(mTestInfo, mMockListener);
 
@@ -259,9 +285,9 @@
     }
 
     @Test
-    public void bazelQueryFails_runAborted() throws Exception {
+    public void bazelQueryMapModuleToTargetsFails_runAborted() throws Exception {
         FakeProcessStarter processStarter = newFakeProcessStarter();
-        processStarter.put(BazelTest.QUERY_TARGETS, newFailingProcess());
+        processStarter.put(BazelTest.QUERY_MAP_MODULES_TO_TARGETS, newFailingProcess());
         BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
 
         bazelTest.run(mTestInfo, mMockListener);
@@ -286,12 +312,72 @@
 
         bazelTest.run(mTestInfo, mMockListener);
 
-        verify(mMockListener).testRunFailed(hasFailureStatus(FailureStatus.TIMED_OUT));
+        verify(mMockListener).testRunFailed(hasFailureStatus(FailureStatus.DEPENDENCY_ISSUE));
     }
 
     @Test
-    public void customTargetOption_testsCustomTargets() throws Exception {
-        String targetName = "//my/custom:test";
+    public void includeTestModule_runsOnlyThatModule() throws Exception {
+        String moduleInclude = "custom_module";
+        List<String> command = new ArrayList<>();
+        FakeProcessStarter processStarter = newFakeProcessStarter();
+        processStarter.put(
+                BazelTest.QUERY_ALL_TARGETS,
+                newPassingProcessWithStdout(
+                        "//bazel/target:default_target_host\n//bazel/target:custom_module_host"));
+        processStarter.put(
+                BazelTest.QUERY_MAP_MODULES_TO_TARGETS,
+                newPassingProcessWithStdout(
+                        "default_target //bazel/target:default_target_host\n"
+                                + "custom_module //bazel/target:custom_module_host"));
+        processStarter.put(
+                BazelTest.RUN_TESTS,
+                builder -> {
+                    command.addAll(builder.command());
+                    return new FakeBazelTestProcess(builder, mBazelTempPath);
+                });
+        BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+        OptionSetter setter = new OptionSetter(bazelTest);
+        setter.setOptionValue("include-filter", moduleInclude);
+
+        bazelTest.run(mTestInfo, mMockListener);
+
+        assertThat(command).contains("//bazel/target:custom_module_host");
+        assertThat(command).doesNotContain("//bazel/target:default_target_host");
+    }
+
+    @Test
+    public void excludeTestModule_doesNotRunTestModule() throws Exception {
+        String moduleExclude = "custom_module";
+        List<String> command = new ArrayList<>();
+        FakeProcessStarter processStarter = newFakeProcessStarter();
+        processStarter.put(
+                BazelTest.QUERY_ALL_TARGETS,
+                newPassingProcessWithStdout(
+                        "//bazel/target:default_target_host\n//bazel/target:custom_module_host"));
+        processStarter.put(
+                BazelTest.QUERY_MAP_MODULES_TO_TARGETS,
+                newPassingProcessWithStdout(
+                        "default_target //bazel/target:default_target_host\n"
+                                + "custom_module //bazel/target:custom_module_host"));
+        processStarter.put(
+                BazelTest.RUN_TESTS,
+                builder -> {
+                    command.addAll(builder.command());
+                    return new FakeBazelTestProcess(builder, mBazelTempPath);
+                });
+        BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+        OptionSetter setter = new OptionSetter(bazelTest);
+        setter.setOptionValue("exclude-filter", moduleExclude);
+
+        bazelTest.run(mTestInfo, mMockListener);
+
+        assertThat(command).doesNotContain("//bazel/target:custom_module_host");
+        assertThat(command).contains("//bazel/target:default_target_host");
+    }
+
+    @Test
+    public void excludeTestFunction_generatesExcludeFilter() throws Exception {
+        String functionExclude = "custom_module custom_module.customClass#customFunction";
         List<String> command = new ArrayList<>();
         FakeProcessStarter processStarter = newFakeProcessStarter();
         processStarter.put(
@@ -302,17 +388,62 @@
                 });
         BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
         OptionSetter setter = new OptionSetter(bazelTest);
-        setter.setOptionValue(BAZEL_TEST_TARGETS_OPTION, targetName);
+        setter.setOptionValue("exclude-filter", functionExclude);
 
         bazelTest.run(mTestInfo, mMockListener);
 
-        assertThat(command).contains(targetName);
+        assertThat(command)
+                .contains(
+                        "--test_arg=--global-filters:exclude-filter=custom_module"
+                                + " custom_module.customClass#customFunction");
     }
 
     @Test
-    public void queryStdoutEmpty_abortsRun() throws Exception {
+    public void excludeAndIncludeFiltersSet_testRunAborted() throws Exception {
+        String moduleExclude = "custom_module";
+        BazelTest bazelTest = newBazelTest();
+        OptionSetter setter = new OptionSetter(bazelTest);
+        setter.setOptionValue("exclude-filter", moduleExclude);
+        setter.setOptionValue("include-filter", moduleExclude);
+
+        bazelTest.run(mTestInfo, mMockListener);
+
+        verify(mMockListener).testRunFailed(hasErrorIdentifier(TestErrorIdentifier.TEST_ABORTED));
+    }
+
+    @Test
+    public void queryMapModulesToTargetsEmpty_abortsRun() throws Exception {
         FakeProcessStarter processStarter = newFakeProcessStarter();
-        processStarter.put(BazelTest.QUERY_TARGETS, newPassingProcessWithStdout(""));
+        processStarter.put(BazelTest.QUERY_MAP_MODULES_TO_TARGETS, newPassingProcessWithStdout(""));
+        BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+
+        bazelTest.run(mTestInfo, mMockListener);
+
+        verify(mMockListener).testRunFailed(hasErrorIdentifier(TestErrorIdentifier.TEST_ABORTED));
+    }
+
+    @Test
+    public void multipleTargetsMappedToSingleModule_abortsRun() throws Exception {
+        FakeProcessStarter processStarter = newFakeProcessStarter();
+        processStarter.put(
+                BazelTest.QUERY_MAP_MODULES_TO_TARGETS,
+                newPassingProcessWithStdout(
+                        "default_target //bazel/target:default_target_1\n"
+                                + "default_target //bazel/target:default_target_2"));
+        BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+
+        bazelTest.run(mTestInfo, mMockListener);
+
+        verify(mMockListener).testRunFailed(hasErrorIdentifier(TestErrorIdentifier.TEST_ABORTED));
+    }
+
+    @Test
+    public void queryMapModulesToTargetsBadOutput_abortsRun() throws Exception {
+        FakeProcessStarter processStarter = newFakeProcessStarter();
+        processStarter.put(
+                BazelTest.QUERY_MAP_MODULES_TO_TARGETS,
+                newPassingProcessWithStdout(
+                        "default_target //bazel/target:default_target incorrect_field"));
         BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
 
         bazelTest.run(mTestInfo, mMockListener);
@@ -368,6 +499,30 @@
         verify(mMockListener, times(testCount)).testStarted(any(), anyLong());
     }
 
+    @Test
+    public void reportCachedTestResultsDisabled_cachedTestResultNotReported() throws Exception {
+        FakeProcessStarter processStarter = newFakeProcessStarter();
+        processStarter.put(
+                BazelTest.RUN_TESTS,
+                builder -> {
+                    return new FakeBazelTestProcess(builder, mBazelTempPath) {
+                        @Override
+                        public void writeSingleTestResultEvent(File outputsZipFile, Path bepFile)
+                                throws IOException {
+
+                            writeSingleTestResultEvent(outputsZipFile, bepFile, /* cached */ true);
+                        }
+                    };
+                });
+        BazelTest bazelTest = newBazelTestWithProcessStarter(processStarter);
+        OptionSetter setter = new OptionSetter(bazelTest);
+        setter.setOptionValue(REPORT_CACHED_TEST_RESULTS_OPTION, "false");
+
+        bazelTest.run(mTestInfo, mMockListener);
+
+        verify(mMockListener, never()).testStarted(any(), anyLong());
+    }
+
     private static byte[] logFileContents() {
         // Seed Random to always get the same sequence of values.
         Random rand = new Random(RANDOM_SEED);
@@ -408,20 +563,28 @@
         };
     }
 
+    private BazelTest newBazelTestWithProperties(Properties properties) throws Exception {
+        return new BazelTest(newFakeProcessStarter(), properties);
+    }
+
     private BazelTest newBazelTestWithProcessStarter(BazelTest.ProcessStarter starter)
             throws Exception {
 
-        BazelTest bazelTest = new BazelTest(starter, mBazelTempPath);
-        OptionSetter setter = new OptionSetter(bazelTest);
-        setter.setOptionValue(
-                BAZEL_WORKSPACE_ARCHIVE_OPTION, mWorkspaceArchive.toAbsolutePath().toString());
-        return bazelTest;
+        return new BazelTest(starter, bazelTestProperties());
     }
 
     private BazelTest newBazelTest() throws Exception {
         return newBazelTestWithProcessStarter(newFakeProcessStarter());
     }
 
+    private Properties bazelTestProperties() {
+        Properties properties = new Properties();
+        properties.put("BAZEL_SUITE_ROOT", "/phony/path/to/bazel/test/suite");
+        properties.put("java.io.tmpdir", mBazelTempPath.toAbsolutePath().toString());
+
+        return properties;
+    }
+
     private static FailureDescription hasErrorIdentifier(ErrorIdentifier error) {
         return argThat(
                 new ArgumentMatcher<FailureDescription>() {
@@ -429,6 +592,11 @@
                     public boolean matches(FailureDescription right) {
                         return right.getErrorIdentifier().equals(error);
                     }
+
+                    @Override
+                    public String toString() {
+                        return "hasErrorIdentifier(" + error.toString() + ")";
+                    }
                 });
     }
 
@@ -439,12 +607,21 @@
                     public boolean matches(FailureDescription right) {
                         return right.getFailureStatus().equals(status);
                     }
+
+                    @Override
+                    public String toString() {
+                        return "hasFailureStatus(" + status.toString() + ")";
+                    }
                 });
     }
 
     private FakeProcessStarter newFakeProcessStarter() throws IOException {
+        String targetName = "//bazel/target:default_target_host";
         FakeProcessStarter processStarter = new FakeProcessStarter();
-        processStarter.put(BazelTest.QUERY_TARGETS, newPassingProcessWithStdout("default_target"));
+        processStarter.put(BazelTest.QUERY_ALL_TARGETS, newPassingProcessWithStdout(targetName));
+        processStarter.put(
+                BazelTest.QUERY_MAP_MODULES_TO_TARGETS,
+                newPassingProcessWithStdout("default_target " + targetName));
         processStarter.put(
                 BazelTest.RUN_TESTS,
                 builder -> {
@@ -485,14 +662,16 @@
 
     private abstract static class FakeProcess extends Process {
 
+        private volatile boolean destroyed;
+
         @Override
         public void destroy() {
-            return;
+            destroyed = true;
         }
 
         @Override
         public int exitValue() {
-            return 0;
+            return destroyed ? 42 : 0;
         }
 
         @Override
@@ -512,7 +691,7 @@
 
         @Override
         public int waitFor() {
-            return 0;
+            return exitValue();
         }
 
         public void start() throws IOException {
@@ -614,6 +793,11 @@
         }
 
         void writeSingleTestResultEvent(File outputsZipFile, Path bepFile) throws IOException {
+            writeSingleTestResultEvent(outputsZipFile, bepFile, false);
+        }
+
+        void writeSingleTestResultEvent(File outputsZipFile, Path bepFile, boolean cached)
+                throws IOException {
             try (FileOutputStream bepOutputStream = new FileOutputStream(bepFile.toFile(), true)) {
                 BuildEventStreamProtos.BuildEvent.newBuilder()
                         .setId(
@@ -629,6 +813,11 @@
                                                         .setName("test.outputs__outputs.zip")
                                                         .setUri(outputsZipFile.getAbsolutePath())
                                                         .build())
+                                        .setExecutionInfo(
+                                                BuildEventStreamProtos.TestResult.ExecutionInfo
+                                                        .newBuilder()
+                                                        .setCachedRemotely(cached)
+                                                        .build())
                                         .build())
                         .build()
                         .writeDelimitedTo(bepOutputStream);
diff --git a/atest/bazel/scripts/gen_workspace_archive.sh b/atest/bazel/scripts/gen_workspace_archive.sh
index e334302..915c85c 100755
--- a/atest/bazel/scripts/gen_workspace_archive.sh
+++ b/atest/bazel/scripts/gen_workspace_archive.sh
@@ -16,6 +16,12 @@
 
 # A script to generate an Atest Bazel workspace for execution on the Android CI.
 
+# Exit immediately on failures and disallow undefined variables.
+set -euo pipefail
+# List commands as they are executed. This helps debug the error
+# if the script exits mid-way through.
+set -x
+
 function check_env_var()
 {
   if [ ! -n "${!1}" ] ; then
@@ -36,9 +42,13 @@
 
 out=$(get_build_var PRODUCT_OUT)
 
+# ANDROID_BUILD_TOP is deprecated, so don't use it throughout the script.
+# But if someone sets it, we'll respect it.
+cd ${ANDROID_BUILD_TOP:-.}
+
 # Use the versioned Python binaries in prebuilts/ for a reproducible
 # build with minimal reliance on host tools.
-export PATH=${ANDROID_BUILD_TOP}/prebuilts/build-tools/path/linux-x86:${PATH}
+export PATH=`pwd`/prebuilts/build-tools/path/linux-x86:${PATH}
 
 export \
   ANDROID_PRODUCT_OUT=${out} \
@@ -46,39 +56,41 @@
   ANDROID_HOST_OUT=$(get_build_var HOST_OUT) \
   ANDROID_TARGET_OUT_TESTCASES=$(get_build_var TARGET_OUT_TESTCASES)
 
-if [ ! -n "$OUT_DIR" ] ; then
+if [ ! -n "${OUT_DIR:-}" ] ; then
   OUT_DIR=$(get_build_var "OUT_DIR")
 fi
 
-if [ ! -n "$DIST_DIR" ] ; then
+if [ ! -n "${DIST_DIR:-}" ] ; then
   echo "dist dir not defined, defaulting to OUT_DIR/dist."
   export DIST_DIR=${OUT_DIR}/dist
 fi
 
-# Build Atest from source to pick up the latest changes.
-${ANDROID_BUILD_TOP}/build/soong/soong_ui.bash --make-mode atest
+# Build:
+#  - Atest from source to pick up the latest changes
+#  - Bazel test suite needed by BazelTest
+#  - EXTRA_TARGETS requested on the commandline (used by git_master.gcl)
+targets="atest dist empty-bazel-test-suite ${EXTRA_TARGETS:-}"
+build/soong/soong_ui.bash --make-mode $targets
 
-# Build the Bazel test suite needed by BazelTest
-${ANDROID_BUILD_TOP}/build/soong/soong_ui.bash --make-mode dist bazel-test-suite
+# TODO(b/277656887): Fix the underlying atest issue that causes the workspace to not be
+# regenerated.
+rm -rf ${OUT_DIR}/atest_bazel_workspace
 
 # Generate the initial workspace via Atest Bazel mode.
-pushd ${ANDROID_BUILD_TOP}
 ${OUT_DIR}/host/linux-x86/bin/atest-dev \
   --bazel-mode \
   --host-unit-test-only \
   --host \
   -c \
   -b # Builds dependencies without running tests.
-popd
 
-pushd ${OUT_DIR}/atest_bazel_workspace
 
 # TODO(b/201242197): Create a stub workspace for the remote_coverage_tools
 # package so that Bazel does not attempt to fetch resources online which is not
 # allowed on build bots.
-mkdir remote_coverage_tools
-touch remote_coverage_tools/WORKSPACE
-cat << EOF > remote_coverage_tools/BUILD
+mkdir -p ${OUT_DIR}/atest_bazel_workspace/remote_coverage_tools
+touch ${OUT_DIR}/atest_bazel_workspace/remote_coverage_tools/WORKSPACE
+cat << EOF > ${OUT_DIR}/atest_bazel_workspace/remote_coverage_tools/BUILD
 package(default_visibility = ["//visibility:public"])
 
 filegroup(
@@ -87,11 +99,10 @@
 )
 EOF
 
-popd
-
-# Create the workspace archive which will be downloaded by the Tradefed hosts.
-${ANDROID_BUILD_TOP}/prebuilts/build-tools/linux-x86/bin/soong_zip \
+# Create the workspace archive.
+prebuilts/build-tools/linux-x86/bin/soong_zip \
   -o ${DIST_DIR}/atest_bazel_workspace.zip \
+  -P android-bazel-suite/ \
   -D out/atest_bazel_workspace/ \
   -f "out/atest_bazel_workspace/**/.*" \
   -symlinks=false  `# Follow symlinks and store the referenced files.` \
@@ -99,3 +110,14 @@
   `# Avoid failing for dangling symlinks since these are expected` \
   `# because we don't build all targets.` \
   -ignore_missing_files
+
+# Merge the workspace into bazel-test-suite.
+prebuilts/build-tools/linux-x86/bin/merge_zips \
+  ${DIST_DIR}/bazel-test-suite.zip \
+  ${DIST_DIR}/empty-bazel-test-suite.zip \
+  ${DIST_DIR}/atest_bazel_workspace.zip
+
+# Remove the old archives we no longer need
+rm -f \
+  ${DIST_DIR}/atest_bazel_workspace.zip \
+  ${DIST_DIR}/empty-bazel-test-suite.zip
diff --git a/atest/bazel_mode.py b/atest/bazel_mode.py
index 30f5872..58e183f 100644
--- a/atest/bazel_mode.py
+++ b/atest/bazel_mode.py
@@ -36,6 +36,7 @@
 import shlex
 import shutil
 import subprocess
+import tempfile
 import time
 import warnings
 
@@ -47,12 +48,15 @@
 from typing import Any, Callable, Dict, IO, List, Set
 from xml.etree import ElementTree as ET
 
+from google.protobuf.message import DecodeError
+
 from atest import atest_utils
 from atest import constants
 from atest import module_info
 
 from atest.atest_enum import DetectType, ExitCode
 from atest.metrics import metrics
+from atest.proto import file_md5_pb2
 from atest.test_finders import test_finder_base
 from atest.test_finders import test_info
 from atest.test_runners import test_runner_base as trb
@@ -82,6 +86,12 @@
         lambda arg_value: [item for sublist in arg_value for item in sublist]
 })
 
+# Maps Bazel configuration names to Soong variant names.
+_CONFIG_TO_VARIANT = {
+    'host': 'host',
+    'device': 'target',
+}
+
 
 class AbortRunException(Exception):
     pass
@@ -147,15 +157,25 @@
 
     src_root_path = Path(os.environ.get(constants.ANDROID_BUILD_TOP))
     workspace_path = get_bazel_workspace_dir()
+    resource_manager = ResourceManager(
+            src_root_path=src_root_path,
+            resource_root_path=_get_resource_root(),
+            product_out_path=Path(
+                os.environ.get(constants.ANDROID_PRODUCT_OUT)),
+            md5_checksum_file_path=workspace_path.joinpath(
+                'workspace_md5_checksum'),
+        )
+    jdk_path = _read_robolectric_jdk_path(
+        resource_manager.get_src_file_path(ROBOLECTRIC_CONFIG, True))
+
     workspace_generator = WorkspaceGenerator(
-        src_root_path,
-        workspace_path,
-        Path(os.environ.get(constants.ANDROID_PRODUCT_OUT)),
-        Path(os.environ.get(constants.ANDROID_HOST_OUT)),
-        Path(atest_utils.get_build_out_dir()),
-        mod_info,
-        _read_robolectric_jdk_path(src_root_path.joinpath(ROBOLECTRIC_CONFIG)),
-        enabled_features,
+        resource_manager=resource_manager,
+        workspace_out_path=workspace_path,
+        host_out_path=Path(os.environ.get(constants.ANDROID_HOST_OUT)),
+        build_out_dir=Path(atest_utils.get_build_out_dir()),
+        mod_info=mod_info,
+        jdk_path=jdk_path,
+        enabled_features=enabled_features,
     )
     workspace_generator.generate()
 
@@ -165,37 +185,179 @@
                          atest_utils.get_build_target())
 
 
+class ResourceManager:
+    """Class for managing files required to generate a Bazel Workspace."""
+
+    def __init__(self,
+                 src_root_path: Path,
+                 resource_root_path: Path,
+                 product_out_path: Path,
+                 md5_checksum_file_path: Path):
+        self._root_type_to_path = {
+            file_md5_pb2.RootType.SRC_ROOT: src_root_path,
+            file_md5_pb2.RootType.RESOURCE_ROOT: resource_root_path,
+            file_md5_pb2.RootType.ABS_PATH: Path(),
+            file_md5_pb2.RootType.PRODUCT_OUT: product_out_path,
+        }
+        self._md5_checksum_file = md5_checksum_file_path
+        self._file_checksum_list = file_md5_pb2.FileChecksumList()
+
+    def get_src_file_path(
+        self,
+        rel_path: Path=None,
+        affects_workspace: bool=False
+    ) -> Path:
+        """Get the abs file path from the relative path of source_root.
+
+        Args:
+            rel_path: A relative path of the source_root.
+            affects_workspace: A boolean of whether the file affects the
+            workspace.
+
+        Returns:
+            A abs path of the file.
+        """
+        return self._get_file_path(
+            file_md5_pb2.RootType.SRC_ROOT, rel_path, affects_workspace)
+
+    def get_resource_file_path(
+        self,
+        rel_path: Path=None,
+        affects_workspace: bool=False,
+    ) -> Path:
+        """Get the abs file path from the relative path of resource_root.
+
+        Args:
+            rel_path: A relative path of the resource_root.
+            affects_workspace: A boolean of whether the file affects the
+            workspace.
+
+        Returns:
+            An abs path of the file.
+        """
+        return self._get_file_path(
+            file_md5_pb2.RootType.RESOURCE_ROOT, rel_path, affects_workspace)
+
+    def get_product_out_file_path(
+        self,
+        rel_path: Path=None,
+        affects_workspace: bool=False
+    ) -> Path:
+        """Get the abs file path from the relative path of product out.
+
+        Args:
+            rel_path: A relative path to the product out.
+            affects_workspace: A boolean of whether the file affects the
+            workspace.
+
+        Returns:
+            An abs path of the file.
+        """
+        return self._get_file_path(
+            file_md5_pb2.RootType.PRODUCT_OUT, rel_path, affects_workspace)
+
+    def _get_file_path(
+        self,
+        root_type: file_md5_pb2.RootType,
+        rel_path: Path,
+        affects_workspace: bool=True
+    ) -> Path:
+        abs_path = self._root_type_to_path[root_type].joinpath(
+            rel_path or Path())
+
+        if not affects_workspace:
+            return abs_path
+
+        if abs_path.is_dir():
+            for file in abs_path.glob('**/*'):
+                self._register_file(root_type, file)
+        else:
+            self._register_file(root_type, abs_path)
+        return abs_path
+
+    def _register_file(
+        self,
+        root_type: file_md5_pb2.RootType,
+        abs_path: Path
+    ):
+        if not abs_path.is_file():
+            logging.debug(' ignore %s: not a file.', abs_path)
+            return
+
+        rel_path = abs_path
+        if abs_path.is_relative_to(self._root_type_to_path[root_type]):
+            rel_path = abs_path.relative_to(self._root_type_to_path[root_type])
+
+        self._file_checksum_list.file_checksums.append(
+            file_md5_pb2.FileChecksum(
+                root_type=root_type,
+                rel_path=str(rel_path),
+                md5sum=atest_utils.md5sum(abs_path)
+            )
+        )
+
+    def register_file_with_abs_path(self, abs_path: Path):
+        """Register a file which affects the workspace.
+
+        Args:
+            abs_path: An abs path of the file.
+        """
+        self._register_file(file_md5_pb2.RootType.ABS_PATH, abs_path)
+
+    def save_affects_files_md5(self):
+        with open(self._md5_checksum_file, 'wb') as f:
+            f.write(self._file_checksum_list.SerializeToString())
+
+    def check_affects_files_md5(self):
+        """Check all affected files are consistent with the actual MD5."""
+        if not self._md5_checksum_file.is_file():
+            return False
+
+        with open(self._md5_checksum_file, 'rb') as f:
+            file_md5_list = file_md5_pb2.FileChecksumList()
+
+            try:
+                file_md5_list.ParseFromString(f.read())
+            except DecodeError:
+                logging.warning(
+                    'Failed to parse the workspace md5 checksum file.')
+                return False
+
+            for file_md5 in file_md5_list.file_checksums:
+                abs_path = (Path(self._root_type_to_path[file_md5.root_type])
+                            .joinpath(file_md5.rel_path))
+                if not abs_path.is_file():
+                    return False
+                if atest_utils.md5sum(abs_path) != file_md5.md5sum:
+                    return False
+            return True
+
+
 class WorkspaceGenerator:
     """Class for generating a Bazel workspace."""
 
     # pylint: disable=too-many-arguments
     def __init__(self,
-                 src_root_path: Path,
+                 resource_manager: ResourceManager,
                  workspace_out_path: Path,
-                 product_out_path: Path,
                  host_out_path: Path,
                  build_out_dir: Path,
                  mod_info: module_info.ModuleInfo,
-                 jdk_path: Path,
+                 jdk_path: Path=None,
                  enabled_features: Set[Features] = None,
-                 resource_root = None,
                  ):
         """Initializes the generator.
 
         Args:
-            src_root_path: Path of the ANDROID_BUILD_TOP.
             workspace_out_path: Path where the workspace will be output.
-            product_out_path: Path of the ANDROID_PRODUCT_OUT.
             host_out_path: Path of the ANDROID_HOST_OUT.
             build_out_dir: Path of OUT_DIR
             mod_info: ModuleInfo object.
             enabled_features: Set of enabled features.
         """
         self.enabled_features = enabled_features or set()
-        self.src_root_path = src_root_path
-        self.resource_root = resource_root or _get_resource_root()
+        self.resource_manager = resource_manager
         self.workspace_out_path = workspace_out_path
-        self.product_out_path = product_out_path
         self.host_out_path = host_out_path
         self.build_out_dir = build_out_dir
         self.mod_info = mod_info
@@ -210,8 +372,6 @@
         reused.
         """
         start = time.time()
-        workspace_md5_checksum_file = self.workspace_out_path.joinpath(
-            'workspace_md5_checksum')
         enabled_features_file = self.workspace_out_path.joinpath(
             'atest_bazel_mode_enabled_features')
         enabled_features_file_contents = '\n'.join(sorted(
@@ -221,7 +381,7 @@
             # Update the file with the set of the currently enabled features to
             # make sure that changes are detected in the workspace checksum.
             enabled_features_file.write_text(enabled_features_file_contents)
-            if atest_utils.check_md5(workspace_md5_checksum_file):
+            if self.resource_manager.check_affects_files_md5():
                 return
 
             # We raise an exception if rmtree fails to avoid leaving stale
@@ -239,16 +399,13 @@
         # Note that we write the set of enabled features despite having written
         # it above since the workspace no longer exists at this point.
         enabled_features_file.write_text(enabled_features_file_contents)
-        atest_utils.save_md5(
-            [
-                self.mod_info.mod_info_file_path,
-                enabled_features_file,
-                # TODO(b/265320036): Re-generate the Bazel workspace when JDK
-                # change.
-                self.src_root_path.joinpath(ROBOLECTRIC_CONFIG)
-            ],
-            workspace_md5_checksum_file
-        )
+
+        self.resource_manager.get_product_out_file_path(
+            self.mod_info.mod_info_file_path.relative_to(
+                self.resource_manager.get_product_out_file_path()), True)
+        self.resource_manager.register_file_with_abs_path(
+            enabled_features_file)
+        self.resource_manager.save_affects_files_md5()
         metrics.LocalDetectEvent(
             detect_type=DetectType.FULL_GENERATE_BAZEL_WORKSPACE_TIME,
             result=int(time.time() - start))
@@ -431,6 +588,12 @@
         # Symlink to package with toolchain definitions.
         self._symlink(src='prebuilts/build-tools',
                       target='prebuilts/build-tools')
+
+        device_infra_path = 'vendor/google/tools/atest/device_infra'
+        if self.resource_manager.get_src_file_path(device_infra_path).exists():
+            self._symlink(src=device_infra_path,
+                          target=device_infra_path)
+
         self._create_constants_file()
 
         self._generate_robolectric_resources()
@@ -453,7 +616,7 @@
             JDK_NAME,
             lambda : FilegroupTarget(
                 JDK_PACKAGE_NAME, JDK_NAME,
-                self.src_root_path.joinpath(self.jdk_path))
+                self.resource_manager.get_src_file_path(self.jdk_path))
         )
 
     def _generate_android_all_resources(self):
@@ -480,7 +643,7 @@
         """
         symlink = self.workspace_out_path.joinpath(target)
         symlink.parent.mkdir(parents=True, exist_ok=True)
-        symlink.symlink_to(self.src_root_path.joinpath(src))
+        symlink.symlink_to(self.resource_manager.get_src_file_path(src))
 
     def _create_base_files(self):
         self._add_workspace_resource(src='WORKSPACE', dst='WORKSPACE')
@@ -491,7 +654,10 @@
     def _add_bazel_bootstrap_files(self):
         self._symlink(src='tools/asuite/atest/bazel/resources/bazel.sh',
                       target='bazel.sh')
-        # TODO(b/256924541): Consolidate JDK version with Roboleaf team.
+        # TODO(b/256924541): Consolidate the JDK with the version the Roboleaf
+        # team uses.
+        self._symlink(src='prebuilts/jdk/jdk17/BUILD.bazel',
+                      target='prebuilts/jdk/jdk17/BUILD.bazel')
         self._symlink(src='prebuilts/jdk/jdk17/linux-x86',
                       target='prebuilts/jdk/jdk17/linux-x86')
         self._symlink(src='prebuilts/bazel/linux-x86_64/bazel',
@@ -506,7 +672,7 @@
             dst: A string of a relative path to workspace root. This is the
                 destination file/dir path where the artifacts will be added.
         """
-        src = self.resource_root.joinpath(src)
+        src = self.resource_manager.get_resource_file_path(src, True)
         dst = self.workspace_out_path.joinpath(dst)
         dst.parent.mkdir(parents=True, exist_ok=True)
 
@@ -532,7 +698,7 @@
             targets.append(target)
 
         with self.workspace_out_path.joinpath(
-                'constants.bzl').open('w') as f:
+            'constants.bzl').open('w') as f:
             writer = IndentWriter(f)
             for target in targets:
                 writer.write_line(
@@ -712,6 +878,7 @@
     DEVICE_TEST_PREREQUISITES = frozenset(DEVICELESS_TEST_PREREQUISITES.union(
         frozenset({
             'aapt',
+            'aapt2',
             'compatibility-tradefed',
             'vts-core-tradefed-harness',
         })))
@@ -924,10 +1091,11 @@
 
         configs = [
             Config('host', gen.host_out_path),
-            Config('device', gen.product_out_path),
+            Config('device', gen.resource_manager.get_product_out_file_path()),
         ]
 
-        installed_paths = get_module_installed_paths(info, gen.src_root_path)
+        installed_paths = get_module_installed_paths(
+            info, gen.resource_manager.get_src_file_path())
         config_files = group_paths_by_config(configs, installed_paths)
 
         # For test modules, we only create symbolic link to the 'testcases'
@@ -944,25 +1112,35 @@
             config_files,
             Dependencies(
                 static_dep_refs = find_static_dep_refs(
-                    gen.mod_info, info, configs, gen.src_root_path,
-                    enabled_features),
+                    gen.mod_info, info, configs,
+                    gen.resource_manager.get_src_file_path(), enabled_features),
                 runtime_dep_refs = find_runtime_dep_refs(
-                    gen.mod_info, info, configs, gen.src_root_path,
-                    enabled_features),
+                    gen.mod_info, info, configs,
+                    gen.resource_manager.get_src_file_path(), enabled_features),
                 data_dep_refs = find_data_dep_refs(
-                    gen.mod_info, info, configs, gen.src_root_path),
+                    gen.mod_info, info, configs,
+                    gen.resource_manager.get_src_file_path()),
                 device_data_dep_refs = find_device_data_dep_refs(gen, info),
             ),
+            [
+                c for c in configs if c.name in map(
+                str.lower, info.get(constants.MODULE_SUPPORTED_VARIANTS, []))
+            ],
         )
 
-    def __init__(self, info: Dict[str, Any], package_name: str,
-                 config_files: Dict[Config, List[Path]], deps: Dependencies):
+    def __init__(self,
+                 info: Dict[str, Any],
+                 package_name: str,
+                 config_files: Dict[Config, List[Path]],
+                 deps: Dependencies,
+                 supported_configs: List[Config]):
         self._target_name = info[constants.MODULE_INFO_ID]
         self._module_name = info[constants.MODULE_NAME]
         self._package_name = package_name
         self.config_files = config_files
         self.deps = deps
         self.suites = info.get(constants.MODULE_COMPATIBILITY_SUITES, [])
+        self._supported_configs = supported_configs
 
     def name(self) -> str:
         return self._target_name
@@ -975,21 +1153,19 @@
             Import('//bazel/rules:soong_prebuilt.bzl', self._rule_name()),
         }
 
-    @functools.lru_cache(maxsize=None)
+    @functools.lru_cache(maxsize=128)
     def supported_configs(self) -> Set[Config]:
+        # We deduce the supported configs from the installed paths since the
+        # build exports incorrect metadata for some module types such as
+        # Robolectric. The information exported from the build is only used if
+        # the module does not have any installed paths.
+        # TODO(b/232929584): Remove this once all modules correctly export the
+        #  supported variants.
         supported_configs = set(self.config_files.keys())
-
         if supported_configs:
             return supported_configs
 
-        # If a target has no installed files, then it supports the same
-        # configurations as its dependencies. This is required because some
-        # build modules are just intermediate targets that don't produce any
-        # output but that still have transitive dependencies.
-        for ref in self.deps.runtime_dep_refs:
-            supported_configs.update(ref.target().supported_configs())
-
-        return supported_configs
+        return self._supported_configs
 
     def dependencies(self) -> List[ModuleRef]:
         all_deps = set(self.deps.runtime_dep_refs)
@@ -1203,10 +1379,11 @@
 ) -> List[ModuleRef]:
     """Return module references for device data dependencies."""
 
-    return _find_module_refs(gen.mod_info,
-                             [Config('device', gen.product_out_path)],
-                             gen.src_root_path,
-                             info.get(constants.MODULE_TARGET_DEPS, []))
+    return _find_module_refs(
+        gen.mod_info,
+        [Config('device', gen.resource_manager.get_product_out_file_path())],
+        gen.resource_manager.get_src_file_path(),
+        info.get(constants.MODULE_TARGET_DEPS, []))
 
 
 def find_static_dep_refs(
@@ -1374,13 +1551,27 @@
                                    finder.finder_info)
 
 
+class RunCommandError(subprocess.CalledProcessError):
+    """CalledProcessError but including debug information when it fails."""
+    def __str__(self):
+        return f'{super().__str__()}\n' \
+               f'stdout={self.stdout}\n\n' \
+               f'stderr={self.stderr}'
+
+
 def default_run_command(args: List[str], cwd: Path) -> str:
-    return subprocess.check_output(
+    result = subprocess.run(
         args=args,
         cwd=cwd,
         text=True,
-        stderr=subprocess.DEVNULL,
+        capture_output=True,
+        check=False,
     )
+    if result.returncode:
+        # Provide a more detailed log message including stdout and stderr.
+        raise RunCommandError(result.returncode, result.args, result.stdout,
+                              result.stderr)
+    return result.stdout
 
 
 @dataclasses.dataclass
@@ -1401,7 +1592,6 @@
                  results_dir,
                  mod_info: module_info.ModuleInfo,
                  extra_args: Dict[str, Any]=None,
-                 test_infos: List[test_info.TestInfo]=None,
                  src_top: Path=None,
                  workspace_path: Path=None,
                  run_command: Callable=default_run_command,
@@ -1410,7 +1600,6 @@
                  **kwargs):
         super().__init__(results_dir, **kwargs)
         self.mod_info = mod_info
-        self.test_infos = test_infos
         self.src_top = src_top or Path(os.environ.get(
             constants.ANDROID_BUILD_TOP))
         self.starlark_file = _get_resource_root().joinpath(
@@ -1490,25 +1679,51 @@
         if that changes.
         """
 
-    def get_test_runner_build_reqs(self) -> Set[str]:
-        if not self.test_infos:
+    def get_test_runner_build_reqs(self, test_infos) -> Set[str]:
+        if not test_infos:
             return set()
 
         deps_expression = ' + '.join(
-            sorted(self.test_info_target_label(i) for i in self.test_infos)
+            sorted(self.test_info_target_label(i) for i in test_infos)
         )
 
-        query_args = [
-            self.bazel_binary,
-            'cquery',
-            f'deps(tests({deps_expression}))',
-            '--output=starlark',
-            f'--starlark:file={self.starlark_file}',
-        ]
+        with tempfile.NamedTemporaryFile() as query_file:
+            with open(query_file.name, 'w', encoding='utf-8') as _query_file:
+                _query_file.write(f'deps(tests({deps_expression}))')
 
-        output = self.run_command(query_args, self.bazel_workspace)
+            query_args = [
+                str(self.bazel_binary),
+                'cquery',
+                f'--query_file={query_file.name}',
+                '--output=starlark',
+                f'--starlark:file={self.starlark_file}',
+            ]
 
-        return set(filter(bool, map(str.strip, output.splitlines())))
+            output = self.run_command(query_args, self.bazel_workspace)
+
+        targets = set()
+        robolectric_tests = set(filter(
+            self._is_robolectric_test_suite,
+            [test.test_name for test in test_infos]))
+
+        modules_to_variant = _parse_cquery_output(output)
+
+        for module, variants in modules_to_variant.items():
+
+            # Skip specifying the build variant for Robolectric test modules
+            # since they are special. Soong builds them with the `target`
+            # variant although are installed as 'host' modules.
+            if module in robolectric_tests:
+                targets.add(module)
+                continue
+
+            targets.add(_soong_target_for_variants(module, variants))
+
+        return targets
+
+    def _is_robolectric_test_suite(self, module_name: str) -> bool:
+        return self.mod_info.is_robolectric_test_suite(
+            self.mod_info.get_module_info(module_name))
 
     def test_info_target_label(self, test: test_info.TestInfo) -> str:
         module_name = test.test_name
@@ -1549,7 +1764,7 @@
         target_patterns = ' '.join(
             self.test_info_target_label(i) for i in test_infos)
 
-        bazel_args = self._parse_extra_args(test_infos, extra_args)
+        bazel_args = parse_args(test_infos, extra_args, self.mod_info)
 
         bazel_args.extend(
             self._get_bazel_feature_args(
@@ -1562,10 +1777,6 @@
                 extra_args,
                 self._get_remote_args))
 
-        # Default to --test_output=errors unless specified otherwise
-        if not any(arg.startswith('--test_output=') for arg in bazel_args):
-            bazel_args.append('--test_output=errors')
-
         # This is an alternative to shlex.join that doesn't exist in Python
         # versions < 3.8.
         bazel_args_str = ' '.join(shlex.quote(arg) for arg in bazel_args)
@@ -1578,40 +1789,81 @@
             f'test {target_patterns} {bazel_args_str}'
         ]
 
-    def _parse_extra_args(self, test_infos: List[test_info.TestInfo],
-                          extra_args: trb.ARGS) -> trb.ARGS:
-        args_to_append = []
-        # Make a copy of the `extra_args` dict to avoid modifying it for other
-        # Atest runners.
-        extra_args_copy = extra_args.copy()
 
-        # Remove the `--host` flag since we already pass that in the rule's
-        # implementation.
-        extra_args_copy.pop(constants.HOST, None)
+def parse_args(
+    test_infos: List[test_info.TestInfo],
+    extra_args: Dict[str, Any],
+    mod_info: module_info.ModuleInfo) -> Dict[str, Any]:
+    """Parse commandline args and passes supported args to bazel.
 
-        # Map args to their native Bazel counterparts.
-        for arg in _SUPPORTED_BAZEL_ARGS:
-            if arg not in extra_args_copy:
-                continue
-            args_to_append.extend(
-                self.map_to_bazel_args(arg, extra_args_copy[arg]))
-            # Remove the argument since we already mapped it to a Bazel option
-            # and no longer need it mapped to a Tradefed argument below.
-            del extra_args_copy[arg]
+    Args:
+        test_infos: A set of TestInfo instances.
+        extra_args: A Dict of extra args to append.
+        mod_info: A ModuleInfo object.
 
-        # TODO(b/215461642): Store the extra_args in the top-level object so
-        # that we don't have to re-parse the extra args to get BAZEL_ARG again.
-        tf_args, _ = tfr.extra_args_to_tf_args(
-            self.mod_info, test_infos, extra_args_copy)
+    Returns:
+        A list of args to append to the run command.
+    """
 
-        # Add ATest include filter argument to allow testcase filtering.
-        tf_args.extend(tfr.get_include_filter(test_infos))
+    args_to_append = []
+    # Make a copy of the `extra_args` dict to avoid modifying it for other
+    # Atest runners.
+    extra_args_copy = extra_args.copy()
 
-        args_to_append.extend([f'--test_arg={i}' for i in tf_args])
+    # Remove the `--host` flag since we already pass that in the rule's
+    # implementation.
+    extra_args_copy.pop(constants.HOST, None)
 
-        return args_to_append
+    # Map args to their native Bazel counterparts.
+    for arg in _SUPPORTED_BAZEL_ARGS:
+        if arg not in extra_args_copy:
+            continue
+        args_to_append.extend(
+            _map_to_bazel_args(arg, extra_args_copy[arg]))
+        # Remove the argument since we already mapped it to a Bazel option
+        # and no longer need it mapped to a Tradefed argument below.
+        del extra_args_copy[arg]
 
-    @staticmethod
-    def map_to_bazel_args(arg: str, arg_value: Any) -> List[str]:
-        return _SUPPORTED_BAZEL_ARGS[arg](
-            arg_value) if arg in _SUPPORTED_BAZEL_ARGS else []
+    # TODO(b/215461642): Store the extra_args in the top-level object so
+    # that we don't have to re-parse the extra args to get BAZEL_ARG again.
+    tf_args, _ = tfr.extra_args_to_tf_args(
+        mod_info, test_infos, extra_args_copy)
+
+    # Add ATest include filter argument to allow testcase filtering.
+    tf_args.extend(tfr.get_include_filter(test_infos))
+
+    args_to_append.extend([f'--test_arg={i}' for i in tf_args])
+
+    # Default to --test_output=errors unless specified otherwise
+    if not any(arg.startswith('--test_output=') for arg in args_to_append):
+        args_to_append.append('--test_output=errors')
+
+    return args_to_append
+
+def _map_to_bazel_args(arg: str, arg_value: Any) -> List[str]:
+    return _SUPPORTED_BAZEL_ARGS[arg](
+        arg_value) if arg in _SUPPORTED_BAZEL_ARGS else []
+
+
+def _parse_cquery_output(output: str) -> Dict[str, Set[str]]:
+    module_to_build_variants = defaultdict(set)
+
+    for line in filter(bool, map(str.strip, output.splitlines())):
+        module_name, build_variant = line.split(':')
+        module_to_build_variants[module_name].add(build_variant)
+
+    return module_to_build_variants
+
+
+def _soong_target_for_variants(
+    module_name: str,
+    build_variants: Set[str]) -> str:
+
+    if not build_variants:
+        raise Exception(f'Missing the build variants for module {module_name} '
+                        f'in cquery output!')
+
+    if len(build_variants) > 1:
+        return module_name
+
+    return f'{module_name}-{_CONFIG_TO_VARIANT[list(build_variants)[0]]}'
diff --git a/atest/bazel_mode_unittest.py b/atest/bazel_mode_unittest.py
index c9e8586..b61794d 100755
--- a/atest/bazel_mode_unittest.py
+++ b/atest/bazel_mode_unittest.py
@@ -27,6 +27,7 @@
 import tempfile
 import unittest
 
+from io import StringIO
 from pathlib import Path
 from typing import List
 from unittest import mock
@@ -54,26 +55,36 @@
     def setUp(self):
         self.setUpPyfakefs()
 
-        self.src_root_path = Path('/src')
-        self.out_dir_path = self.src_root_path.joinpath('out')
+        self._src_root_path = Path('/src')
+        self.out_dir_path = self._src_root_path.joinpath('out')
         self.out_dir_path.mkdir(parents=True)
         self.product_out_path = self.out_dir_path.joinpath('product')
         self.host_out_path = self.out_dir_path.joinpath('host')
         self.workspace_out_path = self.out_dir_path.joinpath('workspace')
 
-        self.resource_root = self.src_root_path.joinpath(
+        self._resource_root = self._src_root_path.joinpath(
             'tools/asuite/atest/bazel')
 
-        bazel_rules = self.resource_root.joinpath('rules')
-        bazel_rules.mkdir(parents=True)
-        bazel_rules.joinpath('rules.bzl').touch()
+        self.workspace_md5_checksum = self.workspace_out_path.joinpath(
+            'workspace_md5_checksum')
+        self.resource_manager = bazel_mode.ResourceManager(
+            src_root_path=self._src_root_path,
+            resource_root_path=self._resource_root,
+            product_out_path=self.product_out_path,
+            md5_checksum_file_path = self.workspace_md5_checksum
+        )
 
-        bazel_configs = self.resource_root.joinpath('configs')
+        bazel_rules = self.resource_manager.get_resource_file_path('rules')
+        bazel_rules.mkdir(parents=True)
+        self.rules_bzl_file = bazel_rules.joinpath('rules.bzl')
+        self.rules_bzl_file.touch()
+
+        bazel_configs = self.resource_manager.get_resource_file_path('configs')
         bazel_configs.mkdir(parents=True)
         bazel_configs.joinpath('configs.bzl').touch()
 
-        self.resource_root.joinpath('WORKSPACE').touch()
-        self.resource_root.joinpath('bazelrc').touch()
+        self.resource_manager.get_resource_file_path('WORKSPACE').touch()
+        self.resource_manager.get_resource_file_path('bazelrc').touch()
 
     def create_workspace_generator(
         self,
@@ -84,40 +95,36 @@
         mod_info = self.create_module_info(modules)
 
         generator = bazel_mode.WorkspaceGenerator(
-            self.src_root_path,
-            self.workspace_out_path,
-            self.product_out_path,
-            self.host_out_path,
-            self.out_dir_path,
-            mod_info,
-            enabled_features=enabled_features,
-            resource_root=self.resource_root,
+            resource_manager=self.resource_manager,
+            workspace_out_path=self.workspace_out_path,
+            host_out_path=self.host_out_path,
+            build_out_dir=self.out_dir_path,
+            mod_info=mod_info,
             jdk_path=jdk_path,
+            enabled_features=enabled_features,
         )
 
         return generator
 
     def run_generator(self, mod_info, enabled_features=None, jdk_path=None):
         generator = bazel_mode.WorkspaceGenerator(
-            self.src_root_path,
-            self.workspace_out_path,
-            self.product_out_path,
-            self.host_out_path,
-            self.out_dir_path,
-            mod_info,
-            enabled_features=enabled_features,
-            resource_root=self.resource_root,
+            resource_manager=self.resource_manager,
+            workspace_out_path=self.workspace_out_path,
+            host_out_path=self.host_out_path,
+            build_out_dir=self.out_dir_path,
+            mod_info=mod_info,
             jdk_path=jdk_path,
+            enabled_features=enabled_features,
         )
 
         generator.generate()
 
     # pylint: disable=protected-access
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP: '/'})
     def create_empty_module_info(self):
-        fake_temp_file_name = next(tempfile._get_candidate_names())
-        self.fs.create_file(fake_temp_file_name, contents='{}')
-        return module_info.ModuleInfo(module_file=fake_temp_file_name)
+        fake_temp_file = self.product_out_path.joinpath(
+            next(tempfile._get_candidate_names()))
+        self.fs.create_file(fake_temp_file, contents='{}')
+        return module_info.ModuleInfo(module_file=fake_temp_file)
 
     def create_module_info(self, modules=None):
         mod_info = self.create_empty_module_info()
@@ -310,13 +317,36 @@
         workspace_generator.generate()
         workspace_stat = workspace_generator.workspace_out_path.stat()
 
-        workspace_generator.mod_info.mod_info_file_path.unlink()
+        self.workspace_md5_checksum.unlink()
         workspace_generator = self.create_workspace_generator()
         workspace_generator.generate()
-
         new_workspace_stat = workspace_generator.workspace_out_path.stat()
+
         self.assertNotEqual(workspace_stat, new_workspace_stat)
 
+    def test_regenerate_workspace_when_md5_file_is_broken(self):
+        workspace_generator = self.create_workspace_generator()
+        workspace_generator.generate()
+        workspace_stat = workspace_generator.workspace_out_path.stat()
+
+        self.workspace_md5_checksum.write_text('broken checksum file')
+        workspace_generator = self.create_workspace_generator()
+        workspace_generator.generate()
+        new_workspace_stat = workspace_generator.workspace_out_path.stat()
+
+        self.assertNotEqual(workspace_stat, new_workspace_stat)
+
+    def test_not_regenerate_workspace_when_workspace_files_unaffected(self):
+        workspace_generator = self.create_workspace_generator()
+        workspace_generator.generate()
+        workspace_stat = workspace_generator.workspace_out_path.stat()
+
+        workspace_generator = self.create_workspace_generator()
+        workspace_generator.generate()
+        new_workspace_stat = workspace_generator.workspace_out_path.stat()
+
+        self.assertEqual(workspace_stat, new_workspace_stat)
+
     def test_scrub_old_workspace_when_regenerating(self):
         workspace_generator = self.create_workspace_generator()
         workspace_generator.generate()
@@ -324,13 +354,38 @@
         some_file.touch()
         self.assertTrue(some_file.is_file())
 
-        # Remove the md5 file to regenerate the workspace.
+        # Remove the module_info file to regenerate the workspace.
         workspace_generator.mod_info.mod_info_file_path.unlink()
         workspace_generator = self.create_workspace_generator()
         workspace_generator.generate()
 
         self.assertFalse(some_file.is_file())
 
+    def test_regenerate_workspace_when_resource_file_changed(self):
+        workspace_generator = self.create_workspace_generator()
+        workspace_generator.generate()
+        workspace_stat = workspace_generator.workspace_out_path.stat()
+
+        with open(self.rules_bzl_file, 'a', encoding='utf8') as f:
+            f.write(' ')
+        workspace_generator = self.create_workspace_generator()
+        workspace_generator.generate()
+
+        new_workspace_stat = workspace_generator.workspace_out_path.stat()
+        self.assertNotEqual(workspace_stat, new_workspace_stat)
+
+    def test_not_regenerate_workspace_when_resource_file_only_touched(self):
+        workspace_generator = self.create_workspace_generator()
+        workspace_generator.generate()
+        workspace_stat = workspace_generator.workspace_out_path.stat()
+
+        self.rules_bzl_file.touch()
+        workspace_generator = self.create_workspace_generator()
+        workspace_generator.generate()
+
+        new_workspace_stat = workspace_generator.workspace_out_path.stat()
+        self.assertEqual(workspace_stat, new_workspace_stat)
+
     def test_copy_workspace_resources(self):
         gen = self.create_workspace_generator()
 
@@ -704,14 +759,13 @@
 
     def setUp(self):
         super().setUp()
-        self.robolectric_template_path = self.src_root_path.joinpath(
-            bazel_mode.ROBOLECTRIC_CONFIG)
+        self.robolectric_template_path = self.resource_manager.\
+            get_resource_file_path(bazel_mode.ROBOLECTRIC_CONFIG, True)
         self.fs.create_file(self.robolectric_template_path, contents='')
-
-    def create_module_info(self, modules=None):
-        mod_info = super().create_module_info(modules)
-        mod_info.root_dir = self.src_root_path
-        return mod_info
+        # ResourceManager only calculates md5 when registering files. So, it is
+        # necessary to call get_resource_file_path() again after writing files.
+        self.resource_manager.get_resource_file_path(
+            bazel_mode.ROBOLECTRIC_CONFIG, True)
 
     def test_generate_robolectric_test_target(self):
         module_name = 'hello_world_test'
@@ -785,7 +839,7 @@
         self.assertSymlinkTo(
             self.workspace_out_path.joinpath(
                 f'{bazel_mode.JDK_PACKAGE_NAME}/{bazel_mode.JDK_NAME}_files'),
-            self.src_root_path.joinpath(f'{jdk_path}'))
+            self.resource_manager.get_src_file_path(f'{jdk_path}'))
 
     def test_generate_android_all_target(self):
         gen = self.create_workspace_generator(jdk_path=Path('jdk_src_root'))
@@ -1126,18 +1180,37 @@
 
     def test_generate_target_for_rlib_dependency(self):
         mod_info = self.create_module_info(modules=[
-            supported_test_module(dependencies=['libhello']),
-            rlib(module(name='libhello'))
+            multi_config(host_unit_suite(module(
+                name='hello_world_test',
+                dependencies=['libhost', 'libdevice']))),
+            rlib(module(name='libhost', supported_variants=['HOST'])),
+            rlib(module(name='libdevice', supported_variants=['DEVICE'])),
         ])
 
         self.run_generator(mod_info)
 
         self.assertInBuildFile(
             'soong_uninstalled_prebuilt(\n'
-            '    name = "libhello",\n'
-            '    module_name = "libhello",\n'
+            '    name = "libhost",\n'
+            '    module_name = "libhost",\n'
             ')\n'
         )
+        self.assertInBuildFile(
+            'soong_uninstalled_prebuilt(\n'
+            '    name = "libdevice",\n'
+            '    module_name = "libdevice",\n'
+            ')\n'
+        )
+        self.assertInBuildFile(
+            '    runtime_deps = select({\n'
+            '        "//bazel/rules:device": [\n'
+            '            "//:libdevice",\n'
+            '        ],\n'
+            '        "//bazel/rules:host": [\n'
+            '            "//:libhost",\n'
+            '        ],\n'
+            '    }),\n'
+        )
 
     def test_generate_target_for_rlib_dylib_dependency(self):
         mod_info = self.create_module_info(modules=[
@@ -1330,7 +1403,6 @@
         self.assertTargetNotInWorkspace('libdata')
 
 
-@mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP: '/'})
 def create_empty_module_info():
     with fake_filesystem_unittest.Patcher() as patcher:
         # pylint: disable=protected-access
@@ -1405,7 +1477,10 @@
     return test(module(**kwargs))
 
 
+# TODO(b/274822450): Use a builder pattern to reduce the number of parameters
+#  instead of disabling the warning.
 # pylint: disable=too-many-arguments
+# pylint: disable=too-many-locals
 def module(
     name=None,
     path=None,
@@ -1421,6 +1496,7 @@
     host_dependencies=None,
     target_dependencies=None,
     test_options_tags=None,
+    supported_variants=None,
 ):
     name = name or 'libhello'
 
@@ -1441,6 +1517,7 @@
     m['host_dependencies'] = host_dependencies or []
     m['target_dependencies'] = target_dependencies or []
     m['test_options_tags'] = test_options_tags or []
+    m['supported_variants'] = supported_variants or []
     return m
 
 
@@ -1831,82 +1908,109 @@
             modules=[
                 supported_test_module(name='test1', path='path1'),
             ],
-            test_infos=[],
             run_command=run_command,
         )
 
-        reqs = runner.get_test_runner_build_reqs()
+        reqs = runner.get_test_runner_build_reqs([])
 
         self.assertFalse(reqs)
 
     def test_query_bazel_test_targets_deps_with_host_arg(self):
-        run_command = self.mock_run_command()
+        query_file_contents = StringIO()
+        def get_query_file_content(args: List[str], _) -> str:
+            query_file_contents.write(_get_query_file_content(args))
+            return ''
+
         runner = self.create_bazel_test_runner(
             modules=[
                 multi_config(host_unit_test_module(name='test1', path='path1')),
                 multi_config(host_unit_test_module(name='test2', path='path2')),
                 multi_config(test_module(name='test3', path='path3')),
             ],
-            test_infos = [
-                test_info_of('test2'),
-                test_info_of('test1'),  # Intentionally out of order.
-                test_info_of('test3'),
-            ],
-            run_command=run_command,
+            run_command=get_query_file_content,
             host=True,
         )
 
-        runner.get_test_runner_build_reqs()
+        runner.get_test_runner_build_reqs([
+            test_info_of('test2'),
+            test_info_of('test1'),  # Intentionally out of order.
+            test_info_of('test3'),
+        ])
 
-        call_args = run_command.call_args[0][0]
-        self.assertIn(
+        self.assertEqual(
             'deps(tests(//path1:test1_host + '
             '//path2:test2_host + '
             '//path3:test3_host))',
-            call_args,
-        )
+            query_file_contents.getvalue())
 
     def test_query_bazel_test_targets_deps_without_host_arg(self):
-        run_command = self.mock_run_command()
+        query_file_contents = StringIO()
+        def get_query_file_content(args: List[str], _) -> str:
+            query_file_contents.write(_get_query_file_content(args))
+            return ''
+
         runner = self.create_bazel_test_runner(
             modules=[
                 multi_config(host_unit_test_module(name='test1', path='path1')),
                 host_unit_test_module(name='test2', path='path2'),
                 multi_config(test_module(name='test3', path='path3')),
             ],
-            test_infos = [
-                test_info_of('test2'),
-                test_info_of('test1'),
-                test_info_of('test3'),
-            ],
-            run_command=run_command,
+            run_command=get_query_file_content,
         )
 
-        runner.get_test_runner_build_reqs()
+        runner.get_test_runner_build_reqs([
+            test_info_of('test2'),
+            test_info_of('test1'),
+            test_info_of('test3'),
+        ])
 
-        call_args = run_command.call_args[0][0]
-        call_args = run_command.call_args[0][0]
-        self.assertIn(
+        self.assertEqual(
             'deps(tests(//path1:test1_device + '
             '//path2:test2_host + '
             '//path3:test3_device))',
-            call_args,
-        )
+            query_file_contents.getvalue())
 
     def test_trim_whitespace_in_bazel_query_output(self):
         run_command = self.mock_run_command(
-            return_value='\n'.join(['  test1  ', 'test2  ', '  ']))
+            return_value='\n'.join(['  test1:host  ', 'test2:device  ', '  ']))
         runner = self.create_bazel_test_runner(
             modules=[
                 supported_test_module(name='test1', path='path1'),
             ],
-            test_infos = [test_info_of('test1')],
             run_command=run_command,
         )
 
-        reqs = runner.get_test_runner_build_reqs()
+        reqs = runner.get_test_runner_build_reqs([test_info_of('test1')])
 
-        self.assertSetEqual({'test1', 'test2'}, reqs)
+        self.assertSetEqual({'test1-host', 'test2-target'}, reqs)
+
+    def test_build_variants_in_bazel_query_output(self):
+        run_command = self.mock_run_command(
+            return_value='\n'.join([
+                'test1:host',
+                'test2:host', 'test2:device',
+                'test3:device',
+                'test4:host', 'test4:host',
+            ]))
+        runner = self.create_bazel_test_runner(
+            modules=[
+                supported_test_module(name='test1', path='path1'),
+                supported_test_module(name='test2', path='path2'),
+                supported_test_module(name='test3', path='path3'),
+                supported_test_module(name='test4', path='path4'),
+            ],
+            run_command = run_command,
+        )
+
+        reqs = runner.get_test_runner_build_reqs([
+            test_info_of('test1'),
+            test_info_of('test2'),
+            test_info_of('test3'),
+            test_info_of('test4')])
+
+        self.assertSetEqual(
+            {'test1-host', 'test2', 'test3-target', 'test4-host'},
+            reqs)
 
     def test_generate_single_run_command(self):
         test_infos = [test_info_of('test1')]
@@ -1926,7 +2030,6 @@
                 multi_config(host_unit_test_module(name='test2', path='path')),
                 multi_config(test_module(name='test3', path='path')),
             ],
-            test_infos,
             host=True
         )
 
@@ -1943,7 +2046,6 @@
                 multi_config(host_unit_test_module(name='test1', path='path')),
                 host_unit_test_module(name='test2', path='path'),
             ],
-            test_infos,
         )
 
         cmd = runner.generate_run_commands(test_infos, {})
@@ -2074,7 +2176,6 @@
 
     def create_bazel_test_runner(self,
                                  modules,
-                                 test_infos,
                                  run_command=None,
                                  host=False,
                                  build_metadata=None,
@@ -2082,7 +2183,6 @@
         return bazel_mode.BazelTestRunner(
             'result_dir',
             mod_info=create_module_info(modules),
-            test_infos=test_infos,
             src_top=Path('/src'),
             workspace_path=Path('/src/workspace'),
             run_command=run_command or self.mock_run_command(),
@@ -2098,7 +2198,6 @@
         return self.create_bazel_test_runner(
             modules=[supported_test_module(name=t.test_name, path='path')
                      for t in test_infos],
-            test_infos=test_infos,
             build_metadata=build_metadata,
             env=env
         )
@@ -2151,5 +2250,13 @@
         class_name, frozenset(methods) if methods else frozenset())
 
 
+def _get_query_file_content(args: List[str]) -> str:
+    for arg in args:
+        if arg.startswith('--query_file='):
+            return Path(arg.split('=')[1]).read_text()
+
+    raise Exception('Query file not found!')
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/atest/cli_translator.py b/atest/cli_translator.py
index 9c53853..c67587d 100644
--- a/atest/cli_translator.py
+++ b/atest/cli_translator.py
@@ -126,6 +126,12 @@
         test_name = test_identifier.test_name
         if not self._verified_mainline_modules(test_identifier):
             return test_infos
+        if self.mod_info and test in self.mod_info.roboleaf_tests:
+            # Roboleaf bazel will discover and build dependencies so we can
+            # skip finding dependencies.
+            print(f'Found \'{atest_utils.colorize(test, constants.GREEN)}\''
+                  ' as ROBOLEAF_CONVERTED_MODULE')
+            return [self.mod_info.roboleaf_tests[test]]
         find_methods = test_finder_handler.get_find_methods_for_test(
             self.mod_info, test)
         if self._bazel_mode:
@@ -535,12 +541,6 @@
 
         return tests, all_tests
 
-    def _gather_build_targets(self, test_infos):
-        targets = set()
-        for t_info in test_infos:
-            targets |= t_info.build_targets
-        return targets
-
     def _get_test_mapping_tests(self, args, exit_if_no_test_found=True):
         """Find the tests in TEST_MAPPING files.
 
@@ -715,13 +715,12 @@
             result=int(finished_time))
         for t_info in test_infos:
             logging.debug('%s\n', t_info)
-        build_targets = self._gather_build_targets(test_infos)
         if not self._bazel_mode:
             if host_unit_tests or self._has_host_unit_test(tests):
                 msg = (r"It is recommended to run host unit tests with "
                        r"--bazel-mode.")
                 atest_utils.colorful_print(msg, constants.YELLOW)
-        return build_targets, test_infos
+        return test_infos
 
 
 # TODO: (b/265359291) Raise Exception when the brackets are not in pair.
diff --git a/atest/cli_translator_unittest.py b/atest/cli_translator_unittest.py
index 1537761..4b61f58 100755
--- a/atest/cli_translator_unittest.py
+++ b/atest/cli_translator_unittest.py
@@ -232,8 +232,6 @@
                     test_detail2.options,
                     test_info.data[constants.TI_MODULE_ARG])
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     @mock.patch.object(module_finder.ModuleFinder, 'get_fuzzy_searching_results')
     @mock.patch.object(metrics, 'FindTestFinishEvent')
     @mock.patch.object(test_finder_handler, 'get_find_methods_for_test')
@@ -261,9 +259,11 @@
         host_unit_tests.return_value = False
         self.args.tests = [uc.CLASS_NAME]
         self.args.host_unit_test_only = False
-        targets, test_infos = self.ctr.translate(self.args)
+        test_infos = self.ctr.translate(self.args)
         unittest_utils.assert_strict_equal(
-            self, targets, uc.CLASS_BUILD_TARGETS)
+            self,
+            _gather_build_targets(test_infos),
+            uc.CLASS_BUILD_TARGETS)
         unittest_utils.assert_strict_equal(self, test_infos, {uc.CLASS_INFO})
 
     @mock.patch.object(test_finder_utils, 'find_host_unit_tests',
@@ -277,9 +277,11 @@
         host_unit_tests.return_value = []
         self.args.tests = [uc.MODULE_NAME, uc.CLASS_NAME]
         self.args.host_unit_test_only = False
-        targets, test_infos = self.ctr.translate(self.args)
+        test_infos = self.ctr.translate(self.args)
         unittest_utils.assert_strict_equal(
-            self, targets, uc.MODULE_CLASS_COMBINED_BUILD_TARGETS)
+            self,
+            _gather_build_targets(test_infos),
+            uc.MODULE_CLASS_COMBINED_BUILD_TARGETS)
         unittest_utils.assert_strict_equal(self, test_infos, {uc.MODULE_INFO,
                                                               uc.CLASS_INFO})
 
@@ -299,9 +301,11 @@
         self.args.host = False
         self.args.host_unit_test_only = False
         host_unit_tests.return_value = False
-        targets, test_infos = self.ctr.translate(self.args)
+        test_infos = self.ctr.translate(self.args)
         unittest_utils.assert_strict_equal(
-            self, targets, uc.MODULE_CLASS_COMBINED_BUILD_TARGETS)
+            self,
+            _gather_build_targets(test_infos),
+            uc.MODULE_CLASS_COMBINED_BUILD_TARGETS)
         unittest_utils.assert_strict_equal(self, test_infos, {uc.MODULE_INFO,
                                                               uc.CLASS_INFO})
 
@@ -319,14 +323,17 @@
         self.args.test_mapping = True
         self.args.host = False
         host_unit_tests.return_value = False
-        targets, test_infos = self.ctr.translate(self.args)
+        test_infos = self.ctr.translate(self.args)
         unittest_utils.assert_strict_equal(
-            self, targets, uc.MODULE_CLASS_COMBINED_BUILD_TARGETS)
+            self,
+            _gather_build_targets(test_infos),
+            uc.MODULE_CLASS_COMBINED_BUILD_TARGETS)
         unittest_utils.assert_strict_equal(self, test_infos, {uc.MODULE_INFO,
                                                               uc.CLASS_INFO})
 
     def test_find_tests_by_test_mapping_presubmit(self):
         """Test _find_tests_by_test_mapping method to locate presubmit tests."""
+        # TODO: (b/264015241) Stop mocking build variables.
         os_environ_mock = {constants.ANDROID_BUILD_TOP: uc.TEST_DATA_DIR}
         with mock.patch.dict('os.environ', os_environ_mock, clear=True):
             tests, all_tests = self.ctr._find_tests_by_test_mapping(
@@ -343,6 +350,7 @@
     def test_find_tests_by_test_mapping_postsubmit(self):
         """Test _find_tests_by_test_mapping method to locate postsubmit tests.
         """
+        # TODO: (b/264015241) Stop mocking build variables.
         os_environ_mock = {constants.ANDROID_BUILD_TOP: uc.TEST_DATA_DIR}
         with mock.patch.dict('os.environ', os_environ_mock, clear=True):
             tests, all_tests = self.ctr._find_tests_by_test_mapping(
@@ -361,6 +369,7 @@
     def test_find_tests_by_test_mapping_all_group(self):
         """Test _find_tests_by_test_mapping method to locate postsubmit tests.
         """
+        # TODO: (b/264015241) Stop mocking build variables.
         os_environ_mock = {constants.ANDROID_BUILD_TOP: uc.TEST_DATA_DIR}
         with mock.patch.dict('os.environ', os_environ_mock, clear=True):
             tests, all_tests = self.ctr._find_tests_by_test_mapping(
@@ -379,6 +388,7 @@
 
     def test_find_tests_by_test_mapping_include_subdir(self):
         """Test _find_tests_by_test_mapping method to include sub directory."""
+        # TODO: (b/264015241) Stop mocking build variables.
         os_environ_mock = {constants.ANDROID_BUILD_TOP: uc.TEST_DATA_DIR}
         with mock.patch.dict('os.environ', os_environ_mock, clear=True):
             tests, all_tests = self.ctr._find_tests_by_test_mapping(
@@ -426,8 +436,6 @@
 
         self.assertEqual(test_mapping_dict, test_mapping_dict_gloden)
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     @mock.patch.object(module_info.ModuleInfo, 'get_testable_modules')
     def test_extract_testable_modules_by_wildcard(self, mock_mods):
         """Test _extract_testable_modules_by_wildcard method."""
@@ -471,7 +479,7 @@
         self.args.tests = []
         self.args.host = False
         self.args.host_unit_test_only = False
-        _, test_infos = self.ctr.translate(self.args)
+        test_infos = self.ctr.translate(self.args)
         unittest_utils.assert_strict_equal(self,
                                            test_infos,
                                            {uc.MODULE_INFO,
@@ -499,7 +507,7 @@
         self.args.host = False
         self.args.test_mapping = True
         self.args.host_unit_test_only = False
-        _, test_infos = self.ctr.translate(self.args)
+        test_infos = self.ctr.translate(self.args)
         unittest_utils.assert_strict_equal(
             self,
             test_infos,
@@ -553,3 +561,10 @@
 
+def _gather_build_targets(test_infos):
+    targets = set()
+    for t_info in test_infos:
+        targets |= t_info.build_targets
+    return targets
+
+
if __name__ == '__main__':
    unittest.main()
diff --git a/atest/coverage/coverage.py b/atest/coverage/coverage.py
index 466fd21..d1b80ee 100644
--- a/atest/coverage/coverage.py
+++ b/atest/coverage/coverage.py
@@ -18,7 +18,7 @@
 import subprocess
 
 from pathlib import Path
-from typing import List
+from typing import List, Set
 
 from atest import atest_utils
 from atest import constants
@@ -79,8 +79,10 @@
         for path in mod_info.get_paths(module):
             module_dir = soong_intermediates.joinpath(path, module)
             # Check for uninstrumented Java class files to report coverage.
-            jacoco_report_jars[module] = module_dir.glob(
-                '*cov*/jacoco-report-classes/*.jar')
+            classfiles = list(
+                module_dir.rglob('jacoco-report-classes/*.jar'))
+            if classfiles:
+                jacoco_report_jars[module] = classfiles
 
             # Check for unstripped native binaries to report coverage.
             unstripped_native_binaries.update(
@@ -101,7 +103,8 @@
 
     for info in test_infos:
         deps.add(info.raw_test_name)
-        deps |= mod_info.get_module_dependency(info.raw_test_name, deps)
+        deps |= _get_transitive_module_deps(
+            mod_info.get_module_info(info.raw_test_name), mod_info, deps)
 
         # Include dependencies of any Mainline modules specified as well.
         if not info.mainline_modules:
@@ -109,7 +112,41 @@
 
         for mainline_module in info.mainline_modules:
             deps.add(mainline_module)
-            deps |= mod_info.get_module_dependency(mainline_module, deps)
+            deps |= _get_transitive_module_deps(
+                mod_info.get_module_info(mainline_module), mod_info, deps)
+
+    return deps
+
+
+def _get_transitive_module_deps(info,
+                                mod_info: module_info.ModuleInfo,
+                                seen: Set[str]) -> Set[str]:
+    """Gets all dependencies of the module, including .impl versions."""
+    deps = set()
+
+    for dep in info.get(constants.MODULE_DEPENDENCIES, []):
+        if dep in seen:
+            continue
+
+        seen.add(dep)
+
+        dep_info = mod_info.get_module_info(dep)
+
+        # Mainline modules sometimes depend on `java_sdk_library` modules that
+        # generate synthetic build modules ending in `.impl` which do not appear
+        # in the ModuleInfo. Strip this suffix to prevent incomplete dependency
+        # information when generating coverage reports.
+        # TODO(olivernguyen): Reconcile this with
+        # ModuleInfo.get_module_dependency(...).
+        if not dep_info:
+            dep = dep.removesuffix('.impl')
+            dep_info = mod_info.get_module_info(dep)
+
+        if not dep_info:
+            continue
+
+        deps.add(dep)
+        deps |= _get_transitive_module_deps(dep_info, mod_info, seen)
 
     return deps
 
@@ -123,12 +160,22 @@
         info = mod_info.get_module_info(module)
         if not info:
             continue
+
+        # Do not report coverage for test modules.
+        if mod_info.is_testable_module(info):
+            continue
+
         src_paths.update(
             os.path.dirname(f) for f in info.get(constants.MODULE_SRCS, []))
 
+    src_paths = {p for p in src_paths if not _is_generated_code(p)}
     return src_paths
 
 
+def _is_generated_code(path):
+    return 'soong/.intermediates' in path
+
+
 def _generate_java_coverage_report(report_jars, src_paths, results_dir,
                                    mod_info):
     build_top = os.environ.get(constants.ANDROID_BUILD_TOP)
@@ -140,9 +187,12 @@
     jacoco_lcov = os.path.join(build_top, jacoco_lcov['installed'][0])
     lcov_reports = []
 
-    for name, report_jar in report_jars.items():
+    for name, classfiles in report_jars.items():
         dest = f'{out_dir}/{name}.info'
-        cmd = [jacoco_lcov, '-o', dest, '-classfiles', str(report_jar)]
+        cmd = [jacoco_lcov, '-o', dest]
+        for classfile in classfiles:
+            cmd.append('-classfiles')
+            cmd.append(str(classfile))
         for src_path in src_paths:
             cmd.append('-sourcepath')
             cmd.append(src_path)
@@ -181,7 +231,7 @@
         # to generate the unstripped binaries, but are stored in the same
         # directory as the actual output binary.
         if not binary.match('*.rsp'):
-            cmd.append(str(binary))
+            cmd.append(f'--object={str(binary)}')
 
     try:
         subprocess.run(cmd, check=True,
diff --git a/atest/logstorage/atest_gcp_utils.py b/atest/logstorage/atest_gcp_utils.py
index bd4d4c1..6202511 100644
--- a/atest/logstorage/atest_gcp_utils.py
+++ b/atest/logstorage/atest_gcp_utils.py
@@ -172,7 +172,8 @@
             flow=flow, storage=storage, flags=flags)
         return credentials
 
-    def _get_sso_access_token(self):
+    @staticmethod
+    def _get_sso_access_token():
         """Use stubby command line to exchange corp sso to a scoped oauth
         token.
 
@@ -244,10 +245,11 @@
         if os.path.exists(not_upload_file):
             os.remove(not_upload_file)
     else:
-        if extra_args.get(constants.DISABLE_UPLOAD_RESULT):
-            if os.path.exists(creds_f):
-                os.remove(creds_f)
-            Path(not_upload_file).touch()
+        # TODO(b/275113186): Change back to default upload after AnTS upload
+        #  extremely slow problem be solved.
+        if os.path.exists(creds_f):
+            os.remove(creds_f)
+        Path(not_upload_file).touch()
 
     # If DO_NOT_UPLOAD not exist, ATest will try to get the credential
     # from the file.
@@ -257,11 +259,12 @@
             client_secret=constants.CLIENT_SECRET,
             user_agent='atest').get_credential_with_auth_flow(creds_f)
 
+    # TODO(b/275113186): Change back the warning message after the bug solved.
     atest_utils.colorful_print(
-        'WARNING: In order to allow uploading local test results to AnTS, it '
-        'is recommended you add the option --request-upload-result. This option'
-        ' only needs to set once and takes effect until --disable-upload-result'
-        ' is set.', constants.YELLOW)
+        'WARNING: AnTS upload disabled by default due to upload slowly'
+        '(b/275113186). If you still want to upload test result to AnTS, '
+        'please add the option --request-upload-result manually.',
+        constants.YELLOW)
     return None
 
 def _prepare_data(creds):
diff --git a/atest/logstorage/atest_gcp_utils_unittest.py b/atest/logstorage/atest_gcp_utils_unittest.py
index 1364715..4cd02b3 100644
--- a/atest/logstorage/atest_gcp_utils_unittest.py
+++ b/atest/logstorage/atest_gcp_utils_unittest.py
@@ -135,5 +135,7 @@
             os.remove(not_upload_file)
 
         atest_gcp_utils.fetch_credential(tmp_folder, dict())
-        self.assertEqual(1, mock_get_credential_with_auth_flow.call_count)
-        self.assertFalse(os.path.exists(not_upload_file))
+        # TODO(b/275113186): Change back to assertEqual 1 and assertFalse after
+        #  switch back to default not upload.
+        self.assertEqual(0, mock_get_credential_with_auth_flow.call_count)
+        self.assertTrue(os.path.exists(not_upload_file))
diff --git a/atest/module_info.py b/atest/module_info.py
index f4c398d..31dfd78 100644
--- a/atest/module_info.py
+++ b/atest/module_info.py
@@ -54,7 +54,12 @@
 class ModuleInfo:
     """Class that offers fast/easy lookup for Module related details."""
 
-    def __init__(self, force_build=False, module_file=None, index_dir=None):
+    def __init__(
+        self,
+        force_build=False,
+        module_file=None,
+        index_dir=None,
+        no_generate=False):
         """Initialize the ModuleInfo object.
 
         Load up the module-info.json file and initialize the helper vars.
@@ -88,6 +93,9 @@
                          module_info file regardless if it's created or not.
             module_file: String of path to file to load up. Used for testing.
             index_dir: String of path to store testable module index and md5.
+            no_generate: Boolean to indicate if we should populate module info
+                         from the soong artifacts; setting to true will
+                         leave module info empty.
         """
         # TODO(b/263199608): Refactor the ModuleInfo constructor.
         # The module-info constructor does too much. We should never be doing
@@ -99,11 +107,13 @@
         # update_merge_info flag will merge dep files only when any of them have
         # changed even force_build == True.
         self.update_merge_info = False
+        self.roboleaf_tests = {}
+
         # Index and checksum files that will be used.
-        with tempfile.TemporaryDirectory() as temp_dir:
-            index_dir = (Path(index_dir)
-                         if index_dir else
-                         Path(temp_dir).joinpath('indexes'))
+        index_dir = (
+            Path(index_dir) if index_dir else
+            Path(os.getenv(constants.ANDROID_HOST_OUT)).joinpath('indexes')
+        )
         if not index_dir.is_dir():
             index_dir.mkdir(parents=True)
         self.module_index = index_dir.joinpath(constants.MODULE_INDEX)
@@ -118,6 +128,11 @@
             os.getenv(constants.ANDROID_PRODUCT_OUT, '')).joinpath(_MERGED_INFO)
 
         self.mod_info_file_path = Path(module_file) if module_file else None
+
+        if no_generate:
+            self.name_to_module_info = {}
+            return
+
         module_info_target, name_to_module_info = self._load_module_info_file(
             module_file)
         self.name_to_module_info = name_to_module_info
@@ -421,64 +436,55 @@
 
     def is_tradefed_testable_module(self, info: Dict[str, Any]) -> bool:
         """Check whether the module is a Tradefed executable test."""
+        if not info:
+            return False
         if not info.get(constants.MODULE_INSTALLED, []):
             return False
-        return bool(info.get(constants.MODULE_TEST_CONFIG, []) or
-                    info.get('auto_test_config', []))
+        return self.has_test_config(info)
 
-    # TODO(b/270106441): Refactor is_testable_module since it's unreliable and
-    # takes too much time for searching test config files under the module
-    # path.
-    def is_testable_module(self, mod_info):
+    def is_testable_module(self, info: Dict[str, Any]) -> bool:
         """Check if module is something we can test.
 
         A module is testable if:
-          - it's installed, or
+          - it's a tradefed testable module, or
           - it's a robolectric module (or shares path with one).
 
         Args:
-            mod_info: Dict of module info to check.
+            info: Dict of module info to check.
 
         Returns:
             True if we can test this module, False otherwise.
         """
-        if not mod_info:
+        if not info:
             return False
-        if all((mod_info.get(constants.MODULE_INSTALLED, []),
-                self.has_test_config(mod_info))):
+        if self.is_tradefed_testable_module(info):
             return True
-        if self.is_robolectric_test(mod_info.get(constants.MODULE_NAME)):
+        if self.is_legacy_robolectric_test(info.get(constants.MODULE_NAME)):
             return True
         return False
 
-    def has_test_config(self, mod_info):
+    def has_test_config(self, info: Dict[str, Any]) -> bool:
         """Validate if this module has a test config.
 
         A module can have a test config in the following manner:
-          - AndroidTest.xml at the module path.
           - test_config be set in module-info.json.
           - Auto-generated config via the auto_test_config key
             in module-info.json.
 
         Args:
-            mod_info: Dict of module info to check.
+            info: Dict of module info to check.
 
         Returns:
             True if this module has a test config, False otherwise.
         """
-        # Check if test_config in module-info is set.
-        for test_config in mod_info.get(constants.MODULE_TEST_CONFIG, []):
-            if os.path.isfile(os.path.join(self.root_dir, test_config)):
-                return True
-        # Check for AndroidTest.xml at the module path.
-        for path in mod_info.get(constants.MODULE_PATH, []):
-            if os.path.isfile(os.path.join(self.root_dir, path,
-                                           constants.MODULE_CONFIG)):
-                return True
-        # Check if the module has an auto-generated config.
-        return self.is_auto_gen_test_config(mod_info.get(constants.MODULE_NAME))
+        return bool(info.get(constants.MODULE_TEST_CONFIG, []) or
+                    info.get('auto_test_config', []))
 
-    def get_robolectric_test_name(self, module_name):
+    def is_legacy_robolectric_test(self, module_name: str) -> bool:
+        """Return whether the module_name is a legacy Robolectric test"""
+        return bool(self.get_robolectric_test_name(module_name))
+
+    def get_robolectric_test_name(self, module_name: str) -> str:
         """Returns runnable robolectric module name.
 
         This method is for legacy robolectric tests and returns one of associated
@@ -496,16 +502,23 @@
             String of the first-matched associated module that belongs to the
             actual robolectric module, None if nothing has been found.
         """
-        module_name_info = self.get_module_info(module_name)
-        if not module_name_info:
-            return None
-        module_paths = module_name_info.get(constants.MODULE_PATH, [])
-        if module_paths:
-            for mod in self.get_module_names(module_paths[0]):
-                mod_info = self.get_module_info(mod)
-                if self.is_robolectric_module(mod_info):
-                    return mod
-        return None
+        info = self.get_module_info(module_name) or {}
+        module_paths = info.get(constants.MODULE_PATH, [])
+        if not module_paths:
+            return ''
+        filtered_module_names = [
+            name
+            for name in self.get_module_names(module_paths[0])
+            if name.startswith("Run")
+        ]
+        return next(
+            (
+                name
+                for name in filtered_module_names
+                if self.is_legacy_robolectric_class(self.get_module_info(name))
+            ),
+            '',
+        )
 
     def is_robolectric_test(self, module_name):
         """Check if the given module is a robolectric test.
@@ -551,28 +564,17 @@
             1: a modern robolectric test(defined in Android.bp)
             2: a legacy robolectric test(defined in Android.mk)
         """
-        not_a_robo_test = 0
-        module_name_info = self.get_module_info(module_name)
-        if not module_name_info:
-            return not_a_robo_test
-        mod_path = module_name_info.get(constants.MODULE_PATH, [])
-        if mod_path:
-            # Check1: If the associated modules are "ROBOLECTRIC".
-            is_a_robotest = False
-            modules_in_path = self.get_module_names(mod_path[0])
-            for mod in modules_in_path:
-                mod_info = self.get_module_info(mod)
-                if self.is_robolectric_module(mod_info):
-                    is_a_robotest = True
-                    break
-            if not is_a_robotest:
-                return not_a_robo_test
-            # Check 2: The `robolectric-test` in the compatibility_suites, call
-            #          it a modern test.
-            if self.is_modern_robolectric_test(module_name_info):
-                return constants.ROBOTYPE_MODERN
+        info = self.get_module_info(module_name)
+        if not info:
+            return 0
+        # Some Modern mode Robolectric test has related module which compliant
+        # with the Legacy Robolectric test. In this case, the Modern mode
+        # Robolectric tests should prior to Legacy mode.
+        if self.is_modern_robolectric_test(info):
+            return constants.ROBOTYPE_MODERN
+        if self.is_legacy_robolectric_test(module_name):
             return constants.ROBOTYPE_LEGACY
-        return not_a_robo_test
+        return 0
 
     def get_instrumentation_target_apps(self, module_name: str) -> Dict:
         """Return target APKs of an instrumentation test.
@@ -682,22 +684,24 @@
             return auto_test_config and auto_test_config[0]
         return False
 
-    def is_robolectric_module(self, mod_info):
-        """Check if a module is a robolectric module.
+    def is_legacy_robolectric_class(self, info: Dict[str, Any]) -> bool:
+        """Check if the class is `ROBOLECTRIC`
 
         This method is for legacy robolectric tests that the associated modules
         contain:
             'class': ['ROBOLECTRIC']
 
         Args:
-            mod_info: ModuleInfo to check.
+            info: ModuleInfo to check.
 
         Returns:
-            True if module is a robolectric module, False otherwise.
+            True if the attribute class in mod_info is ROBOLECTRIC, False
+            otherwise.
         """
-        if mod_info:
-            return (mod_info.get(constants.MODULE_CLASS, [None])[0] ==
-                    constants.MODULE_CLASS_ROBOLECTRIC)
+        if info:
+            module_classes = info.get(constants.MODULE_CLASS, [])
+            return (module_classes and
+                    module_classes[0] == constants.MODULE_CLASS_ROBOLECTRIC)
         return False
 
     def is_native_test(self, module_name):
@@ -819,7 +823,7 @@
         """
         merge_items = [constants.MODULE_DEPENDENCIES, constants.MODULE_SRCS,
                        constants.MODULE_LIBS, constants.MODULE_STATIC_LIBS,
-                       constants.MODULE_STATIC_DEPS]
+                       constants.MODULE_STATIC_DEPS, constants.MODULE_PATH]
         for module_name, dep_info in mod_bp_infos.items():
             mod_info = name_to_module_info.setdefault(module_name, {})
             for merge_item in merge_items:
diff --git a/atest/module_info_unittest.py b/atest/module_info_unittest.py
index 290ba9d..7afe4ad 100755
--- a/atest/module_info_unittest.py
+++ b/atest/module_info_unittest.py
@@ -19,6 +19,7 @@
 # pylint: disable=invalid-name
 # pylint: disable=line-too-long
 # pylint: disable=missing-function-docstring
+# pylint: disable=too-many-lines
 
 import os
 import shutil
@@ -73,12 +74,14 @@
                constants.MODULE_CLASS: ['random_class']}
 NAME_TO_MODULE_INFO = {'random_name' : MODULE_INFO}
 # Mocking path allows str only, use os.path instead of Path.
-BUILD_TOP_DIR = tempfile.TemporaryDirectory().name
+with tempfile.TemporaryDirectory() as temp_dir:
+    BUILD_TOP_DIR = temp_dir
 SOONG_OUT_DIR = os.path.join(BUILD_TOP_DIR, 'out/soong')
 PRODUCT_OUT_DIR = os.path.join(BUILD_TOP_DIR, 'out/target/product/vsoc_x86_64')
 HOST_OUT_DIR = os.path.join(BUILD_TOP_DIR, 'out/host/linux-x86')
 
-#pylint: disable=protected-access
+# TODO: (b/263199608) Suppress too-many-public-methods after refactoring.
+#pylint: disable=protected-access, too-many-public-methods
 class ModuleInfoUnittests(unittest.TestCase):
     """Unit tests for module_info.py"""
 
@@ -98,6 +101,8 @@
         if self.merged_dep_path.is_file():
             os.remove(self.merged_dep_path)
 
+    # TODO: (b/264015241) Stop mocking build variables.
+    # TODO: (b/263199608) Re-write the test after refactoring module-info.py
     @mock.patch.object(module_info.ModuleInfo, 'need_update_merged_file')
     @mock.patch('json.load', return_value={})
     @mock.patch('builtins.open', new_callable=mock.mock_open)
@@ -140,8 +145,6 @@
             self.assertEqual(custom_abs_out_dir_mod_targ,
                              mod_info.module_info_target)
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     @mock.patch.object(module_info.ModuleInfo, '_load_module_info_file')
     def test_get_path_to_module_info(self, mock_load_module):
         """Test that we correctly create the path to module info dict."""
@@ -163,8 +166,6 @@
         self.assertDictEqual(path_to_mod_info,
                              mod_info._get_path_to_module_info(mod_info_dict))
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     def test_is_module(self):
         """Test that we get the module when it's properly loaded."""
         # Load up the test json file and check that module is in it
@@ -172,8 +173,6 @@
         self.assertTrue(mod_info.is_module(EXPECTED_MOD_TARGET))
         self.assertFalse(mod_info.is_module(UNEXPECTED_MOD_TARGET))
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     def test_get_path(self):
         """Test that we get the module path when it's properly loaded."""
         # Load up the test json file and check that module is in it
@@ -182,8 +181,6 @@
                          EXPECTED_MOD_TARGET_PATH)
         self.assertEqual(mod_info.get_paths(MOD_NO_PATH), [])
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     def test_get_module_names(self):
         """test that we get the module name properly."""
         mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
@@ -193,8 +190,6 @@
             self, mod_info.get_module_names(PATH_TO_MULT_MODULES),
             MULT_MOODULES_WITH_SHARED_PATH)
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     def test_path_to_mod_info(self):
         """test that we get the module name properly."""
         mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
@@ -205,8 +200,6 @@
         TESTABLE_MODULES_WITH_SHARED_PATH.sort()
         self.assertEqual(module_list, TESTABLE_MODULES_WITH_SHARED_PATH)
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     def test_is_suite_in_compatibility_suites(self):
         """Test is_suite_in_compatibility_suites."""
         mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
@@ -220,6 +213,8 @@
         self.assertTrue(mod_info.is_suite_in_compatibility_suites("vts10", info3))
         self.assertFalse(mod_info.is_suite_in_compatibility_suites("ats", info3))
 
+    # TODO: (b/264015241) Stop mocking build variables.
+    # TODO: (b/263199608) Re-write the test after refactoring module-info.py
     @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
                                     constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR,
                                     constants.ANDROID_HOST_OUT:HOST_OUT_DIR})
@@ -246,104 +241,6 @@
 
     @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
                                     constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
-    @mock.patch.object(module_info.ModuleInfo, 'has_test_config')
-    @mock.patch.object(module_info.ModuleInfo, 'is_robolectric_test')
-    def test_is_testable_module(self, mock_is_robo_test, mock_has_test_config):
-        """Test is_testable_module."""
-        mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
-        mock_is_robo_test.return_value = False
-        mock_has_test_config.return_value = True
-        installed_module_info = {constants.MODULE_INSTALLED:
-                                 uc.DEFAULT_INSTALL_PATH}
-        non_installed_module_info = {constants.MODULE_NAME: 'rand_name'}
-        # Empty mod_info or a non-installed module.
-        self.assertFalse(mod_info.is_testable_module(non_installed_module_info))
-        self.assertFalse(mod_info.is_testable_module({}))
-        # Testable Module or is a robo module for non-installed module.
-        self.assertTrue(mod_info.is_testable_module(installed_module_info))
-        mock_has_test_config.return_value = False
-        self.assertFalse(mod_info.is_testable_module(installed_module_info))
-        mock_is_robo_test.return_value = True
-        self.assertTrue(mod_info.is_testable_module(non_installed_module_info))
-
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
-    @mock.patch.object(module_info.ModuleInfo, 'is_auto_gen_test_config')
-    def test_has_test_config(self, mock_is_auto_gen):
-        """Test has_test_config."""
-        mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
-        info = {constants.MODULE_PATH:[uc.TEST_DATA_DIR]}
-        mock_is_auto_gen.return_value = True
-        # Validate we see the config when it's auto-generated.
-        self.assertTrue(mod_info.has_test_config(info))
-        self.assertTrue(mod_info.has_test_config({}))
-        # Validate when actual config exists and there's no auto-generated config.
-        mock_is_auto_gen.return_value = False
-        info = {constants.MODULE_PATH:[uc.TEST_DATA_DIR]}
-        self.assertTrue(mod_info.has_test_config(info))
-        self.assertFalse(mod_info.has_test_config({}))
-        # Validate the case mod_info MODULE_TEST_CONFIG be set
-        info2 = {constants.MODULE_PATH:[uc.TEST_CONFIG_DATA_DIR],
-                 constants.MODULE_TEST_CONFIG:[os.path.join(
-                     uc.TEST_CONFIG_DATA_DIR, "a.xml.data")]}
-        self.assertTrue(mod_info.has_test_config(info2))
-
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
-    @mock.patch.object(module_info.ModuleInfo, 'get_module_names')
-    def test_get_robolectric_test_name(self, mock_get_module_names):
-        """Test get_robolectric_test_name."""
-        # Happy path testing, make sure we get the run robo target.
-        mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
-        mod_info.name_to_module_info = MOD_NAME_INFO_DICT
-        mod_info.path_to_module_info = MOD_PATH_INFO_DICT
-        mock_get_module_names.return_value = [ASSOCIATED_ROBO_MODULE, ROBO_MODULE]
-        self.assertEqual(mod_info.get_robolectric_test_name(
-            ROBO_MODULE), ASSOCIATED_ROBO_MODULE)
-        # Let's also make sure we don't return anything when we're not supposed
-        # to.
-        mock_get_module_names.return_value = [ROBO_MODULE]
-        self.assertEqual(mod_info.get_robolectric_test_name(
-            ROBO_MODULE), None)
-
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
-    @mock.patch.object(module_info.ModuleInfo, 'is_modern_robolectric_test')
-    @mock.patch.object(module_info.ModuleInfo, 'is_robolectric_module')
-    @mock.patch('os.path.isfile', return_value=False)
-    @mock.patch.object(module_info.ModuleInfo, 'get_module_info')
-    @mock.patch.object(module_info.ModuleInfo, 'get_module_names')
-    def test_get_robolectric_type(self, mock_get_module_names, mock_get_module_info,
-        mock_isfile, mock_is_robo_mod, mock_is_modern_robolectric_test):
-        """Test get_robolectric_type."""
-        # Happy path testing, make sure we get the run robo target.
-        mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
-        mod_info.name_to_module_info = MOD_NAME_INFO_DICT
-        mod_info.path_to_module_info = MOD_PATH_INFO_DICT
-        mock_isfile.return_value = False
-        mock_get_module_names.return_value = [ASSOCIATED_ROBO_MODULE, ROBO_MODULE]
-        mock_get_module_info.return_value = ASSOCIATED_ROBO_MODULE_INFO
-        mock_is_modern_robolectric_test.return_value = False
-        # Test on an legacy associated robo module.
-        self.assertEqual(
-            mod_info.get_robolectric_type(ASSOCIATED_ROBO_MODULE), constants.ROBOTYPE_LEGACY)
-        # Test on a legacy robo module.
-        self.assertEqual(
-            mod_info.get_robolectric_type(ROBO_MODULE), constants.ROBOTYPE_LEGACY)
-        # Test on a modern robo module.
-        mock_is_modern_robolectric_test.return_value = True
-        self.assertEqual(
-            mod_info.get_robolectric_type(ROBO_MODULE), constants.ROBOTYPE_MODERN)
-        # Two situations that are not a robolectric test:
-        # 1. Not is_robolectric_module:
-        mock_is_robo_mod.return_value = False
-        self.assertEqual(mod_info.get_robolectric_type(ROBO_MODULE), 0)
-        # 2. The path in the mod_info is inexistent.
-        mod_info.path_to_module_info = {'/inexist': ['Foo', 'RunFoo']}
-        self.assertEqual(mod_info.get_robolectric_type(ROBO_MODULE), 0)
-
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     @mock.patch.object(module_info.ModuleInfo, 'get_robolectric_type')
     def test_is_robolectric_test(self, mock_type):
         """Test is_robolectric_test."""
@@ -355,8 +252,6 @@
         mock_type.return_value = 0
         self.assertFalse(mod_info.is_robolectric_test(ROBO_MODULE))
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     @mock.patch.object(module_info.ModuleInfo, 'is_module')
     def test_is_auto_gen_test_config(self, mock_is_module):
         """Test is_auto_gen_test_config correctly detects the module."""
@@ -375,21 +270,6 @@
         self.assertFalse(mod_info.is_auto_gen_test_config(MOD_NAME3))
         self.assertFalse(mod_info.is_auto_gen_test_config(MOD_NAME4))
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
-    def test_is_robolectric_module(self):
-        """Test is_robolectric_module correctly detects the module."""
-        mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
-        is_robolectric_module = {'class': ['ROBOLECTRIC']}
-        is_not_robolectric_module = {'class': ['OTHERS']}
-        MOD_INFO_DICT[MOD_NAME1] = is_robolectric_module
-        MOD_INFO_DICT[MOD_NAME2] = is_not_robolectric_module
-        mod_info.name_to_module_info = MOD_INFO_DICT
-        self.assertTrue(mod_info.is_robolectric_module(MOD_INFO_DICT[MOD_NAME1]))
-        self.assertFalse(mod_info.is_robolectric_module(MOD_INFO_DICT[MOD_NAME2]))
-
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     def test_merge_build_system_infos(self):
         """Test _merge_build_system_infos."""
         mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
@@ -403,8 +283,6 @@
             name_to_mod_info['module_1'].get(constants.MODULE_DEPENDENCIES),
             expect_deps)
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     def test_merge_build_system_infos_missing_keys(self):
         """Test _merge_build_system_infos for keys missing from module-info.json."""
         mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
@@ -416,8 +294,6 @@
             name_to_mod_info['not_in_module_info'].get(constants.MODULE_DEPENDENCIES),
             expect_deps)
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     def test_merge_dependency_with_ori_dependency(self):
         """Test _merge_dependency."""
         mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
@@ -448,7 +324,7 @@
             instrumentation_for: "AmSlam"
         }"""
         bp_file = os.path.join(uc.TEST_DATA_DIR, 'foo/bar/AmSlam/test/Android.bp')
-        with open(bp_file, 'w') as cache:
+        with open(bp_file, 'w', encoding='utf-8') as cache:
             cache.write(bp_context)
         self.assertEqual(
             mod_info.get_instrumentation_target_apps('AmSlamTests'), artifacts)
@@ -498,8 +374,6 @@
             mod_info.get_filepath_from_module('AmSlamTests', 'AndroidManifest.xml'),
             expected_filepath)
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     def test_get_module_dependency(self):
         """Test get_module_dependency."""
         mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
@@ -511,8 +385,6 @@
             mod_info.get_module_dependency('dep_test_module'),
             expect_deps)
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     def test_get_module_dependency_w_loop(self):
         """Test get_module_dependency with problem dep file."""
         mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
@@ -527,8 +399,6 @@
             mod_info.get_module_dependency('dep_test_module'),
             expect_deps)
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     def test_get_install_module_dependency(self):
         """Test get_install_module_dependency."""
         mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
@@ -539,8 +409,6 @@
             mod_info.get_install_module_dependency('dep_test_module'),
             expect_deps)
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     def test_cc_merge_build_system_infos(self):
         """Test _merge_build_system_infos for cc."""
         mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
@@ -554,8 +422,6 @@
             name_to_mod_info['module_cc_1'].get(constants.MODULE_DEPENDENCIES),
             expect_deps)
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     def test_is_unit_test(self):
         """Test is_unit_test."""
         module_name = 'myModule'
@@ -565,9 +431,6 @@
                                           index_dir=HOST_OUT_DIR)
         self.assertTrue(mod_info.is_unit_test(maininfo_with_unittest))
 
-    @mock.patch.dict('os.environ',
-                     {constants.ANDROID_BUILD_TOP: '/',
-                      constants.ANDROID_PRODUCT_OUT: PRODUCT_OUT_DIR})
     def test_is_host_unit_test(self):
         """Test is_host_unit_test."""
         module_name = 'myModule'
@@ -584,8 +447,6 @@
 
         self.assertTrue(mod_info.is_host_unit_test(maininfo_with_host_unittest))
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     def test_is_device_driven_test(self):
         module_name = 'myModule'
         maininfo_with_device_driven_test = {
@@ -599,8 +460,6 @@
 
         self.assertTrue(mod_info.is_device_driven_test(maininfo_with_device_driven_test))
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     def test_not_device_driven_test_when_suite_is_robolectric_test(self):
         module_name = 'myModule'
         maininfo_with_device_driven_test = {
@@ -615,23 +474,32 @@
 
         self.assertFalse(mod_info.is_device_driven_test(maininfo_with_device_driven_test))
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     def test_is_host_driven_test(self):
         """Test is_host_driven_test."""
-        module_name = 'myModule'
-        maininfo_with_host_driven_test = {
-            constants.MODULE_NAME: module_name,
+        test_name = 'myModule'
+        expected_host_driven_info  = {
+            constants.MODULE_NAME: test_name,
             constants.MODULE_TEST_CONFIG:[os.path.join(
                 uc.TEST_CONFIG_DATA_DIR, "a.xml.data")],
             constants.MODULE_INSTALLED: uc.DEFAULT_INSTALL_PATH,
             'supported_variants': ['HOST']
         }
+        mod_info = create_module_info([
+            module(
+                name=test_name,
+                test_config=[os.path.join(uc.TEST_CONFIG_DATA_DIR,
+                             "a.xml.data")],
+                installed=uc.DEFAULT_INSTALL_PATH,
+                supported_variants=['HOST']
+            )
+        ])
 
-        mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH)
+        return_value = mod_info.is_host_driven_test(expected_host_driven_info)
 
-        self.assertTrue(mod_info.is_host_driven_test(maininfo_with_host_driven_test))
+        self.assertTrue(return_value)
 
+    # TODO: (b/264015241) Stop mocking build variables.
+    # TODO: (b/263199608) Re-write the test after refactoring module-info.py
     @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:os.path.dirname(__file__),
                                     constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     def test_has_mainline_modules(self):
@@ -650,6 +518,8 @@
         # cannot be found in both 'test_mainline_modules' and 'test_config'.
         self.assertFalse(mod_info.has_mainline_modules(name3, mainline_module2))
 
+    # TODO: (b/264015241) Stop mocking build variables.
+    # TODO: (b/263199608) Re-write the test after refactoring module-info.py
     @mock.patch.dict('os.environ',
                      {constants.ANDROID_BUILD_TOP:os.path.dirname(__file__),
                       constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
@@ -749,7 +619,6 @@
         self.setUpPyfakefs()
 
     # pylint: disable=protected-access
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP: '/'})
     def create_empty_module_info(self):
         fake_temp_file_name = next(tempfile._get_candidate_names())
         self.fs.create_file(fake_temp_file_name, contents='{}')
@@ -761,46 +630,361 @@
 
         for m in modules:
             mod_info.name_to_module_info[m['module_name']] = m
+            for path in m['path']:
+                if path in mod_info.path_to_module_info:
+                    mod_info.path_to_module_info[path].append(m)
+                else:
+                    mod_info.path_to_module_info[path] = [m]
 
         return mod_info
 
 
+class HasTestConfonfigTest(ModuleInfoTestFixture):
+    """Tests has_test_config in various conditions."""
+
+    def test_return_true_if_test_config_is_not_empty(self):
+        test_module_info = module(test_config=['config_file'])
+        mod_info = self.create_module_info()
+
+        return_value = mod_info.has_test_config(test_module_info)
+
+        self.assertTrue(return_value)
+
+    def test_return_true_if_auto_test_config_is_not_empty(self):
+        test_module_info = module(auto_test_config=['no_empty'])
+        mod_info = self.create_module_info()
+
+        return_value = mod_info.has_test_config(test_module_info)
+
+        self.assertTrue(return_value)
+
+    def test_return_false_if_auto_test_config_and_test_config_empty(self):
+        test_module_info = module(test_config=[],
+                                  auto_test_config=[])
+        mod_info = self.create_module_info()
+
+        return_value = mod_info.has_test_config(test_module_info)
+
+        self.assertFalse(return_value)
+
+
 class ModuleInfoCompatibilitySuiteTest(ModuleInfoTestFixture):
     """Tests the compatibility suite in the module info."""
 
     def test_return_true_if_suite_in_test(self):
-        test_module = module(compatibility_suites=['test_suite'])
+        test_module_info = module(compatibility_suites=['test_suite'])
         mod_info = self.create_module_info()
 
         return_value = mod_info.is_suite_in_compatibility_suites(
-            'test_suite', test_module)
+            'test_suite', test_module_info)
 
         self.assertTrue(return_value)
 
     def test_return_false_if_suite_not_in_test(self):
-        test_module = module(compatibility_suites=['no_suite'])
+        test_module_info = module(compatibility_suites=['no_suite'])
         mod_info = self.create_module_info()
 
         return_value = mod_info.is_suite_in_compatibility_suites(
-            'test_suite', test_module)
+            'test_suite', test_module_info)
 
         self.assertFalse(return_value)
 
     def test_return_false_when_mod_info_is_empty(self):
-        test_module = None
+        test_module_info = None
         mod_info = self.create_module_info()
 
         return_value = mod_info.is_suite_in_compatibility_suites(
-            'test_suite', test_module)
+            'test_suite', test_module_info)
 
         self.assertFalse(return_value)
 
     def test_return_false_when_mod_info_is_not_a_dict(self):
-        test_module = ['no_a_dict']
+        test_module_info = ['no_a_dict']
         mod_info = self.create_module_info()
 
         return_value = mod_info.is_suite_in_compatibility_suites(
-            'test_suite', test_module)
+            'test_suite', test_module_info)
+
+        self.assertFalse(return_value)
+
+
+class RobolectricTestNameTest(ModuleInfoTestFixture):
+    """Tests the Robolectric test name in the module info."""
+
+    def test_return_empty_for_a_modern_robolectric_test(self):
+        module_name = 'hello_world_test'
+        mod_info = self.create_module_info(modules=[
+            modern_robolectric_test_module(name=f'{module_name}'),
+        ])
+
+        return_module = mod_info.get_robolectric_test_name(module_name)
+
+        self.assertEqual('', return_module)
+
+    def test_return_related_robolectric_run_module_name(self):
+        module_name = 'hello_world_test'
+        run_module_name = f'Run{module_name}'
+        module_path = 'robolectric_path'
+        mod_info = self.create_module_info(modules=[
+            test_module(name=f'{module_name}',
+                        path=module_path),
+            robolectric_class_test_module(name=f'{run_module_name}',
+                                          path=module_path),
+        ])
+
+        return_module = mod_info.get_robolectric_test_name(module_name)
+
+        self.assertEqual(run_module_name, return_module)
+
+    def test_return_empty_when_no_related_robolectic_class_module(self):
+        module_name = 'hello_world_test'
+        run_module_name = f'Run{module_name}'
+        module_path = 'robolectric_path'
+        mod_info = self.create_module_info(modules=[
+            test_module(name=f'{module_name}',
+                        path=module_path),
+            test_module(name=f'{run_module_name}',
+                        path=module_path),
+        ])
+
+        return_module = mod_info.get_robolectric_test_name(module_name)
+
+        self.assertEqual('', return_module)
+
+    def test_return_empty_if_related_module_name_not_start_with_Run(self):
+        module_name = 'hello_world_test'
+        run_module_name = f'Not_Run{module_name}'
+        module_path = 'robolectric_path'
+        mod_info = self.create_module_info(modules=[
+            test_module(name=f'{module_name}',
+                        path=module_path),
+            robolectric_class_test_module(name=f'{run_module_name}',
+                                          path=module_path),
+        ])
+
+        return_module = mod_info.get_robolectric_test_name(module_name)
+
+        self.assertEqual('', return_module)
+
+    def test_return_itself_for_a_robolectric_class_test_module(self):
+        module_name = 'Run_hello_world_test'
+        mod_info = self.create_module_info(modules=[
+            robolectric_class_test_module(name=f'{module_name}'),
+        ])
+
+        return_module = mod_info.get_robolectric_test_name(module_name)
+
+        self.assertEqual(module_name, return_module)
+
+    def test_return_empty_if_robolectric_class_module_not_start_with_Run(self):
+        module_name = 'hello_world_test'
+        mod_info = self.create_module_info(modules=[
+            robolectric_class_test_module(name=f'{module_name}'),
+        ])
+
+        return_module = mod_info.get_robolectric_test_name(module_name)
+
+        self.assertEqual('', return_module)
+
+    def test_return_0_when_no_mod_info(self):
+        module_name = 'hello_world_test'
+        mod_info = self.create_module_info()
+
+        return_module = mod_info.get_robolectric_test_name(module_name)
+
+        self.assertEqual('', return_module)
+
+
+class RobolectricTestTypeTest(ModuleInfoTestFixture):
+    """Tests the Robolectric test type in the module info."""
+
+    def test_modern_robolectric_test_type(self):
+        module_name = 'hello_world_test'
+        mod_info = self.create_module_info(modules=[
+            modern_robolectric_test_module(name=f'{module_name}'),
+        ])
+
+        return_value = mod_info.get_robolectric_type(module_name)
+
+        self.assertEqual(return_value, constants.ROBOTYPE_MODERN)
+
+    def test_return_modern_if_compliant_with_modern_and_legacy(self):
+        module_name = 'hello_world_test'
+        module_path = 'robolectric_path'
+        run_module_name = f'Run{module_name}'
+        mod_info = self.create_module_info(modules=[
+            modern_robolectric_test_module(name=f'{module_name}',
+                        path=module_path),
+            robolectric_class_test_module(name=f'{run_module_name}',
+                                          path=module_path),
+        ])
+
+        return_value = mod_info.get_robolectric_type(module_name)
+
+        self.assertEqual(return_value, constants.ROBOTYPE_MODERN)
+
+    def test_not_modern_robolectric_test_if_suite_is_not_robolectric(self):
+        module_name = 'hello_world_test'
+        mod_info = self.create_module_info(modules=[
+            test_module(name=f'{module_name}',
+                        compatibility_suites='not_robolectric_tests'),
+        ])
+
+        return_value = mod_info.get_robolectric_type(module_name)
+
+        self.assertEqual(return_value, 0)
+
+    def test_legacy_robolectric_test_type(self):
+        module_name = 'hello_world_test'
+        run_module_name = f'Run{module_name}'
+        module_path = 'robolectric_path'
+        mod_info = self.create_module_info(modules=[
+            test_module(name=f'{module_name}',
+                        path=module_path),
+            robolectric_class_test_module(name=f'{run_module_name}',
+                                          path=module_path),
+        ])
+
+        return_value = mod_info.get_robolectric_type(module_name)
+
+        self.assertEqual(return_value, constants.ROBOTYPE_LEGACY)
+
+    def test_robolectric_class_test_module(self):
+        module_name = 'Run_hello_world_test'
+        mod_info = self.create_module_info(modules=[
+            robolectric_class_test_module(name=f'{module_name}'),
+        ])
+
+        return_value = mod_info.get_robolectric_type(module_name)
+
+        self.assertEqual(return_value, constants.ROBOTYPE_LEGACY)
+
+    def test_not_robolectric_test_if_module_name_not_start_with_Run(self):
+        module_name = 'hello_world_test'
+        mod_info = self.create_module_info(modules=[
+            robolectric_class_test_module(name=f'{module_name}'),
+        ])
+
+        return_value = mod_info.get_robolectric_type(module_name)
+
+        self.assertEqual(return_value, 0)
+
+    def test_return_0_when_no_related_robolectic_class_module(self):
+        module_name = 'hello_world_test'
+        run_module_name = f'Run{module_name}'
+        module_path = 'robolectric_path'
+        mod_info = self.create_module_info(modules=[
+            test_module(name=f'{module_name}',
+                        path=module_path),
+            test_module(name=f'{run_module_name}',
+                        path=module_path),
+        ])
+
+        return_value = mod_info.get_robolectric_type(module_name)
+
+        self.assertEqual(return_value, 0)
+
+    def test_return_0_when_no_related_module_name_start_with_Run(self):
+        module_name = 'hello_world_test'
+        run_module_name = f'Not_Run{module_name}'
+        module_path = 'robolectric_path'
+        mod_info = self.create_module_info(modules=[
+            test_module(name=f'{module_name}',
+                        path=module_path),
+            robolectric_class_test_module(name=f'{run_module_name}',
+                                          path=module_path),
+        ])
+
+        return_value = mod_info.get_robolectric_type(module_name)
+
+        self.assertEqual(return_value, 0)
+
+    def test_return_0_when_no_mod_info(self):
+        module_name = 'hello_world_test'
+        mod_info = self.create_module_info()
+
+        return_value = mod_info.get_robolectric_type(module_name)
+
+        self.assertEqual(return_value, 0)
+
+
+class IsLegacyRobolectricClassTest(ModuleInfoTestFixture):
+    """Tests is_legacy_robolectric_class in various conditions."""
+
+    def test_return_true_if_module_class_is_robolectric(self):
+        test_module_info = module(classes=[constants.MODULE_CLASS_ROBOLECTRIC])
+        mod_info = self.create_module_info()
+
+        return_value = mod_info.is_legacy_robolectric_class(test_module_info)
+
+        self.assertTrue(return_value)
+
+    def test_return_false_if_module_class_is_not_robolectric(self):
+        test_module_info = module(classes=['not_robolectric'])
+        mod_info = self.create_module_info()
+
+        return_value = mod_info.is_legacy_robolectric_class(test_module_info)
+
+        self.assertFalse(return_value)
+
+    def test_return_false_if_module_class_is_empty(self):
+        test_module_info = module(classes=[])
+        mod_info = self.create_module_info()
+
+        return_value = mod_info.is_legacy_robolectric_class(test_module_info)
+
+        self.assertFalse(return_value)
+
+
+class IsTestableModuleTest(ModuleInfoTestFixture):
+    """Tests is_testable_module in various conditions."""
+
+    def test_return_true_for_tradefed_testable_module(self):
+        info = test_module()
+        mod_info = self.create_module_info()
+
+        return_value = mod_info.is_testable_module(info)
+
+        self.assertTrue(return_value)
+
+    def test_return_true_for_modern_robolectric_test_module(self):
+        info = modern_robolectric_test_module()
+        mod_info = self.create_module_info()
+
+        return_value = mod_info.is_testable_module(info)
+
+        self.assertTrue(return_value)
+
+    def test_return_true_for_legacy_robolectric_test_module(self):
+        info = legacy_robolectric_test_module()
+        mod_info = self.create_module_info()
+
+        return_value = mod_info.is_testable_module(info)
+
+        self.assertTrue(return_value)
+
+    def test_return_false_for_non_tradefed_testable_module(self):
+        info = module(auto_test_config=[], test_config=[],
+                      installed=['installed_path'])
+        mod_info = self.create_module_info()
+
+        return_value = mod_info.is_testable_module(info)
+
+        self.assertFalse(return_value)
+
+    def test_return_false_for_no_installed_path_module(self):
+        info = module(auto_test_config=['true'], installed=[])
+        mod_info = self.create_module_info()
+
+        return_value = mod_info.is_testable_module(info)
+
+        self.assertFalse(return_value)
+
+    def test_return_false_if_module_info_is_empty(self):
+        info = {}
+        mod_info = self.create_module_info()
+
+        return_value = mod_info.is_testable_module(info)
 
         self.assertFalse(return_value)
 
@@ -824,13 +1008,34 @@
     return mod_info
 
 
-# pylint: disable=too-many-arguments
+def test_module(**kwargs):
+    kwargs.setdefault('name', 'hello_world_test')
+    return test(module(**kwargs))
+
+
+def modern_robolectric_test_module(**kwargs):
+    kwargs.setdefault('name', 'hello_world_test')
+    return test(robolectric_tests_suite(module(**kwargs)))
+
+
+def legacy_robolectric_test_module(**kwargs):
+    kwargs.setdefault('name', 'Run_hello_world_test')
+    return test(robolectric_tests_suite(module(**kwargs)))
+
+
+def robolectric_class_test_module(**kwargs):
+    kwargs.setdefault('name', 'hello_world_test')
+    return test(robolectric_class(module(**kwargs)))
+
+
+# pylint: disable=too-many-arguments, too-many-locals
 def module(
     name=None,
     path=None,
     installed=None,
     classes=None,
     auto_test_config=None,
+    test_config=None,
     shared_libs=None,
     dependencies=None,
     runtime_dependencies=None,
@@ -839,17 +1044,19 @@
     compatibility_suites=None,
     host_dependencies=None,
     srcs=None,
+    supported_variants=None
 ):
     name = name or 'libhello'
 
     m = {}
 
     m['module_name'] = name
-    m['class'] = classes
+    m['class'] = classes or ['ETC']
     m['path'] = [path or '']
     m['installed'] = installed or []
     m['is_unit_test'] = 'false'
     m['auto_test_config'] = auto_test_config or []
+    m['test_config'] = test_config or []
     m['shared_libs'] = shared_libs or []
     m['runtime_dependencies'] = runtime_dependencies or []
     m['dependencies'] = dependencies or []
@@ -858,7 +1065,26 @@
     m['compatibility_suites'] = compatibility_suites or []
     m['host_dependencies'] = host_dependencies or []
     m['srcs'] = srcs or []
+    m['supported_variants'] = supported_variants or []
     return m
 
+
+def test(info):
+    info['auto_test_config'] = ['true']
+    info['installed'] = ['installed_path']
+    return info
+
+
+def robolectric_class(info):
+    info['class'] = ['ROBOLECTRIC']
+    return info
+
+
+def robolectric_tests_suite(info):
+    info = test(info)
+    info.setdefault('compatibility_suites', []).append('robolectric-tests')
+    return info
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/atest/proto/file_md5.proto b/atest/proto/file_md5.proto
new file mode 100644
index 0000000..b3be4ae
--- /dev/null
+++ b/atest/proto/file_md5.proto
@@ -0,0 +1,18 @@
+syntax = "proto3";
+
+enum RootType {
+  SRC_ROOT = 0;
+  RESOURCE_ROOT = 1;
+  ABS_PATH = 2;
+  PRODUCT_OUT = 3;
+}
+
+message FileChecksum {
+  RootType root_type = 1;
+  string rel_path = 2;
+  string md5sum = 3;
+}
+
+message FileChecksumList {
+  repeated FileChecksum file_checksums = 1;
+}
diff --git a/atest/test_data/test_commands.json b/atest/test_data/test_commands.json
index 6bcad74..5e41c83 100644
--- a/atest/test_data/test_commands.json
+++ b/atest/test_data/test_commands.json
@@ -662,5 +662,25 @@
 "log_saver=template/log/atest_log_saver",
 "template/atest_local_min",
 "test=atest"
+],
+"pts-bot:PAN/GN/MISC/UUID/BV-01-C": [
+"--atest-include-filter",
+"--include-filter",
+"--log-level",
+"--log-level-display",
+"--logcat-on-failure",
+"--no-early-device-release",
+"--no-enable-granular-attempts",
+"--skip-loading-config-jar",
+"--template:map",
+"--template:map",
+"VERBOSE",
+"VERBOSE",
+"atest_tradefed.sh",
+"log_saver=template/log/atest_log_saver",
+"pts-bot",
+"pts-bot:PAN/GN/MISC/UUID/BV-01-C",
+"template/atest_local_min",
+"test=atest"
 ]
 }
\ No newline at end of file
diff --git a/atest/test_finder_handler.py b/atest/test_finder_handler.py
index a565291..94f79d9 100644
--- a/atest/test_finder_handler.py
+++ b/atest/test_finder_handler.py
@@ -22,15 +22,14 @@
 
 import inspect
 import logging
-import sys
 
 from enum import unique, Enum
 
-from atest import atest_enum
 from atest import constants
 
 from atest.test_finders import cache_finder
 from atest.test_finders import test_finder_base
+from atest.test_finders import test_finder_utils
 from atest.test_finders import suite_plan_finder
 from atest.test_finders import tf_integration_finder
 from atest.test_finders import module_finder
@@ -196,11 +195,9 @@
     if '.' in ref:
         ref_end = ref.rsplit('.', 1)[-1]
         ref_end_is_upper = ref_end[0].isupper()
-    if ':' in ref:
-        if ref.count(':') > 1:
-            logging.error('More than 1 colon (:) in the test reference(%s). '
-                          'Please correct it and try again.', ref)
-            sys.exit(atest_enum.ExitCode.INPUT_TEST_REFERENCE_ERROR)
+    # parse_test_reference() will return a non-empty dictionary if the input
+    # test reference matches $module:$package_class.
+    if test_finder_utils.parse_test_reference(ref):
         if '.' in ref:
             if ref_end_is_upper:
                 # Module:fully.qualified.Class or Integration:fully.q.Class
diff --git a/atest/test_finders/module_finder.py b/atest/test_finders/module_finder.py
index 4643440..0fd406d 100644
--- a/atest/test_finders/module_finder.py
+++ b/atest/test_finders/module_finder.py
@@ -658,9 +658,15 @@
         Returns:
             A list of populated TestInfo namedtuple if found, else None.
         """
-        if ':' not in module_class:
+        parse_result = test_finder_utils.parse_test_reference(module_class)
+        if not parse_result:
             return None
-        module_name, class_name = module_class.split(':')
+        module_name =  parse_result['module_name']
+        class_name = parse_result['pkg_class_name']
+        method_name = parse_result.get('method_name', '')
+        if method_name:
+            class_name = class_name + '#' + method_name
+
         # module_infos is a list with at most 1 element.
         module_infos = self.find_test_by_module_name(module_name)
         module_info = module_infos[0] if module_infos else None
@@ -736,7 +742,15 @@
         Returns:
             A list of populated TestInfo namedtuple if found, else None.
         """
-        module_name, package = module_package.split(':')
+        parse_result = test_finder_utils.parse_test_reference(module_package)
+        if not parse_result:
+            return None
+        module_name =  parse_result['module_name']
+        package = parse_result['pkg_class_name']
+        method = parse_result.get('method_name', '')
+        if method:
+            package = package + '#' + method
+
         # module_infos is a list with at most 1 element.
         module_infos = self.find_test_by_module_name(module_name)
         module_info = module_infos[0] if module_infos else None
@@ -954,7 +968,8 @@
                         return [tinfo]
         return None
 
-    def _is_comparted_src(self, path):
+    @staticmethod
+    def _is_comparted_src(path):
         """Check if the input path need to match srcs information in module.
 
         If path is a folder or android build file, we don't need to compart
diff --git a/atest/test_finders/module_finder_unittest.py b/atest/test_finders/module_finder_unittest.py
index 67f096d..0ee7c1c 100755
--- a/atest/test_finders/module_finder_unittest.py
+++ b/atest/test_finders/module_finder_unittest.py
@@ -108,11 +108,9 @@
 
     def setUp(self):
         self.setUpPyfakefs()
-        self.build_top = Path('/top')
-        self.out_dir = self.build_top.joinpath('out')
-        self.out_dir.mkdir(parents=True)
-        self.product_out = self.out_dir.joinpath('product')
-        self.host_out = self.out_dir.joinpath('host')
+        self.build_top = Path('/')
+        self.product_out = self.build_top.joinpath('out/product')
+        self.product_out.mkdir(parents=True, exist_ok=True)
         self.module_info_file = self.product_out.joinpath('atest_merged_dep.json')
         self.fs.create_file(
             self.module_info_file,
@@ -128,10 +126,8 @@
                 }''')
             )
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP: '/top',
-                                    constants.ANDROID_HOST_OUT: '/top/hout'})
     @mock.patch('builtins.input', return_value='1')
-    def test_find_test_by_module_name_w_multiple_config(self, _get_arg):
+    def test_find_test_by_module_name_w_multiple_config(self, _):
         """Test find_test_by_module_name (test_config_select)"""
         atest_configs.GLOBAL_ARGS = mock.Mock()
         atest_configs.GLOBAL_ARGS.test_config_select = True
@@ -163,8 +159,6 @@
         unittest_utils.assert_equal_testinfos(self,
             t_infos[0], expected_test_info)
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP: '/top',
-                                    constants.ANDROID_HOST_OUT: '/top/hout'})
     def test_find_test_by_module_name_w_multiple_config_all(self):
         """Test find_test_by_module_name."""
         atest_configs.GLOBAL_ARGS = mock.Mock()
@@ -206,6 +200,74 @@
         unittest_utils.assert_equal_testinfos(self,
             t_infos[1], expected_test_info[1])
 
+class ModuleFinderFindTestByPath(fake_filesystem_unittest.TestCase):
+    """Test cases that invoke find_test_by_path."""
+    def setUp(self):
+        self.setUpPyfakefs()
+
+    # pylint: disable=protected-access
+    def create_empty_module_info(self):
+        fake_temp_file_name = next(tempfile._get_candidate_names())
+        self.fs.create_file(fake_temp_file_name, contents='{}')
+        return module_info.ModuleInfo(module_file=fake_temp_file_name)
+
+    def create_module_info(self, modules=None):
+        mod_info = self.create_empty_module_info()
+        modules = modules or []
+
+        for m in modules:
+            mod_info.name_to_module_info[m['module_name']] = m
+            for path in m['path']:
+                if path in mod_info.path_to_module_info:
+                    mod_info.path_to_module_info[path].append(m)
+                else:
+                    mod_info.path_to_module_info[path] = [m]
+
+        return mod_info
+
+    # TODO: remove below mocks and hide unnecessary information.
+    @mock.patch.object(module_finder.ModuleFinder, '_get_test_info_filter')
+    @mock.patch.object(test_finder_utils, 'find_parent_module_dir',
+                       return_value=None)
+    @mock.patch('os.path.exists')
+    #pylint: disable=unused-argument
+    def test_find_test_by_path_belong_to_dependencies(
+            self, _mock_exists, _mock_find_parent, _mock_test_filter):
+        """Test find_test_by_path if belong to test dependencies."""
+        test1 = module(name='test1',
+                       classes=['class'],
+                       dependencies=['lib1'],
+                       installed=['install/test1'],
+                       auto_test_config=[True])
+        test2 = module(name='test2',
+                       classes=['class'],
+                       dependencies=['lib2'],
+                       installed=['install/test2'],
+                       auto_test_config=[True])
+        lib1 = module(name='lib1',
+                      srcs=['path/src1'])
+        lib2 = module(name='lib2',
+                      srcs=['path/src2'])
+        mod_info = self.create_module_info(
+            [test1, test2, lib1, lib2])
+        mod_finder = module_finder.ModuleFinder(module_info=mod_info)
+        _mock_exists.return_value = True
+        test1_filter = test_info.TestFilter('test1Filter', frozenset())
+        _mock_test_filter.return_value = test1_filter
+
+        t_infos = mod_finder.find_test_by_path('path/src1')
+
+        unittest_utils.assert_equal_testinfos(
+            self,
+            test_info.TestInfo(
+                'test1',
+                atf_tr.AtestTradefedTestRunner.NAME,
+                {'test1', 'MODULES-IN-'},
+                {constants.TI_FILTER: test1_filter,
+                 constants.TI_REL_CONFIG: 'AndroidTest.xml'},
+                module_class=['class']),
+            t_infos[0])
+
 #pylint: disable=protected-access
 class ModuleFinderUnittests(unittest.TestCase):
     """Unit tests for module_finder.py"""
@@ -530,6 +592,7 @@
         self.mod_finder.module_info.get_module_info.return_value = mod_info
         self.assertIsNone(self.mod_finder.find_test_by_module_and_package(bad_pkg))
 
+    # TODO: Move and rewrite it to ModuleFinderFindTestByPath.
     @mock.patch.object(test_finder_utils, 'find_host_unit_tests',
                        return_value=[])
     @mock.patch.object(test_finder_utils, 'get_cc_class_info', return_value={})
@@ -625,6 +688,7 @@
         unittest_utils.assert_equal_testinfos(
             self, uc.CC_PATH_INFO2, t_infos[0])
 
+    # TODO: Move and rewrite it to ModuleFinderFindTestByPath.
     @mock.patch.object(module_finder.ModuleFinder, '_get_build_targets',
                        return_value=copy.deepcopy(uc.MODULE_BUILD_TARGETS))
     @mock.patch.object(module_finder.ModuleFinder, '_is_vts_module',
@@ -668,50 +732,6 @@
         unittest_utils.assert_equal_testinfos(
             self, uc.CC_PATH_INFO, t_infos[0])
 
-    @mock.patch.object(module_finder.ModuleFinder, '_get_build_targets')
-    @mock.patch.object(module_finder.ModuleFinder, '_get_test_info_filter')
-    @mock.patch.object(test_finder_utils, 'find_parent_module_dir',
-                       return_value=None)
-    @mock.patch('os.path.exists')
-    #pylint: disable=unused-argument
-    def test_find_test_by_path_belong_to_dependencies(
-            self, _mock_exists, _mock_find_parent, _mock_test_filter,
-            _build_targets):
-        """Test find_test_by_path if belong to test dependencies."""
-        test1 = module(name='test1',
-                       classes=['class'],
-                       dependencies=['lib1'],
-                       installed=['install/test1'],
-                       auto_test_config=[True])
-        test2 = module(name='test2',
-                       classes=['class'],
-                       dependencies=['lib2'],
-                       installed=['install/test2'],
-                       auto_test_config=[True])
-        lib1 = module(name='lib1',
-                      srcs=['path/src1'])
-        lib2 = module(name='lib2',
-                      srcs=['path/src2'])
-        self.mod_finder.module_info = create_module_info(
-            [test1, test2, lib1, lib2])
-        _mock_exists.return_value = True
-        test1_filter = test_info.TestFilter('test1Filter', frozenset())
-        _mock_test_filter.return_value = test1_filter
-        _build_targets.return_value = {'test1_target'}
-
-        t_infos = self.mod_finder.find_test_by_path('path/src1')
-
-        unittest_utils.assert_equal_testinfos(
-            self,
-            test_info.TestInfo(
-                'test1',
-                atf_tr.AtestTradefedTestRunner.NAME,
-                {'test1_target'},
-                {constants.TI_FILTER: test1_filter,
-                 constants.TI_REL_CONFIG: 'AndroidTest.xml'},
-                module_class=['class']),
-            t_infos[0])
-
     @mock.patch.object(module_finder.test_finder_utils, 'get_cc_class_info')
     @mock.patch.object(test_finder_utils, 'find_host_unit_tests',
                        return_value=[])
@@ -1353,8 +1373,6 @@
             None)
 
 
-@mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP: '/',
-                                constants.ANDROID_HOST_OUT: '/tmp'})
 def create_empty_module_info():
     with fake_filesystem_unittest.Patcher() as patcher:
         # pylint: disable=protected-access
diff --git a/atest/test_finders/test_finder_utils.py b/atest/test_finders/test_finder_utils.py
index 45bbddc..cfaaf3f 100644
--- a/atest/test_finders/test_finder_utils.py
+++ b/atest/test_finders/test_finder_utils.py
@@ -34,6 +34,7 @@
 from contextlib import contextmanager
 from enum import unique, Enum
 from pathlib import Path
+from typing import Any, Dict
 
 from atest import atest_error
 from atest import atest_utils
@@ -581,7 +582,7 @@
             return rel_dir
         # Check module_info if auto_gen config or robo (non-config) here
         for mod in module_info.path_to_module_info.get(rel_dir, []):
-            if module_info.is_robolectric_module(mod):
+            if module_info.is_legacy_robolectric_class(mod):
                 return rel_dir
             for test_config in mod.get(constants.MODULE_TEST_CONFIG, []):
                 # If the test config doesn's exist until it was auto-generated
@@ -1392,3 +1393,22 @@
             return parent.absolute()
         parent = parent.parent
     return build_top
+
+
+def parse_test_reference(test_ref: str) -> Dict[str, str]:
+    """Parse module, class/pkg, and method name from the given test reference.
+
+    The result will be a non-empty dictionary only if the input test reference
+    matches $module:$pkg_class or $module:$pkg_class:$method.
+
+    Args:
+        test_ref: A string of the input test reference from command line.
+
+    Returns:
+        Dict includes module_name, pkg_class_name and method_name.
+    """
+    ref_match = re.match(
+        r'^(?P<module_name>[^:#]+):(?P<pkg_class_name>[^#]+)'
+        r'#?(?P<method_name>.*)$', test_ref)
+
+    return ref_match.groupdict(default=dict()) if ref_match else dict()
diff --git a/atest/test_finders/test_finder_utils_unittest.py b/atest/test_finders/test_finder_utils_unittest.py
index f905006..3b4c9f8 100755
--- a/atest/test_finders/test_finder_utils_unittest.py
+++ b/atest/test_finders/test_finder_utils_unittest.py
@@ -99,6 +99,7 @@
 HOST_OUT_DIR = tempfile.NamedTemporaryFile().name
 
 #pylint: disable=protected-access
+#pylint: disable=too-many-public-methods
 #pylint: disable=unnecessary-comprehension
 class TestFinderUtilsUnittests(unittest.TestCase):
     """Unit tests for test_finder_utils.py"""
@@ -185,6 +186,7 @@
         self.assertFalse(test_finder_utils.has_method_in_file(
             test_path, frozenset(['testMethod'])))
 
+    # TODO: (b/263330492) Stop mocking build environment variables.
     def test_has_method_in_kt_file(self):
         """Test has_method_in_file method with kt class path."""
         test_path = os.path.join(uc.TEST_DATA_DIR, 'class_file_path_testing',
@@ -362,7 +364,7 @@
         """
         abs_class_dir = '/%s' % CLASS_DIR
         mock_module_info = mock.Mock(spec=module_info.ModuleInfo)
-        mock_module_info.is_robolectric_module.return_value = True
+        mock_module_info.is_legacy_robolectric_class.return_value = True
         rel_class_dir_path = os.path.relpath(abs_class_dir, uc.ROOT)
         mock_module_info.path_to_module_info = {rel_class_dir_path: [{}]}
         unittest_utils.assert_strict_equal(
@@ -430,7 +432,6 @@
                                                        mock_module_info),
             VTS_XML_TARGETS)
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/'})
     @mock.patch('builtins.input', return_value='0')
     def test_search_integration_dirs(self, mock_input):
         """Test search_integration_dirs."""
@@ -534,7 +535,6 @@
         self.assertTrue(cpp_class in cc_tmp_test_result)
         self.assertTrue(cc_class in cc_tmp_test_result)
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/'})
     @mock.patch('builtins.input', return_value='0')
     @mock.patch.object(test_finder_utils, 'get_dir_path_and_filename')
     @mock.patch('os.path.exists', return_value=True)
@@ -596,8 +596,9 @@
         self.assertEqual(test_finder_utils.get_levenshtein_distance(uc.MOD3, uc.FUZZY_MOD3,
                                                                     dir_costs=(1, 2, 1)), 8)
 
-    def test_is_parameterized_java_class(self):
-        """Test is_parameterized_java_class method."""
+    @staticmethod
+    def test_is_parameterized_java_class():
+        """Test is_parameterized_java_class method. """
         matched_contents = (['@RunWith(Parameterized.class)'],
                             [' @RunWith( Parameterized.class ) '],
                             ['@RunWith(TestParameterInjector.class)'],
@@ -699,7 +700,8 @@
         self.assertEqual(package_name,
                          test_finder_utils.get_package_name(target_kt))
 
-    def get_paths_side_effect(self, module_name):
+    @staticmethod
+    def _get_paths_side_effect(module_name):
         """Mock return values for module_info.get_paths."""
         if module_name == UNIT_TEST_MODULE_1:
             return [IT_TEST_MATCHED_1_PATH]
@@ -709,8 +711,6 @@
             return [UNIT_TEST_NOT_MATCHED_1_PATH]
         return []
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     @mock.patch.object(module_info.ModuleInfo, 'get_all_host_unit_tests',
                        return_value=[UNIT_TEST_MODULE_1,
                                      UNIT_TEST_MODULE_2,
@@ -719,7 +719,7 @@
     def test_find_host_unit_tests(self, _get_paths, _mock_get_unit_tests):
         """Test find_host_unit_tests"""
         mod_info = module_info.ModuleInfo(module_file=JSON_FILE_PATH, index_dir=HOST_OUT_DIR)
-        _get_paths.side_effect = self.get_paths_side_effect
+        _get_paths.side_effect = self._get_paths_side_effect
         expect_unit_tests = [UNIT_TEST_MODULE_1, UNIT_TEST_MODULE_2]
         self.assertEqual(
             sorted(expect_unit_tests),
@@ -737,8 +737,6 @@
         expect_methods.sort()
         self.assertEqual(expect_methods, real_methods)
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     @mock.patch('os.path.isfile', side_effect=unittest_utils.isfile_side_effect)
     def test_get_test_config_use_androidtestxml(self, _isfile):
         """Test get_test_config_and_srcs using default AndroidTest.xml"""
@@ -751,8 +749,6 @@
         result, _ = test_finder_utils.get_test_config_and_srcs(t_info, mod_info)
         self.assertEqual(expect_config, result)
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     @mock.patch('os.path.isfile', side_effect=unittest_utils.isfile_side_effect)
     def test_get_test_config_single_config(self, _isfile):
         """Test get_test_config_and_srcs manualy set it's config"""
@@ -765,8 +761,6 @@
         result, _ = test_finder_utils.get_test_config_and_srcs(t_info, mod_info)
         self.assertEqual(expect_config, result)
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     @mock.patch('os.path.isfile', side_effect=unittest_utils.isfile_side_effect)
     def test_get_test_config_main_multiple_config(self, _isfile):
         """Test get_test_config_and_srcs which is the main module of multiple config"""
@@ -779,8 +773,6 @@
         result, _ = test_finder_utils.get_test_config_and_srcs(t_info, mod_info)
         self.assertEqual(expect_config, result)
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     @mock.patch('os.path.isfile', side_effect=unittest_utils.isfile_side_effect)
     def test_get_test_config_subtest_in_multiple_config(self, _isfile):
         """Test get_test_config_and_srcs not the main module of multiple config"""
@@ -802,5 +794,110 @@
 
         self.assertEqual(exist, False)
 
+    def test_parse_test_reference_input_module_class_method_match(self):
+        test_module = 'myModule'
+        test_class = 'myClass'
+        test_method = 'myTest::Method'
+        test_ref = f'{test_module}:{test_class}#{test_method}'
+
+        result = test_finder_utils.parse_test_reference(test_ref)
+
+        self.assertEqual(test_module, result['module_name'])
+        self.assertEqual(test_class, result['pkg_class_name'])
+        self.assertEqual(test_method, result['method_name'])
+
+    def test_parse_test_reference_input_module_class_match(self):
+        test_module = 'myModule'
+        test_class = 'myClass'
+        test_ref = f'{test_module}:{test_class}'
+
+        result = test_finder_utils.parse_test_reference(test_ref)
+
+        self.assertEqual(test_module, result['module_name'])
+        self.assertEqual(test_class, result['pkg_class_name'])
+        self.assertEqual('', result.get('method_name', ''))
+
+    def test_parse_test_reference_input_module_class_parameter_method_match(
+            self):
+        test_module = 'myModule'
+        test_class = 'myClass'
+        test_method = 'myTest::Method[0]'
+        test_ref = f'{test_module}:{test_class}#{test_method}'
+
+        result = test_finder_utils.parse_test_reference(test_ref)
+
+        self.assertEqual(test_module, result['module_name'])
+        self.assertEqual(test_class, result['pkg_class_name'])
+        self.assertEqual(test_method, result['method_name'])
+
+    def test_parse_test_reference_input_module_class_multiple_methods_match(
+            self):
+        test_module = 'myModule'
+        test_class = 'myClass'
+        test_method = 'myTest::Method[0],myTest::Method[1]'
+        test_ref = f'{test_module}:{test_class}#{test_method}'
+
+        result = test_finder_utils.parse_test_reference(test_ref)
+
+        self.assertEqual(test_module, result['module_name'])
+        self.assertEqual(test_class, result['pkg_class_name'])
+        self.assertEqual(test_method, result['method_name'])
+
+    def test_parse_test_reference_input_class_method_not_match(
+        self):
+        test_class = 'myClass'
+        test_method = 'myTest::Method'
+        test_ref = f'{test_class}#{test_method}'
+
+        result = test_finder_utils.parse_test_reference(test_ref)
+
+        self.assertEqual(result, dict())
+
+    def test_parse_test_reference_input_module_dashed_match(self):
+        test_module = 'my-module'
+        test_class = 'BR/EI/ZH'
+        test_ref = f'{test_module}:{test_class}'
+
+        result = test_finder_utils.parse_test_reference(test_ref)
+
+        self.assertEqual(test_module, result['module_name'])
+        self.assertEqual(test_class, result['pkg_class_name'])
+
+    def test_parse_test_reference_input_module_pkg_method_match(self):
+        test_module = 'myModule'
+        test_package = 'my.package'
+        test_method = 'myTest::Method'
+        test_ref = f'{test_module}:{test_package}#{test_method}'
+
+        result = test_finder_utils.parse_test_reference(test_ref)
+
+        self.assertEqual(test_module, result['module_name'])
+        self.assertEqual(test_package, result['pkg_class_name'])
+        self.assertEqual(test_method, result['method_name'])
+
+    def test_parse_test_reference_input_plan_class_match(self):
+        test_module = 'my/Module'
+        test_class = 'class'
+        test_ref = f'{test_module}:{test_class}'
+
+        result = test_finder_utils.parse_test_reference(test_ref)
+
+        self.assertEqual(test_module, result['module_name'])
+        self.assertEqual(test_class, result['pkg_class_name'])
+        self.assertEqual('', result.get('method_name', ''))
+
+    def test_parse_test_reference_input_module_parameter_class_and_method_match(
+        self):
+        test_module = 'myModule'
+        test_class = 'myClass/abc0'
+        test_method = 'myTest0/Method[0]'
+        test_ref = f'{test_module}:{test_class}#{test_method}'
+
+        result = test_finder_utils.parse_test_reference(test_ref)
+
+        self.assertEqual(test_module, result['module_name'])
+        self.assertEqual(test_class, result['pkg_class_name'])
+        self.assertEqual(test_method, result['method_name'])
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/atest/test_finders/tf_integration_finder.py b/atest/test_finders/tf_integration_finder.py
index a69aa69..ce5cc4b 100644
--- a/atest/test_finders/tf_integration_finder.py
+++ b/atest/test_finders/tf_integration_finder.py
@@ -173,8 +173,13 @@
             A populated TestInfo namedtuple if test found, else None
         """
         class_name = None
-        if ':' in name:
-            name, class_name = name.split(':')
+        parse_result = test_finder_utils.parse_test_reference(name)
+        if parse_result:
+            name =  parse_result['module_name']
+            class_name = parse_result['pkg_class_name']
+            method = parse_result.get('method_name', '')
+            if method:
+                class_name = class_name + '#' + method
         test_files = self._search_integration_dirs(name)
         if not test_files:
             # Check prebuilt jars if input name is in jars.
diff --git a/atest/test_finders/tf_integration_finder_unittest.py b/atest/test_finders/tf_integration_finder_unittest.py
index 90fb93d..eec7547 100755
--- a/atest/test_finders/tf_integration_finder_unittest.py
+++ b/atest/test_finders/tf_integration_finder_unittest.py
@@ -99,7 +99,6 @@
         self.assertEqual(
             self.tf_finder.find_test_by_integration_name('NotIntName'), [])
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/'})
     @mock.patch.object(tf_integration_finder.TFIntegrationFinder,
                        '_get_build_targets', return_value=set())
     @mock.patch('os.path.realpath',
diff --git a/atest/test_plans/INTEGRATION_TESTS b/atest/test_plans/INTEGRATION_TESTS
index 630302f..3d2a736 100644
--- a/atest/test_plans/INTEGRATION_TESTS
+++ b/atest/test_plans/INTEGRATION_TESTS
@@ -24,6 +24,7 @@
 CtsAnimationTestCases:AnimatorTest
 CtsSampleDeviceTestCases:SampleDeviceTest#testSharedPreferences
 CtsSampleDeviceTestCases:android.sample.cts.SampleDeviceReportLogTest
+pts-bot:PAN/GN/MISC/UUID/BV-01-C
 
 
 ###[Test Finder: QUALIFIED_CLASS, Test Runner:AtestTradefedTestRunner]###
diff --git a/atest/test_runner_handler.py b/atest/test_runner_handler.py
index 58f6085..d569a08 100644
--- a/atest/test_runner_handler.py
+++ b/atest/test_runner_handler.py
@@ -37,6 +37,7 @@
 from atest.metrics import metrics_utils
 from atest.test_finders import test_info
 from atest.test_runners import atest_tf_test_runner
+from atest.test_runners import roboleaf_test_runner
 from atest.test_runners import robolectric_test_runner
 from atest.test_runners import suite_plan_test_runner
 from atest.test_runners import vts_tf_test_runner
@@ -47,6 +48,7 @@
     suite_plan_test_runner.SuitePlanTestRunner.NAME: suite_plan_test_runner.SuitePlanTestRunner,
     vts_tf_test_runner.VtsTradefedTestRunner.NAME: vts_tf_test_runner.VtsTradefedTestRunner,
     bazel_mode.BazelTestRunner.NAME: bazel_mode.BazelTestRunner,
+    roboleaf_test_runner.RoboleafTestRunner.NAME: roboleaf_test_runner.RoboleafTestRunner,
 }
 
 
@@ -112,9 +114,8 @@
         test_runner_build_req |= test_runner(
             unused_result_dir,
             mod_info=mod_info,
-            test_infos=tests,
             extra_args=extra_args or {},
-        ).get_test_runner_build_reqs()
+        ).get_test_runner_build_reqs(tests)
     return test_runner_build_req
 
 
diff --git a/atest/test_runner_handler_unittest.py b/atest/test_runner_handler_unittest.py
index e5df586..7c2430d 100755
--- a/atest/test_runner_handler_unittest.py
+++ b/atest/test_runner_handler_unittest.py
@@ -26,7 +26,6 @@
 from unittest import mock
 
 from atest import atest_error
-from atest import constants
 from atest import module_info
 from atest import test_runner_handler
 from atest import unittest_constants as uc
@@ -51,8 +50,7 @@
 MODULE_INFO_B_AGAIN = test_info.TestInfo(MODULE_NAME_B_AGAIN, FAKE_TR_NAME_B,
                                          set())
 BAD_TESTINFO = test_info.TestInfo('bad_name', MISSING_TR_NAME, set())
-BUILD_TOP_DIR = tempfile.TemporaryDirectory().name
-PRODUCT_OUT_DIR = os.path.join(BUILD_TOP_DIR, 'out/target/product/vsoc_x86_64')
+
 
 class FakeTestRunnerA(tr_base.TestRunnerBase):
     """Fake test runner A."""
@@ -66,7 +64,7 @@
     def host_env_check(self):
         pass
 
-    def get_test_runner_build_reqs(self):
+    def get_test_runner_build_reqs(self, test_infos):
         return FAKE_TR_A_REQS
 
     def generate_run_commands(self, test_infos, extra_args, port=None):
@@ -81,7 +79,7 @@
     def run_tests(self, test_infos, extra_args, reporter):
         return 1
 
-    def get_test_runner_build_reqs(self):
+    def get_test_runner_build_reqs(self, test_infos):
         return FAKE_TR_B_REQS
 
 
@@ -126,8 +124,6 @@
             test_runner_handler.get_test_runner_reqs(empty_module_info,
                                                      test_infos))
 
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/',
-                                    constants.ANDROID_PRODUCT_OUT:PRODUCT_OUT_DIR})
     @mock.patch.object(metrics, 'RunnerFinishEvent')
     def test_run_all_tests(self, _mock_runner_finish):
         """Test that the return value as we expected."""
diff --git a/atest/test_runners/atest_tf_test_runner.py b/atest/test_runners/atest_tf_test_runner.py
index 08ad7d8..460f06c 100644
--- a/atest/test_runners/atest_tf_test_runner.py
+++ b/atest/test_runners/atest_tf_test_runner.py
@@ -29,7 +29,7 @@
 
 from functools import partial
 from pathlib import Path
-from typing import Any, List, Tuple
+from typing import Any, Dict, List, Set, Tuple
 
 from atest import atest_configs
 from atest import atest_error
@@ -125,8 +125,10 @@
         super().__init__(results_dir, **kwargs)
         self.module_info = mod_info
         self.log_path = os.path.join(results_dir, LOG_FOLDER_NAME)
-        if not os.path.exists(self.log_path):
-            os.makedirs(self.log_path)
+        # (b/275537997) results_dir could be '' in test_runner_handler; only
+        # mkdir when it is invoked by run_tests.
+        if results_dir:
+            Path(self.log_path).mkdir(parents=True, exist_ok=True)
         log_args = {'log_root_option_name': constants.LOG_ROOT_OPTION_NAME,
                     'log_ext_option': constants.LOG_SAVER_EXT_OPTION,
                     'log_path': self.log_path,
@@ -465,13 +467,16 @@
         root_dir = os.environ.get(constants.ANDROID_BUILD_TOP, '')
         return os.path.commonprefix([output, root_dir]) != root_dir
 
-    def get_test_runner_build_reqs(self):
+    def get_test_runner_build_reqs(self, test_infos: List[test_info.TestInfo]):
         """Return the build requirements.
 
+        Args:
+            test_infos: List of TestInfo.
+
         Returns:
             Set of build targets.
         """
-        build_req = self._BUILD_REQ
+        build_req = self._BUILD_REQ.copy()
         # Use different base build requirements if google-tf is around.
         if self.module_info.is_module(constants.GTF_MODULE):
             build_req = {constants.GTF_TARGET}
@@ -482,8 +487,34 @@
             if self._is_missing_exec(executable):
                 if self.module_info.is_module(executable):
                     build_req.add(executable)
+
+        # Force rebuilding all jars under $ANDROID_HOST_OUT to prevent old
+        # versions of host jars from breaking the test.
+        build_req |= self._get_host_framework_targets()
+
+        build_req |= trb.gather_build_targets(test_infos)
         return build_req
 
+    def _get_host_framework_targets(self) -> Set[str]:
+        """Get the build targets for all the existing jars under host framework.
+
+        Returns:
+            A set of build target names under $(ANDROID_HOST_OUT)/framework.
+        """
+        host_targets = set()
+        if not self.module_info:
+            return host_targets
+
+        framework_host_dir = Path(
+            os.environ.get(constants.ANDROID_HOST_OUT)).joinpath('framework')
+        if framework_host_dir.is_dir():
+            jars = framework_host_dir.glob('*.jar')
+            for jar in jars:
+                if self.module_info.is_module(jar.stem):
+                    host_targets.add(jar.stem)
+            logging.debug('Found exist host framework target:%s', host_targets)
+        return host_targets
+
     def _parse_extra_args(self, test_infos, extra_args):
         """Convert the extra args into something tf can understand.
 
@@ -591,6 +622,10 @@
         log_level = 'VERBOSE'
         test_args.extend(['--log-level-display', log_level])
         test_args.extend(['--log-level', log_level])
+
+        # TODO(b/275110259) Remove this once TF not going to get bugreport.
+        test_args.extend(['--skip-all-system-status-check=true'])
+
         # Set no-early-device-release by default to speed up TF teardown time.
         if not constants.TF_EARLY_DEVICE_RELEASE in extra_args:
             test_args.extend(['--no-early-device-release'])
@@ -1002,9 +1037,11 @@
     return annotation_filter_args
 
 
-def extra_args_to_tf_args(mod_info: module_info.ModuleInfo,
-                          test_infos: List[test_info.TestInfo],
-                          extra_args: trb.ARGS) -> Tuple[trb.ARGS, trb.ARGS]:
+def extra_args_to_tf_args(
+    mod_info: module_info.ModuleInfo,
+    test_infos: List[test_info.TestInfo],
+    extra_args: Dict[str, Any],
+) -> Tuple[Dict[str, Any], Dict[str, Any]]:
     """Convert the extra args into atest_tf_test_runner supported args.
 
     Args:
diff --git a/atest/test_runners/atest_tf_test_runner_unittest.py b/atest/test_runners/atest_tf_test_runner_unittest.py
index 5ef32eb..26709c0 100755
--- a/atest/test_runners/atest_tf_test_runner_unittest.py
+++ b/atest/test_runners/atest_tf_test_runner_unittest.py
@@ -50,7 +50,8 @@
 METRICS_DIR_ARG = '--metrics-folder %s ' % METRICS_DIR
 # TODO(147567606): Replace {serial} with {extra_args} for general extra
 # arguments testing.
-RUN_CMD_ARGS = ('{metrics}--log-level-display VERBOSE --log-level VERBOSE'
+RUN_CMD_ARGS = ('{metrics}--log-level-display VERBOSE --log-level VERBOSE '
+                '--skip-all-system-status-check=true'
                 '{device_early_release}{serial}')
 LOG_ARGS = atf_tr.AtestTradefedTestRunner._LOG_ARGS.format(
     log_root_option_name=constants.LOG_ROOT_OPTION_NAME,
@@ -193,7 +194,6 @@
 
     #pylint: disable=arguments-differ
     @mock.patch.object(atf_tr.AtestTradefedTestRunner, '_get_ld_library_path')
-    @mock.patch.dict('os.environ', {constants.ANDROID_BUILD_TOP:'/'})
     def setUp(self, mock_get_ld_library_path):
         mock_get_ld_library_path.return_value = RUN_ENV_STR
         self.tr = atf_tr.AtestTradefedTestRunner(results_dir=uc.TEST_INFO_DIR)
diff --git a/atest/test_runners/example_test_runner.py b/atest/test_runners/example_test_runner.py
index 1fe6240..f70a084 100644
--- a/atest/test_runners/example_test_runner.py
+++ b/atest/test_runners/example_test_runner.py
@@ -15,6 +15,9 @@
 """Example test runner class."""
 
 
+from typing import List
+
+from atest.test_finders import test_info
 from atest.test_runners import test_runner_base
 
 
@@ -49,9 +52,12 @@
         """
         pass
 
-    def get_test_runner_build_reqs(self):
+    def get_test_runner_build_reqs(self, test_infos: List[test_info.TestInfo]):
         """Return the build requirements.
 
+        Args:
+            test_infos: List of TestInfo.
+
         Returns:
             Set of build targets.
         """
diff --git a/atest/test_runners/regression_test_runner.py b/atest/test_runners/regression_test_runner.py
index ba03251..c9db3e4 100644
--- a/atest/test_runners/regression_test_runner.py
+++ b/atest/test_runners/regression_test_runner.py
@@ -16,8 +16,11 @@
 Regression Detection test runner class.
 """
 
+from typing import List
+
 from atest import constants
 
+from atest.test_finders import test_info
 from atest.test_runners import test_runner_base
 
 
@@ -63,9 +66,12 @@
         """
         pass
 
-    def get_test_runner_build_reqs(self):
+    def get_test_runner_build_reqs(self, test_infos: List[test_info.TestInfo]):
         """Return the build requirements.
 
+        Args:
+            test_infos: List of TestInfo.
+
         Returns:
             Set of build targets.
         """
diff --git a/atest/test_runners/roboleaf_test_runner.py b/atest/test_runners/roboleaf_test_runner.py
new file mode 100644
index 0000000..bef462a
--- /dev/null
+++ b/atest/test_runners/roboleaf_test_runner.py
@@ -0,0 +1,274 @@
+# Copyright 2023, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Test runner for Roboleaf mode.
+
+This runner is used to run the tests that have been fully converted to Bazel.
+"""
+
+import enum
+import shlex
+import os
+import logging
+import json
+import subprocess
+
+from typing import Any, Dict, List, Set
+
+from atest import atest_utils
+from atest import constants
+from atest import bazel_mode
+from atest import result_reporter
+
+from atest.atest_enum import ExitCode
+from atest.test_finders.test_info import TestInfo
+from atest.test_runners import test_runner_base
+from atest.tools.singleton import Singleton
+
+# Roboleaf maintains allow lists that identify which modules have been
+# fully converted to bazel.  Users of atest can use
+# --roboleaf-mode=[PROD/STAGING/DEV] to filter by these allow lists.
+# PROD (default) is the only mode expected to be fully converted and passing.
+_ALLOW_LIST_PROD_PATH = ('/soong/soong_injection/allowlists/'
+                    'mixed_build_prod_allowlist.txt')
+_ALLOW_LIST_STAGING_PATH = ('/soong/soong_injection/allowlists/'
+                       'mixed_build_staging_allowlist.txt')
+_ROBOLEAF_MODULE_MAP_PATH = ('/soong/soong_injection/metrics/'
+                             'converted_modules_path_map.json')
+_ROBOLEAF_BUILD_CMD = 'build/soong/soong_ui.bash'
+
+
+@enum.unique
+class BazelBuildMode(enum.Enum):
+    "Represents different bp2build allow lists to use when running bazel (b)"
+    OFF = 'off'
+    DEV = 'dev'
+    STAGING = 'staging'
+    PROD = 'prod'
+
+
+class RoboleafModuleMap(metaclass=Singleton):
+    """Roboleaf Module Map Singleton class."""
+
+    def __init__(self,
+                 module_map_location: str = ''):
+        self._module_map = _generate_map(module_map_location)
+        self.modules_prod = _read_allow_list(_ALLOW_LIST_PROD_PATH)
+        self.modules_staging = _read_allow_list(_ALLOW_LIST_STAGING_PATH)
+
+    def get_map(self) -> Dict[str, str]:
+        """Return converted module map.
+
+        Returns:
+            A dictionary of test names that bazel paths for eligible tests,
+            for example { "test_a": "//platform/test_a" }.
+        """
+        return self._module_map
+
+def _generate_map(module_map_location: str = '') -> Dict[str, str]:
+    """Generate converted module map.
+
+    Args:
+        module_map_location: Path of the module_map_location to check.
+
+    Returns:
+        A dictionary mapping test names to bazel paths for eligible tests,
+        for example { "test_a": "//platform/test_a" }.
+    """
+    if not module_map_location:
+        module_map_location = (
+            atest_utils.get_build_out_dir() + _ROBOLEAF_MODULE_MAP_PATH)
+
+    # TODO(b/274161649): It is possible it could be stale on first run.
+    # Invoking m or b test will check/recreate this file.  Bug here is
+    # to determine if we can check staleness without a large time penalty.
+    if not os.path.exists(module_map_location):
+        logging.warning('The roboleaf converted modules file: %s was not '
+                        'found.', module_map_location)
+        # Attempt to generate converted modules file.
+        try:
+            cmd = _generate_bp2build_command()
+            env_vars = os.environ.copy()
+            logging.info(
+                'Running `bp2build` to generate converted modules file.'
+                '\n%s', ' '.join(cmd))
+            subprocess.check_call(cmd,  env=env_vars)
+        except subprocess.CalledProcessError as e:
+            logging.error(e)
+            return {}
+
+    with open(module_map_location, 'r', encoding='utf8') as robo_map:
+        return json.load(robo_map)
+
+def _read_allow_list(allow_list_location: str = '') -> List[str]:
+    """Generate a list of modules based on an allow list file.
+    The expected file format is a text file that has a module name on each line.
+    Lines that start with '#' or '//' are considered comments and skipped.
+
+    Args:
+        allow_list_location: Path of the allow_list file to parse.
+
+    Returns:
+        A list of module names.
+    """
+
+    allow_list_location = (
+            atest_utils.get_build_out_dir() + allow_list_location)
+
+    if not os.path.exists(allow_list_location):
+        logging.error('The roboleaf allow list file: %s was not '
+                        'found.', allow_list_location)
+        return []
+    with open(allow_list_location,  encoding='utf-8') as f:
+        allowed = []
+        for module_name in f.read().splitlines():
+            if module_name.startswith('#') or module_name.startswith('//'):
+                continue
+            allowed.append(module_name)
+        return allowed
+
+def _generate_bp2build_command() -> List[str]:
+    """Build command to run bp2build.
+
+    Returns:
+        A list of commands to run bp2build.
+    """
+    soong_ui = (
+        f'{os.environ.get(constants.ANDROID_BUILD_TOP, os.getcwd())}/'
+        f'{_ROBOLEAF_BUILD_CMD}')
+    return [soong_ui, '--make-mode', 'bp2build']
+
+
+class AbortRunException(Exception):
+    """Roboleaf Abort Run Exception Class."""
+
+
+class RoboleafTestRunner(test_runner_base.TestRunnerBase):
+    """Roboleaf Test Runner class."""
+    NAME = 'RoboleafTestRunner'
+    EXECUTABLE = 'b'
+
+    # pylint: disable=unused-argument
+    def generate_run_commands(self,
+                              test_infos: Set[Any],
+                              extra_args: Dict[str, Any],
+                              port: int = None) -> List[str]:
+        """Generate a list of run commands from TestInfos.
+
+        Args:
+            test_infos: A set of TestInfo instances.
+            extra_args: A Dict of extra args to append.
+            port: Optional. An int of the port number to send events to.
+
+        Returns:
+            A list of run commands to run the tests.
+        """
+        target_patterns = ' '.join(
+            self.test_info_target_label(i) for i in test_infos)
+        bazel_args = bazel_mode.parse_args(test_infos, extra_args, None)
+        bazel_args.append('--config=android')
+        bazel_args.append(
+            '--//build/bazel/rules/tradefed:runmode=host_driven_test'
+        )
+        bazel_args_str = ' '.join(shlex.quote(arg) for arg in bazel_args)
+        command = f'{self.EXECUTABLE} test {target_patterns} {bazel_args_str}'
+        results = [command]
+        logging.info("Roboleaf test runner command:\n"
+                     "\n".join(results))
+        return results
+
+    def test_info_target_label(self, test: TestInfo) -> str:
+        """ Get bazel path of test
+
+        Args:
+            test: An object of TestInfo.
+
+        Returns:
+            The bazel path of the test.
+        """
+        module_map = RoboleafModuleMap().get_map()
+        return f'{module_map[test.test_name]}:{test.test_name}'
+
+    def run_tests(self,
+                  test_infos: List[TestInfo],
+                  extra_args: Dict[str, Any],
+                  reporter: result_reporter.ResultReporter) -> int:
+        """Run the list of test_infos.
+
+        Args:
+            test_infos: List of TestInfo.
+            extra_args: Dict of extra args to add to test run.
+            reporter: An instance of result_reporter.ResultReporter.
+        """
+        reporter.register_unsupported_runner(self.NAME)
+        ret_code = ExitCode.SUCCESS
+        try:
+            run_cmds = self.generate_run_commands(test_infos, extra_args)
+        except AbortRunException as e:
+            atest_utils.colorful_print(f'Stop running test(s): {e}',
+                                       constants.RED)
+            return ExitCode.ERROR
+        for run_cmd in run_cmds:
+            subproc = self.run(run_cmd, output_to_stdout=True)
+            ret_code |= self.wait_for_subprocess(subproc)
+        return ret_code
+
+    def get_test_runner_build_reqs(
+        self,
+        test_infos: List[TestInfo]) -> Set[str]:
+        return set()
+
+    def host_env_check(self) -> None:
+        """Check that host env has everything we need.
+
+        We actually can assume the host env is fine because we have the same
+        requirements that atest has. Update this to check for android env vars
+        if that changes.
+        """
+
+    def roboleaf_eligible_tests(
+        self,
+        mode: BazelBuildMode,
+        module_names: List[str]) -> Dict[str, TestInfo]:
+        """Filter the given module_names to only ones that are currently
+        fully converted with roboleaf (b test) and then filter further by the
+        given allow list specified in BazelBuildMode.
+
+        Args:
+            mode: A BazelBuildMode value to filter by allow list.
+            module_names: A list of module names to check for roboleaf support.
+
+        Returns:
+            A dictionary keyed by test name and value of Roboleaf TestInfo.
+        """
+        if not module_names:
+            return {}
+
+        mod_map = RoboleafModuleMap()
+        supported_modules = set(filter(
+            lambda m: m in mod_map.get_map(), module_names))
+
+
+        if mode == BazelBuildMode.PROD:
+            supported_modules = set(filter(
+            lambda m: m in supported_modules, mod_map.modules_prod))
+        elif mode == BazelBuildMode.STAGING:
+            supported_modules = set(filter(
+            lambda m: m in supported_modules, mod_map.modules_staging))
+
+        return {
+            module: TestInfo(module, RoboleafTestRunner.NAME, set())
+            for module in supported_modules
+        }
diff --git a/atest/test_runners/roboleaf_test_runner_unittest.py b/atest/test_runners/roboleaf_test_runner_unittest.py
new file mode 100644
index 0000000..75fd597
--- /dev/null
+++ b/atest/test_runners/roboleaf_test_runner_unittest.py
@@ -0,0 +1,267 @@
+#!/usr/bin/env python3
+#
+# Copyright 2023, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittests for roboleaf_test_runner."""
+
+import json
+import unittest
+import subprocess
+import logging
+
+from pathlib import Path
+from unittest import mock
+from pyfakefs import fake_filesystem_unittest
+
+from atest import atest_utils
+from atest import unittest_constants
+from atest.test_finders.test_info import TestInfo
+from atest.test_runners import roboleaf_test_runner
+from atest.test_runners.roboleaf_test_runner import RoboleafTestRunner
+from atest.test_runners.roboleaf_test_runner import RoboleafModuleMap
+
+# TODO(b/274706697): Refactor to remove disable=protected-access
+# pylint: disable=protected-access
+class RoboleafTestRunnerUnittests(fake_filesystem_unittest.TestCase):
+    """Unit tests for roboleaf_test_runner.py"""
+    def setUp(self):
+        self.test_runner = RoboleafTestRunner(results_dir='')
+        self.setUpPyfakefs()
+        out_dir = atest_utils.get_build_out_dir()
+        self.fs.create_file(
+            out_dir+roboleaf_test_runner._ROBOLEAF_MODULE_MAP_PATH,
+            contents="{}")
+        self.fs.create_file(
+            out_dir+roboleaf_test_runner._ALLOW_LIST_PROD_PATH,
+            contents="")
+        self.fs.create_file(
+            out_dir+roboleaf_test_runner._ALLOW_LIST_STAGING_PATH,
+            contents="")
+
+    def tearDown(self):
+        RoboleafModuleMap()._module_map = {}
+        mock.patch.stopall()
+
+    def test_read_allow_list(self):
+        """Test _read_allow_list method"""
+        self.fs.create_file(
+            atest_utils.get_build_out_dir()+"allow_list",
+            contents="""test1\ntest2\n#comment1\n//comment2""")
+
+        self.assertEqual(
+            roboleaf_test_runner._read_allow_list("allow_list"),
+            ['test1','test2'])
+
+    def test_roboleaf_eligible_tests_filtering(self):
+        """Test roboleaf_eligible_tests method when _module_map has entries"""
+        RoboleafModuleMap._instances = {}
+
+        self.setUpPyfakefs()
+        out_dir = atest_utils.get_build_out_dir()
+        self.fs.create_file(
+            out_dir+roboleaf_test_runner._ROBOLEAF_MODULE_MAP_PATH,
+            contents=json.dumps({
+            'test1': "//a",
+            'test2': "//a/b",
+            'test3': "//a/b",
+        }))
+        self.fs.create_file(
+            out_dir+roboleaf_test_runner._ALLOW_LIST_STAGING_PATH,
+            contents="test1\ntest2")
+        self.fs.create_file(
+            out_dir+roboleaf_test_runner._ALLOW_LIST_PROD_PATH,
+            contents="test1")
+        module_names = [
+            'test1',
+            'test2',
+            'test3',
+            'test4',
+        ]
+
+        eligible_tests = self.test_runner.roboleaf_eligible_tests(
+            roboleaf_test_runner.BazelBuildMode.DEV,
+            module_names)
+
+        self.assertEqual(len(eligible_tests), 3)
+        self.assertEqual(eligible_tests["test1"].test_name, 'test1')
+        self.assertEqual(eligible_tests["test1"].test_runner,
+                         RoboleafTestRunner.NAME)
+        self.assertEqual(eligible_tests["test2"].test_name, 'test2')
+        self.assertEqual(eligible_tests["test2"].test_runner,
+                         RoboleafTestRunner.NAME)
+        self.assertEqual(eligible_tests["test3"].test_name, 'test3')
+        self.assertEqual(eligible_tests["test3"].test_runner,
+                         RoboleafTestRunner.NAME)
+
+        eligible_tests = self.test_runner.roboleaf_eligible_tests(
+            roboleaf_test_runner.BazelBuildMode.STAGING,
+            module_names)
+
+        self.assertEqual(len(eligible_tests), 2)
+        self.assertEqual(eligible_tests["test1"].test_name, 'test1')
+        self.assertEqual(eligible_tests["test1"].test_runner,
+                         RoboleafTestRunner.NAME)
+        self.assertEqual(eligible_tests["test2"].test_name, 'test2')
+        self.assertEqual(eligible_tests["test2"].test_runner,
+                         RoboleafTestRunner.NAME)
+
+        eligible_tests = self.test_runner.roboleaf_eligible_tests(
+            roboleaf_test_runner.BazelBuildMode.PROD,
+            module_names)
+
+        self.assertEqual(len(eligible_tests), 1)
+        self.assertEqual(eligible_tests["test1"].test_name, 'test1')
+        self.assertEqual(eligible_tests["test1"].test_runner,
+                         RoboleafTestRunner.NAME)
+
+    def test_roboleaf_eligible_tests_empty_map(self):
+        """Test roboleaf_eligible_tests method when _module_map is empty"""
+        module_names = [
+            'test1',
+            'test2',
+        ]
+        RoboleafModuleMap()._module_map = {}
+
+        eligible_tests = self.test_runner.roboleaf_eligible_tests(
+            roboleaf_test_runner.BazelBuildMode.DEV,
+            module_names)
+        self.assertEqual(eligible_tests, {})
+
+    def test_generate_bp2build_command(self):
+        """Test _generate_bp2build_command method."""
+        cmd = roboleaf_test_runner._generate_bp2build_command()
+
+        self.assertTrue('build/soong/soong_ui.bash --make-mode bp2build' in
+                        ' '.join(cmd))
+
+    def test_get_map(self):
+        """Test get_map method."""
+        data = {
+            "test1": "//platform/a",
+            "test2": "//platform/b"
+        }
+        RoboleafModuleMap()._module_map = data
+
+        self.assertEqual(RoboleafModuleMap().get_map(), data)
+
+    @mock.patch.object(subprocess, "check_call")
+    def test_generate_map(self, mock_subprocess):
+        """Test _generate_map method from file."""
+        module_map_location = Path(unittest_constants.TEST_DATA_DIR).joinpath(
+            "roboleaf_testing/converted_modules_path_map.json"
+        )
+        self.fs.create_file(
+            module_map_location,
+            contents=json.dumps({
+            "test1": "//platform/a",
+            "test2": "//platform/b"
+        }))
+
+        data = roboleaf_test_runner._generate_map(module_map_location)
+
+        # Expected to not call a subprocess with the roboleaf bp2build
+        # command since file already exists.
+        self.assertEqual(mock_subprocess.called, False)
+        self.assertEqual(data, {
+            "test1": "//platform/a",
+            "test2": "//platform/b"
+        })
+
+    @mock.patch('builtins.open', mock.mock_open(read_data=json.dumps(
+        {"test3": "//a/b"})))
+    @mock.patch.object(subprocess, "check_call")
+    def test_generate_map_with_command(self, mock_subprocess):
+        """Test that _generate_map runs the bp2build command"""
+        module_map_location = Path(unittest_constants.TEST_DATA_DIR).joinpath(
+            "roboleaf_testing/does_not_exist.json"
+        )
+
+        # Disable expected warning log message "converted modules file was not
+        # found." to reduce noise during tests.
+        logging.disable(logging.WARNING)
+        data = roboleaf_test_runner._generate_map(module_map_location)
+        logging.disable(logging.NOTSET)
+
+        self.assertEqual(mock_subprocess.called, True)
+        self.assertEqual(data, {"test3": "//a/b"})
+
+    def test_info_target_label(self):
+        """Test test_info_target_label method."""
+        RoboleafModuleMap()._module_map = {
+            "test1": "//a",
+        }
+
+        target_label = self.test_runner.test_info_target_label(
+            TestInfo(
+                "test1",
+                RoboleafTestRunner.NAME,
+                set()),
+        )
+
+        self.assertEqual(target_label, "//a:test1")
+
+    def test_generate_run_commands(self):
+        """Test generate_run_commands method."""
+        RoboleafModuleMap()._module_map = {
+            "test1": "//a",
+            "test2": "//b",
+        }
+        test_infos = (
+            TestInfo(
+                "test1",
+                RoboleafTestRunner.NAME,
+                set()),
+            TestInfo(
+                "test2",
+                RoboleafTestRunner.NAME,
+                set()),
+        )
+
+        cmds = self.test_runner.generate_run_commands(test_infos, extra_args={})
+
+        self.assertEqual(len(cmds), 1)
+        self.assertTrue('b test //a:test1 //b:test2' in cmds[0])
+
+    @mock.patch.object(RoboleafTestRunner, 'run')
+    def test_run_tests(self, mock_run):
+        """Test run_tests method."""
+        RoboleafModuleMap()._module_map = {
+            "test1": "//a",
+            "test2": "//b",
+        }
+        test_infos = (
+            TestInfo(
+                "test1",
+                RoboleafTestRunner.NAME,
+                set()),
+            TestInfo(
+                "test2",
+                RoboleafTestRunner.NAME,
+                set()),
+        )
+        extra_args = {}
+        mock_subproc = mock.Mock()
+        mock_run.return_value = mock_subproc
+        mock_subproc.returncode = 0
+        mock_reporter = mock.Mock()
+
+        result = self.test_runner.run_tests(
+            test_infos, extra_args, mock_reporter)
+
+        self.assertEqual(result, 0)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/atest/test_runners/robolectric_test_runner.py b/atest/test_runners/robolectric_test_runner.py
index a18548f..2516607 100644
--- a/atest/test_runners/robolectric_test_runner.py
+++ b/atest/test_runners/robolectric_test_runner.py
@@ -30,11 +30,13 @@
 
 from functools import partial
 from pathlib import Path
+from typing import List
 
 from atest import atest_utils
 from atest import constants
 
 from atest.atest_enum import ExitCode
+from atest.test_finders import test_info
 from atest.test_runners import test_runner_base
 from atest.test_runners.event_handler import EventHandler
 
@@ -244,13 +246,18 @@
         """
         pass
 
-    def get_test_runner_build_reqs(self):
+    def get_test_runner_build_reqs(self, test_infos: List[test_info.TestInfo]):
         """Return the build requirements.
 
+        Args:
+            test_infos: List of TestInfo.
+
         Returns:
             Set of build targets.
         """
-        return set()
+        build_targets = set()
+        build_targets |= test_runner_base.gather_build_targets(test_infos)
+        return build_targets
 
     # pylint: disable=unused-argument
     def generate_run_commands(self, test_infos, extra_args, port=None):
diff --git a/atest/test_runners/robolectric_test_runner_unittest.py b/atest/test_runners/robolectric_test_runner_unittest.py
index 013c1c9..4e90699 100755
--- a/atest/test_runners/robolectric_test_runner_unittest.py
+++ b/atest/test_runners/robolectric_test_runner_unittest.py
@@ -19,7 +19,6 @@
 # pylint: disable=line-too-long
 
 import json
-import platform
 import subprocess
 import tempfile
 import unittest
@@ -67,15 +66,20 @@
         """Test _exec_with_robo_polling method."""
         event_name = 'TEST_STARTED'
         event_data = {'className':'SomeClass', 'testName':'SomeTestName'}
-
         json_event_data = json.dumps(event_data)
-        data = '%s %s\n\n' %(event_name, json_event_data)
-        event_file = tempfile.NamedTemporaryFile(delete=True)
-        subprocess.call("echo '%s' -n >> %s" %(data, event_file.name), shell=True)
-        robo_proc = subprocess.Popen("sleep %s" %str(self.polling_time * 2), shell=True)
-        self.suite_tr. _exec_with_robo_polling(event_file, robo_proc, mock_pe)
-        calls = [mock.call.process_event(event_name, event_data)]
-        mock_pe.assert_has_calls(calls)
+        data = f'{event_name} {json_event_data}\n\n'
+        with tempfile.NamedTemporaryFile() as event_file:
+            subprocess.run(f"echo '{data}' -n >> {event_file.name}",
+                           shell=True, check=True)
+            robo_proc = subprocess.Popen(
+                f'sleep {str(self.polling_time * 2)}',
+                shell=True
+            )
+
+            self.suite_tr._exec_with_robo_polling(event_file, robo_proc, mock_pe)
+            calls = [mock.call.process_event(event_name, event_data)]
+
+            mock_pe.assert_has_calls(calls)
 
     @mock.patch.object(event_handler.EventHandler, 'process_event')
     def test_exec_with_robo_polling_with_partial_info(self, mock_pe):
@@ -83,21 +87,21 @@
         event_name = 'TEST_STARTED'
         event1 = '{"className":"SomeClass","test'
         event2 = 'Name":"SomeTestName"}\n\n'
-        data1 = '%s %s'%(event_name, event1)
+        data1 = f'{event_name} {event1}'
         data2 = event2
-        event_file = tempfile.NamedTemporaryFile(delete=True)
-        subprocess.Popen("echo -n '%s' >> %s" %(data1, event_file.name), shell=True)
-        robo_proc = subprocess.Popen("echo '%s' >> %s && sleep %s"
-                                     %(data2,
-                                       event_file.name,
-                                       str(self.polling_time*5)),
-                                     shell=True)
-        self.suite_tr. _exec_with_robo_polling(event_file, robo_proc, mock_pe)
-        calls = [mock.call.process_event(event_name,
-                                         json.loads(event1 + event2))]
-        # (b/147569951) subprocessing 'echo'  behaves differently between
-        # linux/darwin. Ensure it is not called in MacOS.
-        if platform.system() == 'Linux':
+        with tempfile.NamedTemporaryFile() as event_file:
+            subprocess.run(f"echo -n '{data1}' >> {event_file.name}",
+                           shell=True, check=True)
+            robo_proc = subprocess.Popen(
+                f"echo '{data2}' >> {event_file.name} && "
+                f"sleep {str(self.polling_time * 5)}",
+                shell=True
+            )
+
+            self.suite_tr._exec_with_robo_polling(event_file, robo_proc, mock_pe)
+            calls = [mock.call.process_event(event_name,
+                                             json.loads(event1 + event2))]
+
             mock_pe.assert_has_calls(calls)
 
     @mock.patch.object(event_handler.EventHandler, 'process_event')
diff --git a/atest/test_runners/suite_plan_test_runner.py b/atest/test_runners/suite_plan_test_runner.py
index 700c3c1..30b9bad 100644
--- a/atest/test_runners/suite_plan_test_runner.py
+++ b/atest/test_runners/suite_plan_test_runner.py
@@ -20,6 +20,8 @@
 import logging
 import os
 
+from typing import List
+
 from atest import atest_utils
 from atest import constants
 
@@ -27,6 +29,7 @@
 from atest.logstorage import atest_gcp_utils
 from atest.logstorage import logstorage_utils
 from atest.metrics import metrics
+from atest.test_finders import test_info
 from atest.test_runners import atest_tf_test_runner
 
 class SuitePlanTestRunner(atest_tf_test_runner.AtestTradefedTestRunner):
@@ -42,14 +45,17 @@
                              'test': '',
                              'args': ''}
 
-    def get_test_runner_build_reqs(self):
+    def get_test_runner_build_reqs(self, test_infos: List[test_info.TestInfo]):
         """Return the build requirements.
 
+        Args:
+            test_infos: List of TestInfo.
+
         Returns:
             Set of build targets.
         """
         build_req = set()
-        build_req |= super().get_test_runner_build_reqs()
+        build_req |= super().get_test_runner_build_reqs(test_infos)
         return build_req
 
     def run_tests(self, test_infos, extra_args, reporter):
diff --git a/atest/test_runners/test_runner_base.py b/atest/test_runners/test_runner_base.py
index 1e37d0f..cfddfe0 100644
--- a/atest/test_runners/test_runner_base.py
+++ b/atest/test_runners/test_runner_base.py
@@ -28,11 +28,12 @@
 import os
 
 from collections import namedtuple
-from typing import Any, Dict
+from typing import Any, Dict, List, Set
 
 from atest import atest_error
 from atest import atest_utils
 from atest import constants
+from atest.test_finders import test_info
 
 OLD_OUTPUT_ENV_VAR = 'ATEST_OLD_OUTPUT'
 
@@ -48,8 +49,6 @@
 IGNORED_STATUS = 'IGNORED'
 ERROR_STATUS = 'ERROR'
 
-ARGS = Dict[str, Any]
-
 
 class TestRunnerBase:
     """Base Test Runner class."""
@@ -188,7 +187,7 @@
         """Checks that host env has met requirements."""
         raise NotImplementedError
 
-    def get_test_runner_build_reqs(self):
+    def get_test_runner_build_reqs(self, test_infos: List[test_info.TestInfo]):
         """Returns a list of build targets required by the test runner."""
         raise NotImplementedError
 
@@ -205,3 +204,21 @@
             A list of run commands to run the tests.
         """
         raise NotImplementedError
+
+
+def gather_build_targets(
+        test_infos: List[test_info.TestInfo]) -> Set[str]:
+    """Gets all build targets for the given tests.
+
+    Args:
+        test_infos: List of TestInfo.
+
+    Returns:
+        Set of build targets.
+    """
+    build_targets = set()
+
+    for t_info in test_infos:
+        build_targets |= t_info.build_targets
+
+    return build_targets
diff --git a/atest/test_runners/vts_tf_test_runner.py b/atest/test_runners/vts_tf_test_runner.py
index b285b41..a909b4d 100644
--- a/atest/test_runners/vts_tf_test_runner.py
+++ b/atest/test_runners/vts_tf_test_runner.py
@@ -19,10 +19,13 @@
 import copy
 import logging
 
+from typing import List
+
 from atest import atest_utils
 from atest import constants
 
 from atest.atest_enum import ExitCode
+from atest.test_finders import test_info
 from atest.test_runners import atest_tf_test_runner
 
 class VtsTradefedTestRunner(atest_tf_test_runner.AtestTradefedTestRunner):
@@ -42,14 +45,17 @@
                              'test': '',
                              'args': ''}
 
-    def get_test_runner_build_reqs(self):
+    def get_test_runner_build_reqs(self, test_infos: List[test_info.TestInfo]):
         """Return the build requirements.
 
+        Args:
+            test_infos: List of TestInfo.
+
         Returns:
             Set of build targets.
         """
         build_req = self._BUILD_REQ
-        build_req |= super().get_test_runner_build_reqs()
+        build_req |= super().get_test_runner_build_reqs(test_infos)
         return build_req
 
     def run_tests(self, test_infos, extra_args, reporter):
diff --git a/atest/tools/atest_tools.py b/atest/tools/atest_tools.py
index df709cf..d92bd3f 100755
--- a/atest/tools/atest_tools.py
+++ b/atest/tools/atest_tools.py
@@ -145,17 +145,24 @@
     # Support scanning bind mounts as well.
     updatedb_cmd.extend(['--prune-bind-mounts', 'no'])
 
-    if not has_command(UPDATEDB):
-        return
     logging.debug('Running updatedb... ')
     try:
         full_env_vars = os.environ.copy()
         logging.debug('Executing: %s', updatedb_cmd)
         if not os.path.isdir(constants.INDEX_DIR):
             os.makedirs(constants.INDEX_DIR)
-        subprocess.call(updatedb_cmd, env=full_env_vars)
+        subprocess.run(updatedb_cmd, env=full_env_vars, check=True)
     except (KeyboardInterrupt, SystemExit):
         logging.error('Process interrupted or failure.')
+    # Delete indexes when plocate.db is locked() or other CalledProcessError.
+    # (b/141588997)
+    except subprocess.CalledProcessError as err:
+        logging.error('Executing %s error.', UPDATEDB)
+        metrics_utils.handle_exc_and_send_exit_event(
+            constants.PLOCATEDB_LOCKED)
+        if err.output:
+            logging.error(err.output)
+        os.remove(output_cache)
 
 
 def _dump_index(dump_file, output, output_re, key, value):
@@ -335,43 +342,47 @@
     """The entrypoint of indexing targets.
 
     Utilise plocate database to index reference types of CLASS, CC_CLASS,
-    PACKAGE and QUALIFIED_CLASS. Testable module for tab completion is also
-    generated in this method.
+    PACKAGE and QUALIFIED_CLASS.
+
+    (b/206886222) The checksum and file size of plocate.db may differ even the
+    src is not changed at all; therefore, it will skip indexing when both
+    conditions are fulfilled:
+      - not undergo `repo sync` before running atest.
+      - file numbers recorded in current and previous plocate.db are the same.
 
     Args:
         output_cache: A file path of the updatedb cache
                       (e.g. /path/to/plocate.db).
     """
-    if not has_command(LOCATE):
-        logging.debug('command %s is unavailable; skip indexing.', LOCATE)
+    unavailable_cmds = [
+        cmd for cmd in [UPDATEDB, LOCATE] if not has_command(cmd)]
+    if unavailable_cmds:
+        logging.debug('command %s is unavailable; skip indexing...',
+                      ' '.join(unavailable_cmds))
         return
-    pre_md5sum = au.md5sum(constants.LOCATE_CACHE)
-    pre_size = sys.maxsize
-    if Path(constants.LOCATE_CACHE).is_file():
-        pre_size = Path(constants.LOCATE_CACHE).stat().st_size
-    try:
-        # Step 0: generate plocate database prior to indexing targets.
-        run_updatedb(SEARCH_TOP, output_cache)
-        # (b/206886222) checksum may be different even the src is not changed.
-        # check filesize as well to tell whether there are src changes or just
-        # metadata changes.
-        if any((pre_md5sum == au.md5sum(constants.LOCATE_CACHE),
-                pre_size == Path(constants.LOCATE_CACHE).stat().st_size)):
-            logging.debug('%s remains the same. Ignore indexing', output_cache)
-            return
-        # Step 1: generate output string for indexing targets when needed.
-        logging.debug('Indexing targets... ')
-        au.run_multi_proc(func=get_java_result, args=[output_cache])
-        au.run_multi_proc(func=get_cc_result, args=[output_cache])
-    # Delete indexes when plocate.db is locked() or other CalledProcessError.
-    # (b/141588997)
-    except subprocess.CalledProcessError as err:
-        logging.error('Executing %s error.', UPDATEDB)
-        metrics_utils.handle_exc_and_send_exit_event(
-            constants.PLOCATEDB_LOCKED)
-        if err.output:
-            logging.error(err.output)
-        _delete_indexes()
+
+    # Get the amount of indexed files.
+    get_num_cmd = f'{LOCATE} -d{output_cache} --count /'
+    ret, pre_number = subprocess.getstatusoutput(get_num_cmd)
+    if ret != 0:
+        logging.debug('Failed to run %s', get_num_cmd)
+        pre_number = sys.maxsize
+
+    run_updatedb(SEARCH_TOP, output_cache)
+    checksum_file = os.path.join(constants.INDEX_DIR, 'repo_sync.md5')
+    repo_syncd = not au.check_md5(checksum_file, missing_ok=False)
+    if repo_syncd:
+        repo_file = Path(SEARCH_TOP).joinpath(
+            '.repo/.repo_fetchtimes.json')
+        au.run_multi_proc(
+            func=au.save_md5,
+            args=[[repo_file], checksum_file])
+    if not repo_syncd and pre_number == subprocess.getoutput(get_num_cmd):
+        logging.debug('%s remains the same. Ignore indexing', output_cache)
+        return
+    logging.debug('Indexing targets... ')
+    au.run_multi_proc(func=get_java_result, args=[output_cache])
+    au.run_multi_proc(func=get_cc_result, args=[output_cache])
 
 
 def acloud_create(report_file, args, no_metrics_notice=True):
@@ -426,8 +437,8 @@
         else:
             A tuple of (None, None)
     """
-    target = os.getenv('TARGET_PRODUCT', "")
-    if not '_cf_' in target:
+    target = os.getenv('TARGET_PRODUCT')
+    if not re.match(r'^(aosp_|)cf_.*', target):
         au.colorful_print(
             f'{target} is not in cuttlefish family; will not create any AVD.',
             constants.RED)
diff --git a/atest/tools/singleton.py b/atest/tools/singleton.py
new file mode 100644
index 0000000..aace379
--- /dev/null
+++ b/atest/tools/singleton.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+# Copyright 2023 - The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+A meta class for singleton pattern.
+
+ Usage:
+        from atest.tools.singleton import Singleton
+
+        class AClass(BaseClass, metaclass=Singleton):
+            pass
+"""
+
+
+class Singleton(type):
+    """A singleton metaclass that returns the same instance when called."""
+    _instances = {}
+
+    def __call__(cls, *args, **kwds):
+        """Initialize a singleton instance."""
+        if cls not in cls._instances:
+            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwds)
+        return cls._instances[cls]