tools/run_tests: Split unit tests from integration tests

Changes how unit tests are executed. Instead of running all tests on
a selected target (e.g. the host or a VM), we now select a platform to
test, and run_tests uses separate targets for unit tests and
integration tests.

For example, instead of running all tests in the aarch64 VM:

./tools/run_tests --target=vm:aarch64

We can now run

./tools/run_tests --platform=aarch64

to run unit tests on the host via user-space emulation, and only the
integration tests in the VM.

This will eventually apply to x86 testing as well, so tests can be
run in an unprivileged environment (e.g. podman).

A new TestOption has been added to mark unit tests whose system
interactions require them to be run in a VM like integration tests.
This flag is meant to be transitory; those tests should be refactored
so that privileges are only needed in integration tests.
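
Affected crates are tagged via CRATE_OPTIONS in tools/impl/test_config.py,
e.g. (sketch mirroring the change below):

    CRATE_OPTIONS: Dict[str, List[TestOption]] = {
        "vhost": [TestOption.DO_NOT_RUN_ON_FOREIGN_KERNEL, TestOption.UNIT_AS_INTEGRATION_TEST],
        # ... other crates ...
    }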

You can also use --unit-tests or --integration-tests to run just one
type of test. Running only the unit tests takes a few seconds on most
platforms since we can use user-space emulation.
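
For example:

    ./tools/run_tests --unit-tests
    ./tools/run_tests --integration-tests --platform=aarch64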

BUG=b:247139912
TEST=./tools/run_tests
./tools/run_tests -p aarch64
./tools/run_tests -p mingw64
./tools/run_tests -p armhf

Change-Id: Icd0c502623f1889906d199e752b3eccb7de76dc0
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/3902688
Reviewed-by: Zihan Chen <zihanchen@google.com>
Commit-Queue: Dennis Kempin <denniskempin@google.com>
diff --git a/tools/impl/test_config.py b/tools/impl/test_config.py
index f1575de..8269a6f 100755
--- a/tools/impl/test_config.py
+++ b/tools/impl/test_config.py
@@ -33,6 +33,12 @@
     # This test needs to be the only one running to prevent interference with other tests.
     RUN_EXCLUSIVE = "run_exclusive"
 
+    # This unit test requires special privileges and needs to be run in a test VM like an
+    # integration test.
+    # Note: This flag should be transitory and tests should be refactored to only require
+    # privileges in integration tests.
+    UNIT_AS_INTEGRATION_TEST = "unit_as_integration_test"
+
     # This test needs longer than usual to run.
     LARGE = "large"
 
@@ -74,7 +80,7 @@
 CRATE_OPTIONS: Dict[str, List[TestOption]] = {
     "base": [TestOption.SINGLE_THREADED, TestOption.LARGE],
     "cros_async": [TestOption.LARGE, TestOption.RUN_EXCLUSIVE],
-    "crosvm": [TestOption.SINGLE_THREADED],
+    "crosvm": [TestOption.SINGLE_THREADED, TestOption.UNIT_AS_INTEGRATION_TEST],
     "crosvm_plugin": [
         TestOption.DO_NOT_BUILD_AARCH64,
         TestOption.DO_NOT_BUILD_ARMHF,
@@ -86,6 +92,7 @@
         TestOption.LARGE,
         TestOption.DO_NOT_RUN_ON_FOREIGN_KERNEL,
         TestOption.DO_NOT_RUN_ARMHF,
+        TestOption.UNIT_AS_INTEGRATION_TEST,
     ],
     "disk": [TestOption.DO_NOT_RUN_AARCH64, TestOption.DO_NOT_RUN_ARMHF],  # b/202294155
     # FFmpeg 5.0 not available on Debian Bullseye used in container images.
@@ -123,7 +130,7 @@
     ],
     "libvda": [TestOption.DO_NOT_BUILD],  # b/202293971
     "rutabaga_gfx": [TestOption.DO_NOT_BUILD_ARMHF],  # b/210015864
-    "vhost": [TestOption.DO_NOT_RUN_ON_FOREIGN_KERNEL],
+    "vhost": [TestOption.DO_NOT_RUN_ON_FOREIGN_KERNEL, TestOption.UNIT_AS_INTEGRATION_TEST],
     "vm_control": [TestOption.DO_NOT_BUILD_ARMHF],  # b/210015864
 }
 
diff --git a/tools/impl/test_runner.py b/tools/impl/test_runner.py
index 3a8cea8..1b420e0 100644
--- a/tools/impl/test_runner.py
+++ b/tools/impl/test_runner.py
@@ -12,7 +12,7 @@
 import sys
 from multiprocessing import Pool
 from pathlib import Path
-from typing import Dict, Iterable, List, NamedTuple
+from typing import Dict, Iterable, List, NamedTuple, Optional
 
 from . import test_target, testvm
 from .common import all_tracked_files
@@ -24,19 +24,19 @@
 
 To build and run all tests locally:
 
-    $ ./tools/run_tests --target=host
+    $ ./tools/run_tests
 
-To cross-compile tests for aarch64 and run them on a built-in VM:
+Unit tests will be executed directly on the host, while integration tests will be executed inside
+a built-in VM.
 
-    $ ./tools/run_tests --target=vm:aarch64
+To cross-compile tests for aarch64, armhf or Windows, you can use:
 
-The VM will be automatically set up and booted. It will remain running between
-test runs and can be managed with `./tools/aarch64vm`.
+    $ ./tools/run_tests --platform=aarch64
+    $ ./tools/run_tests --platform=armhf
+    $ ./tools/run_tests --platform=mingw64
 
-Tests can also be run on a remote device via SSH. However it is your
-responsiblity that runtime dependencies of crosvm are provided.
-
-    $ ./tools/run_tests --target=ssh:hostname
+The built-in VMs will be automatically set up and booted. They will remain running between
+test runs and can be managed with `./tools/aarch64vm` or `./tools/x86vmm`.
 
 The default test target can be managed with `./tools/set_test_target`
 
@@ -383,34 +383,46 @@
 
 def execute_all(
     executables: List[Executable],
-    target: test_target.TestTarget,
+    unit_test_target: Optional[test_target.TestTarget],
+    integration_test_target: Optional[test_target.TestTarget],
     attempts: int,
     collect_coverage: bool,
 ):
     """Executes all tests in the `executables` list in parallel."""
 
-    def is_exclusive(executable: Executable):
-        return TestOption.RUN_EXCLUSIVE in CRATE_OPTIONS.get(executable.crate_name, [])
+    def is_integration_test(executable: Executable):
+        options = CRATE_OPTIONS.get(executable.crate_name, [])
+        return executable.kind == "test" or TestOption.UNIT_AS_INTEGRATION_TEST in options
 
-    pool_executables = [e for e in executables if not is_exclusive(e)]
-    sys.stdout.write(f"Running {len(pool_executables)} test binaries in parallel on {target}")
-    sys.stdout.flush()
-    with Pool(PARALLELISM) as pool:
-        for result in pool.imap(
-            functools.partial(execute_test, target, attempts, collect_coverage), pool_executables
-        ):
+    unit_tests = [e for e in executables if not is_integration_test(e)]
+    if unit_test_target:
+        sys.stdout.write(f"Running {len(unit_tests)} unit tests on {unit_test_target}")
+        sys.stdout.flush()
+        with Pool(PARALLELISM) as pool:
+            for result in pool.imap(
+                functools.partial(execute_test, unit_test_target, attempts, collect_coverage),
+                unit_tests,
+            ):
+                print_test_progress(result)
+                yield result
+        print()
+    else:
+        print("Not running unit tests as requested.")
+
+    if integration_test_target:
+        integration_tests = [e for e in executables if is_integration_test(e)]
+        sys.stdout.write(
+            f"Running {len(integration_tests)} integration tests on {integration_test_target}"
+        )
+        sys.stdout.flush()
+        for executable in integration_tests:
+            result = execute_test(integration_test_target, attempts, collect_coverage, executable)
             print_test_progress(result)
             yield result
-    print()
+        print()
 
-    exclusive_executables = [e for e in executables if is_exclusive(e)]
-    sys.stdout.write(f"Running {len(exclusive_executables)} test binaries on {target}")
-    sys.stdout.flush()
-    for executable in exclusive_executables:
-        result = execute_test(target, attempts, collect_coverage, executable)
-        print_test_progress(result)
-        yield result
-    print()
+    else:
+        print("Not running integration tests as requested.")
 
 
 def find_crosvm_binary(executables: List[Executable]):
@@ -469,11 +481,12 @@
     )
     parser.add_argument(
         "--target",
-        default="host",
         help="Execute tests on the selected target. See ./tools/set_test_target",
     )
     parser.add_argument(
         "--build-target",
+        "--platform",
+        "-p",
         help=(
             "Override the cargo triple to build. Shorthands are available: (x86_64, armhf, "
             + "aarch64, mingw64, msvc64)."
@@ -487,9 +500,16 @@
         ),
     )
     parser.add_argument(
+        "--clean",
+        action="store_true",
+        help="Clean any compilation artifacts and rebuild test VM.",
+    )
+    parser.add_argument(
         "--build-only",
         action="store_true",
     )
+    parser.add_argument("--unit-tests", action="store_true")
+    parser.add_argument("--integration-tests", action="store_true")
     parser.add_argument(
         "--cov",
         action="store_true",
@@ -545,15 +565,57 @@
     collect_coverage = bool(args.generate_lcov)
     emulator_cmd = args.emulator.split(" ") if args.emulator else None
     build_target = Triple.from_shorthand(args.build_target) if args.build_target else None
-    target = test_target.TestTarget(args.target, build_target, emulator_cmd)
-    print("Test target:", target)
+
+    if args.target:
+        print("Warning: Setting --target for running crosvm tests is deprecated.")
+        print()
+        print("  Use --platform instead to specify which platform to test for. For example:")
+        print("  `./tools/run_tests --platform=aarch64` (or armhf, or mingw64)")
+        print()
+        print("  Using --platform will run unit tests directly on the host and integration tests")
+        print("  in a test VM. This is the behavior used by Luci as well.")
+        print("  Setting --target will force both unit and integration tests to run on the")
+        print("  specified target instead.")
+        target = test_target.TestTarget(args.target, build_target, emulator_cmd)
+        unit_test_target = target
+        integration_test_target = target
+    else:
+        build_target = build_target or Triple.host_default()
+        unit_test_target = test_target.TestTarget("host", build_target)
+        if str(build_target) == "x86_64-unknown-linux-gnu":
+            print("Note: x86 tests are temporarily all run on the host until we improve the")
+            print("      performance of the built-in VM. See http://b/247139912")
+            print("")
+            integration_test_target = unit_test_target
+        elif str(build_target) == "aarch64-unknown-linux-gnu":
+            integration_test_target = test_target.TestTarget("vm:aarch64", build_target)
+        else:
+            # Do not run integration tests in unrecognized scenarios.
+            integration_test_target = None
+
+    if args.unit_tests and not args.integration_tests:
+        integration_test_target = None
+    elif args.integration_tests and not args.unit_tests:
+        unit_test_target = None
+
+    print("Unit Test target:", unit_test_target or "skip")
+    print("Integration Test target:", integration_test_target or "skip")
+
+    main_target = integration_test_target or unit_test_target
+    if not main_target:
+        return
+
+    if args.clean:
+        if main_target.vm:
+            testvm.clean(main_target.vm)
+        subprocess.check_call(["cargo", "clean"])
 
     # Start booting VM while we build
-    if target.vm:
-        testvm.build_if_needed(target.vm)
-        testvm.up(target.vm)
+    if main_target.vm:
+        testvm.build_if_needed(main_target.vm)
+        testvm.up(main_target.vm)
 
-    executables = list(build_all_binaries(target, args.crosvm_direct, collect_coverage))
+    executables = list(build_all_binaries(main_target, args.crosvm_direct, collect_coverage))
 
     if args.build_only:
         print("Not running tests as requested.")
@@ -562,13 +624,15 @@
     # Upload dependencies plus the main crosvm binary for integration tests if the
     # crosvm binary is not excluded from testing.
     crosvm_binary = find_crosvm_binary(executables).binary_path
-    extra_files = [crosvm_binary] if not exclude_crosvm(target.build_triple) else []
+    extra_files = [crosvm_binary] if not exclude_crosvm(main_target.build_triple) else []
 
-    test_target.prepare_target(target, extra_files=extra_files)
+    test_target.prepare_target(main_target, extra_files=extra_files)
 
     # Execute all test binaries
     test_executables = [
-        e for e in executables if e.is_test and should_run_executable(e, target, args.test_names)
+        e
+        for e in executables
+        if e.is_test and should_run_executable(e, main_target, args.test_names)
     ]
 
     all_results: List[ExecutableResults] = []
@@ -576,13 +640,21 @@
         if args.repeat > 1:
             print()
             print(f"Round {i+1}/{args.repeat}:")
-        results = [*execute_all(test_executables, target, args.retry + 1, collect_coverage)]
+        results = [
+            *execute_all(
+                test_executables,
+                unit_test_target,
+                integration_test_target,
+                args.retry + 1,
+                collect_coverage,
+            )
+        ]
         if args.generate_lcov and i == args.repeat - 1:
             generate_lcov(results, crosvm_binary, args.generate_lcov, args.cov)
         all_results.extend(results)
         random.shuffle(test_executables)
 
-    flakes = [r for r in all_results if r.previous_attempts]
+    flakes = [r for r in all_results if r.previous_attempts and r.success]
     if flakes:
         print()
         print(f"There are {len(flakes)} flaky tests")
diff --git a/tools/presubmit b/tools/presubmit
index f2cb731..3ed716b 100755
--- a/tools/presubmit
+++ b/tools/presubmit
@@ -93,20 +93,25 @@
 
 commands=(
     "./tools/health-check"
-    "./tools/run_tests --target=host"
 )
 
 if [ "$ALL" == true ]; then
     commands+=(
-        "$(aarch64_wrapper) ./tools/run_tests --target=vm:aarch64"
-        "$(aarch64_wrapper) ./tools/run_tests --target=vm:aarch64 --build-target=armhf"
-        "./tools/run_tests --target=host --build-target=mingw64 --build-only"
+        "./tools/run_tests"
+        "./tools/run_tests --target=mingw64"
+        "$(aarch64_wrapper) ./tools/run_tests --platform=aarch64"
+        "$(aarch64_wrapper) ./tools/run_tests --platform=armhf"
         "cargo build --verbose --no-default-features"
     )
 elif [ "$QUICK" != true ]; then
     commands+=(
-        "$(aarch64_wrapper) ./tools/run_tests --target=vm:aarch64"
-        "./tools/run_tests --target=host --build-target=mingw64 --build-only"
+        "./tools/run_tests"
+        "$(aarch64_wrapper) ./tools/run_tests --platform=aarch64 --unit-tests"
+        "./tools/run_tests --target=mingw64 --unit-tests"
+    )
+else
+    commands+=(
+        "./tools/run_tests --unit-tests"
     )
 fi