Merge "Add a config for apigen workspace"
diff --git a/bazel.WORKSPACE b/bazel.WORKSPACE
index e6039d1..bf4562b 100644
--- a/bazel.WORKSPACE
+++ b/bazel.WORKSPACE
@@ -65,7 +65,6 @@
 
 register_toolchains(
     "//prebuilts/build-tools:py_toolchain",
-    "//prebuilts/clang/host/linux-x86:all",
 
     # For Starlark Android rules
     "//prebuilts/sdk:android_default_toolchain",
@@ -149,3 +148,7 @@
 )
 
 register_toolchains("@rules_kotlin//toolchains/kotlin_jvm:kt_jvm_toolchain")
+
+load("//prebuilts/clang/host/linux-x86:cc_toolchain_config.bzl", "cc_register_toolchains")
+
+cc_register_toolchains()
diff --git a/ci/incremental_build.py b/ci/incremental_build.py
index 38c3aee..600be7c 100755
--- a/ci/incremental_build.py
+++ b/ci/incremental_build.py
@@ -23,6 +23,7 @@
 import dataclasses
 import datetime
 import functools
+import json
 import logging
 import os
 import re
@@ -35,14 +36,22 @@
 from typing import Callable
 from typing import Final
 from typing import Mapping
+from typing import Optional
+from typing import TypeVar
 
 INDICATOR_FILE: Final[str] = 'build/soong/soong_ui.bash'
-"""This file path is relative to the source tree root"""
+SUMMARY_CSV: Final[str] = 'summary.csv'
+TIP: Final[str] = (
+    f'TIP: For a quick look at key data points in {SUMMARY_CSV} try:\n'
+    '  tail -n +2 summary.csv | \\\n'  # skip the header row
+    '  column -t -s, -J \\\n'  # load as json
+    '    -N "$(head -n 1 summary.csv)" | \\\n'  # first row is the header
+    '  jq -r ".table[] | [.time, .ninja_explains, .logfile] | @tsv"'
+    # display the selected attributes as a table
+)
 
+# the following variables capture user input, see respective help messages
 repeat_count: int
-"""Repeating a build should be a no-op but that has not been the case.
-This instructs a build to be repeated such that we can detect anomalies"""
-
 log_dir: Path
 
 
@@ -51,34 +60,27 @@
   """Get the path to the root of the Android source tree"""
   logging.debug('Checking if Android source tree root is %s', d)
   if d.parent == d:
-    raise RuntimeError('Unable to find ROOT source directory')
+    sys.exit('Unable to find ROOT source directory, specifically, '
+             f'{INDICATOR_FILE} not found anywhere. '
+             'Try `m nothing` and `repo sync`')
   if d.joinpath(INDICATOR_FILE).is_file():
     logging.info('Android source tree root = %s', d)
     return d
   return get_top(d.parent)
 
 
-def repeat(fn: Callable[[...], None]):
-  """a decorator to repeat a function"""
-
-  @functools.wraps(fn)
-  def wrapped(*args, **kwargs):
-    for i in range(0, 1 + repeat_count):
-      if i > 0:
-        logging.info('Repetition #%d for %s', i, fn.__name__)
-      fn(*args, **kwargs)
-
-  return wrapped
-
-
-@dataclasses.dataclass(frozen=True)
+@dataclasses.dataclass
 class Cuj:
   name: str
-  do_hook: Callable[[], None]
-  undo_hook: Callable[[], None]
+  do: Callable[[], None]
+  undo: Optional[Callable[[], None]]
+
+  def with_prefix(self, prefix: str) -> 'Cuj':
+    self.name = f'{prefix} {self.name}'
+    return self
 
 
-def count_explanations(process_log_file: Path) -> int:
+def _count_explanations(process_log_file: Path) -> int:
   explanations = 0
   pattern = re.compile(
       r'^ninja explain:(?! edge with output .* is a phony output,'
@@ -90,7 +92,52 @@
   return explanations
 
 
-@repeat
+@dataclasses.dataclass
+class PerfInfoOrEvent:
+  """
+  A duck-typed union of `soong_build_metrics.PerfInfo` and
+  `soong_build_bp2build_metrics.Event`
+  """
+  name: str
+  real_time: datetime.timedelta
+  start_time: int
+  description: str = ''  # Bp2BuildMetrics#Event doesn't have description
+
+  def __post_init__(self):
+    if isinstance(self.real_time, int):
+      self.real_time = datetime.timedelta(microseconds=self.real_time / 1000)
+
+
+def read_perf_info(
+    pb_file: Path,
+    proto_file: Path,
+    proto_message: str
+) -> list[PerfInfoOrEvent]:
+  """
+  Loads PerfInfo or Event from the file sorted chronologically
+  Note we are not using protoc-generated classes for simplicity (e.g. dependency
+  on `google.protobuf`)
+  """
+  cmd = (f'printproto --proto2  --raw_protocol_buffer '
+         f'--message={proto_message} '
+         f'--proto="{proto_file}" '
+         '--multiline '
+         '--json --json_accuracy_loss_reaction=ignore '
+         f'"{pb_file}" '
+         '| jq ".. | objects | select(.real_time) | select(.name)" '
+         '| jq -s ". | sort_by(.start_time)"')
+  result = subprocess.check_output(cmd, shell=True, cwd=get_top())
+
+  def parse(d: dict) -> Optional[PerfInfoOrEvent]:
+    fields: set[str] = {f.name for f in dataclasses.fields(PerfInfoOrEvent)}
+    filtered = {k: v for (k, v) in d.items() if k in fields}
+    return PerfInfoOrEvent(**filtered)
+
+  metrics: list[PerfInfoOrEvent] = [parse(d) for d in json.loads(result)]
+
+  return metrics
+
+
 def build(
     options: argparse.Namespace,
     cuj_name: str,
@@ -107,10 +154,19 @@
     ninja_args += ' '
   ninja_args += '-d explain --quiet'
 
-  pattern = re.compile(r'(?:^|\s)-n\b')
-  if not allow_dry_run and pattern.search(ninja_args):
-    logging.warning('ignoring "-n"')
-    ninja_args = pattern.sub('', ninja_args)
+  ninja_dry_run = re.compile(r'(?:^|\s)-n\b')
+
+  soong_ui_ninja_args = os.environ.get('SOONG_UI_NINJA_ARGS') or ''
+  if ninja_dry_run.search(soong_ui_ninja_args):
+    sys.exit('"-n" in SOONG_UI_NINJA_ARGS would not update build.ninja etc')
+
+  if soong_ui_ninja_args != '':
+    soong_ui_ninja_args += ' '
+  soong_ui_ninja_args += '-d explain --quiet'
+
+  if not allow_dry_run and ninja_dry_run.search(ninja_args):
+    logging.warning(f'ignoring "-n" in NINJA_ARGS={ninja_args}')
+    ninja_args = ninja_dry_run.sub('', ninja_args)
 
   targets: Final[list[str]] = options.targets
   is_bazel = targets[0].startswith('//')
@@ -123,34 +179,11 @@
     cmd += '--bazel-mode'
   cmd += ' '.join(targets)
   overrides: Mapping[str, str] = {'NINJA_ARGS': ninja_args,
+                                  'SOONG_UI_NINJA_ARGS': soong_ui_ninja_args,
                                   'TARGET_BUILD_VARIANT': 'userdebug',
                                   'TARGET_PRODUCT': 'aosp_arm64'
                                   }
   env: Mapping[str, str] = {**os.environ, **overrides}
-  process_log_file = get_log_file(cuj_name)
-  with open(process_log_file, 'w') as f:
-    logging.info('see %s', process_log_file)
-    f.write(datetime.datetime.now().strftime('%m/%d/%Y %H:%M:%s\n'))
-    f.write(f'Running:{cmd}\n')
-    env_for_logging = [f'{k}={v}' for (k, v) in env.items()]
-    env_for_logging.sort()
-    env_string = '\n  '.join(env_for_logging)
-    f.write(f'Environment Variables:\n  {env_string}\n\n\n')
-    start = time.perf_counter()
-    p = subprocess.run(cmd,
-                       check=False,
-                       cwd=get_top(),
-                       env=env,
-                       text=True,
-                       shell=True,
-                       stdout=f,
-                       stderr=f)
-    elapsed = datetime.timedelta(seconds=time.perf_counter() - start)
-  # TODO(usta): `shell=False` when build/envsetup.sh needn't be sourced for `b`
-
-  if p.returncode != 0:
-    raise SystemExit(
-        f'subprocess yielded {p.returncode} see {process_log_file}')
 
   build_type: str
   if is_bazel:
@@ -162,48 +195,149 @@
   else:
     build_type = 'soong'
 
-  summary = {
-      'logfile': process_log_file.name,
-      'build type': build_type,
-      'targets': ' '.join(targets),
-      'time': elapsed,
-      'explanations': count_explanations(process_log_file)}
-  with open(log_dir.joinpath('summary.csv'), 'a') as stream:
-    writer = csv.DictWriter(stream, fieldnames=summary.keys())
-    if stream.tell() == 0:  # file's empty
+  env_for_logging = [f'{k}={v}' for (k, v) in env.items()]
+  env_for_logging.sort()
+  env_string = '\n  '.join(env_for_logging)
+
+  for run_number in range(0, repeat_count + 1):
+    process_log_file = _cuj2filename(cuj_name, 'log', run_number)
+    with open(process_log_file, 'w') as f:
+      logging.info('TIP: to see the log:\n  tail -f "%s"', process_log_file)
+      f.write(datetime.datetime.now().strftime('%m/%d/%Y %H:%M:%S\n'))
+      f.write(f'Environment Variables:\n  {env_string}\n\n\n')
+      f.write(f'Running:{cmd}\n')
+      start = time.time_ns()
+      # ^ not time.perf_counter_ns() as we need wall clock time for stat()
+      p = subprocess.run(
+          cmd,
+          check=False,
+          cwd=get_top(),
+          env=env,
+          text=True,
+          shell=True,
+          # TODO(usta): `shell=False` when `source build/envsetup.sh` not needed
+          stdout=f,
+          stderr=f)
+      elapsed = datetime.timedelta(
+        microseconds=(time.time_ns() - start) / 1000)
+
+    if p.returncode != 0:
+      logging.error(
+          f'subprocess yielded {p.returncode} see {process_log_file}')
+
+    _write_summary(
+        start,
+        cuj=cuj_name,
+        run=run_number,
+        build_type=build_type if p.returncode == 0 else f'FAILED {build_type}',
+        targets=' '.join(targets),
+        time=elapsed,
+        ninja_explains=_count_explanations(process_log_file))
+
+
+T = TypeVar('T')
+
+
+def merge(list_a: list[T], list_b: list[T]) -> list[T]:
+  """
+  Merges two lists while maintaining order assuming the two have a
+  consistent ordering, i.e. for any two elements present in both lists,
+  their order is the same in both (i.e. the same element comes first)
+  merge([],[]) -> []
+  merge([],[1,2]) -> [1,2]
+  merge([1, 5, 3], [2, 5, 9, 3]) -> [1, 2, 5, 9, 3]
+  """
+  j = 0
+  acc = []
+  for i in range(0, len(list_a)):
+    if j == len(list_b):
+      acc.extend(list_a[i:])
+      break
+    a = list_a[i]
+    try:
+      k = list_b.index(a, j)
+      acc.extend(list_b[j:k + 1])
+      j = k + 1
+    except ValueError:
+      acc.append(a)
+  acc.extend(list_b[j:])
+  return acc
+
+
+# run with pytest
+def test_merge():
+  assert merge([], []) == []
+  assert merge([1, 2], []) == [1, 2]
+  assert merge([1, 2], [3, 4]) == [1, 2, 3, 4]
+  assert merge([1, 5, 9], [3, 5, 7]) == [1, 3, 5, 9, 7]
+  assert merge([1, 2, 3], [5, 7]) == [1, 2, 3, 5, 7]
+  assert merge([1, 2, 3], [5, 7, 1]) == [5, 7, 1, 2, 3]
+
+
+def _write_summary(start_nanos: int, **row):
+  """
+  Writes the row combined with metrics from `out/soong_metrics`
+  to summary.csv. For `write_summary(time.time_ns(), a = 1, b = 'hi')`, the file
+  content will be:
+    |  a  |  b  | ... | soong/bootstrap | soong_build/soong_build | ...
+    |  1  | hi  | ... | 0:02:07.979398  | 0:01:51.517449          | ...
+  :param row: metadata columns for a row, e.g.
+                 cuj, target(s) built, etc.
+  """
+  headers_for_run: list[str] = [k for k in row]
+
+  pb_file = get_out_dir().joinpath('soong_metrics')
+  if pb_file.exists() and pb_file.stat().st_mtime_ns > start_nanos:
+    for m in read_perf_info(
+        pb_file=pb_file,
+        proto_file=get_top().joinpath(
+            'build/soong/ui/metrics/'
+            'metrics_proto/metrics.proto'),
+        proto_message='soong_build_metrics.MetricsBase'
+    ):
+      key = f'{m.name}/{m.description}'
+      headers_for_run.append(key)
+      row[key] = m.real_time
+
+  pb_file = get_out_dir().joinpath('bp2build_metrics.pb')
+  if pb_file.exists() and pb_file.stat().st_mtime_ns > start_nanos:
+    for m in read_perf_info(
+        pb_file=pb_file,
+        proto_file=get_top().joinpath(
+            'build/soong/ui/metrics/'
+            'bp2build_metrics_proto/bp2build_metrics.proto'),
+        proto_message='soong_build_bp2build_metrics.Bp2BuildMetrics'
+    ):
+      key = f'{m.name}/{m.description}'
+      headers_for_run.append(key)
+      row[key] = m.real_time
+
+  summary_csv = log_dir.joinpath('summary.csv')
+  append_to_file = summary_csv.exists()
+  rows: list[dict[str, any]] = []
+  if append_to_file:
+    with open(summary_csv, mode='r', newline='') as f:
+      reader = csv.DictReader(f)
+      headers_in_summary_csv: list[str] = reader.fieldnames or []
+      if headers_in_summary_csv != headers_for_run:
+        # an example of why the headers would differ: unlike a mixed build,
+        # a legacy build wouldn't have bp2build metrics
+        logging.debug('headers differ:\n%s\n%s',
+                      headers_in_summary_csv, headers_for_run)
+        append_to_file = False  # to be re-written
+        headers_for_run = merge(headers_in_summary_csv, headers_for_run)
+        logging.debug('merged headers:\n%s', headers_for_run)
+        rows = [r for r in reader]  # read current rows to rewrite later
+  rows.append(row)
+  with open(summary_csv, mode='a' if append_to_file else 'w',
+            newline='') as f:
+    writer = csv.DictWriter(f, headers_for_run)
+    if not append_to_file:
       writer.writeheader()
-    writer.writerow(summary)
+    writer.writerows(rows)
 
 
-def touch(p: Path, parents: bool = False):
-  def mtime():
-    logging.debug('mtime(%s)= %s', p,
-                  datetime.datetime.fromtimestamp(p.stat().st_mtime))
-
-  if not p.parent.exists():
-    if parents:
-      p.parent.mkdir(parents=True, exist_ok=True)
-    else:
-      raise SystemExit(f'Directory does not exist: {p.parent}')
-  if p.exists():
-    mtime()
-  p.touch()
-  mtime()
-
-
-def create_c_file(f: Path, parents: bool = False):
-  touch(f, parents)
-  with open(f, 'w') as f:
-    f.write('''
-#include <stdio.h>
-int main(){
-  printf("Hello World");
-  return 0;
-}
-''')
-
-
-def validate_int_in_range(lo: int, hi: int) -> Callable[[str], int]:
+def _validate_int_in_range(lo: int, hi: int) -> Callable[[str], int]:
   def validate(i: str) -> int:
     if lo <= int(i) <= hi:
       return int(i)
@@ -233,54 +367,111 @@
   return Path(out_dir) if out_dir else get_top().joinpath('out')
 
 
-def get_log_file(cuj_name: str, suffix: int = 0) -> Path:
+def _cuj2filename(cuj_name: str, extension: str, suffix: int) -> Path:
   """
   Creates a file for logging output for the given cuj_name. A numeric
-  subscript is appended to the filename to distinguish different runs.
+  suffix is appended to the filename to distinguish different runs.
   """
-  f = log_dir.joinpath(f'{cuj_name.replace(" ", "_")}_{suffix}.log')
+  f = log_dir.joinpath(
+      f'{cuj_name.replace("/", "__")}-{suffix}.{extension}')
   f.parent.mkdir(parents=True, exist_ok=True)
   if f.exists():
-    return get_log_file(cuj_name, suffix + 1)
+    return _cuj2filename(cuj_name, extension, suffix + 1)
   f.touch(exist_ok=False)
   return f
 
 
+def touch_file(p: Path, parents: bool = False):
+  """
+  Used as an approximation for file edits in CUJs.
+  This works because Ninja determines freshness based on Modify timestamp.
+  :param p: file to be `touch`-ed
+  :param parents: if true, create the parent directories as needed
+  """
+
+  def mtime():
+    logging.debug('mtime(%s)= %s', p,
+                  datetime.datetime.fromtimestamp(p.stat().st_mtime))
+
+  if not p.parent.exists():
+    if parents:
+      p.parent.mkdir(parents=True, exist_ok=True)
+    else:
+      raise SystemExit(f'Directory does not exist: {p.parent}')
+  if p.exists():
+    mtime()
+  p.touch()
+  mtime()
+
+
 @functools.cache
-def get_cujs() -> list[Cuj]:
-  def noop():
-    logging.debug('do nothing')
+def _get_cujs() -> list[Cuj]:
+  def touch(p: str):
+    return Cuj(name=f'touch {p}',
+               do=lambda: touch_file(get_top().joinpath(p)),
+               undo=None)
 
-  unreferenced = get_top().joinpath(
-      'bionic/libc/arch-common/bionic/unreferenced-test-file.c')
-  unreferenced_in_unreferenced = get_top().joinpath(
-      'unreferenced/unreferenced-test-file.c')
-  return [Cuj('noop', noop, noop), Cuj(
-      'touch root Android.bp',
-      do_hook=lambda: touch(get_top().joinpath('Android.bp')),
-      undo_hook=noop
-  ), Cuj(
-      'new empty Android.bp',
-      do_hook=lambda: touch(get_top().joinpath('some_directory/Android.bp'),
-                            parents=True),
-      undo_hook=lambda: shutil.rmtree(get_top().joinpath('some_directory'))
-  ), Cuj(
-      'new unreferenced c file',
-      do_hook=lambda: create_c_file(unreferenced),
-      undo_hook=unreferenced.unlink
-  ), Cuj(
-      'new unreferenced c file in unreferenced dir',
-      do_hook=lambda: create_c_file(unreferenced_in_unreferenced, parents=True),
-      undo_hook=unreferenced_in_unreferenced.unlink
-  ), Cuj(
-      'touch AndroidManifest.xml',
-      do_hook=lambda: touch(
-          get_top().joinpath('packages/apps/DevCamera/AndroidManifest.xml')),
-      undo_hook=noop
-  )]
+  def new(p: str, content: Optional[str] = None):
+    file = Path(p)
+    if file.is_absolute():
+      raise SystemExit(f'expected relative paths: {p}')
+    file = get_top().joinpath(file)
+    if file.exists():
+      raise SystemExit(
+          f'File {p} already exists, probably due to an interrupted earlier run '
+          f'of {__file__}, TIP: `repo status` and revert changes!!!')
+    missing_dirs = [f for f in file.parents if
+                    not f.exists() and f.relative_to(get_top())]
+    shallowest_missing_dir = missing_dirs[-1] if len(missing_dirs) else None
+
+    def do():
+      touch_file(file, parents=True)
+      if content:
+        with open(file, mode="w") as f:
+          f.write(content)
+
+    def undo():
+      if shallowest_missing_dir:
+        shutil.rmtree(shallowest_missing_dir)
+      else:
+        file.unlink(missing_ok=False)
+
+    return Cuj(name=f'new {p}', do=do, undo=undo)
+
+  def delete_create(p: str):
+    original = get_top().joinpath(p)
+    copied = get_out_dir().joinpath(f'{original.name}.bak')
+
+    return Cuj(name=f'delete and create {p}',
+               do=lambda: original.rename(copied),
+               undo=lambda: copied.rename(original))
+
+  return [
+      Cuj(name='noop',
+          do=lambda: logging.debug('do nothing'),
+          undo=None),
+      touch('Android.bp'),
+      new('some_directory/Android.bp', '// empty test file'),
+      new('unreferenced/unreferenced-file.c', '''
+          #include <stdio.h>
+          int main(){
+            printf("Hello World");
+            return 0;
+          }
+        '''),
+      new('bionic/libc/arch-common/bionic/unreferenced.c'),
+      touch('bionic/libc/bionic/icu.cpp'),
+      delete_create('bionic/libc/bionic/icu.cpp'),
+      touch('libcore/benchmarks/src/benchmarks/Foo.java').with_prefix(
+          'globbed'),
+      delete_create('libcore/benchmarks/src/benchmarks/Foo.java').with_prefix(
+          'globbed'),
+      touch('art/artd/tests/AndroidManifest.xml'),
+      delete_create('art/artd/tests/AndroidManifest.xml'),
+  ]
 
 
-def get_user_input(cujs: list[Cuj]) -> argparse.Namespace:
+def _get_user_input(cujs: list[Cuj]) -> argparse.Namespace:
   p = argparse.ArgumentParser(
       formatter_class=argparse.RawTextHelpFormatter,
       description='' +
@@ -289,26 +480,30 @@
 
   cuj_list = '\n'.join([f'{i}: {cuj.name}' for i, cuj in enumerate(cujs)])
   p.add_argument('-c', '--cujs', nargs='*',
-                 type=validate_int_in_range(0, len(cujs) - 1),
+                 type=_validate_int_in_range(0, len(cujs) - 1),
                  help=f'The index number(s) for the CUJ(s):\n{cuj_list}')
 
-  p.add_argument('-r', '--repeat', type=validate_int_in_range(0, 10), default=1,
+  p.add_argument('-r', '--repeat', type=_validate_int_in_range(0, 10),
+                 default=1,
                  help='The number of times to repeat the build invocation. '
-                      'If 0, then will not repeat (i.e. do exactly once). '
-                      'Defaults to %(default)d')
+                      'If 0, do not repeat (i.e. do exactly once). '
+                      'Defaults to %(default)d\n'
+                      'TIP: Repetitions should ideally be null builds.')
 
   log_levels = dict(getattr(logging, '_levelToName')).values()
   p.add_argument('-v', '--verbosity', choices=log_levels, default='INFO',
-                 help='Log level, defaults to %(default)s')
+                 help='Log level, i.e. the minimum severity of the messages\n'
+                      'to display, defaults to %(default)s')
 
   p.add_argument('-l', '--log-dir', type=str, default=None,
-                 help='Directory to collect logs in, '
-                      'defaults to $OUT_DIR/timing_logs. '
-                      'There is also a summary.csv file generated there.\n'
-                      'Try `cat summary.csv | column -t -s,` to view it.')
+                 help='Directory to collect logs in, defaults to '
+                      f'$OUT_DIR/timing_logs. There is also a {SUMMARY_CSV} '
+                      f'file generated there.\n{TIP}')
 
-  p.add_argument('--bazel-mode-dev', action=argparse.BooleanOptionalAction)
-  p.add_argument('--bazel-mode', action=argparse.BooleanOptionalAction)
+  p.add_argument('--bazel-mode-dev', default=False, action='store_true')
+  p.add_argument('--bazel-mode', default=False, action='store_true')
+  p.add_argument('--skip-repo-status', default=False, action='store_true',
+                 help='Skip "repo status" check')
 
   p.add_argument('targets', nargs='*',
                  action=ValidateTargets,
@@ -341,20 +536,51 @@
   return options
 
 
+def has_uncommitted_changed() -> bool:
+  """
+  effectively a quick 'repo status' that fails fast
+  if any project has uncommitted changes
+  """
+  for cmd in ['diff', 'diff --staged']:
+    diff = subprocess.run(
+        args=f'repo forall -c git {cmd} --quiet --exit-code'.split(),
+        cwd=get_top(), text=True,
+        stdout=subprocess.DEVNULL,
+        stderr=subprocess.DEVNULL)
+    if diff.returncode != 0:
+      return True
+  return False
+
+
 def main():
   """
-  Runs the provided targets under various CUJs while timing them. In pseudocode:
-
-    time m droid dist
+  Run provided target(s) under various CUJs and collect metrics.
+  In pseudocode:
+    time m <target>
+    collect metrics
     for each cuj:
         make relevant changes
-        time m droid dist
+        time m <target>
+        collect metrics
         revert those changes
-        time m droid dist
+        time m <target>
+        collect metrics
   """
-  cujs = get_cujs()
-  options = get_user_input(cujs)
+  cujs = _get_cujs()
+  options = _get_user_input(cujs)
 
+  if not options.skip_repo_status and has_uncommitted_changed():
+    response = input('There are uncommitted changes (TIP: repo status).\n'
+                     'Continue?[Y/n]')
+    if response.upper() not in ('', 'Y'):
+      return
+
+  logging.warning('If you kill this process, make sure to `repo status` and '
+                  'revert unwanted changes.\n'
+                  'TIP: If you have no local changes of interest you may\n  '
+                  'repo forall -p -c git reset --hard\n        and\n  '
+                  'repo forall -p -c git clean --force\n        and even\n  '
+                  'm clean')
   logging.info('START initial build')
   build(options, 'initial build', allow_dry_run=False)
   logging.info('DONE initial build\n\n')
@@ -363,16 +589,15 @@
       logging.debug('Ignoring cuj "%s"', cuj.name)
       continue
     logging.info('START "%s"', cuj.name)
-    cuj.do_hook()
+    cuj.do()
     build(options, cuj.name, allow_dry_run=True)
-    logging.info('Revert change from "%s"', cuj.name)
-    cuj.undo_hook()
-    build(options, cuj.name + ' undo', allow_dry_run=False)
+    if cuj.undo:
+      logging.info('Revert change from "%s"', cuj.name)
+      cuj.undo()
+      build(options, cuj.name + ' undo', allow_dry_run=False)
     logging.info('DONE "%s"\n\n', cuj.name)
 
-  logging.info(
-      f'TIP: run `cat {log_dir.joinpath("summary.csv")} '
-      f'| column -t -s,` to view the results')
+  logging.info(TIP)
 
 
 if __name__ == '__main__':
diff --git a/ci/incremental_mixed_build.sh b/ci/incremental_mixed_build.sh
deleted file mode 100755
index 2f869c7..0000000
--- a/ci/incremental_mixed_build.sh
+++ /dev/null
@@ -1,209 +0,0 @@
-#!/bin/bash -eu
-
-# Copyright (C) 2022 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# The script defines a number of `cujs` and loops through them; in pseudo-code:
-#
-# for each cuj in cujs {
-#   m droid dist
-#   make cuj relevant changes
-#   time m droid dist
-#   revert cuj relevant changes
-# }
-#
-# Note: this script assumes PWD is the root of source tree.
-
-readonly mypath=$(realpath "$0")
-# console output to be limited
-declare quiet=
-# CUJ related soong_builds to dry run ninja
-declare dry_run=
-# CUJ (0-based index in cujs array) to run
-declare cuj_to_run=
-declare -a cujs
-declare -a targets=(droid dist)
-
-# Finds the ordinal number of the last know run of this script
-function run_n {
-  if [[ ! -d "out" ]]; then
-    echo '0'
-  else
-    local -r n=$(find out -maxdepth 1 -name 'run-*.log' | sed -E "s/.*-([0-9]+)-.*\.log$/\1/" | sort -n -r | head -n 1)
-    if [[ -z $n ]]; then
-      echo '1' # just to signify that the next build is not a clean build
-    else
-      echo "$n"
-    fi
-  fi
-}
-
-function show_spinner {
-  local -r chars="-/|\\"
-  local i=0
-  while read -r; do
-    printf "\b%s" "${chars:i++%${#chars}:1}"
-  done
-  printf "\bDONE\n"
-}
-
-function output {
-  if [[ -n $quiet ]]; then
-    show_spinner
-  else
-    cat
-  fi
-}
-
-function count_explanations {
-  grep '^ninja explain:' "$1" | grep -c -v "^ninja explain: edge with output .\+ is a phony output, so is always dirty$"
-}
-
-function summarize {
-  local -r log_file=$1
-  if [[ -n $quiet ]]; then
-    # display time information on console
-    tail -n 3 "$log_file"
-  fi
-  local -r explanations=$(count_explanations "$log_file")
-  if [[ $explanations -eq 0 ]]; then
-    echo "Build ${targets[*]} ($log_file) was a NULL build"
-  else
-    # Note: ninja explanations doesn't necessarily match the number of actions performed;
-    # it will be AT LEAST the number of actions.
-    # Number of actions can be deduced from `.ninja_log`. However, for debugging and
-    # tweaking performance, ninja explanations are far more useful.
-    echo "Build ${targets[*]} ($log_file) was a NON-NULL build, ninja explanations count = $explanations"
-  fi
-}
-
-function build_once {
-  local -r cuj=$1
-  local -r log_file=$2
-  mkdir -p out && touch "$log_file"
-  echo "Build ${targets[*]} ($log_file)...............................................STARTED"
-  local ninja_args
-  if [[ -n $dry_run && $cuj != "reset" ]]; then
-    echo "DRY RUN"
-    ninja_args='-d explain -n'
-  else
-    ninja_args='-d explain'
-  fi
-  (time build/soong/soong_ui.bash \
-    --make-mode \
-    --mk-metrics \
-    --skip-soong-tests \
-    NINJA_ARGS="$ninja_args" \
-    TARGET_BUILD_VARIANT=userdebug \
-    TARGET_PRODUCT=aosp_coral \
-    "${targets[@]}") 2>&1 | tee --append "$log_file" | output
-  local -r exitStatus="${PIPESTATUS[0]}"
-  if [[ $exitStatus -ne 0 ]]; then
-    echo "FAILED with exit code $exitStatus"
-    exit "$exitStatus"
-  fi
-  summarize "$log_file"
-  echo "Build ${targets[*]} ($log_file) .................................................DONE"
-}
-
-function cuj_noop {
-    echo "do nothing"
-}
-
-function cuj_touchRootAndroidBp {
-  local undo=${1:-}
-  if [[ -z $undo ]]; then
-    touch Android.bp
-  else
-    echo "do nothing"
-  fi
-}
-
-function cuj_newUnreferencedFile {
-  local undo=${1:-}
-  mkdir -p unreferenced_directory
-  if [[ -n $undo ]]; then
-    rm -rf unreferenced_directory
-  else
-    cat <<EOF >unreferenced_directory/test.c
-#include <stdio.h>
-int main(){
-  printf("Hello World");
-  return 0
-}
-EOF
-  fi
-}
-
-function cuj_newAndroidBp {
-  local undo=${1:-}
-  if [[ -n $undo ]]; then
-    rm -rf some_directory
-  else
-    mkdir -p some_directory
-    touch some_directory/Android.bp
-  fi
-}
-
-# Note: cuj_xxx functions must precede this line to be discovered here
-readarray -t cujs< <(declare -F | awk '$NF ~ /cuj_/ {print $NF}')
-
-function usage {
-  cat <<EOF >&2
-Usage: $mypath [-c cuj_to_run] [-n] [-q] [TARGET1 [TARGET2 [ ...]]]
-  -c: The index number for the CUJ to test. Choose one of:
-EOF
-for ((i = 0; i < ${#cujs[@]}; i++)); do
-  echo "      $i: ${cujs[$i]}"
-done
-  cat <<EOF >&2
-  -n: Dry ninja runs (except "resetting" runs that precede CUJs)
-  -q: Quiet. Console output will be suppressed.
-If you omit targets, "${targets[*]}" will be used.
-Set --bazel-mode-dev for mixed builds.
-EOF
-  exit 1
-}
-
-while getopts "c:nq" o; do
-  case "${o}" in
-  c) cuj_to_run=${OPTARG} ;;
-  n) dry_run=true ;;
-  q) quiet=true ;;
-  *) usage ;;
-  esac
-done
-shift $((OPTIND - 1))
-if [[ $# -gt 0 ]]; then
-  IFS=" " read -r -a targets <<<"$@"
-fi
-if [[ -n $cuj_to_run ]]; then
-  if [[ ! $cuj_to_run =~ ^[0-9]+$ || $cuj_to_run -ge "${#cujs[@]}" ]]; then
-    echo "No such CUJ \"$cuj_to_run\", choose between 0 and $((${#cujs[@]} - 1))"
-    usage
-  fi
-  cujs=("${cujs[$cuj_to_run]}")
-fi
-declare -i this_run
-this_run=$((1 + "$(run_n)"))
-for ((i = 0; i < ${#cujs[@]}; i++)); do
-  build_once "reset" "out/run-$this_run-reset.log"
-  cuj=${cujs[i]}
-  echo "perform $cuj"
-  eval "$cuj"
-  build_once "$cuj" "out/run-$this_run-$cuj.log"
-  echo "undo $cuj"
-  eval "$cuj undo"
-done
diff --git a/platforms/arch/BUILD b/platforms/arch/BUILD
index f8e1ccb..724581d 100644
--- a/platforms/arch/BUILD
+++ b/platforms/arch/BUILD
@@ -17,6 +17,11 @@
 )
 
 constraint_value(
+    name = "riscv64",
+    constraint_setting = "@platforms//cpu:cpu",
+)
+
+constraint_value(
     name = "x86",
     constraint_setting = "@platforms//cpu:cpu",
 )
diff --git a/platforms/os_arch/BUILD.bazel b/platforms/os_arch/BUILD.bazel
index c0cf318..39e24cc 100644
--- a/platforms/os_arch/BUILD.bazel
+++ b/platforms/os_arch/BUILD.bazel
@@ -15,6 +15,14 @@
 )
 
 config_setting(
+    name = "android_riscv64",
+    constraint_values = [
+        "//build/bazel/platforms/arch:riscv64",
+        "//build/bazel/platforms/os:android",
+    ],
+)
+
+config_setting(
     name = "android_x86",
     constraint_values = [
         "//build/bazel/platforms/arch:x86",
diff --git a/rules/android/android_app_certificate.bzl b/rules/android/android_app_certificate.bzl
index d3f3f54..1555392 100644
--- a/rules/android/android_app_certificate.bzl
+++ b/rules/android/android_app_certificate.bzl
@@ -14,6 +14,9 @@
 limitations under the License.
 """
 
+load("@bazel_skylib//lib:paths.bzl", "paths")
+load("@soong_injection//product_config:product_variables.bzl", "product_vars")
+
 AndroidAppCertificateInfo = provider(
     "Info needed for Android app certificates",
     fields = {
@@ -47,3 +50,47 @@
         pk8 = certificate + ".pk8",
         **kwargs
     )
+
+_default_cert_package = "//build/make/target/product/security"
+
+# Set up the android_app_certificate dependency pointing to the .pk8 and
+# .x509.pem files in the source tree.
+#
+# Every caller who uses this function will have their own android_app_certificate
+# target, even if the underlying certs are shared by many.
+#
+# If cert_name is used, then it will be looked up from the app certificate
+# package as determined by the DefaultAppCertificate variable, or the hardcoded
+# directory.
+#
+# Otherwise, if the DefaultAppCertificate variable is used, then an
+# android_app_certificate target will be created to point to the path value, and
+# the .pk8 and .x509.pem suffixes are added automatically.
+#
+# Finally (cert_name not used AND DefaultAppCertificate not specified), use the
+# testkey.
+def android_app_certificate_with_default_cert(name, cert_name = None):
+    default_cert = product_vars.get("DefaultAppCertificate")
+
+    if cert_name and default_cert:
+        certificate = "".join(["//", paths.dirname(default_cert), ":", cert_name])
+    elif cert_name:
+        # if a specific certificate name is given, check the default directory
+        # for that certificate.
+        certificate = _default_cert_package + ":" + cert_name
+    elif default_cert:
+        # This assumes that there is a BUILD file marking the directory of
+        # the default cert as a package.
+        certificate = "".join([
+            "//",
+            paths.dirname(default_cert),
+            ":",
+            paths.basename(default_cert),
+        ])
+    else:
+        certificate = _default_cert_package + ":testkey"
+
+    android_app_certificate(
+        name = name,
+        certificate = certificate,
+    )
diff --git a/rules/android/android_binary.bzl b/rules/android/android_binary.bzl
index 7661401..149b3fd 100644
--- a/rules/android/android_binary.bzl
+++ b/rules/android/android_binary.bzl
@@ -14,42 +14,10 @@
 limitations under the License.
 """
 
-load("@bazel_skylib//lib:paths.bzl", "paths")
 load("@rules_android//rules:rules.bzl", _android_binary = "android_binary")
-load("@soong_injection//product_config:product_variables.bzl", "product_vars")
-load("android_app_certificate.bzl", "android_app_certificate")
+load("android_app_certificate.bzl", "android_app_certificate_with_default_cert")
 load("android_app_keystore.bzl", "android_app_keystore")
 
-def _default_cert_prod_var():
-    return product_vars["DefaultAppCertificate"]
-
-def _default_app_certificate_package():
-    default_cert = _default_cert_prod_var()
-    if default_cert:
-        return "//" + paths.dirname(default_cert)
-
-    # if product variable is not set, default to Soong default:
-    return "//build/make/target/product/security"
-
-def _default_app_certificate():
-    default_cert = _default_cert_prod_var()
-    if default_cert:
-        return default_cert
-    return _default_app_certificate_package() + ":testkey"
-
-def _android_app_certificate_with_default_cert(name, cert_name):
-    if cert_name:
-        # if a specific certificate name is given, check the default directory
-        # for that certificate
-        certificate = _default_app_certificate_package() + ":" + cert_name
-    else:
-        certificate = _default_app_certificate()
-
-    android_app_certificate(
-        name = name,
-        certificate = certificate,
-    )
-
 def android_binary(
         name,
         certificate = None,
@@ -79,7 +47,7 @@
     if certificate or certificate_name:
         if certificate_name:
             app_cert_name = name + "_app_certificate"
-            _android_app_certificate_with_default_cert(app_cert_name, certificate_name)
+            android_app_certificate_with_default_cert(app_cert_name, certificate_name)
             certificate = ":" + app_cert_name
 
         app_keystore_name = name + "_keystore"
diff --git a/rules/apex/apex.bzl b/rules/apex/apex.bzl
index 2964e13..d51176b 100644
--- a/rules/apex/apex.bzl
+++ b/rules/apex/apex.bzl
@@ -19,7 +19,7 @@
 load("//build/bazel/rules:prebuilt_file.bzl", "PrebuiltFileInfo")
 load("//build/bazel/rules:sh_binary.bzl", "ShBinaryInfo")
 load("//build/bazel/rules/cc:stripped_cc_common.bzl", "StrippedCcBinaryInfo")
-load("//build/bazel/rules/android:android_app_certificate.bzl", "AndroidAppCertificateInfo")
+load("//build/bazel/rules/android:android_app_certificate.bzl", "AndroidAppCertificateInfo", "android_app_certificate_with_default_cert")
 load("//build/bazel/rules/apex:transition.bzl", "apex_transition", "shared_lib_transition_32", "shared_lib_transition_64")
 load("//build/bazel/platforms:transitions.bzl", "default_android_transition")
 load("//build/bazel/rules/apex:cc.bzl", "ApexCcInfo", "apex_cc_aspect")
@@ -441,7 +441,10 @@
         "logging_parent": attr.string(),
         "file_contexts": attr.label(allow_single_file = True, mandatory = True),
         "key": attr.label(providers = [ApexKeyInfo], mandatory = True),
-        "certificate": attr.label(providers = [AndroidAppCertificateInfo], mandatory = True),
+        "certificate": attr.label(
+            providers = [AndroidAppCertificateInfo],
+            mandatory = True,
+        ),
         "min_sdk_version": attr.string(default = "current"),
         "updatable": attr.bool(default = True),
         "installable": attr.bool(default = True),
@@ -504,6 +507,7 @@
         file_contexts = None,
         key = None,
         certificate = None,
+        certificate_name = None,
         min_sdk_version = None,
         updatable = True,
         installable = True,
@@ -527,13 +531,27 @@
     if compressible:
         capex_output = name + ".capex"
 
+    if certificate and certificate_name:
+        fail("Cannot use both certificate_name and certificate attributes together. Use only one of them.")
+    app_cert_name = name + "_app_certificate"
+    if certificate_name:
+        # use the named key in the default cert dir
+        android_app_certificate_with_default_cert(app_cert_name, certificate_name)
+        certificate_label = ":" + app_cert_name
+    elif certificate:
+        certificate_label = certificate
+    else:
+        # use the default testkey
+        android_app_certificate_with_default_cert(app_cert_name)
+        certificate_label = ":" + app_cert_name
+
     _apex(
         name = name,
         manifest = manifest,
         android_manifest = android_manifest,
         file_contexts = file_contexts,
         key = key,
-        certificate = certificate,
+        certificate = certificate_label,
         min_sdk_version = min_sdk_version,
         updatable = updatable,
         installable = installable,
diff --git a/rules/apex/apex_test.bzl b/rules/apex/apex_test.bzl
index 2d414e2..08eafcd 100644
--- a/rules/apex/apex_test.bzl
+++ b/rules/apex/apex_test.bzl
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 load("//build/bazel/rules:sh_binary.bzl", "sh_binary")
+load("//build/bazel/rules/android:android_app_certificate.bzl", "AndroidAppCertificateInfo", "android_app_certificate")
 load("//build/bazel/rules/cc:cc_binary.bzl", "cc_binary")
 load("//build/bazel/rules/cc:cc_library_shared.bzl", "cc_library_shared")
 load("//build/bazel/rules/cc:cc_library_static.bzl", "cc_library_static")
@@ -1078,6 +1079,85 @@
 
     return test_name
 
+def _apex_certificate_test(ctx):
+    env = analysistest.begin(ctx)
+    target_under_test = analysistest.target_under_test(env)
+    cert = target_under_test[ApexInfo].container_key_pair
+
+    asserts.equals(env, ctx.attr.expected_pem_path, cert[0].path)
+    asserts.equals(env, ctx.attr.expected_pk8_path, cert[1].path)
+
+    return analysistest.end(env)
+
+apex_certificate_test = analysistest.make(
+    _apex_certificate_test,
+    attrs = {
+        "expected_pem_path": attr.string(),
+        "expected_pk8_path": attr.string(),
+    },
+)
+
+def _test_apex_certificate_none():
+    name = "apex_certificate_none"
+    test_name = name + "_test"
+
+    test_apex(
+        name = name,
+        certificate = None,
+    )
+
+    apex_certificate_test(
+        name = test_name,
+        target_under_test = name,
+        expected_pem_path = "build/make/target/product/security/testkey.x509.pem",
+        expected_pk8_path = "build/make/target/product/security/testkey.pk8",
+    )
+
+    return test_name
+
+def _test_apex_certificate_name():
+    name = "apex_certificate_name"
+    test_name = name + "_test"
+
+    test_apex(
+        name = name,
+        certificate = None,
+        certificate_name = "shared",  # use something other than testkey
+    )
+
+    apex_certificate_test(
+        name = test_name,
+        target_under_test = name,
+        expected_pem_path = "build/make/target/product/security/shared.x509.pem",
+        expected_pk8_path = "build/make/target/product/security/shared.pk8",
+    )
+
+    return test_name
+
+def _test_apex_certificate_label():
+    name = "apex_certificate_label"
+    test_name = name + "_test"
+
+    android_app_certificate(
+        name = name + "_cert",
+        certificate = name,
+        tags = ["manual"],
+    )
+
+    test_apex(
+        name = name,
+        certificate = name + "_cert",
+    )
+
+    apex_certificate_test(
+        name = test_name,
+        target_under_test = name,
+        expected_pem_path = "build/bazel/rules/apex/apex_certificate_label.x509.pem",
+        expected_pk8_path = "build/bazel/rules/apex/apex_certificate_label.pk8",
+    )
+
+    return test_name
+
 def apex_test_suite(name):
     native.test_suite(
         name = name,
@@ -1104,5 +1184,8 @@
             _test_default_apex_manifest_version(),
             _test_min_sdk_version_failure(),
             _test_min_sdk_version_failure_transitive(),
+            _test_apex_certificate_none(),
+            _test_apex_certificate_name(),
+            _test_apex_certificate_label(),
         ],
     )
diff --git a/rules/apex/apex_test_helpers.bzl b/rules/apex/apex_test_helpers.bzl
index 04aaf9f..fabdcfa 100644
--- a/rules/apex/apex_test_helpers.bzl
+++ b/rules/apex/apex_test_helpers.bzl
@@ -17,67 +17,65 @@
 load(":apex.bzl", "ApexInfo", "apex")
 
 # Set up test-local dependencies required for every apex.
-def setup_apex_required_deps():
-    file_contexts_name = "test_file_contexts"
-    manifest_name = "test_manifest"
-    key_name = "test_key"
-    certificate_name = "test_certificate"
-
+def setup_apex_required_deps(
+        file_contexts,
+        key,
+        manifest,
+        certificate):
     # Use the same shared common deps for all test apexes.
-    if not native.existing_rule(file_contexts_name):
+    if file_contexts and not native.existing_rule(file_contexts):
         native.genrule(
-            name = file_contexts_name,
-            outs = [file_contexts_name + ".out"],
+            name = file_contexts,
+            outs = [file_contexts + ".out"],
             cmd = "echo unused && exit 1",
             tags = ["manual"],
         )
 
-    if not native.existing_rule(manifest_name):
+    if manifest and not native.existing_rule(manifest):
         native.genrule(
-            name = manifest_name,
-            outs = [manifest_name + ".json"],
+            name = manifest,
+            outs = [manifest + ".json"],
             cmd = "echo unused && exit 1",
             tags = ["manual"],
         )
 
     # Required for ApexKeyInfo provider
-    if not native.existing_rule(key_name):
+    if key and not native.existing_rule(key):
         apex_key(
-            name = key_name,
-            private_key = key_name + ".pem",
-            public_key = key_name + ".avbpubkey",
+            name = key,
+            private_key = key + ".pem",
+            public_key = key + ".avbpubkey",
             tags = ["manual"],
         )
 
     # Required for AndroidAppCertificate provider
-    if not native.existing_rule(certificate_name):
+    if certificate and not native.existing_rule(certificate):
         android_app_certificate(
-            name = certificate_name,
-            certificate = certificate_name + ".cert",
+            name = certificate,
+            certificate = certificate + ".cert",
             tags = ["manual"],
         )
 
-    return struct(
-        file_contexts_name = file_contexts_name,
-        manifest_name = manifest_name,
-        key_name = key_name,
-        certificate_name = certificate_name,
-    )
-
 def test_apex(
         name,
-        file_contexts = None,
-        key = None,
-        manifest = None,
-        certificate = None,
+        file_contexts = "test_file_contexts",
+        key = "test_key",
+        manifest = "test_manifest",
+        certificate = "test_certificate",
         **kwargs):
-    names = setup_apex_required_deps()
+    setup_apex_required_deps(
+        file_contexts = file_contexts,
+        key = key,
+        manifest = manifest,
+        certificate = certificate,
+    )
+
     apex(
         name = name,
-        file_contexts = file_contexts or names.file_contexts_name,
-        key = key or names.key_name,
-        manifest = manifest or names.manifest_name,
-        certificate = certificate or names.certificate_name,
+        file_contexts = file_contexts,
+        key = key,
+        manifest = manifest,
+        certificate = certificate,
         tags = ["manual"],
         **kwargs
     )