Merge "The buildbot now uses device_testdex as modes."
diff --git a/build/apex/ld.config.txt b/build/apex/ld.config.txt
index 9e49d76..9e709d8 100644
--- a/build/apex/ld.config.txt
+++ b/build/apex/ld.config.txt
@@ -8,7 +8,7 @@
 dir.runtime = /apex/com.android.runtime/bin/
 
 [runtime]
-additional.namespaces = platform
+additional.namespaces = platform,conscrypt
 
 # Keep in sync with runtime namespace in /system/etc/ld.config.txt.
 namespace.default.isolated = true
@@ -28,3 +28,17 @@
 namespace.platform.link.default.shared_libs += libnativebridge.so
 namespace.platform.link.default.shared_libs += libnativehelper.so
 namespace.platform.link.default.shared_libs += libnativeloader.so
+
+###############################################################################
+# "conscrypt" APEX namespace
+#
+# This namespace is for libraries within the conscrypt APEX.
+###############################################################################
+namespace.conscrypt.isolated = true
+namespace.conscrypt.visible = true
+
+namespace.conscrypt.search.paths = /apex/com.android.conscrypt/${LIB}
+namespace.conscrypt.links = platform
+namespace.conscrypt.link.platform.shared_libs  = libc.so
+namespace.conscrypt.link.platform.shared_libs += libm.so
+namespace.conscrypt.link.platform.shared_libs += libdl.so
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 1725154..478ecdf 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -427,6 +427,7 @@
   gc::CollectorType collector_type_ = gc::kCollectorTypeDefault;
   bool verify_pre_gc_heap_ = false;
   bool verify_pre_sweeping_heap_ = kIsDebugBuild;
+  bool generational_cc = kEnableGenerationalCCByDefault;
   bool verify_post_gc_heap_ = false;
   bool verify_pre_gc_rosalloc_ = kIsDebugBuild;
   bool verify_pre_sweeping_rosalloc_ = false;
@@ -455,6 +456,10 @@
         xgc.verify_pre_sweeping_heap_ = true;
       } else if (gc_option == "nopresweepingverify") {
         xgc.verify_pre_sweeping_heap_ = false;
+      } else if (gc_option == "generational_cc") {
+        xgc.generational_cc = true;
+      } else if (gc_option == "nogenerational_cc") {
+        xgc.generational_cc = false;
       } else if (gc_option == "postverify") {
         xgc.verify_post_gc_heap_ = true;
       } else if (gc_option == "nopostverify") {
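
The new -Xgc: tokens slot into the existing comma-separated sub-option parser above. A minimal standalone sketch of just that toggle, assuming the same token names and a stand-in for kEnableGenerationalCCByDefault (everything else the real XGcOption parser handles is omitted):

    #include <iostream>
    #include <sstream>
    #include <string>

    // Stand-in for runtime_globals.h's kEnableGenerationalCCByDefault.
    static constexpr bool kEnableGenerationalCCByDefault = true;

    // Parse a comma-separated -Xgc: value and toggle the generational-CC flag,
    // mirroring the "generational_cc"/"nogenerational_cc" branches above.
    bool ParseGenerationalCC(const std::string& xgc_value) {
      bool generational_cc = kEnableGenerationalCCByDefault;
      std::stringstream ss(xgc_value);
      std::string gc_option;
      while (std::getline(ss, gc_option, ',')) {
        if (gc_option == "generational_cc") {
          generational_cc = true;
        } else if (gc_option == "nogenerational_cc") {
          generational_cc = false;
        }  // All other -Xgc: tokens are ignored in this sketch.
      }
      return generational_cc;
    }

    int main() {
      std::cout << ParseGenerationalCC("presweepingverify,nogenerational_cc") << '\n';  // prints 0
    }
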
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index c23524a..a412938 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -1203,6 +1203,15 @@
   });
 }
 
+static std::string GetHiddenapiFlagStr(uint32_t hiddenapi_flags) {
+  std::stringstream ss;
+  hiddenapi::ApiList api_list(hiddenapi_flags);
+  api_list.Dump(ss);
+  std::string str_api_list = ss.str();
+  std::transform(str_api_list.begin(), str_api_list.end(), str_api_list.begin(), ::toupper);
+  return str_api_list;
+}
+
 /*
  * Dumps a method.
  */
@@ -1220,12 +1229,19 @@
   char* typeDescriptor = strdup(signature.ToString().c_str());
   const char* backDescriptor = dex_file.StringByTypeIdx(pMethodId.class_idx_);
   char* accessStr = createAccessFlagStr(flags, kAccessForMethod);
+  const uint32_t hiddenapiFlags = method.GetHiddenapiFlags();
 
   if (gOptions.outputFormat == OUTPUT_PLAIN) {
     fprintf(gOutFile, "    #%d              : (in %s)\n", i, backDescriptor);
     fprintf(gOutFile, "      name          : '%s'\n", name);
     fprintf(gOutFile, "      type          : '%s'\n", typeDescriptor);
     fprintf(gOutFile, "      access        : 0x%04x (%s)\n", flags, accessStr);
+    if (hiddenapiFlags != 0u) {
+      fprintf(gOutFile,
+              "      hiddenapi     : 0x%04x (%s)\n",
+              hiddenapiFlags,
+              GetHiddenapiFlagStr(hiddenapiFlags).c_str());
+    }
     if (method.GetCodeItem() == nullptr) {
       fprintf(gOutFile, "      code          : (none)\n");
     } else {
@@ -1330,12 +1346,19 @@
   const char* typeDescriptor = dex_file.StringByTypeIdx(field_id.type_idx_);
   const char* backDescriptor = dex_file.StringByTypeIdx(field_id.class_idx_);
   char* accessStr = createAccessFlagStr(flags, kAccessForField);
+  const uint32_t hiddenapiFlags = field.GetHiddenapiFlags();
 
   if (gOptions.outputFormat == OUTPUT_PLAIN) {
     fprintf(gOutFile, "    #%d              : (in %s)\n", i, backDescriptor);
     fprintf(gOutFile, "      name          : '%s'\n", name);
     fprintf(gOutFile, "      type          : '%s'\n", typeDescriptor);
     fprintf(gOutFile, "      access        : 0x%04x (%s)\n", flags, accessStr);
+    if (hiddenapiFlags != 0u) {
+      fprintf(gOutFile,
+              "      hiddenapi     : 0x%04x (%s)\n",
+              hiddenapiFlags,
+              GetHiddenapiFlagStr(hiddenapiFlags).c_str());
+    }
     if (data != nullptr) {
       fputs("      value         : ", gOutFile);
       dumpEncodedValue(&dex_file, data);
@@ -1488,7 +1511,7 @@
   }
 
   // Fields and methods.
-  ClassAccessor accessor(*pDexFile, pClassDef);
+  ClassAccessor accessor(*pDexFile, pClassDef, /* parse_hiddenapi_class_data= */ true);
 
   // Prepare data for static fields.
   const u1* sData = pDexFile->GetEncodedStaticFieldValuesArray(pClassDef);
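
GetHiddenapiFlagStr() above only post-processes whatever hiddenapi::ApiList::Dump() writes, upper-casing it for the new "hiddenapi" output line. A self-contained sketch of that formatting step; the flag value and the "greylist" text below are placeholders, since the real string comes from the ApiList dump:

    #include <algorithm>
    #include <cctype>
    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Upper-case the human-readable API-list string, as GetHiddenapiFlagStr() does.
    static std::string UppercaseFlagStr(std::string str_api_list) {
      std::transform(str_api_list.begin(), str_api_list.end(), str_api_list.begin(),
                     [](unsigned char c) { return std::toupper(c); });
      return str_api_list;
    }

    int main() {
      const uint32_t hiddenapiFlags = 0x0001u;  // placeholder value
      std::printf("      hiddenapi     : 0x%04x (%s)\n",
                  hiddenapiFlags, UppercaseFlagStr("greylist").c_str());
    }
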
diff --git a/dexlayout/dex_writer.cc b/dexlayout/dex_writer.cc
index 143f5b0..268abe4 100644
--- a/dexlayout/dex_writer.cc
+++ b/dexlayout/dex_writer.cc
@@ -469,6 +469,7 @@
   DCHECK_EQ(header_->HiddenapiClassDatas().Size(), header_->ClassDefs().Size());
 
   stream->AlignTo(SectionAlignment(DexFile::kDexTypeHiddenapiClassData));
+  ProcessOffset(stream, &header_->HiddenapiClassDatas());
   const uint32_t start = stream->Tell();
 
   // Compute offsets for each class def and write the header.
@@ -989,6 +990,15 @@
   }
 }
 
+void DexWriter::ProcessOffset(Stream* stream, dex_ir::CollectionBase* item) {
+  if (compute_offsets_) {
+    item->SetOffset(stream->Tell());
+  } else {
+    // Not computing offsets, just use the one in the item.
+    stream->Seek(item->GetOffset());
+  }
+}
+
 std::unique_ptr<DexContainer> DexWriter::CreateDexContainer() const {
   return std::unique_ptr<DexContainer>(new DexWriter::Container);
 }
diff --git a/dexlayout/dex_writer.h b/dexlayout/dex_writer.h
index 98041d3..62247ec 100644
--- a/dexlayout/dex_writer.h
+++ b/dexlayout/dex_writer.h
@@ -271,6 +271,7 @@
   // Process an offset: if compute_offsets_ is set, record the current offset in the dex ir item;
   // otherwise read the existing offset and use that for writing.
   void ProcessOffset(Stream* stream, dex_ir::Item* item);
+  void ProcessOffset(Stream* stream, dex_ir::CollectionBase* item);
 
   dex_ir::Header* const header_;
   DexLayout* const dex_layout_;
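
The new overload applies the same record-or-seek pattern to a whole collection that the existing dex_ir::Item overload applies to a single item. A toy sketch of the pattern with stand-in Stream and CollectionBase types (not the dexlayout classes):

    #include <cstdint>

    // Stand-ins for dexlayout's Stream and dex_ir::CollectionBase.
    struct Stream {
      uint32_t pos = 0;
      uint32_t Tell() const { return pos; }
      void Seek(uint32_t offset) { pos = offset; }
    };
    struct CollectionBase {
      uint32_t offset = 0;
      void SetOffset(uint32_t off) { offset = off; }
      uint32_t GetOffset() const { return offset; }
    };

    // When offsets are being recomputed, record where the section starts;
    // otherwise seek back to the offset recorded on an earlier pass.
    void ProcessOffset(Stream* stream, CollectionBase* item, bool compute_offsets) {
      if (compute_offsets) {
        item->SetOffset(stream->Tell());
      } else {
        stream->Seek(item->GetOffset());
      }
    }
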
diff --git a/libartbase/base/utils.cc b/libartbase/base/utils.cc
index 58d8575..b989d9e 100644
--- a/libartbase/base/utils.cc
+++ b/libartbase/base/utils.cc
@@ -96,10 +96,10 @@
   // The byte thresholds at which we display amounts.  A byte count is displayed
   // in unit U when kUnitThresholds[U] <= bytes < kUnitThresholds[U+1].
   static const int64_t kUnitThresholds[] = {
-    0,              // B up to...
-    3*1024,         // KB up to...
-    2*1024*1024,    // MB up to...
-    1024*1024*1024  // GB from here.
+    0,       // B up to...
+    10*KB,   // KB up to...
+    10*MB,   // MB up to...
+    10LL*GB  // GB from here.
   };
   static const int64_t kBytesPerUnit[] = { 1, KB, MB, GB };
   static const char* const kUnitStrings[] = { "B", "KB", "MB", "GB" };
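
With the new thresholds a size is only promoted to a unit once it reaches ten of that unit, so 1 GB (1024^3 bytes) now prints as "1024MB". A simplified standalone reimplementation (no negative handling, plain snprintf instead of StringPrintf) that reproduces the values asserted in the updated test below:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>
    #include <string>

    static constexpr int64_t KB = 1024;
    static constexpr int64_t MB = KB * 1024;
    static constexpr int64_t GB = MB * 1024;

    // Simplified PrettySize(): pick the largest unit whose threshold is met.
    std::string PrettySizeSketch(uint64_t byte_count) {
      static const int64_t kUnitThresholds[] = { 0, 10 * KB, 10 * MB, 10LL * GB };
      static const int64_t kBytesPerUnit[] = { 1, KB, MB, GB };
      static const char* const kUnitStrings[] = { "B", "KB", "MB", "GB" };
      int i = 3;
      while (i > 0 && byte_count < static_cast<uint64_t>(kUnitThresholds[i])) {
        --i;
      }
      char buf[32];
      std::snprintf(buf, sizeof(buf), "%" PRIu64 "%s",
                    byte_count / kBytesPerUnit[i], kUnitStrings[i]);
      return buf;
    }

    int main() {
      std::printf("%s %s %s\n",
                  PrettySizeSketch(1 * GB).c_str(),    // "1024MB"
                  PrettySizeSketch(100 * GB).c_str(),  // "100GB"
                  PrettySizeSketch(3 * KB).c_str());   // "3072B"
    }
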
diff --git a/libartbase/base/utils_test.cc b/libartbase/base/utils_test.cc
index c3b61ce..631a225 100644
--- a/libartbase/base/utils_test.cc
+++ b/libartbase/base/utils_test.cc
@@ -23,8 +23,8 @@
 class UtilsTest : public testing::Test {};
 
 TEST_F(UtilsTest, PrettySize) {
-  EXPECT_EQ("1GB", PrettySize(1 * GB));
-  EXPECT_EQ("2GB", PrettySize(2 * GB));
+  EXPECT_EQ("1024MB", PrettySize(1 * GB));
+  EXPECT_EQ("2048MB", PrettySize(2 * GB));
   if (sizeof(size_t) > sizeof(uint32_t)) {
     EXPECT_EQ("100GB", PrettySize(100 * GB));
   }
diff --git a/runtime/Android.bp b/runtime/Android.bp
index b89eb02..a3081e9 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -396,8 +396,10 @@
         "libnativeloader",
         "libbacktrace",
         "liblog",
-        // For atrace, properties, ashmem, set_sched_policy.
+        // For atrace, properties, ashmem.
         "libcutils",
+        // For set_sched_policy.
+        "libprocessgroup",
         // For common macros.
         "libbase",
     ],
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 1014c0e..2de7910 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -36,8 +36,7 @@
     Thread* const self,
     mirror::Object* ref,
     accounting::ContinuousSpaceBitmap* bitmap) {
-  if (kEnableGenerationalConcurrentCopyingCollection
-      && !done_scanning_.load(std::memory_order_acquire)) {
+  if (use_generational_cc_ && !done_scanning_.load(std::memory_order_acquire)) {
     // Everything in the unevac space should be marked for young generation CC,
     // except for large objects.
     DCHECK(!young_gen_ || region_space_bitmap_->Test(ref) || region_space_->IsLargeObject(ref))
@@ -130,7 +129,7 @@
                                                mirror::Object* holder,
                                                MemberOffset offset) {
   // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac);
+  DCHECK(!kNoUnEvac || use_generational_cc_);
   if (from_ref == nullptr) {
     return nullptr;
   }
@@ -172,9 +171,7 @@
         return to_ref;
       }
       case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace:
-        if (kEnableGenerationalConcurrentCopyingCollection
-            && kNoUnEvac
-            && !region_space_->IsLargeObject(from_ref)) {
+        if (kNoUnEvac && use_generational_cc_ && !region_space_->IsLargeObject(from_ref)) {
           if (!kFromGCThread) {
             DCHECK(IsMarkedInUnevacFromSpace(from_ref)) << "Returning unmarked object to mutator";
           }
@@ -245,8 +242,7 @@
   DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
   if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
     return true;
-  } else if (!kEnableGenerationalConcurrentCopyingCollection
-             || done_scanning_.load(std::memory_order_acquire)) {
+  } else if (!use_generational_cc_ || done_scanning_.load(std::memory_order_acquire)) {
     // If the card table scanning is not finished yet, then only read-barrier
     // state should be checked. Checking the mark bitmap is unreliable as there
     // may be some objects - whose corresponding card is dirty - which are
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 8f7b76a..642b12e 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -69,15 +69,19 @@
 
 ConcurrentCopying::ConcurrentCopying(Heap* heap,
                                      bool young_gen,
+                                     bool use_generational_cc,
                                      const std::string& name_prefix,
                                      bool measure_read_barrier_slow_path)
     : GarbageCollector(heap,
                        name_prefix + (name_prefix.empty() ? "" : " ") +
                        "concurrent copying"),
-      region_space_(nullptr), gc_barrier_(new Barrier(0)),
+      region_space_(nullptr),
+      gc_barrier_(new Barrier(0)),
       gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
                                                      kDefaultGcMarkStackSize,
                                                      kDefaultGcMarkStackSize)),
+      use_generational_cc_(use_generational_cc),
+      young_gen_(young_gen),
       rb_mark_bit_stack_(accounting::ObjectStack::Create("rb copying gc mark stack",
                                                          kReadBarrierMarkStackSize,
                                                          kReadBarrierMarkStackSize)),
@@ -100,7 +104,6 @@
       region_space_inter_region_bitmap_(nullptr),
       non_moving_space_inter_region_bitmap_(nullptr),
       reclaimed_bytes_ratio_sum_(0.f),
-      young_gen_(young_gen),
       skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
       measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
       mark_from_read_barrier_measurements_(false),
@@ -119,7 +122,7 @@
       num_bytes_allocated_before_gc_(0) {
   static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                 "The region space size and the read barrier table region size must match");
-  CHECK(kEnableGenerationalConcurrentCopyingCollection || !young_gen_);
+  CHECK(use_generational_cc_ || !young_gen_);
   Thread* self = Thread::Current();
   {
     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -138,7 +141,7 @@
       pooled_mark_stacks_.push_back(mark_stack);
     }
   }
-  if (kEnableGenerationalConcurrentCopyingCollection) {
+  if (use_generational_cc_) {
     // Allocate sweep array free buffer.
     std::string error_msg;
     sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
@@ -194,7 +197,7 @@
     InitializePhase();
     // In case of forced evacuation, all regions are evacuated and hence no
     // need to compute live_bytes.
-    if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_ && !force_evacuate_all_) {
+    if (use_generational_cc_ && !young_gen_ && !force_evacuate_all_) {
       MarkingPhase();
     }
   }
@@ -290,7 +293,7 @@
 }
 
 void ConcurrentCopying::CreateInterRegionRefBitmaps() {
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection);
+  DCHECK(use_generational_cc_);
   DCHECK(region_space_inter_region_bitmap_ == nullptr);
   DCHECK(non_moving_space_inter_region_bitmap_ == nullptr);
   DCHECK(region_space_ != nullptr);
@@ -325,7 +328,7 @@
       CHECK(!space->IsZygoteSpace());
       CHECK(!space->IsImageSpace());
       CHECK(space == region_space_ || space == heap_->non_moving_space_);
-      if (kEnableGenerationalConcurrentCopyingCollection) {
+      if (use_generational_cc_) {
         if (space == region_space_) {
           region_space_bitmap_ = region_space_->GetMarkBitmap();
         } else if (young_gen_ && space->IsContinuousMemMapAllocSpace()) {
@@ -358,7 +361,7 @@
       }
     }
   }
-  if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+  if (use_generational_cc_ && young_gen_) {
     for (const auto& space : GetHeap()->GetDiscontinuousSpaces()) {
       CHECK(space->IsLargeObjectSpace());
       space->AsLargeObjectSpace()->CopyLiveToMarked();
@@ -391,7 +394,7 @@
   GcCause gc_cause = GetCurrentIteration()->GetGcCause();
 
   force_evacuate_all_ = false;
-  if (!kEnableGenerationalConcurrentCopyingCollection || !young_gen_) {
+  if (!use_generational_cc_ || !young_gen_) {
     if (gc_cause == kGcCauseExplicit ||
         gc_cause == kGcCauseCollectorTransition ||
         GetCurrentIteration()->GetClearSoftReferences()) {
@@ -407,7 +410,7 @@
       DCHECK(immune_gray_stack_.empty());
     }
   }
-  if (kEnableGenerationalConcurrentCopyingCollection) {
+  if (use_generational_cc_) {
     done_scanning_.store(false, std::memory_order_release);
   }
   BindBitmaps();
@@ -421,7 +424,7 @@
     }
     LOG(INFO) << "GC end of InitializePhase";
   }
-  if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_) {
+  if (use_generational_cc_ && !young_gen_) {
     region_space_bitmap_->Clear();
   }
   mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal, std::memory_order_relaxed);
@@ -533,7 +536,7 @@
       cc->region_space_->SetFromSpace(
           cc->rb_table_,
           evac_mode,
-          /*clear_live_bytes=*/ !kEnableGenerationalConcurrentCopyingCollection);
+          /*clear_live_bytes=*/ !cc->use_generational_cc_);
     }
     cc->SwapStacks();
     if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
@@ -542,7 +545,7 @@
       cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
     }
     cc->is_marking_ = true;
-    if (kIsDebugBuild && !kEnableGenerationalConcurrentCopyingCollection) {
+    if (kIsDebugBuild && !cc->use_generational_cc_) {
       cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
     }
     if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
@@ -866,7 +869,7 @@
   DCHECK(obj != nullptr);
   DCHECK(immune_spaces_.ContainsObject(obj));
   // Update the fields without graying it or pushing it onto the mark stack.
-  if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+  if (use_generational_cc_ && young_gen_) {
     // Young GC does not care about references to unevac space. It is safe to not gray these as
     // long as scan immune objects happens after scanning the dirty cards.
     Scan<true>(obj);
@@ -1394,7 +1397,7 @@
   if (kUseBakerReadBarrier) {
     gc_grays_immune_objects_ = false;
   }
-  if (kEnableGenerationalConcurrentCopyingCollection) {
+  if (use_generational_cc_) {
     if (kVerboseMode) {
       LOG(INFO) << "GC ScanCardsForSpace";
     }
@@ -2152,7 +2155,7 @@
       if (!kUseBakerReadBarrier || !region_space_bitmap_->Set(to_ref)) {
         // It may be already marked if we accidentally pushed the same object twice due to the racy
         // bitmap read in MarkUnevacFromSpaceRegion.
-        if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+        if (use_generational_cc_ && young_gen_) {
           CHECK(region_space_->IsLargeObject(to_ref));
           region_space_->ZeroLiveBytesForLargeObject(to_ref);
         }
@@ -2169,7 +2172,7 @@
       }
       break;
     case space::RegionSpace::RegionType::kRegionTypeToSpace:
-      if (kEnableGenerationalConcurrentCopyingCollection) {
+      if (use_generational_cc_) {
         // Copied to to-space, set the bit so that the next GC can scan objects.
         region_space_bitmap_->Set(to_ref);
       }
@@ -2214,7 +2217,7 @@
       }
   }
   if (perform_scan) {
-    if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+    if (use_generational_cc_ && young_gen_) {
       Scan<true>(to_ref);
     } else {
       Scan<false>(to_ref);
@@ -2373,7 +2376,7 @@
 }
 
 void ConcurrentCopying::Sweep(bool swap_bitmaps) {
-  if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+  if (use_generational_cc_ && young_gen_) {
     // Only sweep objects on the live stack.
     SweepArray(heap_->GetLiveStack(), /* swap_bitmaps= */ false);
   } else {
@@ -2407,7 +2410,7 @@
 // Copied and adapted from MarkSweep::SweepArray.
 void ConcurrentCopying::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
   // This method is only used when Generational CC collection is enabled.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection);
+  DCHECK(use_generational_cc_);
   CheckEmptyMarkStack();
   TimingLogger::ScopedTiming t("SweepArray", GetTimings());
   Thread* self = Thread::Current();
@@ -2891,8 +2894,7 @@
   DCHECK(!immune_spaces_.ContainsObject(from_ref)) << "ref=" << from_ref;
   if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
     return true;
-  } else if (!kEnableGenerationalConcurrentCopyingCollection
-             || done_scanning_.load(std::memory_order_acquire)) {
+  } else if (!use_generational_cc_ || done_scanning_.load(std::memory_order_acquire)) {
     // Read the comment in IsMarkedInUnevacFromSpace()
     accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
     accounting::LargeObjectBitmap* los_bitmap = nullptr;
@@ -2954,7 +2956,7 @@
   explicit RefFieldsVisitor(ConcurrentCopying* collector, Thread* const thread)
       : collector_(collector), thread_(thread) {
     // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
-    DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac);
+    DCHECK(!kNoUnEvac || collector_->use_generational_cc_);
   }
 
   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
@@ -2991,7 +2993,7 @@
 template <bool kNoUnEvac>
 inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
   // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac);
+  DCHECK(!kNoUnEvac || use_generational_cc_);
   if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
     // Avoid all read barriers during visit references to help performance.
     // Don't do this in transaction mode because we may read the old value of an field which may
@@ -3012,7 +3014,7 @@
 template <bool kNoUnEvac>
 inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
   // Cannot have `kNoUnEvac` when Generational CC collection is disabled.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection || !kNoUnEvac);
+  DCHECK(!kNoUnEvac || use_generational_cc_);
   DCHECK_EQ(Thread::Current(), thread_running_gc_);
   mirror::Object* ref = obj->GetFieldObject<
       mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
@@ -3386,7 +3388,7 @@
       } else {
         DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
         DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
-        if (!kEnableGenerationalConcurrentCopyingCollection || !young_gen_) {
+        if (!use_generational_cc_ || !young_gen_) {
           // Mark it in the live bitmap.
           CHECK(!heap_->non_moving_space_->GetLiveBitmap()->AtomicTestAndSet(to_ref));
         }
@@ -3482,7 +3484,7 @@
     los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
     DCHECK(los_bitmap->HasAddress(ref));
   }
-  if (kEnableGenerationalConcurrentCopyingCollection) {
+  if (use_generational_cc_) {
     // The sticky-bit CC collector is only compatible with Baker-style read barriers.
     DCHECK(kUseBakerReadBarrier);
     // Not done scanning, use AtomicSetReadBarrierPointer.
@@ -3551,11 +3553,11 @@
   }
   // kVerifyNoMissingCardMarks relies on the region space cards not being cleared to avoid false
   // positives.
-  if (!kEnableGenerationalConcurrentCopyingCollection && !kVerifyNoMissingCardMarks) {
+  if (!kVerifyNoMissingCardMarks && !use_generational_cc_) {
     TimingLogger::ScopedTiming split("ClearRegionSpaceCards", GetTimings());
     // We do not currently use the region space cards at all, madvise them away to save ram.
     heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit());
-  } else if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_) {
+  } else if (use_generational_cc_ && !young_gen_) {
     region_space_inter_region_bitmap_->Clear();
     non_moving_space_inter_region_bitmap_->Clear();
   }
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index a41c17a..124713c 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -65,10 +65,11 @@
   // pages.
   static constexpr bool kGrayDirtyImmuneObjects = true;
 
-  explicit ConcurrentCopying(Heap* heap,
-                             bool young_gen,
-                             const std::string& name_prefix = "",
-                             bool measure_read_barrier_slow_path = false);
+  ConcurrentCopying(Heap* heap,
+                    bool young_gen,
+                    bool use_generational_cc,
+                    const std::string& name_prefix = "",
+                    bool measure_read_barrier_slow_path = false);
   ~ConcurrentCopying();
 
   void RunPhases() override
@@ -90,7 +91,7 @@
   void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::heap_bitmap_lock_);
   GcType GetGcType() const override {
-    return (kEnableGenerationalConcurrentCopyingCollection && young_gen_)
+    return (use_generational_cc_ && young_gen_)
         ? kGcTypeSticky
         : kGcTypePartial;
   }
@@ -323,6 +324,19 @@
   std::unique_ptr<Barrier> gc_barrier_;
   std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
 
+  // If true, enable generational collection when using the Concurrent Copying
+  // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
+  // for major collections. Generational CC collection is currently only
+  // compatible with Baker read barriers. Set in Heap constructor.
+  const bool use_generational_cc_;
+
+  // Generational "sticky", only trace through dirty objects in region space.
+  const bool young_gen_;
+
+  // If true, the GC thread is done scanning marked objects on dirty and aged
+  // card (see ConcurrentCopying::CopyingPhase).
+  Atomic<bool> done_scanning_;
+
   // The read-barrier mark-bit stack. Stores object references whose
   // mark bit has been set by ConcurrentCopying::MarkFromReadBarrier,
   // so that this bit can be reset at the end of the collection in
@@ -400,12 +414,6 @@
   // reclaimed_bytes_ratio = reclaimed_bytes/num_allocated_bytes per GC cycle
   float reclaimed_bytes_ratio_sum_;
 
-  // Generational "sticky", only trace through dirty objects in region space.
-  const bool young_gen_;
-  // If true, the GC thread is done scanning marked objects on dirty and aged
-  // card (see ConcurrentCopying::CopyingPhase).
-  Atomic<bool> done_scanning_;
-
   // The skipped blocks are memory blocks/chunks that were copies of
   // objects that were unused due to lost races (cas failures) at
   // object copy/forward pointer install. They are reused.
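
Moving young_gen_ up alongside the new use_generational_cc_ member keeps the declaration order in line with the constructor's initializer list: C++ initializes members in declaration order, and a mismatch between the two orders trips -Wreorder. A minimal illustration (not ART code):

    // Members are initialized in declaration order, regardless of the order in
    // the initializer list; declaring the flags first keeps both orders in sync.
    struct CollectorSketch {
      const bool use_generational_cc_;  // Declared, and therefore initialized, first.
      const bool young_gen_;
      int mark_stack_capacity_;

      CollectorSketch(bool use_generational_cc, bool young_gen)
          : use_generational_cc_(use_generational_cc),
            young_gen_(young_gen),
            mark_stack_capacity_(0) {}
    };
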
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index d699da0..5f62d75 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -107,8 +107,9 @@
 // Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
 // relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
 // threads (lower pauses, use less memory bandwidth).
-static constexpr double kStickyGcThroughputAdjustment =
-    kEnableGenerationalConcurrentCopyingCollection ? 0.5 : 1.0;
+static double GetStickyGcThroughputAdjustment(bool use_generational_cc) {
+  return use_generational_cc ? 0.5 : 1.0;
+}
 // Whether or not we compact the zygote in PreZygoteFork.
 static constexpr bool kCompactZygote = kMovingCollector;
 // How many reserve entries are at the end of the allocation stack, these are only needed if the
@@ -201,6 +202,7 @@
            bool gc_stress_mode,
            bool measure_gc_performance,
            bool use_homogeneous_space_compaction_for_oom,
+           bool use_generational_cc,
            uint64_t min_interval_homogeneous_space_compaction_by_oom,
            bool dump_region_info_before_gc,
            bool dump_region_info_after_gc)
@@ -288,6 +290,7 @@
       pending_collector_transition_(nullptr),
       pending_heap_trim_(nullptr),
       use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
+      use_generational_cc_(use_generational_cc),
       running_collection_is_blocking_(false),
       blocking_gc_count_(0U),
       blocking_gc_time_(0U),
@@ -494,7 +497,8 @@
     MemMap region_space_mem_map =
         space::RegionSpace::CreateMemMap(kRegionSpaceName, capacity_ * 2, request_begin);
     CHECK(region_space_mem_map.IsValid()) << "No region space mem map";
-    region_space_ = space::RegionSpace::Create(kRegionSpaceName, std::move(region_space_mem_map));
+    region_space_ = space::RegionSpace::Create(
+        kRegionSpaceName, std::move(region_space_mem_map), use_generational_cc_);
     AddSpace(region_space_);
   } else if (IsMovingGc(foreground_collector_type_) &&
       foreground_collector_type_ != kCollectorTypeGSS) {
@@ -652,26 +656,28 @@
     if (MayUseCollector(kCollectorTypeCC)) {
       concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
                                                                        /*young_gen=*/false,
+                                                                       use_generational_cc_,
                                                                        "",
                                                                        measure_gc_performance);
-      if (kEnableGenerationalConcurrentCopyingCollection) {
+      if (use_generational_cc_) {
         young_concurrent_copying_collector_ = new collector::ConcurrentCopying(
             this,
             /*young_gen=*/true,
+            use_generational_cc_,
             "young",
             measure_gc_performance);
       }
       active_concurrent_copying_collector_ = concurrent_copying_collector_;
       DCHECK(region_space_ != nullptr);
       concurrent_copying_collector_->SetRegionSpace(region_space_);
-      if (kEnableGenerationalConcurrentCopyingCollection) {
+      if (use_generational_cc_) {
         young_concurrent_copying_collector_->SetRegionSpace(region_space_);
         // At this point, non-moving space should be created.
         DCHECK(non_moving_space_ != nullptr);
         concurrent_copying_collector_->CreateInterRegionRefBitmaps();
       }
       garbage_collectors_.push_back(concurrent_copying_collector_);
-      if (kEnableGenerationalConcurrentCopyingCollection) {
+      if (use_generational_cc_) {
         garbage_collectors_.push_back(young_concurrent_copying_collector_);
       }
     }
@@ -2262,7 +2268,7 @@
     gc_plan_.clear();
     switch (collector_type_) {
       case kCollectorTypeCC: {
-        if (kEnableGenerationalConcurrentCopyingCollection) {
+        if (use_generational_cc_) {
           gc_plan_.push_back(collector::kGcTypeSticky);
         }
         gc_plan_.push_back(collector::kGcTypeFull);
@@ -2739,7 +2745,7 @@
         collector = semi_space_collector_;
         break;
       case kCollectorTypeCC:
-        if (kEnableGenerationalConcurrentCopyingCollection) {
+        if (use_generational_cc_) {
           // TODO: Other threads must do the flip checkpoint before they start poking at
           // active_concurrent_copying_collector_. So we should not see any concurrency here.
           active_concurrent_copying_collector_ = (gc_type == collector::kGcTypeSticky) ?
@@ -3637,19 +3643,21 @@
     collector::GcType non_sticky_gc_type = NonStickyGcType();
     // Find what the next non sticky collector will be.
     collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
-    if (kEnableGenerationalConcurrentCopyingCollection) {
+    if (use_generational_cc_) {
       if (non_sticky_collector == nullptr) {
         non_sticky_collector = FindCollectorByGcType(collector::kGcTypePartial);
       }
       CHECK(non_sticky_collector != nullptr);
     }
+    double sticky_gc_throughput_adjustment = GetStickyGcThroughputAdjustment(use_generational_cc_);
+
     // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
     // do another sticky collection next.
     // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
     // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
     // if the sticky GC throughput always remained >= the full/partial throughput.
     size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
-    if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
+    if (current_gc_iteration_.GetEstimatedThroughput() * sticky_gc_throughput_adjustment >=
         non_sticky_collector->GetEstimatedMeanThroughput() &&
         non_sticky_collector->NumberOfIterations() > 0 &&
         bytes_allocated <= target_footprint) {
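
The compile-time constant kStickyGcThroughputAdjustment becomes a function of the runtime flag, but the decision it feeds is unchanged: keep scheduling sticky (minor) collections while their adjusted throughput beats the non-sticky collector's mean and the footprint limit has not been crossed. A condensed sketch, with the parameters standing in for the Heap/GarbageCollector state used above:

    #include <cstddef>

    static double GetStickyGcThroughputAdjustment(bool use_generational_cc) {
      return use_generational_cc ? 0.5 : 1.0;
    }

    // True if the next collection should again be a sticky (minor) one.
    bool KeepDoingStickyGc(double sticky_throughput,
                           double non_sticky_mean_throughput,
                           size_t non_sticky_iterations,
                           size_t bytes_allocated,
                           size_t target_footprint,
                           bool use_generational_cc) {
      const double adjustment = GetStickyGcThroughputAdjustment(use_generational_cc);
      return sticky_throughput * adjustment >= non_sticky_mean_throughput &&
             non_sticky_iterations > 0 &&
             bytes_allocated <= target_footprint;
    }
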
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 52c9386..4c5d896 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -212,6 +212,7 @@
        bool gc_stress_mode,
        bool measure_gc_performance,
        bool use_homogeneous_space_compaction,
+       bool use_generational_cc,
        uint64_t min_interval_homogeneous_space_compaction_by_oom,
        bool dump_region_info_before_gc,
        bool dump_region_info_after_gc);
@@ -532,6 +533,10 @@
     return num_bytes_allocated_.load(std::memory_order_relaxed);
   }
 
+  bool GetUseGenerationalCC() const {
+    return use_generational_cc_;
+  }
+
   // Returns the number of objects currently allocated.
   size_t GetObjectsAllocated() const
       REQUIRES(!Locks::heap_bitmap_lock_);
@@ -768,7 +773,7 @@
 
   // Returns the active concurrent copying collector.
   collector::ConcurrentCopying* ConcurrentCopyingCollector() {
-    if (kEnableGenerationalConcurrentCopyingCollection) {
+    if (use_generational_cc_) {
       DCHECK((active_concurrent_copying_collector_ == concurrent_copying_collector_) ||
              (active_concurrent_copying_collector_ == young_concurrent_copying_collector_));
     } else {
@@ -1477,6 +1482,11 @@
   // Whether or not we use homogeneous space compaction to avoid OOM errors.
   bool use_homogeneous_space_compaction_for_oom_;
 
+  // If true, enable generational collection when using the Concurrent Copying
+  // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
+  // for major collections. Set in Heap constructor.
+  const bool use_generational_cc_;
+
   // True if the currently running collection has made some thread wait.
   bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_);
   // The number of blocking GC runs.
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index a5ba1dc..5179702 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -93,11 +93,12 @@
   return mem_map;
 }
 
-RegionSpace* RegionSpace::Create(const std::string& name, MemMap&& mem_map) {
-  return new RegionSpace(name, std::move(mem_map));
+RegionSpace* RegionSpace::Create(
+    const std::string& name, MemMap&& mem_map, bool use_generational_cc) {
+  return new RegionSpace(name, std::move(mem_map), use_generational_cc);
 }
 
-RegionSpace::RegionSpace(const std::string& name, MemMap&& mem_map)
+RegionSpace::RegionSpace(const std::string& name, MemMap&& mem_map, bool use_generational_cc)
     : ContinuousMemMapAllocSpace(name,
                                  std::move(mem_map),
                                  mem_map.Begin(),
@@ -105,6 +106,7 @@
                                  mem_map.End(),
                                  kGcRetentionPolicyAlwaysCollect),
       region_lock_("Region lock", kRegionSpaceRegionLock),
+      use_generational_cc_(use_generational_cc),
       time_(1U),
       num_regions_(mem_map_.Size() / kRegionSize),
       num_non_free_regions_(0U),
@@ -179,9 +181,44 @@
   return num_regions * kRegionSize;
 }
 
+void RegionSpace::Region::SetAsUnevacFromSpace(bool clear_live_bytes) {
+  // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections.
+  DCHECK(GetUseGenerationalCC() || clear_live_bytes);
+  DCHECK(!IsFree() && IsInToSpace());
+  type_ = RegionType::kRegionTypeUnevacFromSpace;
+  if (IsNewlyAllocated()) {
+    // A newly allocated region set as unevac from-space must be
+    // a large or large tail region.
+    DCHECK(IsLarge() || IsLargeTail()) << static_cast<uint>(state_);
+    // Always clear the live bytes of a newly allocated (large or
+    // large tail) region.
+    clear_live_bytes = true;
+    // Clear the "newly allocated" status here, as we do not want the
+    // GC to see it when encountering (and processing) references in the
+    // from-space.
+    //
+    // Invariant: There should be no newly-allocated region in the
+    // from-space (when the from-space exists, which is between the calls
+    // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace).
+    is_newly_allocated_ = false;
+  }
+  if (clear_live_bytes) {
+    // Reset the live bytes, as we have made a non-evacuation
+    // decision (possibly based on the percentage of live bytes).
+    live_bytes_ = 0;
+  }
+}
+
+bool RegionSpace::Region::GetUseGenerationalCC() {
+  // We are retrieving the info from Heap, instead of the cached version in
+  // RegionSpace, because accessing the Heap from a Region object is easier
+  // than accessing the RegionSpace.
+  return art::Runtime::Current()->GetHeap()->GetUseGenerationalCC();
+}
+
 inline bool RegionSpace::Region::ShouldBeEvacuated(EvacMode evac_mode) {
   // Evacuation mode `kEvacModeNewlyAllocated` is only used during sticky-bit CC collections.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection || (evac_mode != kEvacModeNewlyAllocated));
+  DCHECK(GetUseGenerationalCC() || (evac_mode != kEvacModeNewlyAllocated));
   DCHECK((IsAllocated() || IsLarge()) && IsInToSpace());
   // The region should be evacuated if:
   // - the evacuation is forced (`evac_mode == kEvacModeForceAll`); or
@@ -253,7 +290,7 @@
 
 void RegionSpace::ZeroLiveBytesForLargeObject(mirror::Object* obj) {
   // This method is only used when Generational CC collection is enabled.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection);
+  DCHECK(use_generational_cc_);
 
   // This code uses a logic similar to the one used in RegionSpace::FreeLarge
   // to traverse the regions supporting `obj`.
@@ -292,7 +329,7 @@
                                EvacMode evac_mode,
                                bool clear_live_bytes) {
   // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections.
-  DCHECK(kEnableGenerationalConcurrentCopyingCollection || clear_live_bytes);
+  DCHECK(use_generational_cc_ || clear_live_bytes);
   ++time_;
   if (kUseTableLookupReadBarrier) {
     DCHECK(rb_table->IsAllCleared());
@@ -336,9 +373,7 @@
           // mark-bit otherwise the live_bytes will not be updated in
           // ConcurrentCopying::ProcessMarkStackRef() and hence will break the
           // logic.
-          if (kEnableGenerationalConcurrentCopyingCollection
-              && !should_evacuate
-              && is_newly_allocated) {
+          if (use_generational_cc_ && !should_evacuate && is_newly_allocated) {
             GetMarkBitmap()->Clear(reinterpret_cast<mirror::Object*>(r->Begin()));
           }
           num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
@@ -506,7 +541,7 @@
         // bitmap. But they cannot do so before we know the next GC cycle will
         // be a major one, so this operation happens at the beginning of such a
         // major collection, before marking starts.
-        if (!kEnableGenerationalConcurrentCopyingCollection) {
+        if (!use_generational_cc_) {
           GetLiveBitmap()->ClearRange(
               reinterpret_cast<mirror::Object*>(r->Begin()),
               reinterpret_cast<mirror::Object*>(r->Begin() + regions_to_clear_bitmap * kRegionSize));
@@ -520,8 +555,7 @@
         // `r` when it has an undefined live bytes count (i.e. when
         // `r->LiveBytes() == static_cast<size_t>(-1)`) with
         // Generational CC.
-        if (!kEnableGenerationalConcurrentCopyingCollection ||
-            (r->LiveBytes() != static_cast<size_t>(-1))) {
+        if (!use_generational_cc_ || (r->LiveBytes() != static_cast<size_t>(-1))) {
           // Only some allocated bytes are live in this unevac region.
           // This should only happen for an allocated non-large region.
           DCHECK(r->IsAllocated()) << r->State();
@@ -918,7 +952,7 @@
     Region* r = &regions_[region_index];
     if (r->IsFree()) {
       r->Unfree(this, time_);
-      if (kEnableGenerationalConcurrentCopyingCollection) {
+      if (use_generational_cc_) {
         // TODO: Add an explanation for this assertion.
         DCHECK(!for_evac || !r->is_newly_allocated_);
       }
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index a6f501b..d8b54e2 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -59,7 +59,7 @@
   // guaranteed to be granted, if it is required, the caller should call Begin on the returned
   // space to confirm the request was granted.
   static MemMap CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
-  static RegionSpace* Create(const std::string& name, MemMap&& mem_map);
+  static RegionSpace* Create(const std::string& name, MemMap&& mem_map, bool use_generational_cc);
 
   // Allocate `num_bytes`, returns null if the space is full.
   mirror::Object* Alloc(Thread* self,
@@ -368,7 +368,7 @@
   }
 
  private:
-  RegionSpace(const std::string& name, MemMap&& mem_map);
+  RegionSpace(const std::string& name, MemMap&& mem_map, bool use_generational_cc);
 
   class Region {
    public:
@@ -523,33 +523,7 @@
     // collection, RegionSpace::ClearFromSpace will preserve the space
     // used by this region, and tag it as to-space (see
     // Region::SetUnevacFromSpaceAsToSpace below).
-    void SetAsUnevacFromSpace(bool clear_live_bytes) {
-      // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections.
-      DCHECK(kEnableGenerationalConcurrentCopyingCollection || clear_live_bytes);
-      DCHECK(!IsFree() && IsInToSpace());
-      type_ = RegionType::kRegionTypeUnevacFromSpace;
-      if (IsNewlyAllocated()) {
-        // A newly allocated region set as unevac from-space must be
-        // a large or large tail region.
-        DCHECK(IsLarge() || IsLargeTail()) << static_cast<uint>(state_);
-        // Always clear the live bytes of a newly allocated (large or
-        // large tail) region.
-        clear_live_bytes = true;
-        // Clear the "newly allocated" status here, as we do not want the
-        // GC to see it when encountering (and processing) references in the
-        // from-space.
-        //
-        // Invariant: There should be no newly-allocated region in the
-        // from-space (when the from-space exists, which is between the calls
-        // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace).
-        is_newly_allocated_ = false;
-      }
-      if (clear_live_bytes) {
-        // Reset the live bytes, as we have made a non-evacuation
-        // decision (possibly based on the percentage of live bytes).
-        live_bytes_ = 0;
-      }
-    }
+    void SetAsUnevacFromSpace(bool clear_live_bytes);
 
     // Set this region as to-space. Used by RegionSpace::ClearFromSpace.
     // This is only valid if it is currently an unevac from-space region.
@@ -562,7 +536,7 @@
     ALWAYS_INLINE bool ShouldBeEvacuated(EvacMode evac_mode);
 
     void AddLiveBytes(size_t live_bytes) {
-      DCHECK(kEnableGenerationalConcurrentCopyingCollection || IsInUnevacFromSpace());
+      DCHECK(GetUseGenerationalCC() || IsInUnevacFromSpace());
       DCHECK(!IsLargeTail());
       DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
       // For large allocations, we always consider all bytes in the regions live.
@@ -616,6 +590,8 @@
     uint64_t GetLongestConsecutiveFreeBytes() const;
 
    private:
+    static bool GetUseGenerationalCC();
+
     size_t idx_;                        // The region's index in the region space.
     size_t live_bytes_;                 // The live bytes. Used to compute the live percent.
     uint8_t* begin_;                    // The begin address of the region.
@@ -738,6 +714,8 @@
 
   Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
+  // Cached version of Heap::use_generational_cc_.
+  const bool use_generational_cc_;
   uint32_t time_;                  // The time as the number of collections since the startup.
   size_t num_regions_;             // The number of regions in this space.
   // The number of non-free regions in this space.
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 4a04259..6fd691f 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -703,6 +703,7 @@
   UsageMessage(stream, "  -Xgc:[no]postsweepingverify_rosalloc\n");
   UsageMessage(stream, "  -Xgc:[no]postverify_rosalloc\n");
   UsageMessage(stream, "  -Xgc:[no]presweepingverify\n");
+  UsageMessage(stream, "  -Xgc:[no]generational_cc\n");
   UsageMessage(stream, "  -Ximage:filename\n");
   UsageMessage(stream, "  -Xbootclasspath-locations:bootclasspath\n"
                        "     (override the dex locations of the -Xbootclasspath files)\n");
diff --git a/runtime/parsed_options_test.cc b/runtime/parsed_options_test.cc
index cbb7b82..77d2316 100644
--- a/runtime/parsed_options_test.cc
+++ b/runtime/parsed_options_test.cc
@@ -130,6 +130,23 @@
   EXPECT_EQ(gc::kCollectorTypeSS, xgc.collector_type_);
 }
 
+TEST_F(ParsedOptionsTest, ParsedOptionsGenerationalCC) {
+  RuntimeOptions options;
+  options.push_back(std::make_pair("-Xgc:generational_cc", nullptr));
+
+  RuntimeArgumentMap map;
+  bool parsed = ParsedOptions::Parse(options, false, &map);
+  ASSERT_TRUE(parsed);
+  ASSERT_NE(0u, map.Size());
+
+  using Opt = RuntimeArgumentMap;
+
+  EXPECT_TRUE(map.Exists(Opt::GcOption));
+
+  XGcOption xgc = map.GetOrDefault(Opt::GcOption);
+  ASSERT_TRUE(xgc.generational_cc);
+}
+
 TEST_F(ParsedOptionsTest, ParsedOptionsInstructionSet) {
   using Opt = RuntimeArgumentMap;
 
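
A companion check for the "no" form could sit next to the new test above; this is a hypothetical sketch, not part of this change, but it uses only the same ParsedOptions/RuntimeArgumentMap calls:

    // Hypothetical companion test: the "no" prefix should clear the flag.
    TEST_F(ParsedOptionsTest, ParsedOptionsNoGenerationalCC) {
      RuntimeOptions options;
      options.push_back(std::make_pair("-Xgc:nogenerational_cc", nullptr));

      RuntimeArgumentMap map;
      bool parsed = ParsedOptions::Parse(options, false, &map);
      ASSERT_TRUE(parsed);
      ASSERT_NE(0u, map.Size());

      using Opt = RuntimeArgumentMap;
      EXPECT_TRUE(map.Exists(Opt::GcOption));

      XGcOption xgc = map.GetOrDefault(Opt::GcOption);
      ASSERT_FALSE(xgc.generational_cc);
    }
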
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 4853187..a86bc94 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1240,6 +1240,10 @@
             kExtraDefaultHeapGrowthMultiplier;
   }
   XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption);
+
+  // Generational CC collection is currently only compatible with Baker read barriers.
+  bool use_generational_cc = kUseBakerReadBarrier && xgc_option.generational_cc;
+
   heap_ = new gc::Heap(runtime_options.GetOrDefault(Opt::MemoryInitialSize),
                        runtime_options.GetOrDefault(Opt::HeapGrowthLimit),
                        runtime_options.GetOrDefault(Opt::HeapMinFree),
@@ -1274,6 +1278,7 @@
                        xgc_option.gcstress_,
                        xgc_option.measure_,
                        runtime_options.GetOrDefault(Opt::EnableHSpaceCompactForOOM),
+                       use_generational_cc,
                        runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs),
                        runtime_options.Exists(Opt::DumpRegionInfoBeforeGC),
                        runtime_options.Exists(Opt::DumpRegionInfoAfterGC));
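
The effective setting handed to gc::Heap combines the parsed -Xgc option with the read-barrier configuration: the request is only honored when Baker read barriers are compiled in. A small sketch of that derivation; both constants here are illustrative build-time values rather than the real ART configuration:

    #include <cstdio>

    static constexpr bool kUseBakerReadBarrier = true;            // illustrative
    static constexpr bool kEnableGenerationalCCByDefault = true;  // illustrative

    // xgc_generational_cc is the parsed -Xgc:[no]generational_cc value,
    // which defaults to kEnableGenerationalCCByDefault.
    bool EffectiveUseGenerationalCC(bool xgc_generational_cc) {
      return kUseBakerReadBarrier && xgc_generational_cc;
    }

    int main() {
      std::printf("default: %d\n", EffectiveUseGenerationalCC(kEnableGenerationalCCByDefault));
      std::printf("-Xgc:nogenerational_cc: %d\n", EffectiveUseGenerationalCC(false));
    }
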
diff --git a/runtime/runtime_globals.h b/runtime/runtime_globals.h
index 793291a..81d350b 100644
--- a/runtime/runtime_globals.h
+++ b/runtime/runtime_globals.h
@@ -40,16 +40,24 @@
 static constexpr bool kMarkCompactSupport = false && kMovingCollector;
 // True if we allow moving classes.
 static constexpr bool kMovingClasses = !kMarkCompactSupport;
-// If true, enable generational collection when using the Concurrent Copying
-// (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
-// for major collections.
+// When using the Concurrent Copying (CC) collector, if
+// `ART_USE_GENERATIONAL_CC` is true, enable generational collection by default,
+// i.e. use sticky-bit CC for minor collections and (full) CC for major
+// collections.
+// This default value can be overridden with the runtime option
+// `-Xgc:[no]generational_cc`.
 //
-// Generational CC collection is currently only compatible with Baker read
-// barriers.
-#if defined(ART_USE_GENERATIONAL_CC) && defined(ART_READ_BARRIER_TYPE_IS_BAKER)
-static constexpr bool kEnableGenerationalConcurrentCopyingCollection = true;
+// TODO(b/67628039): Consider either:
+// - renaming this to a better descriptive name (e.g.
+//   `ART_USE_GENERATIONAL_CC_BY_DEFAULT`); or
+// - removing `ART_USE_GENERATIONAL_CC` and having a fixed default value.
+// Any of these changes will require adjusting users of this preprocessor
+// directive and the corresponding build system environment variable (e.g. in
+// ART's continuous testing).
+#ifdef ART_USE_GENERATIONAL_CC
+static constexpr bool kEnableGenerationalCCByDefault = true;
 #else
-static constexpr bool kEnableGenerationalConcurrentCopyingCollection = false;
+static constexpr bool kEnableGenerationalCCByDefault = false;
 #endif
 
 // If true, enable the tlab allocator by default.
diff --git a/runtime/thread_android.cc b/runtime/thread_android.cc
index 8ff6c52..24864f9 100644
--- a/runtime/thread_android.cc
+++ b/runtime/thread_android.cc
@@ -21,7 +21,7 @@
 #include <sys/resource.h>
 #include <sys/time.h>
 
-#include <cutils/sched_policy.h>
+#include <processgroup/sched_policy.h>
 #include <utils/threads.h>
 
 #include "base/macros.h"
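
Only the header's home changes here; set_sched_policy() itself keeps the same signature and now comes from libprocessgroup, which is why runtime/Android.bp above gains that dependency. An Android-only sketch of the unchanged call pattern (it will not build outside a platform tree):

    #include <processgroup/sched_policy.h>

    // Move the calling thread (tid 0) to the background scheduling policy.
    // set_sched_policy() returns 0 on success; error handling is omitted here.
    void MoveSelfToBackground() {
      set_sched_policy(/* tid= */ 0, SP_BACKGROUND);
    }
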
diff --git a/test/202-thread-oome/src/Main.java b/test/202-thread-oome/src/Main.java
index f7df93b..b5c0ce6 100644
--- a/test/202-thread-oome/src/Main.java
+++ b/test/202-thread-oome/src/Main.java
@@ -21,7 +21,7 @@
       t.start();
     } catch (OutOfMemoryError expected) {
       // TODO: fix bionic bug https://b/6702535 so we can check the full detail message.
-      if (!expected.getMessage().startsWith("pthread_create (3GB stack) failed: ")) {
+      if (!expected.getMessage().startsWith("pthread_create (3073MB stack) failed: ")) {
         throw new AssertionError(expected);
       }
     }
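
The expected message changes because PrettySize() now keeps anything below 10 GB in megabytes, and the runtime pads the requested 3 GB stack (Thread::FixStackSize adds roughly 1 MB of reserved space) before formatting it. A sketch of the arithmetic behind "3073MB"; the exact padding is an assumption of this sketch:

    #include <cstdio>

    int main() {
      const long requested_mb = 3 * 1024;  // the 3 GB stack requested by the test
      const long padding_mb = 1;           // assumed runtime reservation (~1 MB)
      std::printf("pthread_create (%ldMB stack) failed: ...\n",
                  requested_mb + padding_mb);  // 3073MB
    }
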
diff --git a/test/674-hiddenapi/hiddenapi.cc b/test/674-hiddenapi/hiddenapi.cc
index 8dfb402..3dc2789 100644
--- a/test/674-hiddenapi/hiddenapi.cc
+++ b/test/674-hiddenapi/hiddenapi.cc
@@ -82,6 +82,14 @@
   return int_index;
 }
 
+extern "C" JNIEXPORT void JNICALL Java_Main_setWhitelistAll(JNIEnv*, jclass, jboolean value) {
+  std::vector<std::string> exemptions;
+  if (value != JNI_FALSE) {
+    exemptions.push_back("L");
+  }
+  Runtime::Current()->SetHiddenApiExemptions(exemptions);
+}
+
 static jobject NewInstance(JNIEnv* env, jclass klass) {
   jmethodID constructor = env->GetMethodID(klass, "<init>", "()V");
   if (constructor == nullptr) {
diff --git a/test/674-hiddenapi/src-art/Main.java b/test/674-hiddenapi/src-art/Main.java
index 190f4ac..d6a8c6d 100644
--- a/test/674-hiddenapi/src-art/Main.java
+++ b/test/674-hiddenapi/src-art/Main.java
@@ -119,9 +119,8 @@
     // loaded by their parent class loader.
     String nativeLibCopy = createNativeLibCopy(parentDomain, childDomain, whitelistAllApis);
 
-    if (whitelistAllApis) {
-      VMRuntime.getRuntime().setHiddenApiExemptions(new String[]{"L"});
-    }
+    // Set exemptions to "L" (matches all classes) if we are testing whitelisting.
+    setWhitelistAll(whitelistAllApis);
 
     // Invoke ChildClass.runTest
     Class<?> childClass = Class.forName("ChildClass", true, childLoader);
@@ -129,8 +128,6 @@
         "runTest", String.class, Integer.TYPE, Integer.TYPE, Boolean.TYPE);
     runTestMethod.invoke(null, nativeLibCopy, parentDomain.ordinal(), childDomain.ordinal(),
         whitelistAllApis);
-
-    VMRuntime.getRuntime().setHiddenApiExemptions(new String[0]);
   }
 
   // Routine which tries to figure out the absolute path of our native library.
@@ -203,4 +200,5 @@
   private static native int appendToBootClassLoader(String dexPath, boolean isCorePlatform);
   private static native void setDexDomain(int index, boolean isCorePlatform);
   private static native void init();
+  private static native void setWhitelistAll(boolean value);
 }
diff --git a/tools/run-gtests.sh b/tools/run-gtests.sh
index bf29023..8585589 100755
--- a/tools/run-gtests.sh
+++ b/tools/run-gtests.sh
@@ -35,7 +35,7 @@
   ${ADB} shell "chroot $ART_TEST_CHROOT env LD_LIBRARY_PATH= ANDROID_ROOT='/system' ANDROID_RUNTIME_ROOT=/system $i" || fail $i
 done
 
-if [ -n $failing_tests ]; then
+if [ -n "$failing_tests" ]; then
   for i in "${failing_tests[@]}"; do
     echo "Failed test: $i"
   done