Merge "Add apache-xml to boot class path for tests."
diff --git a/build/apex/Android.bp b/build/apex/Android.bp
index f2e12f6..0ec0a15 100644
--- a/build/apex/Android.bp
+++ b/build/apex/Android.bp
@@ -19,6 +19,11 @@
     "libopenjdkjvmti",
     "libadbconnection",
 ]
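+
+// Bionic libraries bundled into the Android Runtime APEX.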
+bionic_native_shared_libs = [
+    "libc",
+    "libm",
+    "libdl",
+]
 // - Fake library that avoids namespace issues and gives some warnings for nosy apps.
 art_runtime_fake_native_shared_libs = [
      // FIXME: Does not work as-is, because `libart_fake` is defined in libart_fake/Android.mk,
@@ -102,7 +107,8 @@
     compile_multilib: "both",
     manifest: "manifest.json",
     native_shared_libs: art_runtime_base_native_shared_libs
-        + art_runtime_fake_native_shared_libs,
+        + art_runtime_fake_native_shared_libs
+        + bionic_native_shared_libs,
     multilib: {
         both: {
             // TODO: Add logic to create a `dalvikvm` symlink to `dalvikvm32` or `dalvikvm64`
@@ -130,7 +136,8 @@
     manifest: "manifest.json",
     native_shared_libs: art_runtime_base_native_shared_libs
         + art_runtime_fake_native_shared_libs
-        + art_runtime_debug_native_shared_libs,
+        + art_runtime_debug_native_shared_libs
+        + bionic_native_shared_libs,
     multilib: {
         both: {
             // TODO: Add logic to create a `dalvikvm` symlink to `dalvikvm32` or `dalvikvm64`
diff --git a/build/apex/runtests.sh b/build/apex/runtests.sh
index 924c44b..b5e8d8b 100755
--- a/build/apex/runtests.sh
+++ b/build/apex/runtests.sh
@@ -33,11 +33,7 @@
 
    sudo apt-get install libguestfs-tools
 "
-which tree > /dev/null || die "This script requires the 'tree' tool.
-On Debian-based systems, this can be installed with:
 
-   sudo apt-get install tree
-"
 [[ -n "$ANDROID_PRODUCT_OUT" ]] \
   || die "You need to source and lunch before you can use this script."
 
@@ -46,6 +42,7 @@
 
 build_apex_p=true
 list_image_files_p=false
+print_image_tree_p=false
 
 function usage {
   cat <<EOF
@@ -53,7 +50,8 @@
 Build (optional) and run tests on Android Runtime APEX package (on host).
 
   -s, --skip-build    skip the build step
-  -l, --list-files    list the contents of the ext4 image
+  -l, --list-files    list the contents of the ext4 image using 'find'
+  -t, --print-tree    list the contents of the ext4 image using 'tree'
   -h, --help          display this help and exit
 
 EOF
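+
+# For example, this (hypothetical) invocation builds, runs the tests, and
+# displays both the 'find' and 'tree' listings of each APEX image, assuming
+# the script is run from the top of the Android tree:
+#
+#   art/build/apex/runtests.sh -l -t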
@@ -64,6 +62,7 @@
   case "$1" in
     (-s|--skip-build) build_apex_p=false;;
     (-l|--list-files) list_image_files_p=true;;
+    (-t|--print-tree) print_image_tree_p=true;;
     (-h|--help) usage;;
     (*) die "Unknown option: '$1'
 Try '$0 --help' for more information.";;
@@ -71,6 +70,14 @@
   shift
 done
 
+if $print_image_tree_p; then
+  which tree >/dev/null || die "This script requires the 'tree' tool.
+On Debian-based systems, this can be installed with:
+
+   sudo apt-get install tree
+"
+fi
+
 
 # build_apex APEX_MODULE
 # ----------------------
@@ -82,6 +89,24 @@
   fi
 }
 
+# maybe_list_apex_contents MOUNT_POINT
+# ------------------------------------
+# If any of the listing/printing options were used, honor them and display the
+# contents of the APEX payload at MOUNT_POINT.
+function maybe_list_apex_contents {
+  local mount_point=$1
+
+  # List the contents of the mounted image using `find` (optional).
+  if $list_image_files_p; then
+    say "Listing image files" && find "$mount_point"
+  fi
+
+  # List the contents of the mounted image using `tree` (optional).
+  if $print_image_tree_p; then
+    say "Printing image tree" && ls -ld "$mount_point" && tree -aph --du "$mount_point"
+  fi
+}
+
 function check_binary {
   [[ -x "$mount_point/bin/$1" ]] || die "Cannot find binary '$1' in mounted image"
 }
@@ -218,10 +243,6 @@
 
   # Mount the image from the Android Runtime APEX.
   guestmount -a "$image_file" -m "$partition" "$mount_point"
-
-  # List the contents of the mounted image (optional).
-  $list_image_files_p \
-    && say "Listing image files" && ls -ld "$mount_point" && tree -ap "$mount_point"
 }
 
 # Testing release APEX package (com.android.runtime.release).
@@ -229,6 +250,8 @@
 
 apex_module="com.android.runtime.release"
 
+say "Processing APEX package $apex_module"
+
 work_dir=$(mktemp -d)
 mount_point="$work_dir/image"
 
@@ -240,6 +263,9 @@
 # Set up APEX package.
 setup_target_apex "$apex_module" "$mount_point"
 
+# List the contents of the APEX image (optional).
+maybe_list_apex_contents "$mount_point"
+
 # Run tests on APEX package.
 say "Checking APEX package $apex_module"
 check_release_contents
@@ -249,12 +275,15 @@
 cleanup_target
 
 say "$apex_module tests passed"
+echo
 
 # Testing debug APEX package (com.android.runtime.debug).
 # -------------------------------------------------------
 
 apex_module="com.android.runtime.debug"
 
+say "Processing APEX package $apex_module"
+
 work_dir=$(mktemp -d)
 mount_point="$work_dir/image"
 
@@ -266,6 +295,9 @@
 # Set up APEX package.
 setup_target_apex "$apex_module" "$mount_point"
 
+# List the contents of the APEX image (optional).
+maybe_list_apex_contents "$mount_point"
+
 # Run tests on APEX package.
 say "Checking APEX package $apex_module"
 check_release_contents
@@ -279,6 +311,7 @@
 cleanup_target
 
 say "$apex_module tests passed"
+echo
 
 
 # Testing host APEX package (com.android.runtime.host).
@@ -319,6 +352,8 @@
 
 apex_module="com.android.runtime.host"
 
+say "Processing APEX package $apex_module"
+
 work_dir=$(mktemp -d)
 mount_point="$work_dir/zip"
 
@@ -330,6 +365,9 @@
 # Set up APEX package.
 setup_host_apex "$apex_module" "$mount_point"
 
+# List the contents of the APEX image (optional).
+maybe_list_apex_contents "$mount_point"
+
 # Run tests on APEX package.
 say "Checking APEX package $apex_module"
 check_release_contents
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 18f7105..0039be0 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1742,6 +1742,9 @@
     if (&cls->GetDexFile() == &accessor.GetDexFile()) {
       ObjectLock<mirror::Class> lock(self, cls);
       mirror::Class::SetStatus(cls, status, self);
+      if (status >= ClassStatus::kVerified) {
+        cls->SetVerificationAttempted();
+      }
     }
   } else {
     DCHECK(self->IsExceptionPending());
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f6a3e32..3b92e2c 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -212,6 +212,22 @@
   self->AssertPendingException();
 }
 
+// Ensures that methods have the kAccSkipAccessChecks bit set. We use the
+// kAccVerificationAttempted bit on the class access flags to determine whether this has been done
+// before.
+template <bool kNeedsVerified = false>
+static void EnsureSkipAccessChecksMethods(Handle<mirror::Class> klass, PointerSize pointer_size)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (kNeedsVerified) {
+    // To avoid failing access-flag checks, push a minimal (verified) state first.
+    mirror::Class::SetStatus(klass, ClassStatus::kVerified, Thread::Current());
+  }
+  if (!klass->WasVerificationAttempted()) {
+    klass->SetSkipAccessChecksFlagOnAllMethods(pointer_size);
+    klass->SetVerificationAttempted();
+  }
+}
+
 void ClassLinker::ThrowEarlierClassFailure(ObjPtr<mirror::Class> c, bool wrap_in_no_class_def) {
   // The class failed to initialize on a previous attempt, so we want to throw
   // a NoClassDefFoundError (v2 2.17.5).  The exception to this rule is if we
@@ -3950,6 +3966,7 @@
   h_class->SetAccessFlags(kAccPublic | kAccFinal | kAccAbstract);
   h_class->SetPrimitiveType(type);
   h_class->SetIfTable(GetClassRoot<mirror::Object>(this)->GetIfTable());
+  EnsureSkipAccessChecksMethods</* kNeedsVerified= */ true>(h_class, image_pointer_size_);
   mirror::Class::SetStatus(h_class, ClassStatus::kInitialized, self);
   const char* descriptor = Primitive::Descriptor(type);
   ObjPtr<mirror::Class> existing = InsertClass(descriptor,
@@ -4097,6 +4114,7 @@
   new_class->PopulateEmbeddedVTable(image_pointer_size_);
   ImTable* object_imt = java_lang_Object->GetImt(image_pointer_size_);
   new_class->SetImt(object_imt, image_pointer_size_);
+  EnsureSkipAccessChecksMethods</* kNeedsVerified= */ true>(new_class, image_pointer_size_);
   mirror::Class::SetStatus(new_class, ClassStatus::kInitialized, self);
   // don't need to set new_class->SetObjectSize(..)
   // because Object::SizeOf delegates to Array::SizeOf
@@ -4127,6 +4145,8 @@
   // and remove "interface".
   access_flags |= kAccAbstract | kAccFinal;
   access_flags &= ~kAccInterface;
+  // Arrays are access-checks-clean and preverified.
+  access_flags |= kAccVerificationAttempted;
 
   new_class->SetAccessFlags(access_flags);
 
@@ -4361,17 +4381,6 @@
   return false;
 }
 
-// Ensures that methods have the kAccSkipAccessChecks bit set. We use the
-// kAccVerificationAttempted bit on the class access flags to determine whether this has been done
-// before.
-static void EnsureSkipAccessChecksMethods(Handle<mirror::Class> klass, PointerSize pointer_size)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  if (!klass->WasVerificationAttempted()) {
-    klass->SetSkipAccessChecksFlagOnAllMethods(pointer_size);
-    klass->SetVerificationAttempted();
-  }
-}
-
 verifier::FailureKind ClassLinker::VerifyClass(
     Thread* self, Handle<mirror::Class> klass, verifier::HardFailLogMode log_level) {
   {
@@ -4848,6 +4857,7 @@
   {
     // Lock on klass is released. Lock new class object.
     ObjectLock<mirror::Class> initialization_lock(self, klass);
+    EnsureSkipAccessChecksMethods(klass, image_pointer_size_);
     mirror::Class::SetStatus(klass, ClassStatus::kInitialized, self);
   }
 
@@ -5598,8 +5608,7 @@
   DCHECK(c != nullptr);
 
   if (c->IsInitialized()) {
-    EnsureSkipAccessChecksMethods(c, image_pointer_size_);
-    self->AssertNoPendingException();
+    DCHECK(c->WasVerificationAttempted()) << c->PrettyClassAndClassLoader();
     return true;
   }
   // SubtypeCheckInfo::Initialized must happen-before any new-instance for that type.
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index fe45b9e..061c788 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -114,7 +114,8 @@
     EXPECT_EQ(0, primitive->GetIfTableCount());
     EXPECT_TRUE(primitive->GetIfTable() != nullptr);
     EXPECT_EQ(primitive->GetIfTable()->Count(), 0u);
-    EXPECT_EQ(kAccPublic | kAccFinal | kAccAbstract, primitive->GetAccessFlags());
+    EXPECT_EQ(kAccPublic | kAccFinal | kAccAbstract | kAccVerificationAttempted,
+              primitive->GetAccessFlags());
   }
 
   void AssertObjectClass(ObjPtr<mirror::Class> JavaLangObject)
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 3160422..1014c0e 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -37,14 +37,15 @@
     mirror::Object* ref,
     accounting::ContinuousSpaceBitmap* bitmap) {
   if (kEnableGenerationalConcurrentCopyingCollection
-      && young_gen_
       && !done_scanning_.load(std::memory_order_acquire)) {
-    // Everything in the unevac space should be marked for generational CC except for large objects.
-    DCHECK(region_space_bitmap_->Test(ref) || region_space_->IsLargeObject(ref)) << ref << " "
+    // Everything in the unevac space should be marked for young generation CC,
+    // except for large objects.
+    DCHECK(!young_gen_ || region_space_bitmap_->Test(ref) || region_space_->IsLargeObject(ref))
+        << ref << " "
         << ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->PrettyClass();
-    // Since the mark bitmap is still filled in from last GC, we can not use that or else the
-    // mutator may see references to the from space. Instead, use the baker pointer itself as
-    // the mark bit.
+    // Since the mark bitmap is still filled in from the last GC (or from the marking phase of
+    // 2-phase CC), we cannot use it, or else the mutator may see references to the from space.
+    // Instead, use the baker pointer itself as the mark bit.
     if (ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState())) {
       // TODO: We don't actually need to scan this object later, we just need to clear the gray
       // bit.
@@ -244,7 +245,7 @@
   DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
   if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
     return true;
-  } else if (!(kEnableGenerationalConcurrentCopyingCollection && young_gen_)
+  } else if (!kEnableGenerationalConcurrentCopyingCollection
              || done_scanning_.load(std::memory_order_acquire)) {
     // If the card table scanning is not finished yet, then only read-barrier
     // state should be checked. Checking the mark bitmap is unreliable as there
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 53aa9ba..861f0d3 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -188,6 +188,11 @@
   {
     ReaderMutexLock mu(self, *Locks::mutator_lock_);
     InitializePhase();
+    // In case of forced evacuation, all regions are evacuated, and hence there
+    // is no need to compute live_bytes.
+    if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_ && !force_evacuate_all_) {
+      MarkingPhase();
+    }
   }
   if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
     // Switch to read barrier mark entrypoints before we gray the objects. This is required in case
@@ -201,7 +206,7 @@
   FlipThreadRoots();
   {
     ReaderMutexLock mu(self, *Locks::mutator_lock_);
-    MarkingPhase();
+    CopyingPhase();
   }
   // Verify no from space refs. This causes a pause.
   if (kEnableNoFromSpaceRefsVerification) {
@@ -299,12 +304,22 @@
           DCHECK_EQ(space->GetGcRetentionPolicy(), space::kGcRetentionPolicyAlwaysCollect);
           space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
         }
-        // Age all of the cards for the region space so that we know which evac regions to scan.
-        Runtime::Current()->GetHeap()->GetCardTable()->ModifyCardsAtomic(
-            space->Begin(),
-            space->End(),
-            AgeCardVisitor(),
-            VoidFunctor());
+        if (young_gen_) {
+          // Age all of the cards for the region space so that we know which evac regions to scan.
+          heap_->GetCardTable()->ModifyCardsAtomic(space->Begin(),
+                                                   space->End(),
+                                                   AgeCardVisitor(),
+                                                   VoidFunctor());
+        } else {
+          // In a full-heap GC cycle, the card-table corresponding to region-space and
+          // non-moving space can be cleared, because this cycle only needs to
+          // capture writes during the marking phase of this cycle to catch
+          // objects that skipped marking due to heap mutation. Furthermore,
+          // if the next GC is a young-gen cycle, then it only needs writes to
+          // be captured after the thread-flip of this GC cycle, as that is when
+          // the young-gen for the next GC cycle starts getting populated.
+          heap_->GetCardTable()->ClearCardRange(space->Begin(), space->Limit());
+        }
       } else {
         if (space == region_space_) {
           // It is OK to clear the bitmap with mutators running since the only place it is read is
@@ -381,6 +396,7 @@
   if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_) {
     region_space_bitmap_->Clear();
   }
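+  // Set the mark stack mode here already (rather than in the flip pause), as
+  // the marking phase of a full-heap collection pushes on the mark stack
+  // before the thread flip.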
+  mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal, std::memory_order_relaxed);
   // Mark all of the zygote large objects without graying them.
   MarkZygoteLargeObjects();
 }
@@ -471,7 +487,7 @@
     TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
     // Note: self is not necessarily equal to thread since thread may be suspended.
     Thread* self = Thread::Current();
-    if (kVerifyNoMissingCardMarks) {
+    if (kVerifyNoMissingCardMarks && cc->young_gen_) {
       cc->VerifyNoMissingCardMarks();
     }
     CHECK_EQ(thread, self);
@@ -485,9 +501,11 @@
     }
     {
       TimingLogger::ScopedTiming split2("(Paused)SetFromSpace", cc->GetTimings());
-      // Only change live bytes for full CC.
+      // Only change live bytes for 1-phase full heap CC.
       cc->region_space_->SetFromSpace(
-          cc->rb_table_, evac_mode, /*clear_live_bytes=*/ !cc->young_gen_);
+          cc->rb_table_,
+          evac_mode,
+          /*clear_live_bytes=*/ !kEnableGenerationalConcurrentCopyingCollection);
     }
     cc->SwapStacks();
     if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
@@ -496,9 +514,7 @@
       cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
     }
     cc->is_marking_ = true;
-    cc->mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal,
-                               std::memory_order_relaxed);
-    if (kIsDebugBuild && !cc->young_gen_) {
+    if (kIsDebugBuild && !kEnableGenerationalConcurrentCopyingCollection) {
       cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
     }
     if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
@@ -860,6 +876,460 @@
   ConcurrentCopying* const collector_;
 };
 
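+// Visitor that sets the mark bit of each visited root reference and, if the
+// reference was not already marked, pushes it on the mark stack.
+// kAtomicTestAndSet selects atomic bitmap updates, which are needed when
+// roots are captured concurrently from multiple checkpoint threads.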
+template <bool kAtomicTestAndSet>
+class ConcurrentCopying::CaptureRootsForMarkingVisitor : public RootVisitor {
+ public:
+  explicit CaptureRootsForMarkingVisitor(ConcurrentCopying* cc, Thread* self)
+      : collector_(cc), self_(self) {}
+
+  void VisitRoots(mirror::Object*** roots,
+                  size_t count,
+                  const RootInfo& info ATTRIBUTE_UNUSED) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    for (size_t i = 0; i < count; ++i) {
+      mirror::Object** root = roots[i];
+      mirror::Object* ref = *root;
+      if (ref != nullptr && !collector_->TestAndSetMarkBitForRef<kAtomicTestAndSet>(ref)) {
+        collector_->PushOntoMarkStack(self_, ref);
+      }
+    }
+  }
+
+  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+                  size_t count,
+                  const RootInfo& info ATTRIBUTE_UNUSED) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    for (size_t i = 0; i < count; ++i) {
+      mirror::CompressedReference<mirror::Object>* const root = roots[i];
+      if (!root->IsNull()) {
+        mirror::Object* ref = root->AsMirrorPtr();
+        if (!collector_->TestAndSetMarkBitForRef<kAtomicTestAndSet>(ref)) {
+          collector_->PushOntoMarkStack(self_, ref);
+        }
+      }
+    }
+  }
+
+ private:
+  ConcurrentCopying* const collector_;
+  Thread* const self_;
+};
+
+class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
+ public:
+  RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
+                                       bool disable_weak_ref_access)
+      : concurrent_copying_(concurrent_copying),
+        disable_weak_ref_access_(disable_weak_ref_access) {
+  }
+
+  void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
+    // Note: self is not necessarily equal to thread since thread may be suspended.
+    Thread* const self = Thread::Current();
+    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
+        << thread->GetState() << " thread " << thread << " self " << self;
+    // Revoke thread local mark stacks.
+    accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
+    if (tl_mark_stack != nullptr) {
+      MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
+      concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
+      thread->SetThreadLocalMarkStack(nullptr);
+    }
+    // Disable weak ref access.
+    if (disable_weak_ref_access_) {
+      thread->SetWeakRefAccessEnabled(false);
+    }
+    // If thread is a running mutator, then act on behalf of the garbage collector.
+    // See the code in ThreadList::RunCheckpoint.
+    concurrent_copying_->GetBarrier().Pass(self);
+  }
+
+ protected:
+  ConcurrentCopying* const concurrent_copying_;
+
+ private:
+  const bool disable_weak_ref_access_;
+};
+
+class ConcurrentCopying::CaptureThreadRootsForMarkingAndCheckpoint :
+  public RevokeThreadLocalMarkStackCheckpoint {
+ public:
+  explicit CaptureThreadRootsForMarkingAndCheckpoint(ConcurrentCopying* cc) :
+    RevokeThreadLocalMarkStackCheckpoint(cc, /* disable_weak_ref_access */ false) {}
+
+  void Run(Thread* thread) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    Thread* const self = Thread::Current();
+    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    // We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
+    // only.
+    CaptureRootsForMarkingVisitor</*kAtomicTestAndSet*/ true> visitor(concurrent_copying_, self);
+    thread->VisitRoots(&visitor, kVisitRootFlagAllRoots);
+    // Barrier handling is done in the base class' Run() below.
+    RevokeThreadLocalMarkStackCheckpoint::Run(thread);
+  }
+};
+
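+// Run a checkpoint on all threads to capture and mark their roots, then wait
+// for all the checkpoint functions to finish.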
+void ConcurrentCopying::CaptureThreadRootsForMarking() {
+  TimingLogger::ScopedTiming split("CaptureThreadRootsForMarking", GetTimings());
+  if (kVerboseMode) {
+    LOG(INFO) << "time=" << region_space_->Time();
+    region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
+  }
+  Thread* const self = Thread::Current();
+  CaptureThreadRootsForMarkingAndCheckpoint check_point(this);
+  ThreadList* thread_list = Runtime::Current()->GetThreadList();
+  gc_barrier_->Init(self, 0);
+  size_t barrier_count = thread_list->RunCheckpoint(&check_point, /* callback */ nullptr);
+  // If there are no threads to wait for, which implies that all the checkpoint functions have
+  // finished, then there is no need to release the mutator lock.
+  if (barrier_count == 0) {
+    return;
+  }
+  Locks::mutator_lock_->SharedUnlock(self);
+  {
+    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
+    gc_barrier_->Increment(self, barrier_count);
+  }
+  Locks::mutator_lock_->SharedLock(self);
+  if (kVerboseMode) {
+    LOG(INFO) << "time=" << region_space_->Time();
+    region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
+    LOG(INFO) << "GC end of CaptureThreadRootsForMarking";
+  }
+}
+
+// Used to scan ref fields of an object.
+template <bool kHandleInterRegionRefs>
+class ConcurrentCopying::ComputeLiveBytesAndMarkRefFieldsVisitor {
+ public:
+  explicit ComputeLiveBytesAndMarkRefFieldsVisitor(ConcurrentCopying* collector,
+                                                   size_t obj_region_idx)
+      : collector_(collector),
+        obj_region_idx_(obj_region_idx),
+        contains_inter_region_idx_(false) {}
+
+  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
+      ALWAYS_INLINE
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
+    DCHECK_EQ(collector_->RegionSpace()->RegionIdxForRef(obj), obj_region_idx_);
+    DCHECK(kHandleInterRegionRefs || collector_->immune_spaces_.ContainsObject(obj));
+    CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset));
+  }
+
+  void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
+      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
+    DCHECK(klass->IsTypeOfReferenceClass());
+    // If the referent is not null, then we must re-visit the object during the
+    // copying phase to enqueue it for delayed processing and to set its
+    // read-barrier state to gray, ensuring that a call to GetReferent()
+    // triggers the read barrier. We reuse the data structure that remembers
+    // objects with inter-region refs for this purpose.
+    if (kHandleInterRegionRefs
+        && !contains_inter_region_idx_
+        && ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr) {
+      contains_inter_region_idx_ = true;
+    }
+  }
+
+  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+      ALWAYS_INLINE
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (!root->IsNull()) {
+      VisitRoot(root);
+    }
+  }
+
+  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+      ALWAYS_INLINE
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    CheckReference(root->AsMirrorPtr());
+  }
+
+  bool ContainsInterRegionRefs() const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+    return contains_inter_region_idx_;
+  }
+
+ private:
+  void CheckReference(mirror::Object* ref) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (ref == nullptr) {
+      // Nothing to do.
+      return;
+    }
+    if (!collector_->TestAndSetMarkBitForRef(ref)) {
+      collector_->PushOntoLocalMarkStack(ref);
+    }
+    if (kHandleInterRegionRefs && !contains_inter_region_idx_) {
+      size_t ref_region_idx = collector_->RegionSpace()->RegionIdxForRef(ref);
+      // If a region-space object refers to an outside object, we will have a
+      // mismatch of region idx, but the object need not be re-visited in the
+      // copying phase.
+      if (ref_region_idx != static_cast<size_t>(-1) && obj_region_idx_ != ref_region_idx) {
+        contains_inter_region_idx_ = true;
+      }
+    }
+  }
+
+  ConcurrentCopying* const collector_;
+  const size_t obj_region_idx_;
+  mutable bool contains_inter_region_idx_;
+};
+
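+// Add ref's size to its region's live_bytes (unless the region is newly
+// allocated, as such regions are always evacuated) and scan ref's reference
+// fields, dirtying the corresponding card if ref holds any inter-region
+// reference.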
+void ConcurrentCopying::AddLiveBytesAndScanRef(mirror::Object* ref) {
+  DCHECK(ref != nullptr);
+  DCHECK(!immune_spaces_.ContainsObject(ref));
+  DCHECK(TestMarkBitmapForRef(ref));
+  size_t obj_region_idx = static_cast<size_t>(-1);
+  if (LIKELY(region_space_->HasAddress(ref))) {
+    obj_region_idx = region_space_->RegionIdxForRefUnchecked(ref);
+    // Add live bytes to the corresponding region.
+    if (!region_space_->IsRegionNewlyAllocated(obj_region_idx)) {
+      // Newly allocated regions are always chosen for evacuation, so there is
+      // no need to update live_bytes_.
+      size_t obj_size = ref->SizeOf<kDefaultVerifyFlags>();
+      size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
+      region_space_->AddLiveBytes(ref, alloc_size);
+    }
+  }
+  ComputeLiveBytesAndMarkRefFieldsVisitor</*kHandleInterRegionRefs*/ true>
+      visitor(this, obj_region_idx);
+  ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+      visitor, visitor);
+  // Mark the corresponding card dirty if the object contains any
+  // inter-region reference.
+  if (visitor.ContainsInterRegionRefs()) {
+    heap_->GetCardTable()->MarkCard(ref);
+  }
+}
+
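+// Set the mark bit of ref in the bitmap covering its space and return whether
+// it was already set. References into immune spaces are considered always
+// marked.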
+template <bool kAtomic>
+bool ConcurrentCopying::TestAndSetMarkBitForRef(mirror::Object* ref) {
+  accounting::ContinuousSpaceBitmap* bitmap = nullptr;
+  accounting::LargeObjectBitmap* los_bitmap = nullptr;
+  if (LIKELY(region_space_->HasAddress(ref))) {
+    bitmap = region_space_bitmap_;
+  } else if (heap_->GetNonMovingSpace()->HasAddress(ref)) {
+    bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
+  } else if (immune_spaces_.ContainsObject(ref)) {
+    // References to immune space objects are always live.
+    DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref));
+    return true;
+  } else {
+    // Should be a large object. Must be page aligned and the LOS must exist.
+    if (kIsDebugBuild
+        && (!IsAligned<kPageSize>(ref) || heap_->GetLargeObjectsSpace() == nullptr)) {
+      // It must be heap corruption. Remove memory protection and dump data.
+      region_space_->Unprotect();
+      heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr,
+                                                  MemberOffset(0),
+                                                  ref,
+                                                  /* fatal */ true);
+    }
+    los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
+  }
+  if (kAtomic) {
+    return (bitmap != nullptr) ? bitmap->AtomicTestAndSet(ref) : los_bitmap->AtomicTestAndSet(ref);
+  } else {
+    return (bitmap != nullptr) ? bitmap->Set(ref) : los_bitmap->Set(ref);
+  }
+}
+
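+// Return whether ref is marked in the bitmap covering its space.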
+bool ConcurrentCopying::TestMarkBitmapForRef(mirror::Object* ref) {
+  if (LIKELY(region_space_->HasAddress(ref))) {
+    return region_space_bitmap_->Test(ref);
+  } else if (heap_->GetNonMovingSpace()->HasAddress(ref)) {
+    return heap_->GetNonMovingSpace()->GetMarkBitmap()->Test(ref);
+  } else if (immune_spaces_.ContainsObject(ref)) {
+    // References to immune space objects are always live.
+    DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref));
+    return true;
+  } else {
+    // Should be a large object. Must be page aligned and the LOS must exist.
+    if (kIsDebugBuild
+        && (!IsAligned<kPageSize>(ref) || heap_->GetLargeObjectsSpace() == nullptr)) {
+      // It must be heap corruption. Remove memory protection and dump data.
+      region_space_->Unprotect();
+      heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr,
+                                                  MemberOffset(0),
+                                                  ref,
+                                                  /* fatal */ true);
+    }
+    return heap_->GetLargeObjectsSpace()->GetMarkBitmap()->Test(ref);
+  }
+}
+
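+// Push ref on the GC mark stack. Only the thread running the GC calls this
+// during the marking phase, so thread-local mark stacks are not involved.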
+void ConcurrentCopying::PushOntoLocalMarkStack(mirror::Object* ref) {
+  if (kIsDebugBuild) {
+    Thread *self = Thread::Current();
+    DCHECK_EQ(thread_running_gc_, self);
+    DCHECK(self->GetThreadLocalMarkStack() == nullptr);
+  }
+  DCHECK_EQ(mark_stack_mode_.load(std::memory_order_relaxed), kMarkStackModeThreadLocal);
+  gc_mark_stack_->PushBack(ref);
+}
+
+void ConcurrentCopying::ProcessMarkStackForMarkingAndComputeLiveBytes() {
+  // Process the thread-local mark stacks containing thread roots.
+  ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ false,
+                               /* checkpoint_callback */ nullptr,
+                               [this] (mirror::Object* ref)
+                                   REQUIRES_SHARED(Locks::mutator_lock_) {
+                                 AddLiveBytesAndScanRef(ref);
+                               });
+
+  while (!gc_mark_stack_->IsEmpty()) {
+    mirror::Object* ref = gc_mark_stack_->PopBack();
+    AddLiveBytesAndScanRef(ref);
+  }
+}
+
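+// Visitor that captures (marks and pushes) the references held by
+// immune-space objects, without tracking inter-region references.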
+class ConcurrentCopying::ImmuneSpaceCaptureRefsVisitor {
+ public:
+  explicit ImmuneSpaceCaptureRefsVisitor(ConcurrentCopying* cc) : collector_(cc) {}
+
+  ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
+    ComputeLiveBytesAndMarkRefFieldsVisitor</*kHandleInterRegionRefs*/ false>
+        visitor(collector_, /*obj_region_idx*/ static_cast<size_t>(-1));
+    obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+        visitor, visitor);
+  }
+
+  static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
+    reinterpret_cast<ImmuneSpaceCaptureRefsVisitor*>(arg)->operator()(obj);
+  }
+
+ private:
+  ConcurrentCopying* const collector_;
+};
+
+/* Invariants for two-phase CC
+ * ===========================
+ * A) Definitions
+ * ---------------
+ * 1) Black: marked in bitmap, rb_state is non-gray, and not in mark stack
+ * 2) Black-clean: marked in bitmap, and corresponding card is clean/aged
+ * 3) Black-dirty: marked in bitmap, and corresponding card is dirty
+ * 4) Gray: marked in bitmap, and exists in mark stack
+ * 5) Gray-dirty: marked in bitmap, rb_state is gray, corresponding card is
+ *    dirty, and exists in mark stack
+ * 6) White: unmarked in bitmap, rb_state is non-gray, and not in mark stack
+ *
+ * B) Before marking phase
+ * -----------------------
+ * 1) All objects are white
+ * 2) Cards are either clean or aged (cannot be asserted without a STW pause)
+ * 3) Mark bitmap is cleared
+ * 4) Mark stack is empty
+ *
+ * C) During marking phase
+ * ------------------------
+ * 1) If a black object holds an inter-region or white reference, then its
+ *    corresponding card is dirty. In other words, it changes from being
+ *    black-clean to black-dirty
+ * 2) No black-clean object points to a white object
+ *
+ * D) After marking phase
+ * -----------------------
+ * 1) There are no gray objects
+ * 2) All newly allocated objects are in from space
+ * 3) No white object can be reachable, directly or otherwise, from a
+ *    black-clean object
+ *
+ * E) During copying phase
+ * ------------------------
+ * 1) Mutators cannot observe white and black-dirty objects
+ * 2) New allocations are in to-space (newly allocated regions are part of to-space)
+ * 3) An object in mark stack must have its rb_state = Gray
+ *
+ * F) During card table scan
+ * --------------------------
+ * 1) Referents corresponding to root references are gray or in to-space
+ * 2) Every path from an object that is read or written by a mutator during
+ *    this period to a dirty black object goes through some gray object.
+ *    Mutators preserve this by graying black objects as needed during this
+ *    period, which ensures that a mutator never encounters a black-dirty
+ *    object.
+ *
+ * G) After card table scan
+ * ------------------------
+ * 1) There are no black-dirty objects
+ * 2) Referents corresponding to root references are gray, black-clean or in
+ *    to-space
+ *
+ * H) After copying phase
+ * -----------------------
+ * 1) Mark stack is empty
+ * 2) No references into evacuated from-space
+ * 3) No reference to an object which is unmarked and also not in a newly
+ *    allocated region. In other words, no references to white objects.
+ */
+
+void ConcurrentCopying::MarkingPhase() {
+  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
+  if (kVerboseMode) {
+    LOG(INFO) << "GC MarkingPhase";
+  }
+  accounting::CardTable* const card_table = heap_->GetCardTable();
+  Thread* const self = Thread::Current();
+  // Clear live_bytes_ of every non-free region, except the ones that are newly
+  // allocated.
+  region_space_->SetAllRegionLiveBytesZero();
+  if (kIsDebugBuild) {
+    region_space_->AssertAllRegionLiveBytesZeroOrCleared();
+  }
+  // Scan immune spaces
+  {
+    TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
+    for (auto& space : immune_spaces_.GetSpaces()) {
+      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
+      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
+      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+      ImmuneSpaceCaptureRefsVisitor visitor(this);
+      if (table != nullptr) {
+        table->VisitObjects(ImmuneSpaceCaptureRefsVisitor::Callback, &visitor);
+      } else {
+        WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
+        card_table->Scan<false>(
+            live_bitmap,
+            space->Begin(),
+            space->Limit(),
+            visitor,
+            accounting::CardTable::kCardDirty - 1);
+      }
+    }
+  }
+  // Scan runtime roots
+  {
+    TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
+    CaptureRootsForMarkingVisitor visitor(this, self);
+    Runtime::Current()->VisitConcurrentRoots(&visitor, kVisitRootFlagAllRoots);
+  }
+  {
+    // TODO: don't visit the transaction roots if it's not active.
+    TimingLogger::ScopedTiming split2("VisitNonThreadRoots", GetTimings());
+    CaptureRootsForMarkingVisitor visitor(this, self);
+    Runtime::Current()->VisitNonThreadRoots(&visitor);
+  }
+  // Capture thread roots
+  CaptureThreadRootsForMarking();
+  // Process mark stack
+  ProcessMarkStackForMarkingAndComputeLiveBytes();
+
+  // Age the cards.
+  for (space::ContinuousSpace* space : GetHeap()->GetContinuousSpaces()) {
+    if (space->IsImageSpace() || space->IsZygoteSpace()) {
+      // Image and zygote spaces are already handled since we gray the objects in the pause.
+      continue;
+    }
+    card_table->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
+  }
+
+  if (kVerboseMode) {
+    LOG(INFO) << "GC end of MarkingPhase";
+  }
+}
+
 template <bool kNoUnEvac>
 void ConcurrentCopying::ScanDirtyObject(mirror::Object* obj) {
   Scan<kNoUnEvac>(obj);
@@ -876,12 +1346,13 @@
 }
 
 // Concurrently mark roots that are guarded by read barriers and process the mark stack.
-void ConcurrentCopying::MarkingPhase() {
-  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
+void ConcurrentCopying::CopyingPhase() {
+  TimingLogger::ScopedTiming split("CopyingPhase", GetTimings());
   if (kVerboseMode) {
-    LOG(INFO) << "GC MarkingPhase";
+    LOG(INFO) << "GC CopyingPhase";
   }
   Thread* self = Thread::Current();
+  accounting::CardTable* const card_table = heap_->GetCardTable();
   if (kIsDebugBuild) {
     MutexLock mu(self, *Locks::thread_list_lock_);
     CHECK(weak_ref_access_enabled_);
@@ -894,7 +1365,7 @@
   if (kUseBakerReadBarrier) {
     gc_grays_immune_objects_ = false;
   }
-  if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+  if (kEnableGenerationalConcurrentCopyingCollection) {
     if (kVerboseMode) {
       LOG(INFO) << "GC ScanCardsForSpace";
     }
@@ -912,34 +1383,45 @@
         continue;
       }
       // Scan all of the objects on dirty cards in unevac from space, and non moving space. These
-      // are from previous GCs and may reference things in the from space.
+      // are from previous GCs (or from the marking phase of the 2-phase full GC) and may
+      // reference things in the from space.
       //
       // Note that we do not need to process the large-object space (the only discontinuous space)
       // as it contains only large string objects and large primitive array objects, that have no
       // reference to other objects, except their class. There is no need to scan these large
       // objects, as the String class and the primitive array classes are expected to never move
-      // during a minor (young-generation) collection:
+      // during a collection:
       // - In the case where we run with a boot image, these classes are part of the image space,
       //   which is an immune space.
       // - In the case where we run without a boot image, these classes are allocated in the
       //   non-moving space (see art::ClassLinker::InitWithoutImage).
-      Runtime::Current()->GetHeap()->GetCardTable()->Scan<false>(
+      card_table->Scan<false>(
           space->GetMarkBitmap(),
           space->Begin(),
           space->End(),
           [this, space](mirror::Object* obj)
               REQUIRES(Locks::heap_bitmap_lock_)
               REQUIRES_SHARED(Locks::mutator_lock_) {
-            // Don't push or gray unevac refs.
-            if (kIsDebugBuild && space == region_space_) {
-              // We may get unevac large objects.
-              if (!region_space_->IsInUnevacFromSpace(obj)) {
-                CHECK(region_space_bitmap_->Test(obj));
-                region_space_->DumpRegionForObject(LOG_STREAM(FATAL_WITHOUT_ABORT), obj);
-                LOG(FATAL) << "Scanning " << obj << " not in unevac space";
+            // TODO: This code may be refactored to avoid scanning the object while
+            // done_scanning_ is false by setting its rb_state to gray and pushing
+            // the object on the mark stack. However, that would also require
+            // clearing the corresponding mark bit and, for region-space objects,
+            // subtracting the object's size from the corresponding region's
+            // live_bytes.
+            if (young_gen_) {
+              // Don't push or gray unevac refs.
+              if (kIsDebugBuild && space == region_space_) {
+                // We may get unevac large objects.
+                if (!region_space_->IsInUnevacFromSpace(obj)) {
+                  CHECK(region_space_bitmap_->Test(obj));
+                  region_space_->DumpRegionForObject(LOG_STREAM(FATAL_WITHOUT_ABORT), obj);
+                  LOG(FATAL) << "Scanning " << obj << " not in unevac space";
+                }
               }
+              ScanDirtyObject</*kNoUnEvac*/ true>(obj);
+            } else if (space != region_space_ || region_space_->IsInUnevacFromSpace(obj)) {
+              ScanDirtyObject</*kNoUnEvac*/ false>(obj);
             }
-            ScanDirtyObject</*kNoUnEvac*/ true>(obj);
           },
           accounting::CardTable::kCardDirty - 1);
     }
@@ -962,10 +1444,13 @@
       if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects && table != nullptr) {
         table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor);
       } else {
-        // TODO: Scan only the aged cards.
-        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
-                                      reinterpret_cast<uintptr_t>(space->Limit()),
-                                      visitor);
+        WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
+        card_table->Scan<false>(
+            live_bitmap,
+            space->Begin(),
+            space->Limit(),
+            visitor,
+            accounting::CardTable::kCardDirty - 1);
       }
     }
   }
@@ -1074,7 +1559,7 @@
     CHECK(weak_ref_access_enabled_);
   }
   if (kVerboseMode) {
-    LOG(INFO) << "GC end of MarkingPhase";
+    LOG(INFO) << "GC end of CopyingPhase";
   }
 }
 
@@ -1434,40 +1919,6 @@
   ConcurrentCopying* const collector_;
 };
 
-class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
- public:
-  RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
-                                       bool disable_weak_ref_access)
-      : concurrent_copying_(concurrent_copying),
-        disable_weak_ref_access_(disable_weak_ref_access) {
-  }
-
-  void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
-    // Note: self is not necessarily equal to thread since thread may be suspended.
-    Thread* self = Thread::Current();
-    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
-        << thread->GetState() << " thread " << thread << " self " << self;
-    // Revoke thread local mark stacks.
-    accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
-    if (tl_mark_stack != nullptr) {
-      MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
-      concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
-      thread->SetThreadLocalMarkStack(nullptr);
-    }
-    // Disable weak ref access.
-    if (disable_weak_ref_access_) {
-      thread->SetWeakRefAccessEnabled(false);
-    }
-    // If thread is a running mutator, then act on behalf of the garbage collector.
-    // See the code in ThreadList::RunCheckpoint.
-    concurrent_copying_->GetBarrier().Pass(self);
-  }
-
- private:
-  ConcurrentCopying* const concurrent_copying_;
-  const bool disable_weak_ref_access_;
-};
-
 void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access,
                                                     Closure* checkpoint_callback) {
   Thread* self = Thread::Current();
@@ -1525,7 +1976,11 @@
   if (mark_stack_mode == kMarkStackModeThreadLocal) {
     // Process the thread-local mark stacks and the GC mark stack.
     count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ false,
-                                          /* checkpoint_callback= */ nullptr);
+                                          /* checkpoint_callback= */ nullptr,
+                                          [this] (mirror::Object* ref)
+                                              REQUIRES_SHARED(Locks::mutator_lock_) {
+                                            ProcessMarkStackRef(ref);
+                                          });
     while (!gc_mark_stack_->IsEmpty()) {
       mirror::Object* to_ref = gc_mark_stack_->PopBack();
       ProcessMarkStackRef(to_ref);
@@ -1581,8 +2036,10 @@
   return count == 0;
 }
 
+template <typename Processor>
 size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
-                                                       Closure* checkpoint_callback) {
+                                                       Closure* checkpoint_callback,
+                                                       const Processor& processor) {
   // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
   RevokeThreadLocalMarkStacks(disable_weak_ref_access, checkpoint_callback);
   size_t count = 0;
@@ -1596,7 +2053,7 @@
   for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
     for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
       mirror::Object* to_ref = p->AsMirrorPtr();
-      ProcessMarkStackRef(to_ref);
+      processor(to_ref);
       ++count;
     }
     {
@@ -1647,6 +2104,12 @@
         perform_scan = true;
         // Only add to the live bytes if the object was not already marked and we are not the young
         // GC.
+        // Why add live bytes even for the 2-phase GC, where the marking phase
+        // already computes them? We need to ensure that if an unevac region has
+        // any live objects, its live_bytes is non-zero; otherwise,
+        // ClearFromSpace() will clear the region. Since we may skip live
+        // objects during the marking phase of the 2-phase GC, we have to take
+        // care of such objects here.
         add_to_live_bytes = true;
       }
       break;
@@ -1788,7 +2251,12 @@
   DisableWeakRefAccessCallback dwrac(this);
   // Process the thread local mark stacks one last time after switching to the shared mark stack
   // mode and disable weak ref accesses.
-  ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ true, &dwrac);
+  ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ true,
+                               &dwrac,
+                               [this] (mirror::Object* ref)
+                                   REQUIRES_SHARED(Locks::mutator_lock_) {
+                                 ProcessMarkStackRef(ref);
+                               });
   if (kVerboseMode) {
     LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
   }
@@ -2054,7 +2522,7 @@
     uint64_t cleared_objects;
     {
       TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
-      region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects);
+      region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects, /*clear_bitmap*/ !young_gen_);
       // `cleared_bytes` and `cleared_objects` may be greater than the from space equivalents since
       // RegionSpace::ClearFromSpace may clear empty unevac regions.
       CHECK_GE(cleared_bytes, from_bytes);
@@ -2363,7 +2831,7 @@
   DCHECK(!immune_spaces_.ContainsObject(from_ref)) << "ref=" << from_ref;
   if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
     return true;
-  } else if (!(kEnableGenerationalConcurrentCopyingCollection && young_gen_)
+  } else if (!kEnableGenerationalConcurrentCopyingCollection
              || done_scanning_.load(std::memory_order_acquire)) {
     // Read the comment in IsMarkedInUnevacFromSpace()
     accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
@@ -2954,7 +3422,7 @@
     los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
     DCHECK(los_bitmap->HasAddress(ref));
   }
-  if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+  if (kEnableGenerationalConcurrentCopyingCollection) {
     // The sticky-bit CC collector is only compatible with Baker-style read barriers.
     DCHECK(kUseBakerReadBarrier);
     // Not done scanning, use AtomicSetReadBarrierPointer.
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index e251fbc..4442ad5 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -79,6 +79,8 @@
   void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
   void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_);
+  void CopyingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
   void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
   void FinishPhase() REQUIRES(!mark_stack_lock_,
@@ -205,7 +207,10 @@
   void VerifyNoMissingCardMarks()
       REQUIRES(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_);
-  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
+  template <typename Processor>
+  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
+                                      Closure* checkpoint_callback,
+                                      const Processor& processor)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
   void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -302,6 +307,15 @@
   // Set the read barrier mark entrypoints to non-null.
   void ActivateReadBarrierEntrypoints();
 
+  void CaptureThreadRootsForMarking() REQUIRES_SHARED(Locks::mutator_lock_);
+  void AddLiveBytesAndScanRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+  bool TestMarkBitmapForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+  template <bool kAtomic = false>
+  bool TestAndSetMarkBitForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+  void PushOntoLocalMarkStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+  void ProcessMarkStackForMarkingAndComputeLiveBytes() REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_);
+
   space::RegionSpace* region_space_;      // The underlying region space.
   std::unique_ptr<Barrier> gc_barrier_;
   std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
@@ -382,7 +396,7 @@
   // Generational "sticky", only trace through dirty objects in region space.
   const bool young_gen_;
   // If true, the GC thread is done scanning marked objects on dirty and aged
-  // card (see ConcurrentCopying::MarkingPhase).
+  // card (see ConcurrentCopying::CopyingPhase).
   Atomic<bool> done_scanning_;
 
   // The skipped blocks are memory blocks/chunks that were copies of
@@ -448,6 +462,10 @@
   class VerifyNoFromSpaceRefsFieldVisitor;
   class VerifyNoFromSpaceRefsVisitor;
   class VerifyNoMissingCardMarkVisitor;
+  class ImmuneSpaceCaptureRefsVisitor;
+  template <bool kAtomicTestAndSet = false> class CaptureRootsForMarkingVisitor;
+  class CaptureThreadRootsForMarkingAndCheckpoint;
+  template <bool kHandleInterRegionRefs> class ComputeLiveBytesAndMarkRefFieldsVisitor;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
 };
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 5ff1270..dbec4ea 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -205,9 +205,10 @@
       continue;
     }
     if (r->IsLarge()) {
-      // Avoid visiting dead large objects since they may contain dangling pointers to the
-      // from-space.
-      DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
+      // We may visit a large object with live_bytes = 0 here. However, this is
+      // safe, as such an object cannot contain dangling pointers: the
+      // corresponding regions (and the regions of any dead referents) cannot be
+      // reused for new allocations without first clearing their live_bytes and
+      // state.
       mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
       DCHECK(obj->GetClass() != nullptr);
       visitor(obj);
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 21cae93..98b140e 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -319,6 +319,7 @@
                 state == RegionState::kRegionStateLarge) &&
                type == RegionType::kRegionTypeToSpace);
         bool should_evacuate = r->ShouldBeEvacuated(evac_mode);
+        bool is_newly_allocated = r->IsNewlyAllocated();
         if (should_evacuate) {
           r->SetAsFromSpace();
           DCHECK(r->IsInFromSpace());
@@ -329,6 +330,17 @@
         if (UNLIKELY(state == RegionState::kRegionStateLarge &&
                      type == RegionType::kRegionTypeToSpace)) {
           prev_large_evacuated = should_evacuate;
+          // In the 2-phase full-heap GC, this function is called after marking
+          // is done. So it is possible that a newly allocated large object is
+          // marked but its live_bytes is still -1. We need to clear its mark
+          // bit; otherwise live_bytes will not be updated in
+          // ConcurrentCopying::ProcessMarkStackRef(), which would break the
+          // logic.
+          if (kEnableGenerationalConcurrentCopyingCollection
+              && !should_evacuate
+              && is_newly_allocated) {
+            GetMarkBitmap()->Clear(reinterpret_cast<mirror::Object*>(r->Begin()));
+          }
           num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
           DCHECK_GT(num_expected_large_tails, 0U);
         }
@@ -367,7 +379,8 @@
 }
 
 void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
-                                 /* out */ uint64_t* cleared_objects) {
+                                 /* out */ uint64_t* cleared_objects,
+                                 const bool clear_bitmap) {
   DCHECK(cleared_bytes != nullptr);
   DCHECK(cleared_objects != nullptr);
   *cleared_bytes = 0;
@@ -395,13 +408,18 @@
   // (see b/62194020).
   uint8_t* clear_block_begin = nullptr;
   uint8_t* clear_block_end = nullptr;
-  auto clear_region = [&clear_block_begin, &clear_block_end](Region* r) {
+  auto clear_region = [this, &clear_block_begin, &clear_block_end, clear_bitmap](Region* r) {
     r->Clear(/*zero_and_release_pages=*/false);
     if (clear_block_end != r->Begin()) {
       // Region `r` is not adjacent to the current clear block; zero and release
       // pages within the current block and restart a new clear block at the
       // beginning of region `r`.
       ZeroAndProtectRegion(clear_block_begin, clear_block_end);
+      if (clear_bitmap) {
+        GetLiveBitmap()->ClearRange(
+            reinterpret_cast<mirror::Object*>(clear_block_begin),
+            reinterpret_cast<mirror::Object*>(clear_block_end));
+      }
       clear_block_begin = r->Begin();
     }
     // Add region `r` to the clear block.
@@ -426,20 +444,23 @@
         // It is also better to clear these regions now instead of at the end of the next GC to
         // save RAM. If we don't clear the regions here, they will be cleared next GC by the normal
         // live percent evacuation logic.
+        *cleared_bytes += r->BytesAllocated();
+        *cleared_objects += r->ObjectsAllocated();
+        clear_region(r);
         size_t free_regions = 1;
         // Also release RAM for large tails.
         while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
-          DCHECK(r->IsLarge());
           clear_region(&regions_[i + free_regions]);
           ++free_regions;
         }
-        *cleared_bytes += r->BytesAllocated();
-        *cleared_objects += r->ObjectsAllocated();
         num_non_free_regions_ -= free_regions;
-        clear_region(r);
-        GetLiveBitmap()->ClearRange(
-            reinterpret_cast<mirror::Object*>(r->Begin()),
-            reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
+        // When clear_bitmap is true, clearing the bitmap is taken care of in
+        // clear_region().
+        if (!clear_bitmap) {
+          GetLiveBitmap()->ClearRange(
+              reinterpret_cast<mirror::Object*>(r->Begin()),
+              reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
+        }
         continue;
       }
       r->SetUnevacFromSpaceAsToSpace();
@@ -519,6 +540,11 @@
   }
   // Clear pages for the last block since clearing happens when a new block opens.
   ZeroAndReleasePages(clear_block_begin, clear_block_end - clear_block_begin);
+  if (clear_bitmap) {
+    GetLiveBitmap()->ClearRange(
+        reinterpret_cast<mirror::Object*>(clear_block_begin),
+        reinterpret_cast<mirror::Object*>(clear_block_end));
+  }
   // Update non_free_region_index_limit_.
   SetNonFreeRegionLimit(new_non_free_region_index_limit);
   evac_region_ = nullptr;
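
The clear_region lambda above batches address-adjacent regions into a single "clear block", so page zeroing (and now, when clear_bitmap is set, live-bitmap clearing) is issued once per contiguous range rather than once per region; the flush after the loop handles the last open block. Below is a minimal self-contained sketch of that coalescing pattern; Region, ZeroRange, and ClearBitmapRange are hypothetical stand-ins for the real ART types and calls, not the actual implementation.

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for an ART region; only [begin, end) matters here.
struct Region {
  uint8_t* begin;
  uint8_t* end;
};

// Placeholder for ZeroAndProtectRegion(): zero and release a page range.
static void ZeroRange(uint8_t* begin, uint8_t* end) {
  std::printf("zero    [%p, %p)\n", static_cast<void*>(begin), static_cast<void*>(end));
}

// Placeholder for GetLiveBitmap()->ClearRange().
static void ClearBitmapRange(uint8_t* begin, uint8_t* end) {
  std::printf("unmark  [%p, %p)\n", static_cast<void*>(begin), static_cast<void*>(end));
}

// Walk regions in address order, merging address-adjacent ones into one
// clear block so each flush covers the largest possible contiguous range.
static void ClearRegions(const std::vector<Region>& to_clear, bool clear_bitmap) {
  uint8_t* block_begin = nullptr;
  uint8_t* block_end = nullptr;
  auto flush = [&] {
    if (block_begin != nullptr) {
      ZeroRange(block_begin, block_end);
      if (clear_bitmap) {
        ClearBitmapRange(block_begin, block_end);
      }
    }
  };
  for (const Region& r : to_clear) {
    if (block_end != r.begin) {
      // Not adjacent to the current block: flush it and open a new one at `r`.
      flush();
      block_begin = r.begin;
    }
    block_end = r.end;  // Extend the current block to cover `r`.
  }
  // Flush the last block; clearing otherwise happens only when a new block opens.
  flush();
}
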
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 8810f8c..0d5ebcc 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -228,6 +228,11 @@
     return false;
   }
 
+  bool IsRegionNewlyAllocated(size_t idx) const NO_THREAD_SAFETY_ANALYSIS {
+    DCHECK_LT(idx, num_regions_);
+    return regions_[idx].IsNewlyAllocated();
+  }
+
   bool IsInNewlyAllocatedRegion(mirror::Object* ref) {
     if (HasAddress(ref)) {
       Region* r = RefToRegionUnlocked(ref);
@@ -291,7 +296,9 @@
   size_t FromSpaceSize() REQUIRES(!region_lock_);
   size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
   size_t ToSpaceSize() REQUIRES(!region_lock_);
-  void ClearFromSpace(/* out */ uint64_t* cleared_bytes, /* out */ uint64_t* cleared_objects)
+  void ClearFromSpace(/* out */ uint64_t* cleared_bytes,
+                      /* out */ uint64_t* cleared_objects,
+                      const bool clear_bitmap)
       REQUIRES(!region_lock_);
 
   void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
@@ -310,6 +317,40 @@
     }
   }
 
+  void SetAllRegionLiveBytesZero() REQUIRES(!region_lock_) {
+    MutexLock mu(Thread::Current(), region_lock_);
+    const size_t iter_limit = kUseTableLookupReadBarrier
+        ? num_regions_
+        : std::min(num_regions_, non_free_region_index_limit_);
+    for (size_t i = 0; i < iter_limit; ++i) {
+      Region* r = &regions_[i];
+    // Newly allocated regions don't need up-to-date live_bytes_ to decide
+    // whether they should be evacuated. See Region::ShouldBeEvacuated().
+      if (!r->IsFree() && !r->IsNewlyAllocated()) {
+        r->ZeroLiveBytes();
+      }
+    }
+  }
+
+  size_t RegionIdxForRefUnchecked(mirror::Object* ref) const NO_THREAD_SAFETY_ANALYSIS {
+    DCHECK(HasAddress(ref));
+    uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
+    size_t reg_idx = offset / kRegionSize;
+    DCHECK_LT(reg_idx, num_regions_);
+    Region* reg = &regions_[reg_idx];
+    DCHECK_EQ(reg->Idx(), reg_idx);
+    DCHECK(reg->Contains(ref));
+    return reg_idx;
+  }
+  // Returns -1 as the region index for references outside this region space.
+  size_t RegionIdxForRef(mirror::Object* ref) const NO_THREAD_SAFETY_ANALYSIS {
+    if (HasAddress(ref)) {
+      return RegionIdxForRefUnchecked(ref);
+    } else {
+      return static_cast<size_t>(-1);
+    }
+  }
+
   void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_);
   bool AllocNewTlab(Thread* self, size_t min_bytes) REQUIRES(!region_lock_);
 
@@ -515,11 +556,10 @@
     ALWAYS_INLINE bool ShouldBeEvacuated(EvacMode evac_mode);
 
     void AddLiveBytes(size_t live_bytes) {
-      DCHECK(IsInUnevacFromSpace());
+      DCHECK(kEnableGenerationalConcurrentCopyingCollection || IsInUnevacFromSpace());
       DCHECK(!IsLargeTail());
       DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
-      // For large allocations, we always consider all bytes in the
-      // regions live.
+      // For large allocations, we always consider all bytes in the regions live.
       live_bytes_ += IsLarge() ? Top() - begin_ : live_bytes;
       DCHECK_LE(live_bytes_, BytesAllocated());
     }
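
RegionIdxForRefUnchecked above computes a region index purely from address arithmetic: the reference's offset from the space base divided by kRegionSize, while RegionIdxForRef wraps it with a bounds test that returns static_cast<size_t>(-1) as an out-of-space sentinel. A minimal sketch of that arithmetic follows; RegionSpaceSketch is a hypothetical type, and the 256 KiB region size, while it matches ART's current value, should be treated as an assumption.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Assumption: 256 KiB regions, as in ART's region_space.h.
constexpr std::size_t kRegionSize = 256 * 1024;

struct RegionSpaceSketch {
  std::uintptr_t begin;
  std::uintptr_t end;
  std::size_t num_regions;

  bool HasAddress(std::uintptr_t ref) const { return begin <= ref && ref < end; }

  // Mirrors RegionIdxForRef() above: returns static_cast<std::size_t>(-1)
  // as the sentinel for references outside the space.
  std::size_t RegionIdxForRef(std::uintptr_t ref) const {
    if (!HasAddress(ref)) {
      return static_cast<std::size_t>(-1);
    }
    std::size_t idx = (ref - begin) / kRegionSize;
    assert(idx < num_regions);
    return idx;
  }
};
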
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index d004d64..2e41a9d 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -366,6 +366,13 @@
     }
   } else {
     // Enter the "with access check" interpreter.
+
+    // The boot classpath should really not have to run access checks.
+    DCHECK(method->GetDeclaringClass()->GetClassLoader() != nullptr
+           || Runtime::Current()->IsVerificationSoftFail()
+           || Runtime::Current()->IsAotCompiler())
+        << method->PrettyMethod();
+
     if (kInterpreterImplKind == kMterpImplKind) {
       // No access check variants for Mterp.  Just use the switch version.
       if (transaction_active) {
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 185ae3b..679ca43 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -871,6 +871,9 @@
 }
 
 inline void Class::SetAccessFlags(uint32_t new_access_flags) {
+  if (kIsDebugBuild) {
+    SetAccessFlagsDCheck(new_access_flags);
+  }
   // Called inside a transaction when setting pre-verified flag during boot image compilation.
   if (Runtime::Current()->IsActiveTransaction()) {
     SetField32<true>(AccessFlagsOffset(), new_access_flags);
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 83d76a9..c5ed1bf 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -206,6 +206,10 @@
     }
   }
 
+  if (kIsDebugBuild && new_status >= ClassStatus::kInitialized) {
+    CHECK(h_this->WasVerificationAttempted()) << h_this->PrettyClassAndClassLoader();
+  }
+
   if (!class_linker_initialized) {
    // When the class linker is being initialized it is single-threaded and by definition there can be
     // no waiters. During initialization classes may appear temporary but won't be retired as their
@@ -1461,5 +1465,12 @@
 template void Class::GetAccessFlagsDCheck<kVerifyWrites>();
 template void Class::GetAccessFlagsDCheck<kVerifyAll>();
 
+void Class::SetAccessFlagsDCheck(uint32_t new_access_flags) {
+  uint32_t old_access_flags = GetField32<kVerifyNone>(AccessFlagsOffset());
+  // kAccVerificationAttempted is retained.
+  CHECK((old_access_flags & kAccVerificationAttempted) == 0 ||
+        (new_access_flags & kAccVerificationAttempted) != 0);
+}
+
 }  // namespace mirror
 }  // namespace art
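
SetAccessFlagsDCheck above enforces a sticky-bit invariant: once kAccVerificationAttempted has been set on a class, no later SetAccessFlags call may drop it. The CHECK encodes the implication "old has flag => new has flag" as (old & F) == 0 || (new & F) != 0. A minimal sketch of the same check, assuming an illustrative flag value (the real constant is defined in ART's modifiers header):

#include <cassert>
#include <cstdint>

// Illustrative value only; the real kAccVerificationAttempted lives in
// ART's modifiers header.
constexpr uint32_t kAccVerificationAttempted = 0x00080000u;

// Sticky-flag check: if the flag was set in old_flags, it must still be set
// in new_flags; (old & F) == 0 || (new & F) != 0 encodes "old => new".
void CheckVerificationAttemptedRetained(uint32_t old_flags, uint32_t new_flags) {
  assert((old_flags & kAccVerificationAttempted) == 0 ||
         (new_flags & kAccVerificationAttempted) != 0);
}
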
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 66b1405..d5aa514 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -1306,6 +1306,8 @@
   template<VerifyObjectFlags kVerifyFlags>
   void GetAccessFlagsDCheck() REQUIRES_SHARED(Locks::mutator_lock_);
 
+  void SetAccessFlagsDCheck(uint32_t new_access_flags) REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Check that the pointer size matches the one in the class linker.
   ALWAYS_INLINE static void CheckPointerSize(PointerSize pointer_size);
 
diff --git a/test/626-const-class-linking/clear_dex_cache_types.cc b/test/626-const-class-linking/clear_dex_cache_types.cc
index 52367c7..82c82c6 100644
--- a/test/626-const-class-linking/clear_dex_cache_types.cc
+++ b/test/626-const-class-linking/clear_dex_cache_types.cc
@@ -41,6 +41,7 @@
   if (status == ClassStatus::kResolved) {
     ObjectLock<mirror::Class> lock(soa.Self(), klass);
     klass->SetStatus(klass, ClassStatus::kVerified, soa.Self());
+    klass->SetVerificationAttempted();
   } else {
     LOG(ERROR) << klass->PrettyClass() << " has unexpected status: " << status;
   }
diff --git a/test/knownfailures.json b/test/knownfailures.json
index a723c3b..5bcd1c0 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -447,19 +447,63 @@
     },
     {
         "tests": [
-            "137-cfi",
-            "595-profile-saving",
-            "900-hello-plugin",
-            "909-attach-agent",
-            "981-dedup-original-dex",
-            "1900-track-alloc"
+            "004-ThreadStress",
+            "130-hprof",
+            "579-inline-infinite",
+            "1946-list-descriptors"
         ],
-        "description": ["Tests that require exact knowledge of the number of plugins and agents."],
+        "description": ["Too slow to finish in the timeout"],
         "variant": "jvmti-stress | redefine-stress | trace-stress | field-stress | step-stress"
     },
     {
         "tests": [
+            "911-get-stack-trace"
+        ],
+        "description": ["Tests that fail when run with step-stress for unknown reasons."],
+        "bug": "b/120995005",
+        "variant": "jvmti-stress | step-stress"
+    },
+    {
+        "tests": [
+            "004-SignalTest",
+            "004-StackWalk",
+            "064-field-access",
+            "083-compiler-regressions",
+            "098-ddmc",
+            "107-int-math2",
+            "129-ThreadGetId",
+            "135-MirandaDispatch",
             "132-daemon-locks-shutdown",
+            "163-app-image-methods",
+            "607-daemon-stress",
+            "674-hiddenapi",
+            "687-deopt",
+            "904-object-allocation"
+        ],
+        "description": ["Tests that sometimes fail when run with jvmti-stress for unknown reasons."],
+        "bug": "b/120995005",
+        "variant": "jvmti-stress | trace-stress | field-stress | step-stress"
+    },
+    {
+        "tests": [
+            "018-stack-overflow",
+            "137-cfi",
+            "595-profile-saving",
+            "597-deopt-busy-loop",
+            "597-deopt-new-string",
+            "660-clinit",
+            "900-hello-plugin",
+            "909-attach-agent",
+            "924-threads",
+            "981-dedup-original-dex",
+            "1900-track-alloc"
+        ],
+        "description": ["Tests that require exact knowledge of the deoptimization state, the ",
+                        "number of plugins and agents, or breaks other openjdkjvmti assumptions."],
+        "variant": "jvmti-stress | redefine-stress | trace-stress | field-stress | step-stress"
+    },
+    {
+        "tests": [
             "607-daemon-stress",
             "602-deoptimizeable",
             "121-simple-suspend-check",
diff --git a/test/ti-stress/stress.cc b/test/ti-stress/stress.cc
index e123e9f..cd7af10 100644
--- a/test/ti-stress/stress.cc
+++ b/test/ti-stress/stress.cc
@@ -157,14 +157,12 @@
       : jvmtienv_(jvmtienv),
         class_(c),
         name_(nullptr),
-        generic_(nullptr),
         file_(nullptr),
         debug_ext_(nullptr) {}
 
   ~ScopedClassInfo() {
     if (class_ != nullptr) {
       jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
-      jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
       jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(file_));
       jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(debug_ext_));
     }
@@ -173,12 +171,11 @@
   bool Init() {
     if (class_ == nullptr) {
       name_ = const_cast<char*>("<NONE>");
-      generic_ = const_cast<char*>("<NONE>");
       return true;
     } else {
       jvmtiError ret1 = jvmtienv_->GetSourceFileName(class_, &file_);
       jvmtiError ret2 = jvmtienv_->GetSourceDebugExtension(class_, &debug_ext_);
-      return jvmtienv_->GetClassSignature(class_, &name_, &generic_) == JVMTI_ERROR_NONE &&
+      return jvmtienv_->GetClassSignature(class_, &name_, nullptr) == JVMTI_ERROR_NONE &&
           ret1 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
           ret1 != JVMTI_ERROR_INVALID_CLASS &&
           ret2 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
@@ -192,9 +189,6 @@
   const char* GetName() const {
     return name_;
   }
-  const char* GetGeneric() const {
-    return generic_;
-  }
   const char* GetSourceDebugExtension() const {
     if (debug_ext_ == nullptr) {
       return "<UNKNOWN_SOURCE_DEBUG_EXTENSION>";
@@ -214,7 +208,6 @@
   jvmtiEnv* jvmtienv_;
   jclass class_;
   char* name_;
-  char* generic_;
   char* file_;
   char* debug_ext_;
 };
@@ -229,14 +222,12 @@
         class_info_(nullptr),
         name_(nullptr),
         signature_(nullptr),
-        generic_(nullptr),
         first_line_(-1) {}
 
   ~ScopedMethodInfo() {
     DeleteLocalRef(env_, declaring_class_);
     jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
     jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(signature_));
-    jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
   }
 
   bool Init() {
@@ -257,7 +248,7 @@
       return false;
     }
     return class_info_->Init() &&
-        (jvmtienv_->GetMethodName(method_, &name_, &signature_, &generic_) == JVMTI_ERROR_NONE);
+        (jvmtienv_->GetMethodName(method_, &name_, &signature_, nullptr) == JVMTI_ERROR_NONE);
   }
 
   const ScopedClassInfo& GetDeclaringClassInfo() const {
@@ -276,10 +267,6 @@
     return signature_;
   }
 
-  const char* GetGeneric() const {
-    return generic_;
-  }
-
   jint GetFirstLine() const {
     return first_line_;
   }
@@ -292,7 +279,6 @@
   std::unique_ptr<ScopedClassInfo> class_info_;
   char* name_;
   char* signature_;
-  char* generic_;
   jint first_line_;
 
   friend std::ostream& operator<<(std::ostream &os, ScopedMethodInfo const& m);
@@ -306,20 +292,18 @@
         field_(field),
         class_info_(nullptr),
         name_(nullptr),
-        type_(nullptr),
-        generic_(nullptr) {}
+        type_(nullptr) {}
 
   ~ScopedFieldInfo() {
     jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
     jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(type_));
-    jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
   }
 
   bool Init() {
     class_info_.reset(new ScopedClassInfo(jvmtienv_, declaring_class_));
     return class_info_->Init() &&
         (jvmtienv_->GetFieldName(
-            declaring_class_, field_, &name_, &type_, &generic_) == JVMTI_ERROR_NONE);
+            declaring_class_, field_, &name_, &type_, nullptr) == JVMTI_ERROR_NONE);
   }
 
   const ScopedClassInfo& GetDeclaringClassInfo() const {
@@ -338,10 +322,6 @@
     return type_;
   }
 
-  const char* GetGeneric() const {
-    return generic_;
-  }
-
  private:
   jvmtiEnv* jvmtienv_;
   jclass declaring_class_;
@@ -349,7 +329,6 @@
   std::unique_ptr<ScopedClassInfo> class_info_;
   char* name_;
   char* type_;
-  char* generic_;
 
   friend std::ostream& operator<<(std::ostream &os, ScopedFieldInfo const& m);
 };
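
The stress.cc changes above drop the generic-signature out-parameters that were fetched and deallocated but never read. JVMTI permits passing NULL for an out parameter the caller does not need (the value is then simply not returned), so GetClassSignature, GetMethodName, and GetFieldName can take nullptr for generic_ptr, and there is nothing extra to Deallocate. A minimal sketch, assuming a hypothetical helper name:

#include <jvmti.h>

// Hypothetical helper: fetch only the class signature, skipping the generic
// signature by passing nullptr, as the stress.cc change does. Per the JVMTI
// spec, a NULL out-parameter is simply not returned, so no Deallocate is
// needed for it.
bool GetSignatureOnly(jvmtiEnv* jvmtienv, jclass klass, char** signature) {
  return jvmtienv->GetClassSignature(klass, signature, /* generic_ptr= */ nullptr) ==
         JVMTI_ERROR_NONE;
}
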