Merge "Revert "Disable adding main and non moving spaces to immune region in GSS"" into lmp-dev
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index fae9271..fe5a2ef 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -176,7 +176,6 @@
 #endif
 
 #define PURE __attribute__ ((__pure__))
-#define WARN_UNUSED __attribute__((warn_unused_result))
 
 template<typename T> void UNUSED(const T&) {}
 
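With the WARN_UNUSED macro removed from macros.h, the hunks below spell out the underlying GCC/Clang attribute at each use site. A minimal standalone sketch of what the attribute does (hypothetical function, not from this patch):

__attribute__((warn_unused_result)) bool TryReserve(int n) { return n > 0; }

int main() {
  TryReserve(1);            // warning: ignoring return value of 'TryReserve'
  bool ok = TryReserve(1);  // fine: the result is consumed
  return ok ? 0 : 1;
}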
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index b70041c..fefb907 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -209,7 +209,7 @@
         // obj will be NULL.  Otherwise, obj should always be non-NULL
         // and valid.
         if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) {
-          Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+          Runtime::Current()->GetHeap()->DumpSpaces();
           JniAbortF(function_name_, "field operation on invalid %s: %p",
                     ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
           return;
@@ -248,7 +248,7 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
     if (o == nullptr || !Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
-      Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+      Runtime::Current()->GetHeap()->DumpSpaces();
       JniAbortF(function_name_, "field operation on invalid %s: %p",
                 ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
       return;
@@ -628,7 +628,7 @@
 
     mirror::Object* obj = soa_.Decode<mirror::Object*>(java_object);
     if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) {
-      Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+      Runtime::Current()->GetHeap()->DumpSpaces();
       JniAbortF(function_name_, "%s is an invalid %s: %p (%p)",
                 what, ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object, obj);
       return false;
@@ -682,7 +682,7 @@
 
     mirror::Array* a = soa_.Decode<mirror::Array*>(java_array);
     if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(a)) {
-      Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+      Runtime::Current()->GetHeap()->DumpSpaces();
       JniAbortF(function_name_, "jarray is an invalid %s: %p (%p)",
                 ToStr<IndirectRefKind>(GetIndirectRefKind(java_array)).c_str(), java_array, a);
     } else if (!a->IsArrayInstance()) {
@@ -703,7 +703,7 @@
     }
     mirror::ArtField* f = soa_.DecodeField(fid);
     if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(f) || !f->IsArtField()) {
-      Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+      Runtime::Current()->GetHeap()->DumpSpaces();
       JniAbortF(function_name_, "invalid jfieldID: %p", fid);
       return nullptr;
     }
@@ -717,7 +717,7 @@
     }
     mirror::ArtMethod* m = soa_.DecodeMethod(mid);
     if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(m) || !m->IsArtMethod()) {
-      Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+      Runtime::Current()->GetHeap()->DumpSpaces();
       JniAbortF(function_name_, "invalid jmethodID: %p", mid);
       return nullptr;
     }
@@ -738,7 +738,7 @@
 
     mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
     if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
-      Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+      Runtime::Current()->GetHeap()->DumpSpaces();
       // TODO: when we remove work_around_app_jni_bugs, this should be impossible.
       JniAbortF(function_name_, "native code passing in reference to invalid %s: %p",
                 ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index c17f88d..64bffc9 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -635,7 +635,7 @@
   // retire a class, the version of the class in the table is returned and this may differ from
   // the class passed in.
   mirror::Class* EnsureResolved(Thread* self, const char* descriptor, mirror::Class* klass)
-      WARN_UNUSED SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      __attribute__((warn_unused_result)) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 2686af0..228d1dc 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -185,7 +185,7 @@
           << from_space->GetGcRetentionPolicy();
       LOG(INFO) << "ToSpace " << to_space->GetName() << " type "
           << to_space->GetGcRetentionPolicy();
-      heap->DumpSpaces(LOG(INFO));
+      heap->DumpSpaces();
       LOG(FATAL) << "FATAL ERROR";
     }
   }
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index 104ed36..974952d 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -32,7 +32,10 @@
 template<typename MarkVisitor, typename ReferenceVisitor>
 inline void MarkSweep::ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor,
                                        const ReferenceVisitor& ref_visitor) {
-  DCHECK(IsMarked(obj)) << "Scanning unmarked object " << obj << "\n" << heap_->DumpSpaces();
+  if (kIsDebugBuild && !IsMarked(obj)) {
+    heap_->DumpSpaces();
+    LOG(FATAL) << "Scanning unmarked object " << obj;
+  }
   obj->VisitReferences<false>(visitor, ref_visitor);
   if (kCountScannedTypes) {
     mirror::Class* klass = obj->GetClass<kVerifyNone>();
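Note the shape of the check above: DumpSpaces() can no longer be streamed into a DCHECK message because it now returns void and logs directly (see the heap.h hunk below), so the code dumps first and then aborts. A standalone sketch of the pattern, with simplified stand-ins for ART's Heap and LOG(FATAL):

#include <cstdlib>
#include <iostream>

struct Heap {
  void DumpSpaces() { std::cerr << "<space/bitmap layout>\n"; }  // logs directly, returns void
};

void AssertMarked(Heap* heap, bool is_marked, const void* obj) {
  if (!is_marked) {
    heap->DumpSpaces();  // emit the diagnostics first...
    std::cerr << "Scanning unmarked object " << obj << "\n";
    std::abort();        // ...then die, as LOG(FATAL) does
  }
}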
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 95530be..7e97b3b 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -313,8 +313,10 @@
       }
     }
   }
-  CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
-      << heap_->DumpSpaces();
+  if (current_space_bitmap_ == nullptr) {
+    heap_->DumpSpaces();
+    LOG(FATAL) << "Could not find a default mark bitmap";
+  }
 }
 
 void MarkSweep::ExpandMarkStack() {
@@ -941,9 +943,12 @@
 
 void MarkSweep::VerifyIsLive(const Object* obj) {
   if (!heap_->GetLiveBitmap()->Test(obj)) {
-    accounting::ObjectStack* allocation_stack = heap_->allocation_stack_.get();
-    CHECK(std::find(allocation_stack->Begin(), allocation_stack->End(), obj) !=
-        allocation_stack->End()) << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
+    if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
+        heap_->allocation_stack_->End()) {
+      // Object not found!
+      heap_->DumpSpaces();
+      LOG(FATAL) << "Found dead object " << obj;
+    }
   }
 }
 
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 922a71c..47682cc 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -64,25 +64,34 @@
     // Verify all the objects have the correct forward pointer installed.
     obj->AssertReadBarrierPointer();
   }
-  if (from_space_->HasAddress(obj)) {
-    mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
-    // If the object has already been moved, return the new forward address.
-    if (UNLIKELY(forward_address == nullptr)) {
-      forward_address = MarkNonForwardedObject(obj);
-      DCHECK(forward_address != nullptr);
-      // Make sure to only update the forwarding address AFTER you copy the object so that the
-      // monitor word doesn't Get stomped over.
-      obj->SetLockWord(
-          LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)), false);
-      // Push the object onto the mark stack for later processing.
-      MarkStackPush(forward_address);
-    }
-    obj_ptr->Assign(forward_address);
-  } else if (!collect_from_space_only_ && !immune_region_.ContainsObject(obj)) {
-    BitmapSetSlowPathVisitor visitor(this);
-    if (!mark_bitmap_->Set(obj, visitor)) {
-      // This object was not previously marked.
-      MarkStackPush(obj);
+  if (!immune_region_.ContainsObject(obj)) {
+    if (from_space_->HasAddress(obj)) {
+      mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
+      // If the object has already been moved, return the new forward address.
+      if (UNLIKELY(forward_address == nullptr)) {
+        forward_address = MarkNonForwardedObject(obj);
+        DCHECK(forward_address != nullptr);
+        // Make sure to only update the forwarding address AFTER you copy the object so that the
+        // monitor word doesn't get stomped over.
+        obj->SetLockWord(
+            LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)), false);
+        // Push the object onto the mark stack for later processing.
+        MarkStackPush(forward_address);
+      }
+      obj_ptr->Assign(forward_address);
+    } else {
+      BitmapSetSlowPathVisitor visitor(this);
+      if (kIsDebugBuild && mark_bitmap_->GetContinuousSpaceBitmap(obj) != nullptr) {
+        // If a bump pointer space only collection, we should not
+        // reach here as we don't/won't mark the objects in the
+        // non-moving space (except for the promoted objects.)  Note
+        // the non-moving space is added to the immune space.
+        DCHECK(!generational_ || whole_heap_collection_);
+      }
+      if (!mark_bitmap_->Set(obj, visitor)) {
+        // This object was not previously marked.
+        MarkStackPush(obj);
+      }
     }
   }
 }
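For reference, the branch order this hunk restores in MarkObject: immune objects are skipped outright, from-space objects get a forwarding address (copying on first visit), and everything else goes through the mark bitmap. A condensed standalone model with stand-in containers (the real code operates on mirror::Object, lock words, and space bitmaps):

#include <unordered_map>
#include <unordered_set>
#include <vector>

struct MarkModel {
  std::unordered_set<const void*> immune_region_;      // objects in immune spaces
  std::unordered_set<const void*> from_space_;         // objects in the from-space
  std::unordered_map<const void*, void*> forwarding_;  // installed forwarding addresses
  std::unordered_set<const void*> mark_bitmap_;
  std::vector<void*> mark_stack_;

  void MarkObject(void** obj_ptr) {
    void* obj = *obj_ptr;
    if (immune_region_.count(obj) != 0) {
      return;  // immune: assumed marked, nothing to do
    }
    if (from_space_.count(obj) != 0) {
      void*& fwd = forwarding_[obj];
      if (fwd == nullptr) {
        fwd = obj;  // stand-in for the copy + SetLockWord(forwarding address)
        mark_stack_.push_back(fwd);
      }
      *obj_ptr = fwd;  // point the reference at the (possibly new) address
    } else if (mark_bitmap_.insert(obj).second) {
      mark_stack_.push_back(obj);  // newly marked: scan its references later
    }
  }
};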
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index c7c567f..cabfe21 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -63,23 +63,23 @@
   WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
   // Mark all of the spaces we never collect as immune.
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
-    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
-        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
-      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
-    } else if (space->GetLiveBitmap() != nullptr) {
-      if (space == to_space_ || collect_from_space_only_) {
-        if (collect_from_space_only_) {
-          // Bind the main free list space and the non-moving space to the immune space if a bump
-          // pointer space only collection.
-          CHECK(space == to_space_ || space == GetHeap()->GetPrimaryFreeListSpace() ||
-                space == GetHeap()->GetNonMovingSpace());
-        }
-        CHECK(space->IsContinuousMemMapAllocSpace());
-        space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
+    if (space->GetLiveBitmap() != nullptr) {
+      if (space == to_space_) {
+        CHECK(to_space_->IsContinuousMemMapAllocSpace());
+        to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
+      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
+                 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
+                 // Add the main free list space and the non-moving
+                 // space to the immune space if a bump pointer space
+                 // only collection.
+                 || (generational_ && !whole_heap_collection_ &&
+                     (space == GetHeap()->GetNonMovingSpace() ||
+                      space == GetHeap()->GetPrimaryFreeListSpace()))) {
+        CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
       }
     }
   }
-  if (collect_from_space_only_) {
+  if (generational_ && !whole_heap_collection_) {
     // We won't collect the large object space if a bump pointer space only collection.
     is_large_object_space_immune_ = true;
   }
@@ -95,7 +95,7 @@
       bytes_promoted_(0),
       bytes_promoted_since_last_whole_heap_collection_(0),
       large_object_bytes_allocated_at_last_whole_heap_collection_(0),
-      collect_from_space_only_(generational),
+      whole_heap_collection_(true),
       collector_name_(name_),
       swap_semi_spaces_(true) {
 }
@@ -147,10 +147,6 @@
     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
     mark_bitmap_ = heap_->GetMarkBitmap();
   }
-  if (generational_) {
-    promo_dest_space_ = GetHeap()->GetPrimaryFreeListSpace();
-  }
-  fallback_space_ = GetHeap()->GetNonMovingSpace();
 }
 
 void SemiSpace::ProcessReferences(Thread* self) {
@@ -184,9 +180,9 @@
         GetCurrentIteration()->GetClearSoftReferences()) {
       // If an explicit, native allocation-triggered, or last attempt
       // collection, collect the whole heap.
-      collect_from_space_only_ = false;
+      whole_heap_collection_ = true;
     }
-    if (!collect_from_space_only_) {
+    if (whole_heap_collection_) {
       VLOG(heap) << "Whole heap collection";
       name_ = collector_name_ + " whole";
     } else {
@@ -195,7 +191,7 @@
     }
   }
 
-  if (!collect_from_space_only_) {
+  if (!generational_ || whole_heap_collection_) {
     // If non-generational, always clear soft references.
     // If generational, clear soft references if a whole heap collection.
     GetCurrentIteration()->SetClearSoftReferences(true);
@@ -231,6 +227,8 @@
   {
     WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
     MarkRoots();
+    // Mark roots of immune spaces.
+    UpdateAndMarkModUnion();
     // Recursively mark remaining objects.
     MarkReachableObjects();
   }
@@ -261,6 +259,46 @@
   }
 }
 
+void SemiSpace::UpdateAndMarkModUnion() {
+  for (auto& space : heap_->GetContinuousSpaces()) {
+    // If the space is immune then we need to mark the references to other spaces.
+    if (immune_region_.ContainsSpace(space)) {
+      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+      if (table != nullptr) {
+        // TODO: Improve naming.
+        TimingLogger::ScopedTiming t(
+            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
+                                     "UpdateAndMarkImageModUnionTable",
+                                     GetTimings());
+        table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
+      } else if (heap_->FindRememberedSetFromSpace(space) != nullptr) {
+        DCHECK(kUseRememberedSet);
+        // If a bump pointer space only collection, the non-moving
+        // space is added to the immune space. The non-moving space
+        // doesn't have a mod union table, but has a remembered
+        // set. Its dirty cards will be scanned later in
+        // MarkReachableObjects().
+        DCHECK(generational_ && !whole_heap_collection_ &&
+               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
+            << "Space " << space->GetName() << " "
+            << "generational_=" << generational_ << " "
+            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
+      } else {
+        DCHECK(!kUseRememberedSet);
+        // If a bump pointer space only collection, the non-moving
+        // space is added to the immune space. But the non-moving
+        // space doesn't have a mod union table. Instead, its live
+        // bitmap will be scanned later in MarkReachableObjects().
+        DCHECK(generational_ && !whole_heap_collection_ &&
+               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
+            << "Space " << space->GetName() << " "
+            << "generational_=" << generational_ << " "
+            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
+      }
+    }
+  }
+}
+
 class SemiSpaceScanObjectVisitor {
  public:
   explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
@@ -317,30 +355,20 @@
     heap_->MarkAllocStackAsLive(live_stack);
     live_stack->Reset();
   }
+  t.NewTiming("UpdateAndMarkRememberedSets");
   for (auto& space : heap_->GetContinuousSpaces()) {
-    // If the space is immune then we need to mark the references to other spaces.
-    accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
-    if (table != nullptr) {
-      // TODO: Improve naming.
-      TimingLogger::ScopedTiming t2(
-          space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
-                                   "UpdateAndMarkImageModUnionTable",
-                                   GetTimings());
-      table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
-      DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
-    } else if (collect_from_space_only_ && space->GetLiveBitmap() != nullptr) {
-      // If the space has no mod union table (the non-moving space and main spaces when the bump
-      // pointer space only collection is enabled,) then we need to scan its live bitmap or dirty
-      // cards as roots (including the objects on the live stack which have just marked in the live
-      // bitmap above in MarkAllocStackAsLive().)
-      DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())
-          << "Space " << space->GetName() << " "
-          << "generational_=" << generational_ << " "
-          << "collect_from_space_only_=" << collect_from_space_only_;
-      accounting::RememberedSet* rem_set = GetHeap()->FindRememberedSetFromSpace(space);
-      CHECK_EQ(rem_set != nullptr, kUseRememberedSet);
-      if (rem_set != nullptr) {
-        TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings());
+    // If the space is immune and has no mod union table (the
+    // non-moving space when the bump pointer space only collection is
+    // enabled,) then we need to scan its live bitmap or dirty cards as roots
+    // (including the objects on the live stack which have just been marked
+    // in the live bitmap above in MarkAllocStackAsLive().)
+    if (immune_region_.ContainsSpace(space) &&
+        heap_->FindModUnionTableFromSpace(space) == nullptr) {
+      DCHECK(generational_ && !whole_heap_collection_ &&
+             (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
+      accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space);
+      if (kUseRememberedSet) {
+        DCHECK(rem_set != nullptr);
         rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, DelayReferenceReferentCallback,
                                          from_space_, this);
         if (kIsDebugBuild) {
@@ -355,7 +383,7 @@
                                         visitor);
         }
       } else {
-        TimingLogger::ScopedTiming t2("VisitLiveBits", GetTimings());
+        DCHECK(rem_set == nullptr);
         accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
         SemiSpaceScanObjectVisitor visitor(this);
         live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
@@ -365,10 +393,9 @@
     }
   }
 
-  CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
   if (is_large_object_space_immune_) {
     TimingLogger::ScopedTiming t("VisitLargeObjects", GetTimings());
-    DCHECK(collect_from_space_only_);
+    DCHECK(generational_ && !whole_heap_collection_);
     // Delay copying the live set to the marked set until here from
     // BindBitmaps() as the large objects on the allocation stack may
     // be newly added to the live set above in MarkAllocStackAsLive().
@@ -479,20 +506,19 @@
 }
 
 mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
-  const size_t object_size = obj->SizeOf();
+  size_t object_size = obj->SizeOf();
   size_t bytes_allocated;
   mirror::Object* forward_address = nullptr;
   if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
     // If it's allocated before the last GC (older), move
     // (pseudo-promote) it to the main free list space (as sort
     // of an old generation.)
-    forward_address = promo_dest_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
-                                                           nullptr);
+    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
+    forward_address = promo_dest_space->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
+                                                          nullptr);
     if (UNLIKELY(forward_address == nullptr)) {
       // If out of space, fall back to the to-space.
       forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
-      // No logic for marking the bitmap, so it must be null.
-      DCHECK(to_space_->GetLiveBitmap() == nullptr);
     } else {
       bytes_promoted_ += bytes_allocated;
       // Dirty the card at the destination as it may contain
@@ -500,12 +526,12 @@
       // space.
       GetHeap()->WriteBarrierEveryFieldOf(forward_address);
       // Handle the bitmaps marking.
-      accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space_->GetLiveBitmap();
+      accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
       DCHECK(live_bitmap != nullptr);
-      accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
+      accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
       DCHECK(mark_bitmap != nullptr);
       DCHECK(!live_bitmap->Test(forward_address));
-      if (collect_from_space_only_) {
+      if (!whole_heap_collection_) {
         // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
         DCHECK_EQ(live_bitmap, mark_bitmap);
 
@@ -533,23 +559,12 @@
         mark_bitmap->Set(forward_address);
       }
     }
+    DCHECK(forward_address != nullptr);
   } else {
     // If it's allocated after the last GC (younger), copy it to the to-space.
     forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
-    if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
-      to_space_live_bitmap_->Set(forward_address);
-    }
   }
-  // If it's still null, attempt to use the fallback space.
-  if (UNLIKELY(forward_address == nullptr)) {
-    forward_address = fallback_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
-                                                         nullptr);
-    CHECK(forward_address != nullptr) << "Out of memory in the to-space and fallback space.";
-    accounting::ContinuousSpaceBitmap* bitmap = fallback_space_->GetLiveBitmap();
-    if (bitmap != nullptr) {
-      bitmap->Set(forward_address);
-    }
-  }
+  CHECK(forward_address != nullptr) << "Out of memory in the to-space.";
   ++objects_moved_;
   bytes_moved_ += bytes_allocated;
   // Copy over the object and add it to the mark stack since we still need to update its
@@ -564,10 +579,11 @@
     }
     forward_address->AssertReadBarrierPointer();
   }
+  if (to_space_live_bitmap_ != nullptr) {
+    to_space_live_bitmap_->Set(forward_address);
+  }
   DCHECK(to_space_->HasAddress(forward_address) ||
-         fallback_space_->HasAddress(forward_address) ||
-         (generational_ && promo_dest_space_->HasAddress(forward_address)))
-      << forward_address << "\n" << GetHeap()->DumpSpaces();
+         (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
   return forward_address;
 }
 
@@ -632,7 +648,7 @@
 }
 
 bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
-  return space != from_space_ && space != to_space_;
+  return space != from_space_ && space != to_space_ && !immune_region_.ContainsSpace(space);
 }
 
 void SemiSpace::Sweep(bool swap_bitmaps) {
@@ -698,20 +714,22 @@
 // Scan anything that's on the mark stack.
 void SemiSpace::ProcessMarkStack() {
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+  space::MallocSpace* promo_dest_space = nullptr;
   accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
-  if (collect_from_space_only_) {
+  if (generational_ && !whole_heap_collection_) {
     // If a bump pointer space only collection (and the promotion is
     // enabled,) we delay the live-bitmap marking of promoted objects
     // from MarkObject() until this function.
-    live_bitmap = promo_dest_space_->GetLiveBitmap();
+    promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
+    live_bitmap = promo_dest_space->GetLiveBitmap();
     DCHECK(live_bitmap != nullptr);
-    accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
+    accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
     DCHECK(mark_bitmap != nullptr);
     DCHECK_EQ(live_bitmap, mark_bitmap);
   }
   while (!mark_stack_->IsEmpty()) {
     Object* obj = mark_stack_->PopBack();
-    if (collect_from_space_only_ && promo_dest_space_->HasAddress(obj)) {
+    if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
       // obj has just been promoted. Mark the live bitmap for it,
       // which is delayed from MarkObject().
       DCHECK(!live_bitmap->Test(obj));
@@ -724,12 +742,16 @@
 inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
     SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
   // All immune objects are assumed marked.
+  if (immune_region_.ContainsObject(obj)) {
+    return obj;
+  }
   if (from_space_->HasAddress(obj)) {
     // Returns either the forwarding address or nullptr.
     return GetForwardingAddressInFromSpace(obj);
-  } else if (collect_from_space_only_ || immune_region_.ContainsObject(obj) ||
-             to_space_->HasAddress(obj)) {
-    return obj;  // Already forwarded, must be marked.
+  } else if (to_space_->HasAddress(obj)) {
+    // Should be unlikely.
+    // Already forwarded, must be marked.
+    return obj;
   }
   return mark_bitmap_->Test(obj) ? obj : nullptr;
 }
@@ -755,9 +777,9 @@
   if (generational_) {
     // Decide whether to do a whole heap collection or a bump pointer
     // space only collection at the next collection by updating
-    // collect_from_space_only_.
-    if (collect_from_space_only_) {
-      // Disable collect_from_space_only_ if the bytes promoted since the
+    // whole_heap_collection_.
+    if (!whole_heap_collection_) {
+      // Enable whole_heap_collection_ if the bytes promoted since the
       // last whole heap collection or the large object bytes
       // allocated exceed a threshold.
       bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
@@ -770,14 +792,14 @@
           current_los_bytes_allocated >=
           last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold;
       if (bytes_promoted_threshold_exceeded || large_object_bytes_threshold_exceeded) {
-        collect_from_space_only_ = false;
+        whole_heap_collection_ = true;
       }
     } else {
       // Reset the counters.
       bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
       large_object_bytes_allocated_at_last_whole_heap_collection_ =
           GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
-      collect_from_space_only_ = true;
+      whole_heap_collection_ = false;
     }
   }
   // Clear all of the spaces' mark bitmaps.
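FinishPhase above flips whole_heap_collection_ based on how much was promoted and how much the large object space grew. A standalone model of that policy (the threshold values here are placeholders, and kBytesPromotedThreshold is an assumed name; only kLargeObjectBytesAllocatedThreshold appears in the code above):

#include <cstdint>

constexpr uint64_t kBytesPromotedThreshold = 4 * 1024 * 1024;               // placeholder value
constexpr uint64_t kLargeObjectBytesAllocatedThreshold = 16 * 1024 * 1024;  // placeholder value

struct GcPolicy {
  bool whole_heap_collection_ = true;
  uint64_t promoted_since_whole_ = 0;
  uint64_t los_bytes_at_last_whole_ = 0;

  void FinishPhase(uint64_t bytes_promoted, uint64_t los_bytes_allocated) {
    if (!whole_heap_collection_) {
      // A sticky (bump pointer space only) GC just ran: escalate to a whole
      // heap collection once enough bytes were promoted or the LOS grew enough.
      promoted_since_whole_ += bytes_promoted;
      if (promoted_since_whole_ >= kBytesPromotedThreshold ||
          los_bytes_allocated >= los_bytes_at_last_whole_ + kLargeObjectBytesAllocatedThreshold) {
        whole_heap_collection_ = true;
      }
    } else {
      // A whole heap collection just ran: reset the counters and drop back
      // to bump pointer space only collections.
      promoted_since_whole_ = bytes_promoted;
      los_bytes_at_last_whole_ = los_bytes_allocated;
      whole_heap_collection_ = false;
    }
  }
};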
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index c5d25f3..bff0847 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -243,14 +243,9 @@
   // large objects were allocated at the last whole heap collection.
   uint64_t large_object_bytes_allocated_at_last_whole_heap_collection_;
 
-  // Used for generational mode. When true, we only collect the from_space_.
-  bool collect_from_space_only_;
-
-  // The space which we are promoting into, only used for GSS.
-  space::ContinuousMemMapAllocSpace* promo_dest_space_;
-
-  // The space which we copy to if the to_space_ is full.
-  space::ContinuousMemMapAllocSpace* fallback_space_;
+  // Used for the generational mode. When true, collect the whole
+  // heap. When false, collect only the bump pointer spaces.
+  bool whole_heap_collection_;
 
   // How many objects and bytes we moved, used so that we don't need to get the size of the
   // to_space_ when calculating how many objects and bytes we freed.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 696df32..4ec9bc2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1126,13 +1126,7 @@
   return false;
 }
 
-std::string Heap::DumpSpaces() const {
-  std::ostringstream oss;
-  DumpSpaces(oss);
-  return oss.str();
-}
-
-void Heap::DumpSpaces(std::ostream& stream) const {
+void Heap::DumpSpaces(std::ostream& stream) {
   for (const auto& space : continuous_spaces_) {
     accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
     accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
@@ -1165,7 +1159,10 @@
 
   if (verify_object_mode_ > kVerifyObjectModeFast) {
     // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
-    CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
+    if (!IsLiveObjectLocked(obj)) {
+      DumpSpaces();
+      LOG(FATAL) << "Object is dead: " << obj;
+    }
   }
 }
 
@@ -2357,7 +2354,7 @@
       accounting::RememberedSet* remembered_set = table_pair.second;
       remembered_set->Dump(LOG(ERROR) << remembered_set->GetName() << ": ");
     }
-    DumpSpaces(LOG(ERROR));
+    DumpSpaces();
   }
   return visitor.GetFailureCount();
 }
@@ -2474,7 +2471,12 @@
       visitor(*it);
     }
   }
-  return !visitor.Failed();
+
+  if (visitor.Failed()) {
+    DumpSpaces();
+    return false;
+  }
+  return true;
 }
 
 void Heap::SwapStacks(Thread* self) {
@@ -2572,8 +2574,9 @@
     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
     SwapStacks(self);
     // Sort the live stack so that we can quickly binary search it later.
-    CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
-                                    << " missing card mark verification failed\n" << DumpSpaces();
+    if (!VerifyMissingCardMarks()) {
+      LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
+    }
     SwapStacks(self);
   }
   if (verify_mod_union_table_) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 0da113f..b207953 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -539,8 +539,7 @@
     }
   }
 
-  std::string DumpSpaces() const WARN_UNUSED;
-  void DumpSpaces(std::ostream& stream) const;
+  void DumpSpaces(std::ostream& stream = LOG(INFO));
 
   // Dump object should only be used by the signal handler.
   void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
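The two DumpSpaces overloads collapse back into one signature with a default stream argument, which is why the call sites above shrink to a bare DumpSpaces(). The default expression is re-evaluated at each call. A simplified standalone sketch (DefaultLog() is a stand-in for ART's LOG(INFO)):

#include <iostream>

std::ostream& DefaultLog() { return std::clog; }  // stand-in for LOG(INFO)

void DumpSpaces(std::ostream& stream = DefaultLog()) {
  stream << "space: main | live/mark bitmaps ...\n";
}

int main() {
  DumpSpaces();           // goes to the default (INFO-level) stream
  DumpSpaces(std::cerr);  // explicit stream, as in DumpSpaces(LOG(ERROR))
  return 0;
}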
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 71c8eb5..fff4df1 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -407,11 +407,11 @@
   // Clear the space back to an empty space.
   virtual void Clear() = 0;
 
-  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const {
     return live_bitmap_.get();
   }
 
-  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const {
     return mark_bitmap_.get();
   }
 
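The dropped OVERRIDE annotations are load-bearing: ART's OVERRIDE macro (base/macros.h) expands to the C++11 override specifier, which compiles only when the method really overrides a base-class virtual, so removing it here suggests the base class no longer declares these accessors virtual in the reverted state. A minimal sketch with hypothetical types:

struct Base {
  virtual ~Base() {}
  virtual int Get() const { return 0; }
};

struct Derived : Base {
  int Get() const override { return 1; }  // OK: overrides Base::Get
  // int Size() const override;           // error: no virtual Base::Size to override
};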
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index 592deed..0e6f4d8 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -24,8 +24,6 @@
 // For size_t.
 #include <stdlib.h>
 
-#include "base/macros.h"
-
 namespace art {
 namespace mirror {
   class Class;
@@ -59,7 +57,8 @@
 // A callback for visiting an object in the heap.
 typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
 // A callback used for marking an object, returns the new address of the object if the object moved.
-typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg) WARN_UNUSED;
+typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg)
+    __attribute__((warn_unused_result));
 // A callback for verifying roots.
 typedef void (VerifyRootCallback)(const mirror::Object* root, void* arg, size_t vreg,
     const StackVisitor* visitor, RootType root_type);
@@ -69,12 +68,13 @@
 
 // A callback for testing if an object is marked, returns nullptr if not marked, otherwise the new
 // address of the object (if the object didn't move, returns the object input parameter).
-typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg) WARN_UNUSED;
+typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg)
+    __attribute__((warn_unused_result));
 
 // Returns true if the object in the heap reference is marked; if it is marked and has moved, the
 // callback updates the heap reference to contain the new value.
 typedef bool (IsHeapReferenceMarkedCallback)(mirror::HeapReference<mirror::Object>* object,
-    void* arg) WARN_UNUSED;
+    void* arg) __attribute__((warn_unused_result));
 typedef void (ProcessMarkStackCallback)(void* arg);
 
 }  // namespace art
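A sketch of declaring a function against these typedefs (hypothetical callback, not from the patch). The warn_unused_result on the typedef is meant to flag call sites that drop the returned, possibly relocated, object pointer:

struct Object {};  // stand-in for mirror::Object

typedef Object* (MarkObjectCallback)(Object* obj, void* arg)
    __attribute__((warn_unused_result));

Object* IdentityMark(Object* obj, void* /*arg*/) {
  return obj;  // a moving collector would return the post-copy address
}

int main() {
  Object o;
  MarkObjectCallback* cb = IdentityMark;
  Object* result = cb(&o, nullptr);  // the result must be consumed
  return result == &o ? 0 : 1;
}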
diff --git a/runtime/utils.h b/runtime/utils.h
index 2ea4953..b47de81 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -167,7 +167,8 @@
 
 // For rounding integers.
 template<typename T>
-static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n) WARN_UNUSED;
+static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n)
+    __attribute__((warn_unused_result));
 
 template<typename T>
 static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n) {
@@ -177,7 +178,8 @@
 }
 
 template<typename T>
-static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n) WARN_UNUSED;
+static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n)
+    __attribute__((warn_unused_result));
 
 template<typename T>
 static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n) {
@@ -186,7 +188,7 @@
 
 // For aligning pointers.
 template<typename T>
-static inline T* AlignDown(T* x, uintptr_t n) WARN_UNUSED;
+static inline T* AlignDown(T* x, uintptr_t n) __attribute__((warn_unused_result));
 
 template<typename T>
 static inline T* AlignDown(T* x, uintptr_t n) {
@@ -194,7 +196,7 @@
 }
 
 template<typename T>
-static inline T* AlignUp(T* x, uintptr_t n) WARN_UNUSED;
+static inline T* AlignUp(T* x, uintptr_t n) __attribute__((warn_unused_result));
 
 template<typename T>
 static inline T* AlignUp(T* x, uintptr_t n) {
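A usage sketch for these helpers, assuming runtime/utils.h is included and n is a power of two (RoundDown masks off the low bits, RoundUp rounds to the next multiple of n, and AlignUp does the same for pointers):

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  assert(art::RoundDown<size_t>(13, 8) == 8);
  assert(art::RoundUp<size_t>(13, 8) == 16);
  alignas(16) char buf[64];
  char* p = art::AlignUp(buf + 1, 16);  // next 16-byte-aligned address
  assert(reinterpret_cast<uintptr_t>(p) % 16 == 0);
  // art::RoundUp<size_t>(13, 8);       // would now trigger -Wunused-result
  return 0;
}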