Merge "Replace ObjectSet with LargeObjectBitmap."
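
Replace the std::set based ObjectSet, previously used to track live and
marked large objects, with a SpaceBitmap<kLargeObjectAlignment>
(LargeObjectBitmap). Large objects can now be marked with an atomic
test-and-set instead of a mutex protected set insert, which removes
MarkSweep's large_object_lock_, and large object sweeping now reuses the
existing SweepWalk machinery instead of iterating a sorted set.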
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 915c415..c191226 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -864,7 +864,7 @@
         }
       }
       // Dump the large objects separately.
-      heap->GetLargeObjectsSpace()->GetLiveObjects()->Walk(ImageDumper::Callback, this);
+      heap->GetLargeObjectsSpace()->GetLiveBitmap()->Walk(ImageDumper::Callback, this);
       indent_os << "\n";
       os_ = saved_os;
     }
diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h
index ed7b427..c67542f 100644
--- a/runtime/gc/accounting/heap_bitmap-inl.h
+++ b/runtime/gc/accounting/heap_bitmap-inl.h
@@ -30,9 +30,8 @@
   for (const auto& bitmap : continuous_space_bitmaps_) {
     bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor);
   }
-  DCHECK(!discontinuous_space_sets_.empty());
-  for (const auto& space_set : discontinuous_space_sets_) {
-    space_set->Visit(visitor);
+  for (const auto& bitmap : large_object_bitmaps_) {
+    bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor);
   }
 }
 
@@ -40,31 +39,61 @@
   ContinuousSpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
   if (LIKELY(bitmap != nullptr)) {
     return bitmap->Test(obj);
-  } else {
-    return GetDiscontinuousSpaceObjectSet(obj) != nullptr;
   }
+  for (const auto& bitmap : large_object_bitmaps_) {
+    if (LIKELY(bitmap->HasAddress(obj))) {
+      return bitmap->Test(obj);
+    }
+  }
+  LOG(FATAL) << "Invalid object " << obj;
+  return false;
 }
 
 inline void HeapBitmap::Clear(const mirror::Object* obj) {
   ContinuousSpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
   if (LIKELY(bitmap != nullptr)) {
     bitmap->Clear(obj);
-  } else {
-    ObjectSet* set = GetDiscontinuousSpaceObjectSet(obj);
-    DCHECK(set != NULL);
-    set->Clear(obj);
+    return;
   }
+  for (const auto& bitmap : large_object_bitmaps_) {
+    if (LIKELY(bitmap->HasAddress(obj))) {
+      bitmap->Clear(obj);
+      return;
+    }
+  }
+  LOG(FATAL) << "Invalid object " << obj;
 }
 
-inline void HeapBitmap::Set(const mirror::Object* obj) {
+template<typename LargeObjectSetVisitor>
+inline bool HeapBitmap::Set(const mirror::Object* obj, const LargeObjectSetVisitor& visitor) {
   ContinuousSpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
-  if (LIKELY(bitmap != NULL)) {
-    bitmap->Set(obj);
-  } else {
-    ObjectSet* set = GetDiscontinuousSpaceObjectSet(obj);
-    DCHECK(set != NULL);
-    set->Set(obj);
+  if (LIKELY(bitmap != nullptr)) {
+    return bitmap->Set(obj);
   }
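+  // The object is outside every continuous space bitmap; let the slow-path visitor inspect it
+  // before marking it in the large object bitmaps.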
+  visitor(obj);
+  for (const auto& bitmap : large_object_bitmaps_) {
+    if (LIKELY(bitmap->HasAddress(obj))) {
+      return bitmap->Set(obj);
+    }
+  }
+  LOG(FATAL) << "Invalid object " << obj;
+  return false;
+}
+
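+// Variant of Set used for parallel marking: bits are set with an atomic test-and-set so that
+// multiple GC threads may mark concurrently.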
+template<typename LargeObjectSetVisitor>
+inline bool HeapBitmap::AtomicTestAndSet(const mirror::Object* obj,
+                                         const LargeObjectSetVisitor& visitor) {
+  ContinuousSpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
+  if (LIKELY(bitmap != nullptr)) {
+    return bitmap->AtomicTestAndSet(obj);
+  }
+  visitor(obj);
+  for (const auto& bitmap : large_object_bitmaps_) {
+    if (LIKELY(bitmap->HasAddress(obj))) {
+      return bitmap->AtomicTestAndSet(obj);
+    }
+  }
+  LOG(FATAL) << "Invalid object " << obj;
+  return false;
 }
 
 inline ContinuousSpaceBitmap* HeapBitmap::GetContinuousSpaceBitmap(const mirror::Object* obj) const {
@@ -76,15 +105,6 @@
   return nullptr;
 }
 
-inline ObjectSet* HeapBitmap::GetDiscontinuousSpaceObjectSet(const mirror::Object* obj) const {
-  for (const auto& space_set : discontinuous_space_sets_) {
-    if (space_set->Test(obj)) {
-      return space_set;
-    }
-  }
-  return nullptr;
-}
-
 }  // namespace accounting
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/accounting/heap_bitmap.cc b/runtime/gc/accounting/heap_bitmap.cc
index 1db886c..a5d59bf 100644
--- a/runtime/gc/accounting/heap_bitmap.cc
+++ b/runtime/gc/accounting/heap_bitmap.cc
@@ -25,61 +25,58 @@
 
 void HeapBitmap::ReplaceBitmap(ContinuousSpaceBitmap* old_bitmap,
                                ContinuousSpaceBitmap* new_bitmap) {
-  for (auto& bitmap : continuous_space_bitmaps_) {
-    if (bitmap == old_bitmap) {
-      bitmap = new_bitmap;
-      return;
-    }
-  }
-  LOG(FATAL) << "bitmap " << static_cast<const void*>(old_bitmap) << " not found";
+  auto it = std::find(continuous_space_bitmaps_.begin(), continuous_space_bitmaps_.end(),
+                      old_bitmap);
+  CHECK(it != continuous_space_bitmaps_.end()) << " continuous space bitmap " << old_bitmap
+      << " not found";
+  *it = new_bitmap;
 }
 
-void HeapBitmap::ReplaceObjectSet(ObjectSet* old_set, ObjectSet* new_set) {
-  for (auto& space_set : discontinuous_space_sets_) {
-    if (space_set == old_set) {
-      space_set = new_set;
-      return;
-    }
-  }
-  LOG(FATAL) << "object set " << static_cast<const void*>(old_set) << " not found";
+void HeapBitmap::ReplaceLargeObjectBitmap(LargeObjectBitmap* old_bitmap,
+                                          LargeObjectBitmap* new_bitmap) {
+  auto it = std::find(large_object_bitmaps_.begin(), large_object_bitmaps_.end(), old_bitmap);
+  CHECK(it != large_object_bitmaps_.end()) << " large object bitmap " << old_bitmap
+      << " not found";
+  *it = new_bitmap;
 }
 
 void HeapBitmap::AddContinuousSpaceBitmap(accounting::ContinuousSpaceBitmap* bitmap) {
-  DCHECK(bitmap != NULL);
-
-  // Check for interval overlap.
+  DCHECK(bitmap != nullptr);
+  // Check that there is no bitmap overlap.
   for (const auto& cur_bitmap : continuous_space_bitmaps_) {
-    CHECK(!(
-        bitmap->HeapBegin() < cur_bitmap->HeapLimit() &&
-        bitmap->HeapLimit() > cur_bitmap->HeapBegin()))
-        << "Bitmap " << bitmap->Dump() << " overlaps with existing bitmap " << cur_bitmap->Dump();
+    CHECK(bitmap->HeapBegin() >= cur_bitmap->HeapLimit() ||
+          bitmap->HeapLimit() <= cur_bitmap->HeapBegin())
+              << "Bitmap " << bitmap->Dump() << " overlaps with existing bitmap "
+              << cur_bitmap->Dump();
   }
   continuous_space_bitmaps_.push_back(bitmap);
 }
 
 void HeapBitmap::RemoveContinuousSpaceBitmap(accounting::ContinuousSpaceBitmap* bitmap) {
+  DCHECK(bitmap != nullptr);
   auto it = std::find(continuous_space_bitmaps_.begin(), continuous_space_bitmaps_.end(), bitmap);
   DCHECK(it != continuous_space_bitmaps_.end());
   continuous_space_bitmaps_.erase(it);
 }
 
-void HeapBitmap::AddDiscontinuousObjectSet(ObjectSet* set) {
-  DCHECK(set != nullptr);
-  discontinuous_space_sets_.push_back(set);
+void HeapBitmap::AddLargeObjectBitmap(LargeObjectBitmap* bitmap) {
+  DCHECK(bitmap != nullptr);
+  large_object_bitmaps_.push_back(bitmap);
 }
 
-void HeapBitmap::RemoveDiscontinuousObjectSet(ObjectSet* set) {
-  auto it = std::find(discontinuous_space_sets_.begin(), discontinuous_space_sets_.end(), set);
-  DCHECK(it != discontinuous_space_sets_.end());
-  discontinuous_space_sets_.erase(it);
+void HeapBitmap::RemoveLargeObjectBitmap(LargeObjectBitmap* bitmap) {
+  DCHECK(bitmap != nullptr);
+  auto it = std::find(large_object_bitmaps_.begin(), large_object_bitmaps_.end(), bitmap);
+  DCHECK(it != large_object_bitmaps_.end());
+  large_object_bitmaps_.erase(it);
 }
 
 void HeapBitmap::Walk(ObjectCallback* callback, void* arg) {
   for (const auto& bitmap : continuous_space_bitmaps_) {
     bitmap->Walk(callback, arg);
   }
-  for (const auto& space_set : discontinuous_space_sets_) {
-    space_set->Walk(callback, arg);
+  for (const auto& bitmap : large_object_bitmaps_) {
+    bitmap->Walk(callback, arg);
   }
 }
 
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index 61a2429..814dc06 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -33,9 +33,13 @@
  public:
   bool Test(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
   void Clear(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-  void Set(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
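+  // Returns true if the object was already marked. The visitor is invoked only for objects
+  // that fall outside every continuous space bitmap, i.e. presumed large objects.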
+  template<typename LargeObjectSetVisitor>
+  bool Set(const mirror::Object* obj, const LargeObjectSetVisitor& visitor)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
+  template<typename LargeObjectSetVisitor>
+  bool AtomicTestAndSet(const mirror::Object* obj, const LargeObjectSetVisitor& visitor)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
   ContinuousSpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) const;
-  ObjectSet* GetDiscontinuousSpaceObjectSet(const mirror::Object* obj) const;
 
   void Walk(ObjectCallback* callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -50,7 +54,7 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  // Find and replace a object set pointer, this is used by for the bitmap swapping in the GC.
+  // Find and replace a large object bitmap pointer; used for bitmap swapping in the GC.
-  void ReplaceObjectSet(ObjectSet* old_set, ObjectSet* new_set)
+  void ReplaceLargeObjectBitmap(LargeObjectBitmap* old_bitmap, LargeObjectBitmap* new_bitmap)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   explicit HeapBitmap(Heap* heap) : heap_(heap) {}
@@ -60,15 +64,15 @@
 
   void AddContinuousSpaceBitmap(ContinuousSpaceBitmap* bitmap);
   void RemoveContinuousSpaceBitmap(ContinuousSpaceBitmap* bitmap);
-  void AddDiscontinuousObjectSet(ObjectSet* set);
-  void RemoveDiscontinuousObjectSet(ObjectSet* set);
+  void AddLargeObjectBitmap(LargeObjectBitmap* bitmap);
+  void RemoveLargeObjectBitmap(LargeObjectBitmap* bitmap);
 
   // Bitmaps covering continuous spaces.
   std::vector<ContinuousSpaceBitmap*, GcAllocator<ContinuousSpaceBitmap*>>
       continuous_space_bitmaps_;
 
-  // Sets covering discontinuous spaces.
+  // Large object bitmaps covering discontinuous spaces.
-  std::vector<ObjectSet*, GcAllocator<ObjectSet*>> discontinuous_space_sets_;
+  std::vector<LargeObjectBitmap*, GcAllocator<LargeObjectBitmap*>> large_object_bitmaps_;
 
   friend class art::gc::Heap;
 };
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 7eed05a..31a1537 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -25,22 +25,35 @@
     const std::string& name, MemMap* mem_map, byte* heap_begin, size_t heap_capacity) {
   CHECK(mem_map != nullptr);
   uword* bitmap_begin = reinterpret_cast<uword*>(mem_map->Begin());
-  size_t bitmap_size = OffsetToIndex(RoundUp(heap_capacity, kAlignment * kBitsPerWord)) * kWordSize;
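+  // Each word of bitmap storage covers kAlignment * kBitsPerWord bytes of heap, so round the
+  // capacity up to a whole number of such intervals before sizing the bitmap.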
+  const uint64_t kBytesCoveredPerWord = kAlignment * kBitsPerWord;
+  size_t bitmap_size = (RoundUp(static_cast<uint64_t>(heap_capacity), kBytesCoveredPerWord) /
+      kBytesCoveredPerWord) * kWordSize;
   return new SpaceBitmap(name, mem_map, bitmap_begin, bitmap_size, heap_begin);
 }
 
 template<size_t kAlignment>
+SpaceBitmap<kAlignment>::SpaceBitmap(const std::string& name, MemMap* mem_map, uword* bitmap_begin,
+                                     size_t bitmap_size, const void* heap_begin)
+    : mem_map_(mem_map), bitmap_begin_(bitmap_begin), bitmap_size_(bitmap_size),
+      heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
+      name_(name) {
+  CHECK(bitmap_begin_ != nullptr);
+  CHECK_NE(bitmap_size, 0U);
+}
+
+template<size_t kAlignment>
 SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create(
     const std::string& name, byte* heap_begin, size_t heap_capacity) {
-  CHECK(heap_begin != NULL);
   // Round up since heap_capacity is not necessarily a multiple of kAlignment * kBitsPerWord.
-  size_t bitmap_size = OffsetToIndex(RoundUp(heap_capacity, kAlignment * kBitsPerWord)) * kWordSize;
+  const uint64_t kBytesCoveredPerWord = kAlignment * kBitsPerWord;
+  size_t bitmap_size = (RoundUp(static_cast<uint64_t>(heap_capacity), kBytesCoveredPerWord) /
+      kBytesCoveredPerWord) * kWordSize;
   std::string error_msg;
-  UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), NULL, bitmap_size,
+  UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
                                                  PROT_READ | PROT_WRITE, false, &error_msg));
   if (UNLIKELY(mem_map.get() == nullptr)) {
     LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
-    return NULL;
+    return nullptr;
   }
   return CreateFromMemMap(name, mem_map.release(), heap_begin, heap_capacity);
 }
@@ -68,13 +81,13 @@
 }
 
 template<size_t kAlignment>
-inline void SpaceBitmap<kAlignment>::CopyFrom(SpaceBitmap* source_bitmap) {
+void SpaceBitmap<kAlignment>::CopyFrom(SpaceBitmap* source_bitmap) {
   DCHECK_EQ(Size(), source_bitmap->Size());
   std::copy(source_bitmap->Begin(), source_bitmap->Begin() + source_bitmap->Size() / kWordSize, Begin());
 }
 
 template<size_t kAlignment>
-inline void SpaceBitmap<kAlignment>::Walk(ObjectCallback* callback, void* arg) {
+void SpaceBitmap<kAlignment>::Walk(ObjectCallback* callback, void* arg) {
   CHECK(bitmap_begin_ != NULL);
   CHECK(callback != NULL);
 
@@ -96,11 +109,11 @@
 
 template<size_t kAlignment>
 void SpaceBitmap<kAlignment>::SweepWalk(const SpaceBitmap<kAlignment>& live_bitmap,
-                                               const SpaceBitmap<kAlignment>& mark_bitmap,
-                                               uintptr_t sweep_begin, uintptr_t sweep_end,
-                                               SpaceBitmap::SweepCallback* callback, void* arg) {
-  CHECK(live_bitmap.bitmap_begin_ != NULL);
-  CHECK(mark_bitmap.bitmap_begin_ != NULL);
+                                        const SpaceBitmap<kAlignment>& mark_bitmap,
+                                        uintptr_t sweep_begin, uintptr_t sweep_end,
+                                        SpaceBitmap::SweepCallback* callback, void* arg) {
+  CHECK(live_bitmap.bitmap_begin_ != nullptr);
+  CHECK(mark_bitmap.bitmap_begin_ != nullptr);
   CHECK_EQ(live_bitmap.heap_begin_, mark_bitmap.heap_begin_);
   CHECK_EQ(live_bitmap.bitmap_size_, mark_bitmap.bitmap_size_);
   CHECK(callback != NULL);
@@ -170,8 +183,8 @@
 
 template<size_t kAlignment>
 void SpaceBitmap<kAlignment>::WalkFieldsInOrder(SpaceBitmap<kAlignment>* visited,
-                                                       ObjectCallback* callback,
-                                                       mirror::Object* obj, void* arg) {
+                                                ObjectCallback* callback, mirror::Object* obj,
+                                                void* arg) {
   if (visited->Test(obj)) {
     return;
   }
@@ -232,12 +245,6 @@
   }
 }
 
-void ObjectSet::Walk(ObjectCallback* callback, void* arg) {
-  for (const mirror::Object* obj : contained_) {
-    callback(const_cast<mirror::Object*>(obj), arg);
-  }
-}
-
 template class SpaceBitmap<kObjectAlignment>;
 template class SpaceBitmap<kPageSize>;
 
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index b90a799..df3fd37 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -198,10 +198,7 @@
   // TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
   // however, we document that this is expected on heap_end_
   SpaceBitmap(const std::string& name, MemMap* mem_map, uword* bitmap_begin, size_t bitmap_size,
-              const void* heap_begin)
-      : mem_map_(mem_map), bitmap_begin_(bitmap_begin), bitmap_size_(bitmap_size),
-        heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
-        name_(name) {}
+              const void* heap_begin);
 
   template<bool kSetBit>
   bool Modify(const mirror::Object* obj);
@@ -232,71 +229,7 @@
   std::string name_;
 };
 
-// Like a bitmap except it keeps track of objects using sets.
-class ObjectSet {
- public:
-  typedef std::set<
-      const mirror::Object*, std::less<const mirror::Object*>,
-      GcAllocator<const mirror::Object*> > Objects;
-
-  bool IsEmpty() const {
-    return contained_.empty();
-  }
-
-  inline void Set(const mirror::Object* obj) {
-    contained_.insert(obj);
-  }
-
-  inline void Clear(const mirror::Object* obj) {
-    Objects::iterator found = contained_.find(obj);
-    if (found != contained_.end()) {
-      contained_.erase(found);
-    }
-  }
-
-  void Clear() {
-    contained_.clear();
-  }
-
-  inline bool Test(const mirror::Object* obj) const {
-    return contained_.find(obj) != contained_.end();
-  }
-
-  const std::string& GetName() const {
-    return name_;
-  }
-
-  void SetName(const std::string& name) {
-    name_ = name;
-  }
-
-  void CopyFrom(const ObjectSet& space_set) {
-    contained_ = space_set.contained_;
-  }
-
-  void Walk(ObjectCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
-  template <typename Visitor>
-  void Visit(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS {
-    for (const mirror::Object* obj : contained_) {
-      visitor(const_cast<mirror::Object*>(obj));
-    }
-  }
-
-  explicit ObjectSet(const std::string& name) : name_(name) {}
-  ~ObjectSet() {}
-
-  Objects& GetObjects() {
-    return contained_;
-  }
-
- private:
-  std::string name_;
-  Objects contained_;
-};
-
 typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
-// TODO: Replace usage of ObjectSet with LargeObjectBitmap.
 typedef SpaceBitmap<kLargeObjectAlignment> LargeObjectBitmap;
 
 template<size_t kAlignment>
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 7c18052..972f94d 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -110,7 +110,8 @@
   uint32_t val_;
 };
 
-void compat_test() NO_THREAD_SAFETY_ANALYSIS {
+template <size_t kAlignment>
+void RunTest() NO_THREAD_SAFETY_ANALYSIS {
   byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
   size_t heap_capacity = 16 * MB;
 
@@ -123,7 +124,7 @@
         ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
 
     for (int j = 0; j < 10000; ++j) {
-      size_t offset = (r.next() % heap_capacity) & ~(0x7);
+      size_t offset = RoundDown(r.next() % heap_capacity, kAlignment);
       bool set = r.next() % 2 == 1;
 
       if (set) {
@@ -137,15 +138,15 @@
       size_t count = 0;
       SimpleCounter c(&count);
 
-      size_t offset = (r.next() % heap_capacity) & ~(0x7);
+      size_t offset = RoundDown(r.next() % heap_capacity, kAlignment);
       size_t remain = heap_capacity - offset;
-      size_t end = offset + ((r.next() % (remain + 1)) & ~(0x7));
+      size_t end = offset + RoundDown(r.next() % (remain + 1), kAlignment);
 
       space_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(heap_begin) + offset,
                                      reinterpret_cast<uintptr_t>(heap_begin) + end, c);
 
       size_t manual = 0;
-      for (uintptr_t k = offset; k < end; k += kObjectAlignment) {
+      for (uintptr_t k = offset; k < end; k += kAlignment) {
         if (space_bitmap->Test(reinterpret_cast<mirror::Object*>(heap_begin + k))) {
           manual++;
         }
@@ -156,8 +157,12 @@
   }
 }
 
-TEST_F(SpaceBitmapTest, Visitor) {
-  compat_test();
+TEST_F(SpaceBitmapTest, VisitorObjectAlignment) {
+  RunTest<kObjectAlignment>();
+}
+
+TEST_F(SpaceBitmapTest, VisitorPageAlignment) {
+  RunTest<kPageSize>();
 }
 
 }  // namespace accounting
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index d99136a..6380cba 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -185,12 +185,12 @@
     }
   }
   for (const auto& disc_space : GetHeap()->GetDiscontinuousSpaces()) {
-    space::LargeObjectSpace* space = down_cast<space::LargeObjectSpace*>(disc_space);
-    accounting::ObjectSet* live_set = space->GetLiveObjects();
-    accounting::ObjectSet* mark_set = space->GetMarkObjects();
-    heap_->GetLiveBitmap()->ReplaceObjectSet(live_set, mark_set);
-    heap_->GetMarkBitmap()->ReplaceObjectSet(mark_set, live_set);
-    down_cast<space::LargeObjectSpace*>(space)->SwapBitmaps();
+    space::LargeObjectSpace* space = disc_space->AsLargeObjectSpace();
+    accounting::LargeObjectBitmap* live_set = space->GetLiveBitmap();
+    accounting::LargeObjectBitmap* mark_set = space->GetMarkBitmap();
+    heap_->GetLiveBitmap()->ReplaceLargeObjectBitmap(live_set, mark_set);
+    heap_->GetMarkBitmap()->ReplaceLargeObjectBitmap(mark_set, live_set);
+    space->SwapBitmaps();
   }
 }
 
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index f07e6f1..8af4fd8 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -99,7 +99,6 @@
                        name_prefix +
                        (is_concurrent ? "concurrent mark sweep": "mark sweep")),
       gc_barrier_(new Barrier(0)),
-      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
       mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
       is_concurrent_(is_concurrent) {
 }
@@ -293,14 +292,20 @@
   TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
     accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
+    // Prefer the main space bitmap to the non moving space bitmap when possible.
     if (bitmap != nullptr &&
         space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
       current_space_bitmap_ = bitmap;
-      return;
+      // If this is not the non moving space, exit the loop early since its bitmap is good enough.
+      if (space != heap_->GetNonMovingSpace()) {
+        break;
+      }
     }
   }
-  GetHeap()->DumpSpaces();
-  LOG(FATAL) << "Could not find a default mark bitmap";
+  if (current_space_bitmap_ == nullptr) {
+    heap_->DumpSpaces();
+    LOG(FATAL) << "Could not find a default mark bitmap";
+  }
 }
 
 void MarkSweep::ExpandMarkStack() {
@@ -322,7 +327,7 @@
 }
 
 inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
-  DCHECK(obj != NULL);
+  DCHECK(obj != nullptr);
   if (MarkObjectParallel(obj)) {
     MutexLock mu(Thread::Current(), mark_stack_lock_);
     if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
@@ -343,6 +348,31 @@
   reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
 }
 
+class MarkSweepMarkObjectSlowPath {
+ public:
+  explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
+  }
+
+  void operator()(const Object* obj) const ALWAYS_INLINE {
+    if (kProfileLargeObjects) {
+      // TODO: Differentiate between marking and testing somehow.
+      ++mark_sweep_->large_object_test_;
+      ++mark_sweep_->large_object_mark_;
+    }
+    space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
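+    // Page alignment is a cheap proxy for large object space membership; the precise
+    // Contains() check is only performed in debug builds.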
+    if (UNLIKELY(!IsAligned<kPageSize>(obj) ||
+                 (kIsDebugBuild && !large_object_space->Contains(obj)))) {
+      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
+      LOG(ERROR) << "Attempting to see if it's a bad root";
+      mark_sweep_->VerifyRoots();
+      LOG(FATAL) << "Can't mark invalid object";
+    }
+  }
+
+ private:
+  MarkSweep* const mark_sweep_;
+};
+
 inline void MarkSweep::MarkObjectNonNull(Object* obj) {
   DCHECK(obj != nullptr);
   if (kUseBakerOrBrooksReadBarrier) {
@@ -353,27 +383,24 @@
     if (kCountMarkedObjects) {
       ++mark_immune_count_;
     }
-    DCHECK(IsMarked(obj));
-    return;
-  }
-  // Try to take advantage of locality of references within a space, failing this find the space
-  // the hard way.
-  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
-  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
-    object_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
+    DCHECK(mark_bitmap_->Test(obj));
+  } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
+    if (kCountMarkedObjects) {
+      ++mark_fastpath_count_;
+    }
+    if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
+      PushOnMarkStack(obj);  // This object was not previously marked.
+    }
+  } else {
     if (kCountMarkedObjects) {
       ++mark_slowpath_count_;
     }
-    if (UNLIKELY(object_bitmap == nullptr)) {
-      MarkLargeObject(obj, true);
-      return;
+    MarkSweepMarkObjectSlowPath visitor(this);
+    // TODO: We already know that the object is not in the current_space_bitmap_, but
+    // HeapBitmap::Set will check again.
+    if (!mark_bitmap_->Set(obj, visitor)) {
+      PushOnMarkStack(obj);  // Was not already marked, push.
     }
-  } else if (kCountMarkedObjects) {
-    ++mark_fastpath_count_;
-  }
-  // This object was not previously marked.
-  if (!object_bitmap->Set(obj)) {
-    PushOnMarkStack(obj);
   }
 }
 
@@ -387,34 +414,6 @@
   mark_stack_->PushBack(obj);
 }
 
-// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
-bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
-  // TODO: support >1 discontinuous space.
-  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
-  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
-  if (kProfileLargeObjects) {
-    ++large_object_test_;
-  }
-  if (UNLIKELY(!large_objects->Test(obj))) {
-    if (!large_object_space->Contains(obj)) {
-      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
-      LOG(ERROR) << "Attempting see if it's a bad root";
-      VerifyRoots();
-      LOG(FATAL) << "Can't mark bad root";
-    }
-    if (kProfileLargeObjects) {
-      ++large_object_mark_;
-    }
-    if (set) {
-      large_objects->Set(obj);
-    } else {
-      large_objects->Clear(obj);
-    }
-    return true;
-  }
-  return false;
-}
-
 inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
   DCHECK(obj != nullptr);
   if (kUseBakerOrBrooksReadBarrier) {
@@ -428,19 +427,11 @@
   // Try to take advantage of locality of references within a space, failing this find the space
   // the hard way.
   accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
-  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
-    accounting::ContinuousSpaceBitmap* new_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
-    if (new_bitmap != NULL) {
-      object_bitmap = new_bitmap;
-    } else {
-      // TODO: Remove the Thread::Current here?
-      // TODO: Convert this to some kind of atomic marking?
-      MutexLock mu(Thread::Current(), large_object_lock_);
-      return MarkLargeObject(obj, true);
-    }
+  if (LIKELY(object_bitmap->HasAddress(obj))) {
+    return !object_bitmap->AtomicTestAndSet(obj);
   }
-  // Return true if the object was not previously marked.
-  return !object_bitmap->AtomicTestAndSet(obj);
+  MarkSweepMarkObjectSlowPath visitor(this);
+  return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
 }
 
 // Used to mark objects when processing the mark stack. If an object is null, it is not marked.
@@ -719,7 +710,7 @@
 
 size_t MarkSweep::GetThreadCount(bool paused) const {
   if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
-    return 0;
+    return 1;
   }
   if (paused) {
     return heap_->GetParallelGCThreadCount() + 1;
@@ -733,7 +724,7 @@
   ThreadPool* thread_pool = GetHeap()->GetThreadPool();
   size_t thread_count = GetThreadCount(paused);
   // The parallel version with only one thread is faster for card scanning, TODO: fix.
-  if (kParallelCardScan && thread_count > 0) {
+  if (kParallelCardScan && thread_count > 1) {
     Thread* self = Thread::Current();
     // Can't have a different split for each space since multiple spaces can have their cards being
     // scanned at the same time.
@@ -944,14 +935,11 @@
 
 void MarkSweep::VerifyIsLive(const Object* obj) {
   if (!heap_->GetLiveBitmap()->Test(obj)) {
-    space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
-    if (!large_object_space->GetLiveObjects()->Test(obj)) {
-      if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
-          heap_->allocation_stack_->End()) {
-        // Object not found!
-        heap_->DumpSpaces();
-        LOG(FATAL) << "Found dead object " << obj;
-      }
+    if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
+        heap_->allocation_stack_->End()) {
+      // Object not found!
+      heap_->DumpSpaces();
+      LOG(FATAL) << "Found dead object " << obj;
     }
   }
 }
@@ -1086,8 +1074,8 @@
   }
   // Handle the large object space.
   space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
-  accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
-  accounting::ObjectSet* large_mark_objects = large_object_space->GetMarkObjects();
+  accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
+  accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
   if (swap_bitmaps) {
     std::swap(large_live_objects, large_mark_objects);
   }
@@ -1131,7 +1119,6 @@
   timings_.EndSplit();
 
   DCHECK(mark_stack_->IsEmpty());
-  TimingLogger::ScopedSplit("Sweep", &timings_);
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
     if (space->IsContinuousMemMapAllocSpace()) {
       space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
@@ -1149,13 +1136,13 @@
 }
 
 void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
-  TimingLogger::ScopedSplit("SweepLargeObjects", &timings_);
+  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
   size_t freed_objects = 0;
   size_t freed_bytes = 0;
-  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
+  heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
   freed_large_objects_.FetchAndAdd(freed_objects);
   freed_large_object_bytes_.FetchAndAdd(freed_bytes);
-  GetHeap()->RecordFree(freed_objects, freed_bytes);
+  heap_->RecordFree(freed_objects, freed_bytes);
 }
 
 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 6dbb270..41a7764 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -227,11 +227,6 @@
   // Marks an object atomically, safe to use from multiple threads.
   void MarkObjectNonNullParallel(mirror::Object* obj);
 
-  // Marks or unmarks a large object based on whether or not set is true. If set is true, then we
-  // mark, otherwise we unmark.
-  bool MarkLargeObject(const mirror::Object* obj, bool set)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) LOCKS_EXCLUDED(large_object_lock_);
-
   // Returns true if we need to add obj to a mark stack.
   bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
 
@@ -315,7 +310,6 @@
   size_t live_stack_freeze_size_;
 
   UniquePtr<Barrier> gc_barrier_;
-  Mutex large_object_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   Mutex mark_stack_lock_ ACQUIRED_AFTER(Locks::classlinker_classes_lock_);
 
   const bool is_concurrent_;
@@ -326,8 +320,6 @@
   friend class CheckBitmapVisitor;
   friend class CheckReferenceVisitor;
   friend class art::gc::Heap;
-  friend class InternTableEntryIsUnmarked;
-  friend class MarkIfReachesAllocspaceVisitor;
   friend class MarkObjectVisitor;
   friend class ModUnionCheckReferences;
   friend class ModUnionClearCardVisitor;
@@ -336,10 +328,9 @@
   friend class ModUnionTableBitmap;
   friend class ModUnionTableReferenceCache;
   friend class ModUnionScanImageRootVisitor;
-  friend class ScanBitmapVisitor;
-  friend class ScanImageRootVisitor;
   template<bool kUseFinger> friend class MarkStackTask;
   friend class FifoMarkStackChunk;
+  friend class MarkSweepMarkObjectSlowPath;
 
   DISALLOW_COPY_AND_ASSIGN(MarkSweep);
 };
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index d03baf1..55140f6 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -26,6 +26,21 @@
 namespace gc {
 namespace collector {
 
+class BitmapSetSlowPathVisitor {
+ public:
+  explicit BitmapSetSlowPathVisitor(SemiSpace* semi_space) : semi_space_(semi_space) {
+  }
+
+  void operator()(const mirror::Object* obj) const {
+    CHECK(!semi_space_->to_space_->HasAddress(obj)) << "Marking " << obj << " in to_space_";
+    // Marking a large object, make sure it's aligned as a sanity check.
+    CHECK(IsAligned<kPageSize>(obj));
+  }
+
+ private:
+  SemiSpace* const semi_space_;
+};
+
 inline mirror::Object* SemiSpace::GetForwardingAddressInFromSpace(mirror::Object* obj) const {
   DCHECK(from_space_->HasAddress(obj));
   LockWord lock_word = obj->GetLockWord(false);
@@ -53,7 +68,7 @@
     if (from_space_->HasAddress(obj)) {
       mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
       // If the object has already been moved, return the new forward address.
-      if (forward_address == nullptr) {
+      if (UNLIKELY(forward_address == nullptr)) {
         forward_address = MarkNonForwardedObject(obj);
         DCHECK(forward_address != nullptr);
         // Make sure to only update the forwarding address AFTER you copy the object so that the
@@ -65,25 +80,17 @@
       }
       obj_ptr->Assign(forward_address);
     } else {
-      accounting::ContinuousSpaceBitmap* object_bitmap =
-          heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
-      if (LIKELY(object_bitmap != nullptr)) {
-        if (generational_) {
-          // If a bump pointer space only collection, we should not
-          // reach here as we don't/won't mark the objects in the
-          // non-moving space (except for the promoted objects.)  Note
-          // the non-moving space is added to the immune space.
-          DCHECK(whole_heap_collection_);
-        }
-        if (!object_bitmap->Set(obj)) {
-          // This object was not previously marked.
-          MarkStackPush(obj);
-        }
-      } else {
-        CHECK(!to_space_->HasAddress(obj)) << "Marking " << obj << " in to_space_";
-        if (MarkLargeObject(obj)) {
-          MarkStackPush(obj);
-        }
+      BitmapSetSlowPathVisitor visitor(this);
+      if (kIsDebugBuild && mark_bitmap_->GetContinuousSpaceBitmap(obj) != nullptr) {
+        // In a bump pointer space only collection, we should not
+        // reach here as we don't/won't mark the objects in the
+        // non-moving space (except for the promoted objects.)  Note
+        // the non-moving space is added to the immune space.
+        DCHECK(!generational_ || whole_heap_collection_);
+      }
+      if (!mark_bitmap_->Set(obj, visitor)) {
+        // This object was not previously marked.
+        MarkStackPush(obj);
       }
     }
   }
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 4a1bf18..b67bbb1 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -126,6 +126,11 @@
   CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_;
   // Set the initial bitmap.
   to_space_live_bitmap_ = to_space_->GetLiveBitmap();
+  {
+    // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
+    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+    mark_bitmap_ = heap_->GetMarkBitmap();
+  }
 }
 
 void SemiSpace::ProcessReferences(Thread* self) {
@@ -314,8 +319,8 @@
   accounting::ObjectStack* live_stack = heap_->GetLiveStack();
   heap_->MarkAllocStackAsLive(live_stack);
   live_stack->Reset();
-  timings_.EndSplit();
 
+  timings_.NewSplit("UpdateAndMarkRememberedSets");
   for (auto& space : heap_->GetContinuousSpaces()) {
     // If the space is immune and has no mod union table (the
     // non-moving space when the bump pointer space only collection is
@@ -353,6 +358,7 @@
   }
 
   if (is_large_object_space_immune_) {
+    timings_.NewSplit("VisitLargeObjects");
     DCHECK(generational_ && !whole_heap_collection_);
     // Delay copying the live set to the marked set until here from
     // BindBitmaps() as the large objects on the allocation stack may
@@ -364,13 +370,13 @@
     // classes (primitive array classes) that could move though they
     // don't contain any other references.
     space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
-    accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
+    accounting::LargeObjectBitmap* large_live_bitmap = large_object_space->GetLiveBitmap();
     SemiSpaceScanObjectVisitor visitor(this);
-    for (const Object* obj : large_live_objects->GetObjects()) {
-      visitor(const_cast<Object*>(obj));
-    }
+    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(large_object_space->Begin()),
+                                        reinterpret_cast<uintptr_t>(large_object_space->End()),
+                                        visitor);
   }
-
+  timings_.EndSplit();
   // Recursively process the mark stack.
   ProcessMarkStack();
 }
@@ -452,19 +458,6 @@
   mark_stack_->PushBack(obj);
 }
 
-// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
-bool SemiSpace::MarkLargeObject(const Object* obj) {
-  // TODO: support >1 discontinuous space.
-  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
-  DCHECK(large_object_space->Contains(obj));
-  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
-  if (UNLIKELY(!large_objects->Test(obj))) {
-    large_objects->Set(obj);
-    return true;
-  }
-  return false;
-}
-
 static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
   if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
     // We will dirty the current page and somewhere in the middle of the next page. This means
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index b6726b2..3d635f0 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -201,6 +201,8 @@
   // Cached live bitmap as an optimization.
   accounting::ContinuousSpaceBitmap* to_space_live_bitmap_;
   space::ContinuousMemMapAllocSpace* from_space_;
+  // Cached mark bitmap as an optimization.
+  accounting::HeapBitmap* mark_bitmap_;
 
   Thread* self_;
 
@@ -248,6 +250,7 @@
   static constexpr int kDefaultWholeHeapCollectionInterval = 5;
 
  private:
+  friend class BitmapSetSlowPathVisitor;
   DISALLOW_COPY_AND_ASSIGN(SemiSpace);
 };
 
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index de7d0b8..502da12 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -355,15 +355,15 @@
   }
   if (kUseRosAlloc) {
     main_space_ = space::RosAllocSpace::CreateFromMemMap(mem_map, "main rosalloc space",
-                                                          kDefaultStartingSize, initial_size,
-                                                          growth_limit, capacity, low_memory_mode_,
-                                                          can_move_objects);
+                                                         kDefaultStartingSize, initial_size,
+                                                         growth_limit, capacity, low_memory_mode_,
+                                                         can_move_objects);
     CHECK(main_space_ != nullptr) << "Failed to create rosalloc space";
   } else {
     main_space_ = space::DlMallocSpace::CreateFromMemMap(mem_map, "main dlmalloc space",
-                                                          kDefaultStartingSize, initial_size,
-                                                          growth_limit, capacity,
-                                                          can_move_objects);
+                                                         kDefaultStartingSize, initial_size,
+                                                         growth_limit, capacity,
+                                                         can_move_objects);
     CHECK(main_space_ != nullptr) << "Failed to create dlmalloc space";
   }
   main_space_->SetFootprintLimit(main_space_->Capacity());
@@ -569,7 +569,7 @@
     space2 = space1;
   }
   MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
-                 large_object_space_->GetLiveObjects(), stack);
+                 large_object_space_->GetLiveBitmap(), stack);
 }
 
 void Heap::DeleteThreadPool() {
@@ -606,10 +606,8 @@
   } else {
     DCHECK(space->IsDiscontinuousSpace());
     space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
-    DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
-    live_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
-    DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
-    mark_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
+    live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
+    mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
     discontinuous_spaces_.push_back(discontinuous_space);
   }
   if (space->IsAllocSpace()) {
@@ -649,10 +647,8 @@
   } else {
     DCHECK(space->IsDiscontinuousSpace());
     space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
-    DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
-    live_bitmap_->RemoveDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
-    DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
-    mark_bitmap_->RemoveDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
+    live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
+    mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
     auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
                         discontinuous_space);
     DCHECK(it != discontinuous_spaces_.end());
@@ -1050,7 +1046,7 @@
     return temp_space_->Contains(obj);
   }
   space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
-  space::DiscontinuousSpace* d_space = NULL;
+  space::DiscontinuousSpace* d_space = nullptr;
   if (c_space != nullptr) {
     if (c_space->GetLiveBitmap()->Test(obj)) {
       return true;
@@ -1058,7 +1054,7 @@
   } else {
     d_space = FindDiscontinuousSpaceFromObject(obj, true);
     if (d_space != nullptr) {
-      if (d_space->GetLiveObjects()->Test(obj)) {
+      if (d_space->GetLiveBitmap()->Test(obj)) {
         return true;
       }
     }
@@ -1096,7 +1092,7 @@
     }
   } else {
     d_space = FindDiscontinuousSpaceFromObject(obj, true);
-    if (d_space != nullptr && d_space->GetLiveObjects()->Test(obj)) {
+    if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj)) {
       return true;
     }
   }
@@ -1761,7 +1757,7 @@
 
 void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
                           accounting::ContinuousSpaceBitmap* bitmap2,
-                          accounting::ObjectSet* large_objects,
+                          accounting::LargeObjectBitmap* large_objects,
                           accounting::ObjectStack* stack) {
   DCHECK(bitmap1 != nullptr);
   DCHECK(bitmap2 != nullptr);
@@ -2888,7 +2884,7 @@
   }
-  // Clear the marked objects in the discontinous space object sets.
+  // Clear the marked objects in the discontinuous space mark bitmaps.
   for (const auto& space : GetDiscontinuousSpaces()) {
-    space->GetMarkObjects()->Clear();
+    space->GetMarkBitmap()->Clear();
   }
 }
 
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 35724e3..ceba8b6 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -55,7 +55,6 @@
 namespace accounting {
   class HeapBitmap;
   class ModUnionTable;
-  class ObjectSet;
   class RememberedSet;
 }  // namespace accounting
 
@@ -477,7 +476,8 @@
   // TODO: Refactor?
   void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
                       accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
-                      accounting::ObjectSet* large_objects, accounting::ObjectStack* stack)
+                      accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
+                      accounting::ObjectStack* stack)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Mark the specified allocation stack as live.
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 0b353c7..ce11b3d 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -16,12 +16,14 @@
 
 #include "large_object_space.h"
 
+#include "gc/accounting/space_bitmap-inl.h"
 #include "base/logging.h"
 #include "base/mutex-inl.h"
 #include "base/stl_util.h"
 #include "UniquePtr.h"
 #include "image.h"
 #include "os.h"
+#include "space-inl.h"
 #include "thread-inl.h"
 #include "utils.h"
 
@@ -74,26 +76,27 @@
 };
 
 void LargeObjectSpace::SwapBitmaps() {
-  live_objects_.swap(mark_objects_);
+  live_bitmap_.swap(mark_bitmap_);
   // Swap names to get more descriptive diagnostics.
-  std::string temp_name = live_objects_->GetName();
-  live_objects_->SetName(mark_objects_->GetName());
-  mark_objects_->SetName(temp_name);
+  std::string temp_name = live_bitmap_->GetName();
+  live_bitmap_->SetName(mark_bitmap_->GetName());
+  mark_bitmap_->SetName(temp_name);
 }
 
-LargeObjectSpace::LargeObjectSpace(const std::string& name)
+LargeObjectSpace::LargeObjectSpace(const std::string& name, byte* begin, byte* end)
     : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
       num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
-      total_objects_allocated_(0) {
+      total_objects_allocated_(0), begin_(begin), end_(end) {
 }
 
 
 void LargeObjectSpace::CopyLiveToMarked() {
-  mark_objects_->CopyFrom(*live_objects_.get());
+  mark_bitmap_->CopyFrom(live_bitmap_.get());
 }
 
+// TODO: Use something cleaner than 0xFFFFFFFF.
 LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
-    : LargeObjectSpace(name),
+    : LargeObjectSpace(name, reinterpret_cast<byte*>(0xFFFFFFFF), nullptr),
       lock_("large object map space lock", kAllocSpaceLock) {}
 
 LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
@@ -118,7 +121,9 @@
   large_objects_.push_back(obj);
   mem_maps_.Put(obj, mem_map);
   size_t allocation_size = mem_map->Size();
-  DCHECK(bytes_allocated != NULL);
+  DCHECK(bytes_allocated != nullptr);
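+  // Map space objects can land anywhere in the address space, so grow the reported
+  // [Begin, End) interval to cover each new allocation.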
+  begin_ = std::min(begin_, reinterpret_cast<byte*>(obj));
+  end_ = std::max(end_, reinterpret_cast<byte*>(obj) + allocation_size);
   *bytes_allocated = allocation_size;
   if (usable_size != nullptr) {
     *usable_size = allocation_size;
@@ -191,9 +196,7 @@
 }
 
 FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
-    : LargeObjectSpace(name),
-      begin_(begin),
-      end_(end),
+    : LargeObjectSpace(name, begin, end),
       mem_map_(mem_map),
       lock_("free list space lock", kAllocSpaceLock) {
   free_end_ = end - begin;
@@ -389,27 +392,41 @@
   }
 }
 
-void LargeObjectSpace::Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes) {
-  // Sweep large objects
-  accounting::ObjectSet* large_live_objects = GetLiveObjects();
-  accounting::ObjectSet* large_mark_objects = GetMarkObjects();
-  if (swap_bitmaps) {
-    std::swap(large_live_objects, large_mark_objects);
-  }
-  DCHECK(freed_objects != nullptr);
-  DCHECK(freed_bytes != nullptr);
-  // O(n*log(n)) but hopefully there are not too many large objects.
-  size_t objects = 0;
-  size_t bytes = 0;
-  Thread* self = Thread::Current();
-  for (const mirror::Object* obj : large_live_objects->GetObjects()) {
-    if (!large_mark_objects->Test(obj)) {
-      bytes += Free(self, const_cast<mirror::Object*>(obj));
-      ++objects;
+void LargeObjectSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
+  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
+  space::LargeObjectSpace* space = context->space->AsLargeObjectSpace();
+  Thread* self = context->self;
+  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
+  // If the bitmaps aren't swapped we need to clear the bits since the GC isn't going to re-swap
+  // the bitmaps as an optimization.
+  if (!context->swap_bitmaps) {
+    accounting::LargeObjectBitmap* bitmap = space->GetLiveBitmap();
+    for (size_t i = 0; i < num_ptrs; ++i) {
+      bitmap->Clear(ptrs[i]);
     }
   }
-  *freed_objects += objects;
-  *freed_bytes += bytes;
+  context->freed_objects += num_ptrs;
+  context->freed_bytes += space->FreeList(self, num_ptrs, ptrs);
+}
+
+void LargeObjectSpace::Sweep(bool swap_bitmaps, size_t* out_freed_objects,
+                             size_t* out_freed_bytes) {
+  if (Begin() >= End()) {
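+    // The map space starts with Begin() above End(); no large objects were ever allocated,
+    // so there is nothing to sweep.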
+    return;
+  }
+  accounting::LargeObjectBitmap* live_bitmap = GetLiveBitmap();
+  accounting::LargeObjectBitmap* mark_bitmap = GetMarkBitmap();
+  if (swap_bitmaps) {
+    std::swap(live_bitmap, mark_bitmap);
+  }
+  DCHECK(out_freed_objects != nullptr);
+  DCHECK(out_freed_bytes != nullptr);
+  SweepCallbackContext scc(swap_bitmaps, this);
+  accounting::LargeObjectBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
+                                           reinterpret_cast<uintptr_t>(Begin()),
+                                           reinterpret_cast<uintptr_t>(End()), SweepCallback, &scc);
+  *out_freed_objects += scc.freed_objects;
+  *out_freed_bytes += scc.freed_bytes;
 }
 
 }  // namespace space
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 18e518f..0daefba 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -49,11 +49,11 @@
     return num_objects_allocated_;
   }
 
-  uint64_t GetTotalBytesAllocated() {
+  uint64_t GetTotalBytesAllocated() const {
     return total_bytes_allocated_;
   }
 
-  uint64_t GetTotalObjectsAllocated() {
+  uint64_t GetTotalObjectsAllocated() const {
     return total_objects_allocated_;
   }
 
@@ -73,20 +73,36 @@
     return this;
   }
 
-  void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes);
+  void Sweep(bool swap_bitmaps, size_t* out_freed_objects, size_t* out_freed_bytes);
 
   virtual bool CanMoveObjects() const OVERRIDE {
     return false;
   }
 
+  // Current address at which the space begins, which may vary as the space is filled.
+  byte* Begin() const {
+    return begin_;
+  }
+
+  // Current address at which the space ends, which may vary as the space is filled.
+  byte* End() const {
+    return end_;
+  }
+
  protected:
-  explicit LargeObjectSpace(const std::string& name);
+  explicit LargeObjectSpace(const std::string& name, byte* begin, byte* end);
+
+  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
 
   // Approximate number of bytes which have been allocated into the space.
-  size_t num_bytes_allocated_;
-  size_t num_objects_allocated_;
-  size_t total_bytes_allocated_;
-  size_t total_objects_allocated_;
+  uint64_t num_bytes_allocated_;
+  uint64_t num_objects_allocated_;
+  uint64_t total_bytes_allocated_;
+  uint64_t total_objects_allocated_;
+
+  // Begin and end, may change as more large objects are allocated.
+  byte* begin_;
+  byte* end_;
 
   friend class Space;
 
@@ -242,9 +258,6 @@
   typedef std::set<AllocationHeader*, AllocationHeader::SortByPrevFree,
                    accounting::GcAllocator<AllocationHeader*> > FreeBlocks;
 
-  byte* const begin_;
-  byte* const end_;
-
-  // There is not footer for any allocations at the end of the space, so we keep track of how much
+  // There is no footer for any allocations at the end of the space, so we keep track of how much
   // free space there is at the end manually.
   UniquePtr<MemMap> mem_map_;
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index eaf14fb..7493c19 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -226,7 +226,6 @@
 
 void MallocSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
   SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
-  DCHECK(context->space->IsMallocSpace());
   space::MallocSpace* space = context->space->AsMallocSpace();
   Thread* self = context->self;
   Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
index 01e8b04..4e28416 100644
--- a/runtime/gc/space/space.cc
+++ b/runtime/gc/space/space.cc
@@ -70,9 +70,15 @@
 
 DiscontinuousSpace::DiscontinuousSpace(const std::string& name,
                                        GcRetentionPolicy gc_retention_policy) :
-    Space(name, gc_retention_policy),
-    live_objects_(new accounting::ObjectSet("large live objects")),
-    mark_objects_(new accounting::ObjectSet("large marked objects")) {
+    Space(name, gc_retention_policy) {
+  // TODO: Fix this if we ever support objects outside the low 32 bits of the address space.
+  const size_t capacity = static_cast<size_t>(std::numeric_limits<uint32_t>::max());
+  live_bitmap_.reset(accounting::LargeObjectBitmap::Create("large live objects", nullptr,
+                                                           capacity));
+  CHECK(live_bitmap_.get() != nullptr);
+  mark_bitmap_.reset(accounting::LargeObjectBitmap::Create("large marked objects", nullptr,
+                                                           capacity));
+  CHECK(mark_bitmap_.get() != nullptr);
 }
 
 void ContinuousMemMapAllocSpace::Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes) {
@@ -84,13 +90,7 @@
   if (live_bitmap == mark_bitmap) {
     return;
   }
-  SweepCallbackContext scc;
-  scc.swap_bitmaps = swap_bitmaps;
-  scc.heap = Runtime::Current()->GetHeap();
-  scc.self = Thread::Current();
-  scc.space = this;
-  scc.freed_objects = 0;
-  scc.freed_bytes = 0;
+  SweepCallbackContext scc(swap_bitmaps, this);
   if (swap_bitmaps) {
     std::swap(live_bitmap, mark_bitmap);
   }
@@ -136,6 +136,11 @@
   mark_bitmap_->SetName(temp_name);
 }
 
+Space::SweepCallbackContext::SweepCallbackContext(bool swap_bitmaps, space::Space* space)
+    : swap_bitmaps(swap_bitmaps), space(space), self(Thread::Current()), freed_objects(0),
+      freed_bytes(0) {
+}
+
 }  // namespace space
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 2b27f87..0a87a16 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -173,10 +173,11 @@
 
  protected:
   struct SweepCallbackContext {
-    bool swap_bitmaps;
-    Heap* heap;
-    space::Space* space;
-    Thread* self;
+   public:
+    SweepCallbackContext(bool swap_bitmaps, space::Space* space);
+    const bool swap_bitmaps;
+    space::Space* const space;
+    Thread* const self;
     size_t freed_objects;
     size_t freed_bytes;
   };
@@ -313,15 +314,15 @@
 // is suitable for use for large primitive arrays.
 class DiscontinuousSpace : public Space {
  public:
-  accounting::ObjectSet* GetLiveObjects() const {
-    return live_objects_.get();
+  accounting::LargeObjectBitmap* GetLiveBitmap() const {
+    return live_bitmap_.get();
   }
 
-  accounting::ObjectSet* GetMarkObjects() const {
-    return mark_objects_.get();
+  accounting::LargeObjectBitmap* GetMarkBitmap() const {
+    return mark_bitmap_.get();
   }
 
-  virtual bool IsDiscontinuousSpace() const {
+  virtual bool IsDiscontinuousSpace() const OVERRIDE {
     return true;
   }
 
@@ -330,8 +331,8 @@
  protected:
   DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);
 
-  UniquePtr<accounting::ObjectSet> live_objects_;
-  UniquePtr<accounting::ObjectSet> mark_objects_;
+  UniquePtr<accounting::LargeObjectBitmap> live_bitmap_;
+  UniquePtr<accounting::LargeObjectBitmap> mark_bitmap_;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace);
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index 1b06b63..0466413 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -101,7 +101,7 @@
   DCHECK(context->space->IsZygoteSpace());
   ZygoteSpace* zygote_space = context->space->AsZygoteSpace();
   Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
-  accounting::CardTable* card_table = context->heap->GetCardTable();
+  accounting::CardTable* card_table = Runtime::Current()->GetHeap()->GetCardTable();
   // If the bitmaps aren't swapped we need to clear the bits since the GC isn't going to re-swap
   // the bitmaps as an optimization.
   if (!context->swap_bitmaps) {
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 2505855..b195dea 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -51,14 +51,14 @@
       OFFSET_OF_OBJECT_MEMBER(Object, klass_), new_klass, false);
 }
 
-inline LockWord Object::GetLockWord(bool is_volatile) {
-  return LockWord(GetField32(OFFSET_OF_OBJECT_MEMBER(Object, monitor_), is_volatile));
+inline LockWord Object::GetLockWord(bool as_volatile) {
+  return LockWord(GetField32(OFFSET_OF_OBJECT_MEMBER(Object, monitor_), as_volatile));
 }
 
-inline void Object::SetLockWord(LockWord new_val, bool is_volatile) {
+inline void Object::SetLockWord(LockWord new_val, bool as_volatile) {
   // Force use of non-transactional mode and do not check.
   SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_), new_val.GetValue(),
-                           is_volatile);
+                           as_volatile);
 }
 
 inline bool Object::CasLockWord(LockWord old_val, LockWord new_val) {