Some cleanup for mark sweep files

Mostly formatting; no behavior change intended:
- Put each parameter and constructor initializer of long declarations
  on its own line, aligned with the opening parenthesis.
- List lock annotations one per line, ordering REQUIRES before
  SHARED_REQUIRES (see the example below this list).
- Move ALWAYS_INLINE ahead of the return type.
- Mark unused parameters with ATTRIBUTE_UNUSED instead of UNUSED().
- Reflow multi-line ternaries so ? and : start the continuation lines.
- Collapse empty constructor bodies to {}.
- Un-indent forward declarations inside namespaces and trim the friend
  list in MarkSweep.
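As a small illustration of the annotation ordering, taken from the
mark_sweep.h hunk below, the MarkingPhase declaration goes from

  void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);

to

  void MarkingPhase() REQUIRES(!mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
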
Bug: 19534862
Change-Id: Ib619e8c4b9d902269d123baa18be56c94114e070
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index 56edcc9..e72277f 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -29,7 +29,8 @@
 namespace collector {
 
 template<typename MarkVisitor, typename ReferenceVisitor>
-inline void MarkSweep::ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor,
+inline void MarkSweep::ScanObjectVisit(mirror::Object* obj,
+                                       const MarkVisitor& visitor,
                                        const ReferenceVisitor& ref_visitor) {
   DCHECK(IsMarked(obj)) << "Scanning unmarked object " << obj << "\n" << heap_->DumpSpaces();
   obj->VisitReferences(visitor, ref_visitor);
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 2d1f312..77a288b 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -95,10 +95,13 @@
     : GarbageCollector(heap,
                        name_prefix +
                        (is_concurrent ? "concurrent mark sweep": "mark sweep")),
-      current_space_bitmap_(nullptr), mark_bitmap_(nullptr), mark_stack_(nullptr),
+      current_space_bitmap_(nullptr),
+      mark_bitmap_(nullptr),
+      mark_stack_(nullptr),
       gc_barrier_(new Barrier(0)),
       mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
-      is_concurrent_(is_concurrent), live_stack_freeze_size_(0) {
+      is_concurrent_(is_concurrent),
+      live_stack_freeze_size_(0) {
   std::string error_msg;
   MemMap* mem_map = MemMap::MapAnonymous(
       "mark sweep sweep array free buffer", nullptr,
@@ -173,7 +176,10 @@
 void MarkSweep::ProcessReferences(Thread* self) {
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   GetHeap()->GetReferenceProcessor()->ProcessReferences(
-      true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
+      true,
+      GetTimings(),
+      GetCurrentIteration()->GetClearSoftReferences(),
+      this);
 }
 
 void MarkSweep::PausePhase() {
@@ -265,8 +271,9 @@
 void MarkSweep::UpdateAndMarkModUnion() {
   for (const auto& space : heap_->GetContinuousSpaces()) {
     if (immune_region_.ContainsSpace(space)) {
-      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
-          "UpdateAndMarkImageModUnionTable";
+      const char* name = space->IsZygoteSpace()
+          ? "UpdateAndMarkZygoteModUnionTable"
+          : "UpdateAndMarkImageModUnionTable";
       TimingLogger::ScopedTiming t(name, GetTimings());
       accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
       CHECK(mod_union_table != nullptr);
@@ -365,10 +372,10 @@
 
 class MarkSweepMarkObjectSlowPath {
  public:
-  explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep, mirror::Object* holder = nullptr,
+  explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep,
+                                       mirror::Object* holder = nullptr,
                                        MemberOffset offset = MemberOffset(0))
-      : mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {
-  }
+      : mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {}
 
   void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
     if (kProfileLargeObjects) {
@@ -445,7 +452,8 @@
   MemberOffset offset_;
 };
 
-inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj, mirror::Object* holder,
+inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj,
+                                         mirror::Object* holder,
                                          MemberOffset offset) {
   DCHECK(obj != nullptr);
   if (kUseBakerOrBrooksReadBarrier) {
@@ -512,7 +520,8 @@
 }
 
 // Used to mark objects when processing the mark stack. If an object is null, it is not marked.
-inline void MarkSweep::MarkObject(mirror::Object* obj, mirror::Object* holder,
+inline void MarkSweep::MarkObject(mirror::Object* obj,
+                                  mirror::Object* holder,
                                   MemberOffset offset) {
   if (obj != nullptr) {
     MarkObjectNonNull(obj, holder, offset);
@@ -534,14 +543,16 @@
   MarkSweep* const collector_;
 };
 
-void MarkSweep::VisitRoots(mirror::Object*** roots, size_t count,
+void MarkSweep::VisitRoots(mirror::Object*** roots,
+                           size_t count,
                            const RootInfo& info ATTRIBUTE_UNUSED) {
   for (size_t i = 0; i < count; ++i) {
     MarkObjectNonNull(*roots[i]);
   }
 }
 
-void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+                           size_t count,
                            const RootInfo& info ATTRIBUTE_UNUSED) {
   for (size_t i = 0; i < count; ++i) {
     MarkObjectNonNull(roots[i]->AsMirrorPtr());
@@ -600,8 +611,10 @@
   explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
       : mark_sweep_(mark_sweep) {}
 
-  void operator()(mirror::Object* obj) const ALWAYS_INLINE
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+  void operator()(mirror::Object* obj) const
+      ALWAYS_INLINE
+      REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (kCheckLocks) {
       Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
       Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -615,12 +628,11 @@
 
 class DelayReferenceReferentVisitor {
  public:
-  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
-  }
+  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {}
 
   void operator()(mirror::Class* klass, mirror::Reference* ref) const
-      SHARED_REQUIRES(Locks::mutator_lock_)
-      REQUIRES(Locks::heap_bitmap_lock_) {
+      REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     collector_->DelayReferenceReferent(klass, ref);
   }
 
@@ -631,7 +643,9 @@
 template <bool kUseFinger = false>
 class MarkStackTask : public Task {
  public:
-  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
+  MarkStackTask(ThreadPool* thread_pool,
+                MarkSweep* mark_sweep,
+                size_t mark_stack_size,
                 StackReference<mirror::Object>* mark_stack)
       : mark_sweep_(mark_sweep),
         thread_pool_(thread_pool),
@@ -656,8 +670,10 @@
                                             MarkSweep* mark_sweep)
         : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
 
-    void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const
-        ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+    ALWAYS_INLINE void operator()(mirror::Object* obj,
+                                  MemberOffset offset,
+                                  bool is_static ATTRIBUTE_UNUSED) const
+        SHARED_REQUIRES(Locks::mutator_lock_) {
       Mark(obj->GetFieldObject<mirror::Object>(offset));
     }
 
@@ -678,7 +694,7 @@
     }
 
    private:
-    void Mark(mirror::Object* ref) const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+    ALWAYS_INLINE void Mark(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_) {
       if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
         if (kUseFinger) {
           std::atomic_thread_fence(std::memory_order_seq_cst);
@@ -697,12 +713,13 @@
 
   class ScanObjectParallelVisitor {
    public:
-    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
+    ALWAYS_INLINE explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task)
         : chunk_task_(chunk_task) {}
 
     // No thread safety analysis since multiple threads will use this visitor.
-    void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
-        REQUIRES(Locks::heap_bitmap_lock_) {
+    void operator()(mirror::Object* obj) const
+        REQUIRES(Locks::heap_bitmap_lock_)
+        SHARED_REQUIRES(Locks::mutator_lock_) {
       MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
       MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
       DelayReferenceReferentVisitor ref_visitor(mark_sweep);
@@ -733,7 +750,9 @@
     if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
       // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
       mark_stack_pos_ /= 2;
-      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
+      auto* task = new MarkStackTask(thread_pool_,
+                                     mark_sweep_,
+                                     kMaxSize - mark_stack_pos_,
                                      mark_stack_ + mark_stack_pos_);
       thread_pool_->AddTask(Thread::Current(), task);
     }
@@ -747,9 +766,9 @@
   }
 
   // Scans all of the objects
-  virtual void Run(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
-      REQUIRES(Locks::heap_bitmap_lock_) {
-    UNUSED(self);
+  virtual void Run(Thread* self ATTRIBUTE_UNUSED)
+      REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     ScanObjectParallelVisitor visitor(this);
     // TODO: Tune this.
     static const size_t kFifoSize = 4;
@@ -782,16 +801,21 @@
 
 class CardScanTask : public MarkStackTask<false> {
  public:
-  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
+  CardScanTask(ThreadPool* thread_pool,
+               MarkSweep* mark_sweep,
                accounting::ContinuousSpaceBitmap* bitmap,
-               uint8_t* begin, uint8_t* end, uint8_t minimum_age, size_t mark_stack_size,
-               StackReference<mirror::Object>* mark_stack_obj, bool clear_card)
+               uint8_t* begin,
+               uint8_t* end,
+               uint8_t minimum_age,
+               size_t mark_stack_size,
+               StackReference<mirror::Object>* mark_stack_obj,
+               bool clear_card)
       : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
         bitmap_(bitmap),
         begin_(begin),
         end_(end),
-        minimum_age_(minimum_age), clear_card_(clear_card) {
-  }
+        minimum_age_(minimum_age),
+        clear_card_(clear_card) {}
 
  protected:
   accounting::ContinuousSpaceBitmap* const bitmap_;
@@ -807,9 +831,9 @@
   virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
     ScanObjectParallelVisitor visitor(this);
     accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
-    size_t cards_scanned = clear_card_ ?
-                           card_table->Scan<true>(bitmap_, begin_, end_, visitor, minimum_age_) :
-                           card_table->Scan<false>(bitmap_, begin_, end_, visitor, minimum_age_);
+    size_t cards_scanned = clear_card_
+        ? card_table->Scan<true>(bitmap_, begin_, end_, visitor, minimum_age_)
+        : card_table->Scan<false>(bitmap_, begin_, end_, visitor, minimum_age_);
     VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
         << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
     // Finish by emptying our local mark stack.
@@ -877,9 +901,15 @@
         mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
         DCHECK_EQ(mark_stack_end, mark_stack_->End());
         // Add the new task to the thread pool.
-        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
-                                      card_begin + card_increment, minimum_age,
-                                      mark_stack_increment, mark_stack_end, clear_card);
+        auto* task = new CardScanTask(thread_pool,
+                                      this,
+                                      space->GetMarkBitmap(),
+                                      card_begin,
+                                      card_begin + card_increment,
+                                      minimum_age,
+                                      mark_stack_increment,
+                                      mark_stack_end,
+                                      clear_card);
         thread_pool->AddTask(self, task);
         card_begin += card_increment;
       }
@@ -915,10 +945,16 @@
         ScanObjectVisitor visitor(this);
         bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
         if (clear_card) {
-          card_table->Scan<true>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
+          card_table->Scan<true>(space->GetMarkBitmap(),
+                                 space->Begin(),
+                                 space->End(),
+                                 visitor,
                                  minimum_age);
         } else {
-          card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
+          card_table->Scan<false>(space->GetMarkBitmap(),
+                                  space->Begin(),
+                                  space->End(),
+                                  visitor,
                                   minimum_age);
         }
       }
@@ -928,11 +964,15 @@
 
 class RecursiveMarkTask : public MarkStackTask<false> {
  public:
-  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
-                    accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
-      : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr), bitmap_(bitmap), begin_(begin),
-        end_(end) {
-  }
+  RecursiveMarkTask(ThreadPool* thread_pool,
+                    MarkSweep* mark_sweep,
+                    accounting::ContinuousSpaceBitmap* bitmap,
+                    uintptr_t begin,
+                    uintptr_t end)
+      : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr),
+        bitmap_(bitmap),
+        begin_(begin),
+        end_(end) {}
 
  protected:
   accounting::ContinuousSpaceBitmap* const bitmap_;
@@ -989,7 +1029,10 @@
             delta = RoundUp(delta, KB);
             if (delta < 16 * KB) delta = end - begin;
             begin += delta;
-            auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
+            auto* task = new RecursiveMarkTask(thread_pool,
+                                               this,
+                                               current_space_bitmap_,
+                                               start,
                                                begin);
             thread_pool->AddTask(self, task);
           }
@@ -1036,7 +1079,8 @@
  public:
   explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
 
-  virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
+  virtual mirror::Object* IsMarked(mirror::Object* obj)
+      OVERRIDE
       SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     mark_sweep_->VerifyIsLive(obj);
     return obj;
@@ -1077,7 +1121,8 @@
     }
   }
 
-  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+                  size_t count,
                   const RootInfo& info ATTRIBUTE_UNUSED)
       OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(Locks::heap_bitmap_lock_) {
@@ -1251,7 +1296,8 @@
     if (space->IsContinuousMemMapAllocSpace()) {
       space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
       TimingLogger::ScopedTiming split(
-          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", GetTimings());
+          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace",
+          GetTimings());
       RecordFree(alloc_space->Sweep(swap_bitmaps));
     }
   }
@@ -1274,12 +1320,13 @@
 
 class MarkVisitor {
  public:
-  explicit MarkVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
-  }
+  ALWAYS_INLINE explicit MarkVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
 
-  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
-      ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
-      REQUIRES(Locks::heap_bitmap_lock_) {
+  ALWAYS_INLINE void operator()(mirror::Object* obj,
+                                MemberOffset offset,
+                                bool is_static ATTRIBUTE_UNUSED) const
+      REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (kCheckLocks) {
       Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
       Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -1288,14 +1335,16 @@
   }
 
   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+      REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (!root->IsNull()) {
       VisitRoot(root);
     }
   }
 
   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+      REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     if (kCheckLocks) {
       Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
       Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 371bba5..8f7df78 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -33,9 +33,9 @@
 namespace art {
 
 namespace mirror {
-  class Class;
-  class Object;
-  class Reference;
+class Class;
+class Object;
+class Reference;
 }  // namespace mirror
 
 class Thread;
@@ -46,8 +46,8 @@
 class Heap;
 
 namespace accounting {
-  template<typename T> class AtomicStack;
-  typedef AtomicStack<mirror::Object> ObjectStack;
+template<typename T> class AtomicStack;
+typedef AtomicStack<mirror::Object> ObjectStack;
 }  // namespace accounting
 
 namespace collector {
@@ -60,12 +60,14 @@
 
   virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_);
   void InitializePhase();
-  void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
-  void PausePhase() REQUIRES(Locks::mutator_lock_, !mark_stack_lock_);
-  void ReclaimPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+  void MarkingPhase() REQUIRES(!mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+  void PausePhase() REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+  void ReclaimPhase() REQUIRES(!mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
   void FinishPhase();
   virtual void MarkReachableObjects()
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool IsConcurrent() const {
     return is_concurrent_;
@@ -87,20 +89,30 @@
 
   // Marks all objects in the root set at the start of a garbage collection.
   void MarkRoots(Thread* self)
-      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void MarkNonThreadRoots()
-      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void MarkConcurrentRoots(VisitRootFlags flags)
-      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void MarkRootsCheckpoint(Thread* self, bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
-      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Builds a mark stack and recursively mark until it empties.
   void RecursiveMark()
-      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
   // the image. Mark that portion of the heap as immune.
@@ -108,26 +120,35 @@
 
   // Builds a mark stack with objects on dirty cards and recursively mark until it empties.
   void RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age)
-      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Remarks the root set after completing the concurrent mark.
   void ReMarkRoots()
-      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void ProcessReferences(Thread* self)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Update and mark references from immune spaces.
   void UpdateAndMarkModUnion()
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Pre clean cards to reduce how much work is needed in the pause.
   void PreCleanCards()
-      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Sweeps unmarked objects to complete the garbage collection. Virtual as by default it sweeps
   // all allocation spaces. Partial and sticky GCs want to just sweep a subset of the heap.
-  virtual void Sweep(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_)
+  virtual void Sweep(bool swap_bitmaps)
+      REQUIRES(Locks::heap_bitmap_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Sweeps unmarked objects to complete the garbage collection.
@@ -135,20 +156,27 @@
 
   // Sweep only pointers within an array. WARNING: Trashes objects.
   void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Blackens an object.
   void ScanObject(mirror::Object* obj)
-      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // No thread safety analysis due to lambdas.
   template<typename MarkVisitor, typename ReferenceVisitor>
-  void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor,
+  void ScanObjectVisit(mirror::Object* obj,
+                       const MarkVisitor& visitor,
                        const ReferenceVisitor& ref_visitor)
-    SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SweepSystemWeaks(Thread* self)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
+      REQUIRES(!Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
       SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
@@ -161,22 +189,36 @@
       SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
   virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+                          size_t count,
                           const RootInfo& info) OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Marks an object.
   virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   void MarkObject(mirror::Object* obj, mirror::Object* holder, MemberOffset offset)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   Barrier& GetBarrier() {
     return *gc_barrier_;
@@ -191,13 +233,17 @@
   virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
       SHARED_REQUIRES(Locks::heap_bitmap_lock_);
 
-  void MarkObjectNonNull(mirror::Object* obj, mirror::Object* holder = nullptr,
+  void MarkObjectNonNull(mirror::Object* obj,
+                         mirror::Object* holder = nullptr,
                          MemberOffset offset = MemberOffset(0))
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Marks an object atomically, safe to use from multiple threads.
   void MarkObjectNonNullParallel(mirror::Object* obj)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns true if we need to add obj to a mark stack.
   bool MarkObjectParallel(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
@@ -208,9 +254,12 @@
       NO_THREAD_SAFETY_ANALYSIS;
 
   // Expand mark stack to 2x its current size.
-  void ExpandMarkStack() REQUIRES(mark_stack_lock_)
+  void ExpandMarkStack()
+      REQUIRES(mark_stack_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
-  void ResizeMarkStack(size_t new_size) REQUIRES(mark_stack_lock_)
+
+  void ResizeMarkStack(size_t new_size)
+      REQUIRES(mark_stack_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns how many threads we should use for the current GC phase based on if we are paused,
@@ -218,24 +267,34 @@
   size_t GetThreadCount(bool paused) const;
 
   // Push a single reference on a mark stack.
-  void PushOnMarkStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_)
-      REQUIRES(!mark_stack_lock_);
+  void PushOnMarkStack(mirror::Object* obj)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Blackens objects grayed during a garbage collection.
   void ScanGrayObjects(bool paused, uint8_t minimum_age)
-      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  virtual void ProcessMarkStack() OVERRIDE REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_)
+  virtual void ProcessMarkStack()
+      OVERRIDE
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_) {
     ProcessMarkStack(false);
   }
 
   // Recursively blackens objects on the mark stack.
   void ProcessMarkStack(bool paused)
-      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void ProcessMarkStackParallel(size_t thread_count)
-      REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Used to Get around thread safety annotations. The call is from MarkingPhase and is guarded by
   // IsExclusiveHeld.
@@ -293,23 +352,15 @@
   std::unique_ptr<MemMap> sweep_array_free_buffer_mem_map_;
 
  private:
-  friend class AddIfReachesAllocSpaceVisitor;  // Used by mod-union table.
   friend class CardScanTask;
   friend class CheckBitmapVisitor;
   friend class CheckReferenceVisitor;
   friend class CheckpointMarkThreadRoots;
-  friend class art::gc::Heap;
+  friend class Heap;
   friend class FifoMarkStackChunk;
   friend class MarkObjectVisitor;
   template<bool kUseFinger> friend class MarkStackTask;
   friend class MarkSweepMarkObjectSlowPath;
-  friend class ModUnionCheckReferences;
-  friend class ModUnionClearCardVisitor;
-  friend class ModUnionReferenceVisitor;
-  friend class ModUnionScanImageRootVisitor;
-  friend class ModUnionTableBitmap;
-  friend class ModUnionTableReferenceCache;
-  friend class ModUnionVisitor;
   friend class VerifyRootMarkedVisitor;
   friend class VerifyRootVisitor;
 
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 5be3db7..6c32658 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -25,8 +25,7 @@
 namespace collector {
 
 StickyMarkSweep::StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
-    : PartialMarkSweep(heap, is_concurrent,
-                       name_prefix.empty() ? "sticky " : name_prefix) {
+    : PartialMarkSweep(heap, is_concurrent, name_prefix.empty() ? "sticky " : name_prefix) {
   cumulative_timings_.SetName(GetName());
 }
 
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index e8f0672..abaf978 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -38,13 +38,15 @@
   // alloc space will be marked as immune.
   void BindBitmaps() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void MarkReachableObjects() OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_)
-      REQUIRES(Locks::heap_bitmap_lock_);
+  void MarkReachableObjects()
+      OVERRIDE
+      REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void Sweep(bool swap_bitmaps) OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_)
-      REQUIRES(Locks::heap_bitmap_lock_);
+  void Sweep(bool swap_bitmaps)
+      OVERRIDE
+      REQUIRES(Locks::heap_bitmap_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(StickyMarkSweep);