Fix race with LOS Begin() and End()

There was a race during the first large object allocation that could
cause callers of Begin() and End() to see a non-null Begin() paired
with a null End(). The fix is to hold the lock and read both Begin()
and End() in the same critical section.
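
For reference, a minimal standalone sketch of the racy and fixed read
patterns (illustrative only: SketchSpace and PublishFirstAllocation are
hypothetical names, not the real ART classes; the actual call sites are
in the hunks below):

    #include <cstdint>
    #include <mutex>
    #include <utility>

    class SketchSpace {
     public:
      // Racy pattern: two separate unlocked reads can straddle the
      // first allocation, so a caller may observe the new Begin()
      // together with the old (null) End().
      uint8_t* Begin() const { return begin_; }
      uint8_t* End() const { return end_; }

      // Fixed pattern: take lock_ once and return both bounds from the
      // same critical section, so they always describe the same state.
      std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const {
        std::lock_guard<std::mutex> mu(lock_);
        return std::make_pair(begin_, end_);
      }

      // The first allocation publishes both bounds under the same lock.
      void PublishFirstAllocation(uint8_t* begin, uint8_t* end) {
        std::lock_guard<std::mutex> mu(lock_);
        begin_ = begin;
        end_ = end;
      }

     private:
      mutable std::mutex lock_;
      uint8_t* begin_ = nullptr;
      uint8_t* end_ = nullptr;
    };

The GC callers below then cast the returned pair to uintptr_t for the
bitmap walks instead of calling Begin() and End() separately.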

Bug: 32387879

Test: test-art-host CC

Change-Id: I6173bf3a55d3ba017ffa5b5e9f566025c65b7555
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index c43b048..8bb90e1 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1518,8 +1518,9 @@
   accounting::LargeObjectBitmap* const live_bitmap = los->GetLiveBitmap();
   accounting::LargeObjectBitmap* const mark_bitmap = los->GetMarkBitmap();
   // Walk through all of the objects and explicitly mark the zygote ones so they don't get swept.
-  live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(los->Begin()),
-                                reinterpret_cast<uintptr_t>(los->End()),
+  std::pair<uint8_t*, uint8_t*> range = los->GetBeginEndAtomic();
+  live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(range.first),
+                                reinterpret_cast<uintptr_t>(range.second),
                                 [mark_bitmap, los, self](mirror::Object* obj)
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 2cb1767..2ff4a3f 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -408,8 +408,9 @@
     // classes (primitive array classes) that could move though they
     // don't contain any other references.
     accounting::LargeObjectBitmap* large_live_bitmap = los->GetLiveBitmap();
-    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(los->Begin()),
-                                        reinterpret_cast<uintptr_t>(los->End()),
+    std::pair<uint8_t*, uint8_t*> range = los->GetBeginEndAtomic();
+    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(range.first),
+                                        reinterpret_cast<uintptr_t>(range.second),
                                         [this](mirror::Object* obj)
         REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
       ScanObject(obj);
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 2d5d7cb..e71a397 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -606,9 +606,12 @@
     std::swap(live_bitmap, mark_bitmap);
   }
   AllocSpace::SweepCallbackContext scc(swap_bitmaps, this);
+  std::pair<uint8_t*, uint8_t*> range = GetBeginEndAtomic();
   accounting::LargeObjectBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
-                                           reinterpret_cast<uintptr_t>(Begin()),
-                                           reinterpret_cast<uintptr_t>(End()), SweepCallback, &scc);
+                                           reinterpret_cast<uintptr_t>(range.first),
+                                           reinterpret_cast<uintptr_t>(range.second),
+                                           SweepCallback,
+                                           &scc);
   return scc.freed;
 }
 
@@ -617,6 +620,16 @@
   UNIMPLEMENTED(FATAL);
 }
 
+std::pair<uint8_t*, uint8_t*> LargeObjectMapSpace::GetBeginEndAtomic() const {
+  MutexLock mu(Thread::Current(), lock_);
+  return std::make_pair(Begin(), End());
+}
+
+std::pair<uint8_t*, uint8_t*> FreeListSpace::GetBeginEndAtomic() const {
+  MutexLock mu(Thread::Current(), lock_);
+  return std::make_pair(Begin(), End());
+}
+
 }  // namespace space
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 0320e79..38e28b1 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -104,6 +104,10 @@
   // objects.
   virtual void SetAllLargeObjectsAsZygoteObjects(Thread* self) = 0;
 
+  // GetBeginEndAtomic returns Begin() and End() atomically, that is, it never returns Begin()
+  // and End() from different allocations.
+  virtual std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const = 0;
+
  protected:
   explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end);
   static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
@@ -139,6 +143,8 @@
   // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
   bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
 
+  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
+
  protected:
   struct LargeObject {
     MemMap* mem_map;
@@ -172,6 +178,8 @@
   void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
   void Dump(std::ostream& os) const REQUIRES(!lock_);
 
+  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
+
  protected:
   FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end);
   size_t GetSlotIndexForAddress(uintptr_t address) const {