Add a way to determine if a large object is a zygote object

Also fix a slight memory leak in LargeObjectMapSpace: Free() erased the
entry in mem_maps_ but never removed the object from the large_objects_
vector, so entries for freed objects accumulated. The vector and the map
are now merged into a single map from object to a {MemMap*, is_zygote}
entry.
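
The FreeListSpace side tracks the new state by packing a zygote bit into
the same 32-bit word that already encodes each allocation's size and its
free bit, so marking every live large object at fork time is a single
pass over the allocation infos with no extra storage. Below is a minimal
standalone sketch of that scheme (illustrative only, not ART code; the
flag constants mirror the ones in this change, everything else is made
up for the example):

  #include <cstdint>
  #include <cstdio>

  struct AllocInfoSketch {
    static constexpr uint32_t kFlagFree = 0x80000000u;    // block is on the free list
    static constexpr uint32_t kFlagZygote = 0x40000000u;  // allocated before the zygote fork
    static constexpr uint32_t kFlagsMask = ~(kFlagFree | kFlagZygote);  // size bits only

    uint32_t alloc_size_ = 0;  // size in alignment units plus the two flag bits

    void SetSize(uint32_t units, bool free) { alloc_size_ = units | (free ? kFlagFree : 0u); }
    uint32_t AlignSize() const { return alloc_size_ & kFlagsMask; }
    bool IsFree() const { return (alloc_size_ & kFlagFree) != 0; }
    bool IsZygoteObject() const { return (alloc_size_ & kFlagZygote) != 0; }
    void SetZygoteObject() { alloc_size_ |= kFlagZygote; }
  };

  int main() {
    AllocInfoSketch infos[3];
    infos[0].SetSize(4, /*free=*/false);  // live allocation
    infos[1].SetSize(2, /*free=*/true);   // free block, must stay unmarked
    infos[2].SetSize(8, /*free=*/false);  // live allocation
    // One pass at zygote-fork time, analogous to SetAllLargeObjectsAsZygoteObjects.
    for (AllocInfoSketch& info : infos) {
      if (!info.IsFree()) {
        info.SetZygoteObject();
      }
    }
    for (const AllocInfoSketch& info : infos) {
      std::printf("size=%u free=%d zygote=%d\n", info.AlignSize(),
                  info.IsFree(), info.IsZygoteObject());
    }
    return 0;
  }

LargeObjectMapSpace has no such size word, so it keeps an explicit
is_zygote bool in each LargeObject map entry instead.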

Bug: 20674158

(cherry picked from commit 8f23620d45399286564986d2541cda761b3fe0ac)

Change-Id: I2416df484e5b84a8c5cc0b5664c8cb102dc235f6
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 59d0259..f039f6b 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2252,6 +2252,7 @@
   // Set all the cards in the mod-union table since we don't know which objects contain references
   // to large objects.
   mod_union_table->SetCards();
+  large_object_space_->SetAllLargeObjectsAsZygoteObjects(self);
   AddModUnionTable(mod_union_table);
   if (collector::SemiSpace::kUseRememberedSet) {
     // Add a new remembered set for the post-zygote non-moving space.
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index da4a930..9436c3f 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -41,8 +41,8 @@
     // Keep valgrind happy if there are any large objects such as dex cache arrays which aren't
     // freed since they are held live by the class linker.
     MutexLock mu(Thread::Current(), lock_);
-    for (auto& m : mem_maps_) {
-      delete m.second;
+    for (auto& m : large_objects_) {
+      delete m.second.mem_map;
     }
   }
 
@@ -139,8 +139,7 @@
     CHECK(space_bitmap == nullptr) << obj_end << " overlaps with bitmap " << *space_bitmap;
   }
   MutexLock mu(self, lock_);
-  large_objects_.push_back(obj);
-  mem_maps_.Put(obj, mem_map);
+  large_objects_.Put(obj, LargeObject {mem_map, false /* not zygote */});
   const size_t allocation_size = mem_map->BaseSize();
   DCHECK(bytes_allocated != nullptr);
   begin_ = std::min(begin_, reinterpret_cast<uint8_t*>(obj));
@@ -161,28 +160,43 @@
   return obj;
 }
 
+bool LargeObjectMapSpace::IsZygoteLargeObject(Thread* self, mirror::Object* obj) const {
+  MutexLock mu(self, lock_);
+  auto it = large_objects_.find(obj);
+  CHECK(it != large_objects_.end());
+  return it->second.is_zygote;
+}
+
+void LargeObjectMapSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
+  MutexLock mu(self, lock_);
+  for (auto& pair : large_objects_) {
+    pair.second.is_zygote = true;
+  }
+}
+
 size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
   MutexLock mu(self, lock_);
-  MemMaps::iterator found = mem_maps_.find(ptr);
-  if (UNLIKELY(found == mem_maps_.end())) {
-    Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+  auto it = large_objects_.find(ptr);
+  if (UNLIKELY(it == large_objects_.end())) {
+    Runtime::Current()->GetHeap()->DumpSpaces(LOG(INTERNAL_FATAL));
     LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
   }
-  const size_t map_size = found->second->BaseSize();
+  MemMap* mem_map = it->second.mem_map;
+  const size_t map_size = mem_map->BaseSize();
   DCHECK_GE(num_bytes_allocated_, map_size);
   size_t allocation_size = map_size;
   num_bytes_allocated_ -= allocation_size;
   --num_objects_allocated_;
-  delete found->second;
-  mem_maps_.erase(found);
+  delete mem_map;
+  large_objects_.erase(it);
   return allocation_size;
 }
 
 size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
   MutexLock mu(Thread::Current(), lock_);
-  auto found = mem_maps_.find(obj);
-  CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
-  size_t alloc_size = found->second->BaseSize();
+  auto it = large_objects_.find(obj);
+  CHECK(it != large_objects_.end()) << "Attempted to get size of a large object which is not live";
+  size_t alloc_size = it->second.mem_map->BaseSize();
   if (usable_size != nullptr) {
     *usable_size = alloc_size;
   }
@@ -202,8 +216,8 @@
 
 void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
   MutexLock mu(Thread::Current(), lock_);
-  for (auto it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
-    MemMap* mem_map = it->second;
+  for (auto& pair : large_objects_) {
+    MemMap* mem_map = pair.second.mem_map;
     callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
     callback(nullptr, nullptr, 0, arg);
   }
@@ -213,22 +227,25 @@
   Thread* self = Thread::Current();
   if (lock_.IsExclusiveHeld(self)) {
     // We hold lock_ so do the check.
-    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
+    return large_objects_.find(const_cast<mirror::Object*>(obj)) != large_objects_.end();
   } else {
     MutexLock mu(self, lock_);
-    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
+    return large_objects_.find(const_cast<mirror::Object*>(obj)) != large_objects_.end();
   }
 }
 
 // Keeps track of allocation sizes + whether or not the previous allocation is free.
-// Used to coalesce free blocks and find the best fit block for an allocation.
+// Used to coalesce free blocks and find the best fit block for an allocation. Each allocation
+// has an AllocationInfo which contains the size of the previous free block preceding it.
+// Implemented in such a way that we can also find the iterator for any allocation info
+// pointer.
 class AllocationInfo {
  public:
   AllocationInfo() : prev_free_(0), alloc_size_(0) {
   }
   // Return the number of pages that the allocation info covers.
   size_t AlignSize() const {
-    return alloc_size_ & ~kFlagFree;
+    return alloc_size_ & kFlagsMask;
   }
   // Returns the allocation size in bytes.
   size_t ByteSize() const {
@@ -237,11 +254,20 @@
   // Updates the allocation size and whether or not it is free.
   void SetByteSize(size_t size, bool free) {
     DCHECK_ALIGNED(size, FreeListSpace::kAlignment);
-    alloc_size_ = (size / FreeListSpace::kAlignment) | (free ? kFlagFree : 0U);
+    alloc_size_ = (size / FreeListSpace::kAlignment) | (free ? kFlagFree : 0u);
   }
+  // Returns true if the block is free.
   bool IsFree() const {
     return (alloc_size_ & kFlagFree) != 0;
   }
+  // Return true if the large object is a zygote object.
+  bool IsZygoteObject() const {
+    return (alloc_size_ & kFlagZygote) != 0;
+  }
+  // Change the object to be a zygote object.
+  void SetZygoteObject() {
+    alloc_size_ |= kFlagZygote;
+  }
   // Finds and returns the next non free allocation info after ourself.
   AllocationInfo* GetNextInfo() {
     return this + AlignSize();
@@ -275,10 +302,9 @@
   }
 
  private:
-  // Used to implement best fit object allocation. Each allocation has an AllocationInfo which
-  // contains the size of the previous free block preceding it. Implemented in such a way that we
-  // can also find the iterator for any allocation info pointer.
-  static constexpr uint32_t kFlagFree = 0x8000000;
+  static constexpr uint32_t kFlagFree = 0x80000000;  // If block is free.
+  static constexpr uint32_t kFlagZygote = 0x40000000;  // If the large object is a zygote object.
+  static constexpr uint32_t kFlagsMask = ~(kFlagFree | kFlagZygote);  // Mask to strip the flags.
   // Contains the size of the previous free block with kAlignment as the unit. If 0 then the
   // allocation before us is not free.
   // These variables are undefined in the middle of allocations / free blocks.
@@ -493,7 +519,7 @@
 }
 
 void FreeListSpace::Dump(std::ostream& os) const {
-  MutexLock mu(Thread::Current(), const_cast<Mutex&>(lock_));
+  MutexLock mu(Thread::Current(), lock_);
   os << GetName() << " -"
      << " begin: " << reinterpret_cast<void*>(Begin())
      << " end: " << reinterpret_cast<void*>(End()) << "\n";
@@ -519,6 +545,24 @@
   }
 }
 
+bool FreeListSpace::IsZygoteLargeObject(Thread* self ATTRIBUTE_UNUSED, mirror::Object* obj) const {
+  const AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
+  DCHECK(info != nullptr);
+  return info->IsZygoteObject();
+}
+
+void FreeListSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
+  MutexLock mu(self, lock_);
+  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
+  for (AllocationInfo* cur_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin())),
+      *end_info = GetAllocationInfoForAddress(free_end_start); cur_info < end_info;
+      cur_info = cur_info->GetNextInfo()) {
+    if (!cur_info->IsFree()) {
+      cur_info->SetZygoteObject();
+    }
+  }
+}
+
 void LargeObjectSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
   SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
   space::LargeObjectSpace* space = context->space->AsLargeObjectSpace();
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index d1f9386..45ed0cd 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -98,6 +98,12 @@
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Return true if the large object is a zygote large object. Potentially slow.
+  virtual bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const = 0;
+  // Called when we create the zygote space; marks all existing large objects as zygote large
+  // objects.
+  virtual void SetAllLargeObjectsAsZygoteObjects(Thread* self) = 0;
+
  protected:
   explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end);
   static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
@@ -133,16 +139,20 @@
   bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
 
  protected:
+  struct LargeObject {
+    MemMap* mem_map;
+    bool is_zygote;
+  };
   explicit LargeObjectMapSpace(const std::string& name);
   virtual ~LargeObjectMapSpace() {}
 
+  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE LOCKS_EXCLUDED(lock_);
+  void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE LOCKS_EXCLUDED(lock_);
+
   // Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
   mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  std::vector<mirror::Object*, TrackingAllocator<mirror::Object*, kAllocatorTagLOS>> large_objects_
+  AllocationTrackingSafeMap<mirror::Object*, LargeObject, kAllocatorTagLOSMaps> large_objects_
       GUARDED_BY(lock_);
-  typedef SafeMap<mirror::Object*, MemMap*, std::less<mirror::Object*>,
-      TrackingAllocator<std::pair<mirror::Object*, MemMap*>, kAllocatorTagLOSMaps>> MemMaps;
-  MemMaps mem_maps_ GUARDED_BY(lock_);
 };
 
 // A continuous large object space with a free-list to handle holes.
@@ -177,6 +187,8 @@
   }
   // Removes header from the free blocks set by finding the corresponding iterator and erasing it.
   void RemoveFreePrev(AllocationInfo* info) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE;
+  void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE;
 
   class SortByPrevFree {
    public:
@@ -192,7 +204,7 @@
   std::unique_ptr<MemMap> allocation_info_map_;
   AllocationInfo* allocation_info_;
 
-  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   // Free bytes at the end of the space.
   size_t free_end_ GUARDED_BY(lock_);
   FreeBlocks free_blocks_ GUARDED_BY(lock_);
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index f04a7ca..05b484a 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -34,6 +34,7 @@
 
 void LargeObjectSpaceTest::LargeObjectTest() {
   size_t rand_seed = 0;
+  Thread* const self = Thread::Current();
   for (size_t i = 0; i < 2; ++i) {
     LargeObjectSpace* los = nullptr;
     if (i == 0) {
@@ -51,8 +52,8 @@
         size_t request_size = test_rand(&rand_seed) % max_allocation_size;
         size_t allocation_size = 0;
         size_t bytes_tl_bulk_allocated;
-        mirror::Object* obj = los->Alloc(Thread::Current(), request_size, &allocation_size,
-                                         nullptr, &bytes_tl_bulk_allocated);
+        mirror::Object* obj = los->Alloc(self, request_size, &allocation_size, nullptr,
+                                         &bytes_tl_bulk_allocated);
         ASSERT_TRUE(obj != nullptr);
         ASSERT_EQ(allocation_size, los->AllocationSize(obj, nullptr));
         ASSERT_GE(allocation_size, request_size);
@@ -70,8 +71,21 @@
         }
       }
 
+      // Check the zygote flag for the first phase.
+      if (phase == 0) {
+        for (const auto& pair : requests) {
+          mirror::Object* obj = pair.first;
+          ASSERT_FALSE(los->IsZygoteLargeObject(self, obj));
+        }
+        los->SetAllLargeObjectsAsZygoteObjects(self);
+        for (const auto& pair : requests) {
+          mirror::Object* obj = pair.first;
+          ASSERT_TRUE(los->IsZygoteLargeObject(self, obj));
+        }
+      }
+
       // Free 1 / 2 of the allocations in the first phase, and all of them in the second phase.
-      size_t limit = !phase ? requests.size() / 2 : 0;
+      size_t limit = phase == 0 ? requests.size() / 2 : 0;
       while (requests.size() > limit) {
         mirror::Object* obj = requests.back().first;
         size_t request_size = requests.back().second;
@@ -88,7 +102,7 @@
 
     size_t bytes_allocated = 0, bytes_tl_bulk_allocated;
     // Checks that the coalescing works.
-    mirror::Object* obj = los->Alloc(Thread::Current(), 100 * MB, &bytes_allocated, nullptr,
+    mirror::Object* obj = los->Alloc(self, 100 * MB, &bytes_allocated, nullptr,
                                      &bytes_tl_bulk_allocated);
     EXPECT_TRUE(obj != nullptr);
     los->Free(Thread::Current(), obj);
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 922fc5f..6e0e56e 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -891,8 +891,8 @@
     return;
   }
 
-  gc::space::ContinuousSpace* space =
-      Runtime::Current()->GetHeap()->FindContinuousSpaceFromObject(obj, true);
+  gc::Heap* const heap = Runtime::Current()->GetHeap();
+  const gc::space::ContinuousSpace* const space = heap->FindContinuousSpaceFromObject(obj, true);
   HprofHeapId heap_type = HPROF_HEAP_APP;
   if (space != nullptr) {
     if (space->IsZygoteSpace()) {
@@ -900,6 +900,11 @@
     } else if (space->IsImageSpace()) {
       heap_type = HPROF_HEAP_IMAGE;
     }
+  } else {
+    const auto* los = heap->GetLargeObjectsSpace();
+    if (los->Contains(obj) && los->IsZygoteLargeObject(Thread::Current(), obj)) {
+      heap_type = HPROF_HEAP_ZYGOTE;
+    }
   }
   CheckHeapSegmentConstraints();