Try normal allocation if large object allocation fails

If a large object allocation fails, we now clear the pending OOM exception and retry with the normal allocators (main space, non moving space). Such failures can occur when significant virtual address space fragmentation prevents the large object space from satisfying the request.
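
In outline, the new fast path in AllocObjectWithAllocator behaves like the
sketch below (simplified C++; AllocInMainSpaces is a hypothetical stand-in
for the existing non-LOS allocation path, not a real ART function):

  mirror::Object* obj = nullptr;
  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
                                                           pre_fence_visitor);
    if (obj != nullptr) {
      return obj;  // LOS allocation succeeded.
    }
    // LOS failed, likely due to address space fragmentation; drop the
    // pending OOM exception and fall through to the normal allocators.
    self->ClearException();
  }
  obj = AllocInMainSpaces(self, klass, byte_count);  // hypothetical helper
  return obj;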

Bug: 18504942
Change-Id: I18b9759d6af885556941542c57fec584f18197f1
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index d1fb600..15d55d0 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -48,11 +48,20 @@
   }
  // Need to check that we aren't the large object allocator since the large object allocation
  // code path includes this function. If we didn't check, we would have an infinite loop.
-  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
-    return AllocLargeObject<kInstrumented, PreFenceVisitor>(self, klass, byte_count,
-                                                            pre_fence_visitor);
-  }
   mirror::Object* obj;
+  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
+    obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
+                                                           pre_fence_visitor);
+    if (obj != nullptr) {
+      return obj;
+    } else {
+      // There should be a pending OOM exception; since we are retrying, clear it.
+      self->ClearException();
+    }
+    // If the large object allocation failed, try to use the normal spaces (main space,
+    // non moving space). This can happen if there is significant virtual address space
+    // fragmentation.
+  }
   AllocationTimer alloc_timer(this, &obj);
   size_t bytes_allocated;
   size_t usable_size;
@@ -171,10 +180,13 @@
 }
 
 template <bool kInstrumented, typename PreFenceVisitor>
-inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class* klass,
+inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class** klass,
                                               size_t byte_count,
                                               const PreFenceVisitor& pre_fence_visitor) {
-  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, klass, byte_count,
+  // Save and restore the class in case it moves.
+  StackHandleScope<1> hs(self);
+  auto klass_wrapper = hs.NewHandleWrapper(klass);
+  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, *klass, byte_count,
                                                                          kAllocatorTypeLOS,
                                                                          pre_fence_visitor);
 }
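
Why klass becomes mirror::Class**: the large object allocation attempt can
trigger a GC, and a moving collector may relocate the class object; since the
caller then reuses klass for the fallback retry, its pointer must be updated.
StackHandleScope/NewHandleWrapper registers the caller's pointer as a GC root
and writes the updated address back when the scope exits. A minimal sketch of
that contract, using a hypothetical stand-in type rather than the real Handle
machinery in runtime/handle_scope.h:

  // Hypothetical illustration only (not the real ART HandleWrapper).
  // The real wrapper also publishes the object as a GC root so that the
  // collector keeps it alive and records its new location; on scope exit
  // the current address is written back through the caller's pointer.
  template <typename T>
  class HandleWrapperSketch {
   public:
    explicit HandleWrapperSketch(T** ptr) : ptr_(ptr), obj_(*ptr) {}
    ~HandleWrapperSketch() { *ptr_ = obj_; }  // obj_ would be GC-updated.
   private:
    T** ptr_;
    T* obj_;  // Stand-in for a GC-visible root slot.
  };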
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 24f4f17..530ec18 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -643,7 +643,7 @@
 
   // We don't force this to be inlined since it is a slow path.
   template <bool kInstrumented, typename PreFenceVisitor>
-  mirror::Object* AllocLargeObject(Thread* self, mirror::Class* klass, size_t byte_count,
+  mirror::Object* AllocLargeObject(Thread* self, mirror::Class** klass, size_t byte_count,
                                    const PreFenceVisitor& pre_fence_visitor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);