Merge "Snap for 4455664 from 7eb0e9a52d978d67cb6be727ee6f1e88cdf95934 to oreo-vts-release" into oreo-vts-release
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 7067fd1..a267766 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -883,8 +883,7 @@
   }
 };
 
-// Disabled test due to b/37318304.
-TEST_F(Dex2oatReturnCodeTest, DISABLED_TestCreateRuntime) {
+TEST_F(Dex2oatReturnCodeTest, TestCreateRuntime) {
   int status = RunTest({ "--boot-image=/this/does/not/exist/yolo.oat" });
   EXPECT_EQ(static_cast<int>(dex2oat::ReturnCode::kCreateRuntime), WEXITSTATUS(status)) << output_;
 }
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index ab2146a..0748c7b 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -2175,7 +2175,9 @@
   // Note that from_ref is a from space ref so the SizeOf() call will access the from-space meta
   // objects, but it's ok and necessary.
   size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags>();
-  size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
+  size_t region_space_alloc_size = (obj_size <= space::RegionSpace::kRegionSize)
+      ? RoundUp(obj_size, space::RegionSpace::kAlignment)
+      : RoundUp(obj_size, space::RegionSpace::kRegionSize);
   size_t region_space_bytes_allocated = 0U;
   size_t non_moving_space_bytes_allocated = 0U;
   size_t bytes_allocated = 0U;
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 3910a03..3a57a81 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -315,18 +315,21 @@
       DCHECK(first_reg->IsFree());
       first_reg->UnfreeLarge(this, time_);
       ++num_non_free_regions_;
-      first_reg->SetTop(first_reg->Begin() + num_bytes);
+      size_t allocated = num_regs * kRegionSize;
+      // We make 'top' all usable bytes, as the caller of this
+      // allocation may use all of 'usable_size' (see mirror::Array::Alloc).
+      first_reg->SetTop(first_reg->Begin() + allocated);
       for (size_t p = left + 1; p < right; ++p) {
         DCHECK_LT(p, num_regions_);
         DCHECK(regions_[p].IsFree());
         regions_[p].UnfreeLargeTail(this, time_);
         ++num_non_free_regions_;
       }
-      *bytes_allocated = num_bytes;
+      *bytes_allocated = allocated;
       if (usable_size != nullptr) {
-        *usable_size = num_regs * kRegionSize;
+        *usable_size = allocated;
       }
-      *bytes_tl_bulk_allocated = num_bytes;
+      *bytes_tl_bulk_allocated = allocated;
       return reinterpret_cast<mirror::Object*>(first_reg->Begin());
     } else {
       // right points to the non-free region. Start with the one after it.
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 4dea0fa..07aea95 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -411,7 +411,9 @@
       DCHECK(IsInUnevacFromSpace());
       DCHECK(!IsLargeTail());
       DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
-      live_bytes_ += live_bytes;
+      // For large allocations, we always consider all bytes in the
+      // regions live.
+      live_bytes_ += IsLarge() ? Top() - begin_ : live_bytes;
       DCHECK_LE(live_bytes_, BytesAllocated());
     }
 
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 6b10dfc..201701a 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -545,47 +545,27 @@
   //
   // We map in the stack by reading every page from the stack bottom (highest address)
   // to the stack top. (We then madvise this away.) This must be done by reading from the
-  // current stack pointer downwards.
+  // current stack pointer downwards. Any access more than a page below the current SP
+  // might cause a segv.
+  // TODO: This comment may be out of date. It seems possible to speed this up. As
+  //       this is normally done once in the zygote on startup, ignore for now.
   //
-  // Accesses too far below the current machine register corresponding to the stack pointer (e.g.,
-  // ESP on x86[-32], SP on ARM) might cause a SIGSEGV (at least on x86 with newer kernels). We
-  // thus have to move the stack pointer. We do this portably by using a recursive function with a
-  // large stack frame size.
+  // AddressSanitizer does not like the part of this function that reads every stack page.
+  // Looks a lot like an out-of-bounds access.
 
-  // (Defensively) first remove the protection on the protected region as we'll want to read
+  // (Defensively) first remove the protection on the protected region as we will want to read
   // and write it. Ignore errors.
   UnprotectStack();
 
   VLOG(threads) << "Need to map in stack for thread at " << std::hex <<
       static_cast<void*>(pregion);
 
-  struct RecurseDownStack {
-    // This function has an intentionally large stack size.
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wframe-larger-than="
-    NO_INLINE
-    static void Touch(uintptr_t target) {
-      volatile size_t zero = 0;
-      // Use a large local volatile array to ensure a large frame size. Do not use anything close
-      // to a full page for ASAN. It would be nice to ensure the frame size is at most a page, but
-      // there is no pragma support for this.
-      // Note: for ASAN we need to shrink the array a bit, as there's other overhead.
-      constexpr size_t kAsanMultiplier =
-#ifdef ADDRESS_SANITIZER
-          2u;
-#else
-          1u;
-#endif
-      volatile char space[kPageSize - (kAsanMultiplier * 256)];
-      char sink ATTRIBUTE_UNUSED = space[zero];
-      if (reinterpret_cast<uintptr_t>(space) >= target + kPageSize) {
-        Touch(target);
-      }
-      zero *= 2;  // Try to avoid tail recursion.
-    }
-#pragma GCC diagnostic pop
-  };
-  RecurseDownStack::Touch(reinterpret_cast<uintptr_t>(pregion));
+  // Read every page from the high address to the low.
+  volatile uint8_t dont_optimize_this;
+  UNUSED(dont_optimize_this);
+  for (uint8_t* p = stack_top; p >= pregion; p -= kPageSize) {
+    dont_optimize_this = *p;
+  }
 
   VLOG(threads) << "(again) installing stack protected region at " << std::hex <<
       static_cast<void*>(pregion) << " to " <<
diff --git a/test/659-unpadded-array/expected.txt b/test/659-unpadded-array/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/659-unpadded-array/expected.txt
diff --git a/test/659-unpadded-array/info.txt b/test/659-unpadded-array/info.txt
new file mode 100644
index 0000000..905c529
--- /dev/null
+++ b/test/659-unpadded-array/info.txt
@@ -0,0 +1,3 @@
+Regression test for the concurrent GC whose region space had
+a bug when the request for allocation ended up using 'usable_size'
+instead of the initially requested number of bytes.
diff --git a/test/659-unpadded-array/src/Main.java b/test/659-unpadded-array/src/Main.java
new file mode 100644
index 0000000..80fd6e2
--- /dev/null
+++ b/test/659-unpadded-array/src/Main.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import dalvik.system.VMRuntime;
+
+public class Main {
+  public static void main(String[] args) {
+    // Call our optimization API; we used to have a bug in the RegionSpace on large
+    // objects allocated through it.
+    Object[] o = (Object[]) VMRuntime.getRuntime().newUnpaddedArray(Object.class, 70000);
+
+    // Make the test run for 30 seconds to be less dependent on GC heuristics.
+    long time = System.currentTimeMillis();
+    int i = 1;
+    do {
+      allocateIntArray(i);
+      for (int j = 0; j < o.length; j++) {
+        if (o[j] != null) {
+          // Just print, not throw, to get into "interesting" issues (eg the first
+          // element that will not be null is the class of the object, the second is
+          // actually the first element of the int array).
+          System.out.println("Unexpected value: " + o[j]);
+        }
+      }
+      if (i < 100000) {
+        i++;
+      } else {
+        i = 0;
+      }
+    } while (System.currentTimeMillis() - time < 30000);
+  }
+
+  static void allocateIntArray(int i) {
+    int[] intArray = new int[i];
+    for (int j = 0; j < intArray.length; j++) {
+      intArray[j] = 1;
+    }
+  }
+}