Merge "Fix JNI compiler frame size adjustments."
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index c50b4ae..1ec0c76 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -48,7 +48,7 @@
 ART_TEST_KNOWN_FAILING :=
 
 # Keep going after encountering a test failure?
-ART_TEST_KEEP_GOING ?= false
+ART_TEST_KEEP_GOING ?= true
 
 # Do you want all tests, even those that are time consuming?
 ART_TEST_FULL ?= true
@@ -65,12 +65,6 @@
 # Do you want tests with the GC stress mode enabled run?
 ART_TEST_GC_STRESS ?= $(ART_TEST_FULL)
 
-# Do you want oat tests with relocation enabled?
-ART_TEST_OAT_RELOCATE ?= true
-
-# Do you want oat tests with relocation disabled?
-ART_TEST_OAT_NO_RELOCATE ?= $(ART_TEST_FULL)
-
 # Do you want run-tests with relocation enabled?
 ART_TEST_RUN_TEST_RELOCATE ?= $(ART_TEST_FULL)
 
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 2cba0ec..6e27190 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -146,6 +146,7 @@
   compiler/optimizing/pretty_printer_test.cc \
   compiler/optimizing/register_allocator_test.cc \
   compiler/optimizing/ssa_test.cc \
+  compiler/optimizing/stack_map_test.cc \
   compiler/output_stream_test.cc \
   compiler/utils/arena_allocator_test.cc \
   compiler/utils/dedupe_set_test.cc \
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
new file mode 100644
index 0000000..3c6ad8f
--- /dev/null
+++ b/compiler/optimizing/stack_map_stream.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_STACK_MAP_STREAM_H_
+#define ART_COMPILER_OPTIMIZING_STACK_MAP_STREAM_H_
+
+#include "base/bit_vector.h"
+#include "memory_region.h"
+#include "stack_map.h"
+#include "utils/allocation.h"
+#include "utils/growable_array.h"
+
+namespace art {
+
+/**
+ * Collects and builds a CodeInfo for a method.
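+ *
+ * A minimal usage sketch (mirroring Test1 in stack_map_test.cc from this
+ * change; size_t as the native PC type is simply what the tests choose):
+ *
+ *   ArenaPool pool;
+ *   ArenaAllocator arena(&pool);
+ *   StackMapStream<size_t> stream(&arena);
+ *
+ *   ArenaBitVector sp_mask(&arena, 0, false);
+ *   stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, 2, 0);
+ *   stream.AddDexRegisterEntry(DexRegisterMap::kInStack, 0);
+ *   stream.AddDexRegisterEntry(DexRegisterMap::kConstant, -2);
+ *
+ *   size_t size = stream.ComputeNeededSize();
+ *   MemoryRegion region(arena.Alloc(size, kArenaAllocMisc), size);
+ *   stream.FillIn(region);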
+ */
+template<typename T>
+class StackMapStream : public ValueObject {
+ public:
+  explicit StackMapStream(ArenaAllocator* allocator)
+      : stack_maps_(allocator, 10),
+        dex_register_maps_(allocator, 10 * 4),
+        inline_infos_(allocator, 2),
+        stack_mask_max_(-1),
+        number_of_stack_maps_with_inline_info_(0) {}
+
+  // Compute bytes needed to encode a mask with the given maximum element.
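+  // For example, max_element == 4 needs bits 0..4, i.e. 5 bits, which round
+  // up to one byte; max_element == 8 needs 9 bits, i.e. two bytes.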
+  static uint32_t StackMaskEncodingSize(int max_element) {
+    int number_of_bits = max_element + 1;  // Need room for max element too.
+    return RoundUp(number_of_bits, kBitsPerByte) / kBitsPerByte;
+  }
+
+  // See runtime/stack_map.h for what these fields contain.
+  struct StackMapEntry {
+    uint32_t dex_pc;
+    T native_pc;
+    uint32_t register_mask;
+    BitVector* sp_mask;
+    uint32_t num_dex_registers;
+    uint8_t inlining_depth;
+    size_t dex_register_maps_start_index;
+    size_t inline_infos_start_index;
+  };
+
+  struct DexRegisterEntry {
+    DexRegisterMap::LocationKind kind;
+    int32_t value;
+  };
+
+  struct InlineInfoEntry {
+    uint32_t method_index;
+  };
+
+  void AddStackMapEntry(uint32_t dex_pc,
+                        T native_pc,
+                        uint32_t register_mask,
+                        BitVector* sp_mask,
+                        uint32_t num_dex_registers,
+                        uint8_t inlining_depth) {
+    StackMapEntry entry;
+    entry.dex_pc = dex_pc;
+    entry.native_pc = native_pc;
+    entry.register_mask = register_mask;
+    entry.sp_mask = sp_mask;
+    entry.num_dex_registers = num_dex_registers;
+    entry.inlining_depth = inlining_depth;
+    entry.dex_register_maps_start_index = dex_register_maps_.Size();
+    entry.inline_infos_start_index = inline_infos_.Size();
+    stack_maps_.Add(entry);
+
+    stack_mask_max_ = std::max(stack_mask_max_, sp_mask->GetHighestBitSet());
+    if (inlining_depth > 0) {
+      number_of_stack_maps_with_inline_info_++;
+    }
+  }
+
+  void AddDexRegisterEntry(DexRegisterMap::LocationKind kind, int32_t value) {
+    DexRegisterEntry entry;
+    entry.kind = kind;
+    entry.value = value;
+    dex_register_maps_.Add(entry);
+  }
+
+  void AddInlineInfoEntry(uint32_t method_index) {
+    InlineInfoEntry entry;
+    entry.method_index = method_index;
+    inline_infos_.Add(entry);
+  }
+
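+  // As an illustration (Test1 in stack_map_test.cc: one stack map, an empty
+  // stack mask, two dex registers, no inlining; assuming T = size_t on a
+  // 64-bit host with 4-byte enums), this is 8 (CodeInfo header)
+  // + 24 (one stack map) + 16 (dex register map) + 0 (inline info) = 48 bytes.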
+  size_t ComputeNeededSize() const {
+    return CodeInfo<T>::kFixedSize
+        + ComputeStackMapSize()
+        + ComputeDexRegisterMapSize()
+        + ComputeInlineInfoSize();
+  }
+
+  size_t ComputeStackMapSize() const {
+    return stack_maps_.Size() * (StackMap<T>::kFixedSize + StackMaskEncodingSize(stack_mask_max_));
+  }
+
+  size_t ComputeDexRegisterMapSize() const {
+    // We currently encode all dex register information per stack map.
+    return stack_maps_.Size() * DexRegisterMap::kFixedSize
+      // For each dex register entry.
+      + (dex_register_maps_.Size() * DexRegisterMap::SingleEntrySize());
+  }
+
+  size_t ComputeInlineInfoSize() const {
+    return inline_infos_.Size() * InlineInfo::SingleEntrySize()
+      // For encoding the depth.
+      + (number_of_stack_maps_with_inline_info_ * InlineInfo::kFixedSize);
+  }
+
+  size_t ComputeInlineInfoStart() const {
+    return ComputeDexRegisterMapStart() + ComputeDexRegisterMapSize();
+  }
+
+  size_t ComputeDexRegisterMapStart() const {
+    return CodeInfo<T>::kFixedSize + ComputeStackMapSize();
+  }
+
+  void FillIn(MemoryRegion region) {
+    CodeInfo<T> code_info(region);
+
+    size_t stack_mask_size = StackMaskEncodingSize(stack_mask_max_);
+    uint8_t* memory_start = region.start();
+
+    MemoryRegion dex_register_maps_region = region.Subregion(
+      ComputeDexRegisterMapStart(),
+      ComputeDexRegisterMapSize());
+
+    MemoryRegion inline_infos_region = region.Subregion(
+      ComputeInlineInfoStart(),
+      ComputeInlineInfoSize());
+
+    code_info.SetNumberOfStackMaps(stack_maps_.Size());
+    code_info.SetStackMaskSize(stack_mask_size);
+
+    uintptr_t next_dex_register_map_offset = 0;
+    uintptr_t next_inline_info_offset = 0;
+    for (size_t i = 0, e = stack_maps_.Size(); i < e; ++i) {
+      StackMap<T> stack_map = code_info.GetStackMapAt(i);
+      StackMapEntry entry = stack_maps_.Get(i);
+
+      stack_map.SetDexPc(entry.dex_pc);
+      stack_map.SetNativePc(entry.native_pc);
+      stack_map.SetRegisterMask(entry.register_mask);
+      stack_map.SetStackMask(*entry.sp_mask);
+
+      // Set the register map.
+      MemoryRegion register_region = dex_register_maps_region.Subregion(
+          next_dex_register_map_offset,
+          DexRegisterMap::kFixedSize + entry.num_dex_registers * DexRegisterMap::SingleEntrySize());
+      next_dex_register_map_offset += register_region.size();
+      DexRegisterMap dex_register_map(register_region);
+      stack_map.SetDexRegisterMapOffset(register_region.start() - memory_start);
+
+      for (size_t j = 0; j < entry.num_dex_registers; ++j) {
+        DexRegisterEntry register_entry =
+            dex_register_maps_.Get(j + entry.dex_register_maps_start_index);
+        dex_register_map.SetRegisterInfo(j, register_entry.kind, register_entry.value);
+      }
+
+      // Set the inlining info.
+      if (entry.inlining_depth != 0) {
+        MemoryRegion inline_region = inline_infos_region.Subregion(
+            next_inline_info_offset,
+            InlineInfo::kFixedSize + entry.inlining_depth * InlineInfo::SingleEntrySize());
+        next_inline_info_offset += inline_region.size();
+        InlineInfo inline_info(inline_region);
+
+        stack_map.SetInlineDescriptorOffset(inline_region.start() - memory_start);
+
+        inline_info.SetDepth(entry.inlining_depth);
+        for (size_t j = 0; j < entry.inlining_depth; ++j) {
+          InlineInfoEntry inline_entry = inline_infos_.Get(j + entry.inline_infos_start_index);
+          inline_info.SetMethodReferenceIndexAtDepth(j, inline_entry.method_index);
+        }
+      } else {
+        stack_map.SetInlineDescriptorOffset(InlineInfo::kNoInlineInfo);
+      }
+    }
+  }
+
+ private:
+  GrowableArray<StackMapEntry> stack_maps_;
+  GrowableArray<DexRegisterEntry> dex_register_maps_;
+  GrowableArray<InlineInfoEntry> inline_infos_;
+  int stack_mask_max_;
+  size_t number_of_stack_maps_with_inline_info_;
+
+  DISALLOW_COPY_AND_ASSIGN(StackMapStream);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_STACK_MAP_STREAM_H_
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
new file mode 100644
index 0000000..a70259e
--- /dev/null
+++ b/compiler/optimizing/stack_map_test.cc
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "stack_map.h"
+#include "stack_map_stream.h"
+#include "utils/arena_bit_vector.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+bool SameBits(MemoryRegion region, const BitVector& bit_vector) {
+  for (size_t i = 0; i < region.size_in_bits(); ++i) {
+    if (region.LoadBit(i) != bit_vector.IsBitSet(i)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+TEST(StackMapTest, Test1) {
+  ArenaPool pool;
+  ArenaAllocator arena(&pool);
+  StackMapStream<size_t> stream(&arena);
+
+  ArenaBitVector sp_mask(&arena, 0, false);
+  stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, 2, 0);
+  stream.AddDexRegisterEntry(DexRegisterMap::kInStack, 0);
+  stream.AddDexRegisterEntry(DexRegisterMap::kConstant, -2);
+
+  size_t size = stream.ComputeNeededSize();
+  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  MemoryRegion region(memory, size);
+  stream.FillIn(region);
+
+  CodeInfo<size_t> code_info(region);
+  ASSERT_EQ(0u, code_info.GetStackMaskSize());
+  ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
+
+  StackMap<size_t> stack_map = code_info.GetStackMapAt(0);
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePc(64)));
+  ASSERT_EQ(0u, stack_map.GetDexPc());
+  ASSERT_EQ(64u, stack_map.GetNativePc());
+  ASSERT_EQ(0x3u, stack_map.GetRegisterMask());
+  ASSERT_FALSE(stack_map.HasInlineInfo());
+
+  MemoryRegion stack_mask = stack_map.GetStackMask();
+  ASSERT_TRUE(SameBits(stack_mask, sp_mask));
+
+  DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, 2);
+  ASSERT_EQ(DexRegisterMap::kInStack, dex_registers.GetLocationKind(0));
+  ASSERT_EQ(DexRegisterMap::kConstant, dex_registers.GetLocationKind(1));
+  ASSERT_EQ(0, dex_registers.GetValue(0));
+  ASSERT_EQ(-2, dex_registers.GetValue(1));
+}
+
+TEST(StackMapTest, Test2) {
+  ArenaPool pool;
+  ArenaAllocator arena(&pool);
+  StackMapStream<size_t> stream(&arena);
+
+  ArenaBitVector sp_mask1(&arena, 0, true);
+  sp_mask1.SetBit(2);
+  sp_mask1.SetBit(4);
+  stream.AddStackMapEntry(0, 64, 0x3, &sp_mask1, 2, 2);
+  stream.AddDexRegisterEntry(DexRegisterMap::kInStack, 0);
+  stream.AddDexRegisterEntry(DexRegisterMap::kConstant, -2);
+  stream.AddInlineInfoEntry(42);
+  stream.AddInlineInfoEntry(82);
+
+  ArenaBitVector sp_mask2(&arena, 0, true);
+  sp_mask2.SetBit(3);
+  sp_mask1.SetBit(8);
+  stream.AddStackMapEntry(1, 128, 0xFF, &sp_mask2, 1, 0);
+  stream.AddDexRegisterEntry(DexRegisterMap::kInRegister, 0);
+
+  size_t size = stream.ComputeNeededSize();
+  void* memory = arena.Alloc(size, kArenaAllocMisc);
+  MemoryRegion region(memory, size);
+  stream.FillIn(region);
+
+  CodeInfo<size_t> code_info(region);
+  ASSERT_EQ(1u, code_info.GetStackMaskSize());
+  ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
+
+  StackMap<size_t> stack_map = code_info.GetStackMapAt(0);
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePc(64)));
+  ASSERT_EQ(0u, stack_map.GetDexPc());
+  ASSERT_EQ(64u, stack_map.GetNativePc());
+  ASSERT_EQ(0x3u, stack_map.GetRegisterMask());
+
+  MemoryRegion stack_mask = stack_map.GetStackMask();
+  ASSERT_TRUE(SameBits(stack_mask, sp_mask1));
+
+  DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, 2);
+  ASSERT_EQ(DexRegisterMap::kInStack, dex_registers.GetLocationKind(0));
+  ASSERT_EQ(DexRegisterMap::kConstant, dex_registers.GetLocationKind(1));
+  ASSERT_EQ(0, dex_registers.GetValue(0));
+  ASSERT_EQ(-2, dex_registers.GetValue(1));
+
+  InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+  ASSERT_EQ(2u, inline_info.GetDepth());
+  ASSERT_EQ(42u, inline_info.GetMethodReferenceIndexAtDepth(0));
+  ASSERT_EQ(82u, inline_info.GetMethodReferenceIndexAtDepth(1));
+
+  stack_map = code_info.GetStackMapAt(1);
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u)));
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePc(128u)));
+  ASSERT_EQ(1u, stack_map.GetDexPc());
+  ASSERT_EQ(128u, stack_map.GetNativePc());
+  ASSERT_EQ(0xFFu, stack_map.GetRegisterMask());
+
+  stack_mask = stack_map.GetStackMask();
+  ASSERT_TRUE(SameBits(stack_mask, sp_mask2));
+
+  ASSERT_FALSE(stack_map.HasInlineInfo());
+}
+
+}  // namespace art
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 6fa34c4..17d3fce 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -104,6 +104,7 @@
   mirror/string.cc \
   mirror/throwable.cc \
   monitor.cc \
+  native_bridge.cc \
   native/dalvik_system_DexFile.cc \
   native/dalvik_system_VMDebug.cc \
   native/dalvik_system_VMRuntime.cc \
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 04f1cc1..d64a030 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -382,6 +382,20 @@
     return location_;
   }
 
+  // For normal dex files, location and base location coincide. If a dex file is part of a multidex
+  // archive, the base location is the name of the originating jar/apk, stripped of any internal
+  // classes*.dex path.
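+  // For example, the base location of the multidex location "x.jar:classes2.dex" is "x.jar"
+  // (java_lang_VMClassLoader.cc relies on this to find resources inside the archive).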
+  const std::string GetBaseLocation() const {
+    if (IsMultiDexLocation(location_.c_str())) {
+      std::pair<const char*, const char*> pair = SplitMultiDexLocation(location_.c_str());
+      std::string res(pair.first);
+      delete[] pair.first;
+      return res;
+    } else {
+      return location_;
+    }
+  }
+
   // For DexFiles directly from .dex files, this is the checksum from the DexFile::Header.
   // For DexFiles opened from a zip files, this will be the ZipEntry CRC32 of classes.dex.
   uint32_t GetLocationChecksum() const {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 48ae84d..d6cf52f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1605,6 +1605,10 @@
        // Remove the main space so that we don't try to trim it, this doesn't work for debug
         // builds since RosAlloc attempts to read the magic number from a protected page.
         RemoveSpace(main_space_);
+        RemoveRememberedSet(main_space_);
+        RemoveRememberedSet(main_space_backup_.get());
+        main_space_backup_.reset(nullptr);
+        main_space_ = nullptr;
         temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
                                                                 mem_map.release());
         AddSpace(temp_space_);
@@ -3114,6 +3118,7 @@
   CHECK(space != nullptr);
   auto it = remembered_sets_.find(space);
   CHECK(it != remembered_sets_.end());
+  delete it->second;
   remembered_sets_.erase(it);
   CHECK(remembered_sets_.find(space) == remembered_sets_.end());
 }
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 56c6d6d..8ffadd5 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -572,6 +572,7 @@
 
   accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
   void AddRememberedSet(accounting::RememberedSet* remembered_set);
+  // Also deletes the remembered set.
   void RemoveRememberedSet(space::Space* space);
 
   bool IsCompilingBoot() const;
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index f9c7ec6..64cca3d 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -41,6 +41,7 @@
 #include "mirror/object_array-inl.h"
 #include "mirror/string-inl.h"
 #include "mirror/throwable.h"
+#include "native_bridge.h"
 #include "parsed_options.h"
 #include "reflection.h"
 #include "runtime.h"
@@ -362,6 +363,7 @@
   SharedLibrary(const std::string& path, void* handle, mirror::Object* class_loader)
       : path_(path),
         handle_(handle),
+        needs_native_bridge_(false),
         class_loader_(class_loader),
         jni_on_load_lock_("JNI_OnLoad lock"),
         jni_on_load_cond_("JNI_OnLoad condition variable", jni_on_load_lock_),
@@ -422,10 +424,30 @@
     jni_on_load_cond_.Broadcast(self);
   }
 
+  void SetNeedsNativeBridge() {
+    needs_native_bridge_ = true;
+  }
+
+  bool NeedsNativeBridge() const {
+    return needs_native_bridge_;
+  }
+
   void* FindSymbol(const std::string& symbol_name) {
     return dlsym(handle_, symbol_name.c_str());
   }
 
+  void* FindSymbolWithNativeBridge(const std::string& symbol_name, mirror::ArtMethod* m)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    CHECK(NeedsNativeBridge());
+
+    uint32_t len = 0;
+    const char* shorty = nullptr;
+    if (m != nullptr) {
+      shorty = m->GetShorty(&len);
+    }
+    return NativeBridge::GetTrampoline(handle_, symbol_name.c_str(), shorty, len);
+  }
+
   void VisitRoots(RootCallback* visitor, void* arg) {
     if (class_loader_ != nullptr) {
       visitor(&class_loader_, arg, 0, kRootVMInternal);
@@ -445,6 +467,9 @@
   // The void* returned by dlopen(3).
   void* handle_;
 
+  // True if a native bridge is required.
+  bool needs_native_bridge_;
+
   // The ClassLoader this library is associated with.
   mirror::Object* class_loader_;
 
@@ -505,9 +530,17 @@
         continue;
       }
       // Try the short name then the long name...
-      void* fn = library->FindSymbol(jni_short_name);
-      if (fn == nullptr) {
-        fn = library->FindSymbol(jni_long_name);
+      void* fn = nullptr;
+      if (UNLIKELY(library->NeedsNativeBridge())) {
+        fn = library->FindSymbolWithNativeBridge(jni_short_name, m);
+        if (fn == nullptr) {
+          fn = library->FindSymbolWithNativeBridge(jni_long_name, m);
+        }
+      } else {
+        fn = library->FindSymbol(jni_short_name);
+        if (fn == nullptr) {
+          fn = library->FindSymbol(jni_long_name);
+        }
       }
       if (fn != nullptr) {
         VLOG(jni) << "[Found native code for " << PrettyMethod(m)
@@ -3267,7 +3300,15 @@
   // This can execute slowly for a large library on a busy system, so we
   // want to switch from kRunnable while it executes.  This allows the GC to ignore us.
   self->TransitionFromRunnableToSuspended(kWaitingForJniOnLoad);
-  void* handle = dlopen(path.empty() ? nullptr : path.c_str(), RTLD_LAZY);
+  const char* path_str = path.empty() ? nullptr : path.c_str();
+  void* handle = dlopen(path_str, RTLD_LAZY);
+  bool needs_native_bridge = false;
+  if (handle == nullptr) {
+    if (NativeBridge::IsSupported(path_str)) {
+      handle = NativeBridge::LoadLibrary(path_str, RTLD_LAZY);
+      needs_native_bridge = true;
+    }
+  }
   self->TransitionFromSuspendedToRunnable();
 
   VLOG(jni) << "[Call to dlopen(\"" << path << "\", RTLD_LAZY) returned " << handle << "]";
@@ -3300,7 +3341,14 @@
       << "]";
 
   bool was_successful = false;
-  void* sym = dlsym(handle, "JNI_OnLoad");
+  void* sym = nullptr;
+  if (UNLIKELY(needs_native_bridge)) {
+    library->SetNeedsNativeBridge();
+    sym = library->FindSymbolWithNativeBridge("JNI_OnLoad", nullptr);
+  } else {
+    sym = dlsym(handle, "JNI_OnLoad");
+  }
+
   if (sym == nullptr) {
     VLOG(jni) << "[No JNI_OnLoad found in \"" << path << "\"]";
     was_successful = true;
diff --git a/runtime/memory_region.h b/runtime/memory_region.h
index 849ab1c..bab2e86 100644
--- a/runtime/memory_region.h
+++ b/runtime/memory_region.h
@@ -56,14 +56,31 @@
     return ComputeInternalPointer<T>(offset);
   }
 
+  // Load a single bit in the region. The bit at offset 0 is the least
+  // significant bit in the first byte.
+  bool LoadBit(uintptr_t bit_offset) const {
+    uint8_t bit_mask;
+    uint8_t byte = *ComputeBitPointer(bit_offset, &bit_mask);
+    return byte & bit_mask;
+  }
+
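+  // Store a single bit in the region, using the same addressing as LoadBit
+  // above: bit_offset 0 is the least significant bit of the first byte.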
+  void StoreBit(uintptr_t bit_offset, bool value) const {
+    uint8_t bit_mask;
+    uint8_t* byte = ComputeBitPointer(bit_offset, &bit_mask);
+    if (value) {
+      *byte |= bit_mask;
+    } else {
+      *byte &= ~bit_mask;
+    }
+  }
+
   void CopyFrom(size_t offset, const MemoryRegion& from) const;
 
   // Compute a sub memory region based on an existing one.
-  void Subregion(const MemoryRegion& from, uintptr_t offset, uintptr_t size) {
-    CHECK_GE(from.size(), size);
-    CHECK_LE(offset,  from.size() - size);
-    pointer_ = reinterpret_cast<void*>(from.start() + offset);
-    size_ = size;
+  MemoryRegion Subregion(uintptr_t offset, uintptr_t size) const {
+    CHECK_GE(this->size(), size);
+    CHECK_LE(offset, this->size() - size);
+    return MemoryRegion(reinterpret_cast<void*>(start() + offset), size);
   }
 
   // Compute an extended memory region based on an existing one.
@@ -90,8 +107,6 @@
 
   void* pointer_;
   size_t size_;
-
-  DISALLOW_COPY_AND_ASSIGN(MemoryRegion);
 };
 
 }  // namespace art
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 440d3d0..ac1a310 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -97,7 +97,7 @@
   void operator=(const NullableScopedUtfChars&);
 };
 
-static jlong DexFile_openDexFileNative(JNIEnv* env, jclass, jstring javaSourceName, jstring javaOutputName, jint) {
+static jlong DexFile_openDexFile(JNIEnv* env, jclass, jstring javaSourceName, jstring javaOutputName, jint) {
   ScopedUtfChars sourceName(env, javaSourceName);
   if (sourceName.c_str() == NULL) {
     return 0;
@@ -498,7 +498,7 @@
   NATIVE_METHOD(DexFile, getClassNameList, "(J)[Ljava/lang/String;"),
   NATIVE_METHOD(DexFile, isDexOptNeeded, "(Ljava/lang/String;)Z"),
   NATIVE_METHOD(DexFile, isDexOptNeededInternal, "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Z)Z"),
-  NATIVE_METHOD(DexFile, openDexFileNative, "(Ljava/lang/String;Ljava/lang/String;I)J"),
+  NATIVE_METHOD(DexFile, openDexFile, "(Ljava/lang/String;Ljava/lang/String;I)J"),
 };
 
 void register_dalvik_system_DexFile(JNIEnv* env) {
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index f2b8a03..fefddae 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -62,25 +62,28 @@
  */
 static jstring VMClassLoader_getBootClassPathResource(JNIEnv* env, jclass, jstring javaName, jint index) {
   ScopedUtfChars name(env, javaName);
-  if (name.c_str() == NULL) {
-    return NULL;
+  if (name.c_str() == nullptr) {
+    return nullptr;
   }
 
   const std::vector<const DexFile*>& path = Runtime::Current()->GetClassLinker()->GetBootClassPath();
   if (index < 0 || size_t(index) >= path.size()) {
-    return NULL;
+    return nullptr;
   }
   const DexFile* dex_file = path[index];
-  const std::string& location(dex_file->GetLocation());
+
+  // For multidex locations, e.g., x.jar:classes2.dex, we want to look into x.jar.
+  const std::string& location(dex_file->GetBaseLocation());
+
   std::string error_msg;
   std::unique_ptr<ZipArchive> zip_archive(ZipArchive::Open(location.c_str(), &error_msg));
   if (zip_archive.get() == nullptr) {
     LOG(WARNING) << "Failed to open zip archive '" << location << "': " << error_msg;
-    return NULL;
+    return nullptr;
   }
   std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(name.c_str(), &error_msg));
-  if (zip_entry.get() == NULL) {
-    return NULL;
+  if (zip_entry.get() == nullptr) {
+    return nullptr;
   }
 
   std::string url;
diff --git a/runtime/native_bridge.cc b/runtime/native_bridge.cc
new file mode 100644
index 0000000..de04a99
--- /dev/null
+++ b/runtime/native_bridge.cc
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "native_bridge.h"
+
+#include <dlfcn.h>
+#include <stdio.h>
+#include "jni.h"
+
+#include "base/mutex.h"
+#include "mirror/art_method-inl.h"
+#include "mirror/class-inl.h"
+#include "scoped_thread_state_change.h"
+#include "ScopedLocalRef.h"
+#include "thread.h"
+
+#ifdef HAVE_ANDROID_OS
+#include "cutils/properties.h"
+#endif
+
+
+namespace art {
+
+// Is native-bridge support enabled?
+static constexpr bool kNativeBridgeEnabled = false;
+
+// Default library name for native-bridge.
+static constexpr const char* kDefaultNativeBridge = "libnativebridge.so";
+
+#ifdef HAVE_ANDROID_OS
+// TODO: This will be removed once we have native-bridge command-line arguments.
+
+// Property that defines the library name of native-bridge.
+static constexpr const char* kPropNativeBridge = "persist.native.bridge";
+
+// Property that enables native-bridge.
+static constexpr const char* kPropEnableNativeBridge = "persist.enable.native.bridge";
+#endif
+
+// The symbol name exposed by native-bridge with the type of NativeBridgeCallbacks.
+static constexpr const char* kNativeBridgeInterfaceSymbol = "NativeBridgeItf";
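+
+// A native-bridge library is expected to export that symbol as a filled-in
+// NativeBridgeCallbacks struct, along the lines of (names illustrative only):
+//
+//   NativeBridgeCallbacks NativeBridgeItf = {
+//     &MyInitialize, &MyLoadLibrary, &MyGetTrampoline, &MyIsSupported
+//   };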
+
+// ART interfaces to native-bridge.
+struct NativeBridgeArtCallbacks {
+  // Log utility; reserved, currently unused.
+  int (*logger)(int prio, const char* tag, const char* fmt, ...);
+
+  // Get shorty of a Java method. The shorty is supposed to be persistent in memory.
+  //
+  // Parameters:
+  //   env [IN] pointer to JNIEnv.
+  //   mid [IN] Java methodID.
+  // Returns:
+  //   the method's shorty descriptor.
+  const char* (*getMethodShorty)(JNIEnv* env, jmethodID mid);
+
+  // Get number of native methods for specified class.
+  //
+  // Parameters:
+  //   env [IN] pointer to JNIEnv.
+  //   clazz [IN] Java class object.
+  // Returns:
+  //   number of native methods.
+  int (*getNativeMethodCount)(JNIEnv* env, jclass clazz);
+
+  // Get at most 'method_count' native methods for the specified class 'clazz'. Results are
+  // output via 'methods' [OUT]. The signature pointer in JNINativeMethod is reused as the
+  // method shorty.
+  //
+  // Parameters:
+  //   env [IN] pointer to JNIEnv.
+  //   clazz [IN] Java class object.
+  //   methods [OUT] array of method with the name, shorty, and fnPtr.
+  //   method_count [IN] max number of elements in methods.
+  // Returns:
+  //   number of methods actually written to 'methods'.
+  int (*getNativeMethods)(JNIEnv* env, jclass clazz, JNINativeMethod* methods, uint32_t method_count);
+};
+
+// Native-bridge interfaces to ART
+struct NativeBridgeCallbacks {
+  // Initialize native-bridge. Native-bridge's internal implementation must ensure MT safety and
+  // that the native-bridge is initialized only once. Thus it is OK to call this interface for an
+  // already initialized native-bridge.
+  //
+  // Parameters:
+  //   art_cbs [IN] the pointer to NativeBridgeArtCallbacks.
+  // Returns:
+  //   true iff initialization was successful.
+  bool (*initialize)(NativeBridgeArtCallbacks* art_cbs);
+
+  // Load a shared library that is supported by the native-bridge.
+  //
+  // Parameters:
+  //   libpath [IN] path to the shared library
+  //   flag [IN] the standard RTLD_XXX flags defined in bionic dlfcn.h
+  // Returns:
+  //   The opaque handle of the shared library if successful, otherwise NULL
+  void* (*loadLibrary)(const char* libpath, int flag);
+
+  // Get a native-bridge trampoline for the specified native method. The trampoline has the
+  // same signature as the native method.
+  //
+  // Parameters:
+  //   handle [IN] the handle returned from loadLibrary
+  //   shorty [IN] short descriptor of native method
+  //   len [IN] length of shorty
+  // Returns:
+  //   address of trampoline if successful, otherwise NULL
+  void* (*getTrampoline)(void* handle, const char* name, const char* shorty, uint32_t len);
+
+  // Check whether native library is valid and is for an ABI that is supported by native-bridge.
+  //
+  // Parameters:
+  //   libpath [IN] path to the shared library
+  // Returns:
+  //   TRUE if library is supported by native-bridge, FALSE otherwise
+  bool (*isSupported)(const char* libpath);
+};
+
+static const char* GetMethodShorty(JNIEnv* env, jmethodID mid) {
+  ScopedObjectAccess soa(env);
+  StackHandleScope<1> scope(soa.Self());
+  mirror::ArtMethod* m = soa.DecodeMethod(mid);
+  MethodHelper mh(scope.NewHandle(m));
+  return mh.GetShorty();
+}
+
+static int GetNativeMethodCount(JNIEnv* env, jclass clazz) {
+  if (clazz == nullptr)
+    return 0;
+
+  ScopedObjectAccess soa(env);
+  mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
+
+  size_t method_count = 0;
+  for (size_t i = 0; i < c->NumDirectMethods(); ++i) {
+    mirror::ArtMethod* m = c->GetDirectMethod(i);
+    if (m->IsNative())
+      method_count++;
+  }
+  for (size_t i = 0; i < c->NumVirtualMethods(); ++i) {
+    mirror::ArtMethod* m = c->GetVirtualMethod(i);
+    if (m->IsNative())
+      method_count++;
+  }
+  return method_count;
+}
+
+static int GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
+                            uint32_t method_count) {
+  if ((clazz == nullptr) || (methods == nullptr))
+    return 0;
+
+  ScopedObjectAccess soa(env);
+  mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
+
+  size_t count = 0;
+  for (size_t i = 0; i < c->NumDirectMethods(); ++i) {
+    mirror::ArtMethod* m = c->GetDirectMethod(i);
+    if (m->IsNative() && count < method_count) {
+      methods[count].name = m->GetName();
+      methods[count].signature = m->GetShorty();
+      methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+      count++;
+    }
+  }
+  for (size_t i = 0; i < c->NumVirtualMethods(); ++i) {
+    mirror::ArtMethod* m = c->GetVirtualMethod(i);
+    if (m->IsNative() && count < method_count) {
+      methods[count].name = m->GetName();
+      methods[count].signature = m->GetShorty();
+      methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+      count++;
+    }
+  }
+  return count;
+}
+
+NativeBridgeArtCallbacks NativeBridgeArtItf = {
+  nullptr,
+  GetMethodShorty,
+  GetNativeMethodCount,
+  GetNativeMethods
+};
+
+bool NativeBridge::Init() {
+  if (!kNativeBridgeEnabled) {
+    return false;
+  }
+
+  MutexLock mu(Thread::Current(), lock_);
+
+  if (!initialized_) {
+    const char* libnb_path = kDefaultNativeBridge;
+#ifdef HAVE_ANDROID_OS
+    char prop_buf[PROP_VALUE_MAX];
+    property_get(kPropEnableNativeBridge, prop_buf, "false");
+    if (strcmp(prop_buf, "true") != 0)
+      return false;
+
+    // If the property persist.native.bridge is set, override the default library name.
+    int name_len = property_get(kPropNativeBridge, prop_buf, kDefaultNativeBridge);
+    if (name_len > 0)
+      libnb_path = prop_buf;
+#endif
+    void* handle = dlopen(libnb_path, RTLD_LAZY);
+    if (handle == nullptr)
+      return false;
+
+    callbacks_ = reinterpret_cast<NativeBridgeCallbacks*>(dlsym(handle,
+                                                                kNativeBridgeInterfaceSymbol));
+    if (callbacks_ == nullptr) {
+      dlclose(handle);
+      return false;
+    }
+
+    callbacks_->initialize(&NativeBridgeArtItf);
+    initialized_ = true;
+  }
+
+  return initialized_;
+}
+
+void* NativeBridge::LoadLibrary(const char* libpath, int flag) {
+  if (Init())
+    return callbacks_->loadLibrary(libpath, flag);
+  return nullptr;
+}
+
+void* NativeBridge::GetTrampoline(void* handle, const char* name, const char* shorty,
+                                  uint32_t len) {
+  if (Init())
+    return callbacks_->getTrampoline(handle, name, shorty, len);
+  return nullptr;
+}
+
+bool NativeBridge::IsSupported(const char* libpath) {
+  if (Init())
+    return callbacks_->isSupported(libpath);
+  return false;
+}
+
+bool NativeBridge::initialized_ = false;
+Mutex NativeBridge::lock_("native bridge lock");
+NativeBridgeCallbacks* NativeBridge::callbacks_ = nullptr;
+
+}  // namespace art
diff --git a/runtime/native_bridge.h b/runtime/native_bridge.h
new file mode 100644
index 0000000..dd895d2
--- /dev/null
+++ b/runtime/native_bridge.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_BRIDGE_H_
+#define ART_RUNTIME_NATIVE_BRIDGE_H_
+
+#include "base/mutex.h"
+
+namespace art {
+
+struct NativeBridgeCallbacks;
+
+class NativeBridge {
+ public:
+  // Load a shared library that is supported by the native-bridge.
+  static void* LoadLibrary(const char* libpath, int flag);
+  // Get a native-bridge trampoline for specified native method.
+  static void* GetTrampoline(void* handle, const char* name, const char* shorty, uint32_t len);
+  // True if native library is valid and is for an ABI that is supported by native-bridge.
+  static bool  IsSupported(const char* libpath);
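+
+  // Typical use, as in LoadNativeLibrary (jni_internal.cc): fall back to the
+  // bridge only when a plain dlopen(3) fails. A sketch:
+  //
+  //   void* handle = dlopen(path, RTLD_LAZY);
+  //   if (handle == nullptr && NativeBridge::IsSupported(path)) {
+  //     handle = NativeBridge::LoadLibrary(path, RTLD_LAZY);
+  //   }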
+
+ private:
+  static bool  Init();
+  static bool  initialized_ GUARDED_BY(lock_);
+  static Mutex lock_;
+  static NativeBridgeCallbacks* callbacks_;
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_NATIVE_BRIDGE_H_
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
new file mode 100644
index 0000000..7d3a48f
--- /dev/null
+++ b/runtime/stack_map.h
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_STACK_MAP_H_
+#define ART_RUNTIME_STACK_MAP_H_
+
+#include "base/bit_vector.h"
+#include "memory_region.h"
+
+namespace art {
+
+/**
+ * Classes in this file are wrappers on stack map information backed by a
+ * MemoryRegion. As such, they read from and write to the region; they don't
+ * have fields of their own.
+ */
+
+/**
+ * Inline information for a specific PC. The information is of the form:
+ * [inlining_depth, [method_dex reference]+]
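+ *
+ * For example, a PC with inlining depth 2 and method indices 42 and 82 is
+ * encoded as one uint8_t (the depth) followed by two uint32_t entries:
+ * kFixedSize + 2 * SingleEntrySize() = 1 + 2 * 4 = 9 bytes in total.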
+ */
+class InlineInfo {
+ public:
+  explicit InlineInfo(MemoryRegion region) : region_(region) {}
+
+  uint8_t GetDepth() const {
+    return region_.Load<uint8_t>(kDepthOffset);
+  }
+
+  void SetDepth(uint8_t depth) {
+    region_.Store<uint8_t>(kDepthOffset, depth);
+  }
+
+  uint32_t GetMethodReferenceIndexAtDepth(uint8_t depth) const {
+    return region_.Load<uint32_t>(kFixedSize + depth * SingleEntrySize());
+  }
+
+  void SetMethodReferenceIndexAtDepth(uint8_t depth, uint32_t index) {
+    region_.Store<uint32_t>(kFixedSize + depth * SingleEntrySize(), index);
+  }
+
+  static size_t SingleEntrySize() {
+    return sizeof(uint32_t);
+  }
+
+ private:
+  static constexpr int kDepthOffset = 0;
+  static constexpr int kFixedSize = kDepthOffset + sizeof(uint8_t);
+
+  static constexpr uint32_t kNoInlineInfo = -1;
+
+  MemoryRegion region_;
+
+  template<typename T> friend class CodeInfo;
+  template<typename T> friend class StackMap;
+  template<typename T> friend class StackMapStream;
+};
+
+/**
+ * Information on dex register values for a specific PC. The information is
+ * of the form:
+ * [location_kind, register_value]+.
+ *
+ * The location_kind for a Dex register can either be:
+ * - Constant: register_value holds the constant,
+ * - Stack: register_value holds the stack offset,
+ * - Register: register_value holds the register number.
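+ *
+ * Each entry occupies SingleEntrySize() = sizeof(LocationKind) + sizeof(int32_t)
+ * bytes; with a typical 4-byte enum representation that is 8 bytes per dex
+ * register, e.g. 16 bytes for a map of two registers (kFixedSize is 0).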
+ */
+class DexRegisterMap {
+ public:
+  explicit DexRegisterMap(MemoryRegion region) : region_(region) {}
+
+  enum LocationKind {
+    kInStack,
+    kInRegister,
+    kConstant
+  };
+
+  LocationKind GetLocationKind(uint16_t register_index) const {
+    return region_.Load<LocationKind>(
+        kFixedSize + register_index * SingleEntrySize());
+  }
+
+  void SetRegisterInfo(uint16_t register_index, LocationKind kind, int32_t value) {
+    size_t entry = kFixedSize + register_index * SingleEntrySize();
+    region_.Store<LocationKind>(entry, kind);
+    region_.Store<int32_t>(entry + sizeof(LocationKind), value);
+  }
+
+  int32_t GetValue(uint16_t register_index) const {
+    return region_.Load<int32_t>(
+        kFixedSize + sizeof(LocationKind) + register_index * SingleEntrySize());
+  }
+
+  static size_t SingleEntrySize() {
+    return sizeof(LocationKind) + sizeof(int32_t);
+  }
+
+ private:
+  static constexpr int kFixedSize = 0;
+
+  MemoryRegion region_;
+
+  template <typename T> friend class CodeInfo;
+  template <typename T> friend class StackMapStream;
+};
+
+/**
+ * A Stack Map holds compilation information for a specific PC necessary for:
+ * - Mapping it to a dex PC,
+ * - Knowing which stack entries are objects,
+ * - Knowing which registers hold objects,
+ * - Knowing the inlining information,
+ * - Knowing the values of dex registers.
+ *
+ * The information is of the form:
+ * [dex_pc, native_pc, dex_register_map_offset, inlining_info_offset, register_mask, stack_mask].
+ *
+ * Note that register_mask is fixed size, but stack_mask is variable size, depending on the
+ * stack size of a method.
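+ *
+ * For example, with T = uint32_t the fixed part is five 32-bit fields, i.e.
+ * kFixedSize = 20 bytes; a stack mask whose highest set bit is 4 then adds one
+ * byte (the mask is rounded up to whole bytes), for 21 bytes in total.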
+ */
+template <typename T>
+class StackMap {
+ public:
+  explicit StackMap(MemoryRegion region) : region_(region) {}
+
+  uint32_t GetDexPc() const {
+    return region_.Load<uint32_t>(kDexPcOffset);
+  }
+
+  void SetDexPc(uint32_t dex_pc) {
+    region_.Store<uint32_t>(kDexPcOffset, dex_pc);
+  }
+
+  T GetNativePc() const {
+    return region_.Load<T>(kNativePcOffset);
+  }
+
+  void SetNativePc(T native_pc) {
+    return region_.Store<T>(kNativePcOffset, native_pc);
+  }
+
+  uint32_t GetDexRegisterMapOffset() const {
+    return region_.Load<uint32_t>(kDexRegisterMapOffsetOffset);
+  }
+
+  void SetDexRegisterMapOffset(uint32_t offset) {
+    return region_.Store<uint32_t>(kDexRegisterMapOffsetOffset, offset);
+  }
+
+  uint32_t GetInlineDescriptorOffset() const {
+    return region_.Load<uint32_t>(kInlineDescriptorOffsetOffset);
+  }
+
+  void SetInlineDescriptorOffset(uint32_t offset) {
+    return region_.Store<uint32_t>(kInlineDescriptorOffsetOffset, offset);
+  }
+
+  uint32_t GetRegisterMask() const {
+    return region_.Load<uint32_t>(kRegisterMaskOffset);
+  }
+
+  void SetRegisterMask(uint32_t mask) {
+    region_.Store<uint32_t>(kRegisterMaskOffset, mask);
+  }
+
+  MemoryRegion GetStackMask() const {
+    return region_.Subregion(kStackMaskOffset, StackMaskSize());
+  }
+
+  void SetStackMask(const BitVector& sp_map) {
+    MemoryRegion region = GetStackMask();
+    for (size_t i = 0; i < region.size_in_bits(); i++) {
+      region.StoreBit(i, sp_map.IsBitSet(i));
+    }
+  }
+
+  bool HasInlineInfo() const {
+    return GetInlineDescriptorOffset() != InlineInfo::kNoInlineInfo;
+  }
+
+  bool Equals(const StackMap& other) {
+    return region_.pointer() == other.region_.pointer()
+       && region_.size() == other.region_.size();
+  }
+
+ private:
+  static constexpr int kDexPcOffset = 0;
+  static constexpr int kNativePcOffset = kDexPcOffset + sizeof(uint32_t);
+  static constexpr int kDexRegisterMapOffsetOffset = kNativePcOffset + sizeof(T);
+  static constexpr int kInlineDescriptorOffsetOffset =
+      kDexRegisterMapOffsetOffset + sizeof(uint32_t);
+  static constexpr int kRegisterMaskOffset = kInlineDescriptorOffsetOffset + sizeof(uint32_t);
+  static constexpr int kFixedSize = kRegisterMaskOffset + sizeof(uint32_t);
+  static constexpr int kStackMaskOffset = kFixedSize;
+
+  size_t StackMaskSize() const { return region_.size() - kFixedSize; }
+
+  MemoryRegion region_;
+
+  template <typename U> friend class CodeInfo;
+  template <typename U> friend class StackMapStream;
+};
+
+
+/**
+ * Wrapper around all compiler information collected for a method.
+ * The information is of the form:
+ * [number_of_stack_maps, stack_mask_size, StackMap+, DexRegisterMap+, InlineInfo*].
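+ *
+ * A reading-side sketch, following stack_map_test.cc (size_t as the native PC
+ * type is simply what the tests use):
+ *
+ *   CodeInfo<size_t> code_info(region);
+ *   StackMap<size_t> stack_map = code_info.GetStackMapForDexPc(0);
+ *   DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, 2);
+ *   if (stack_map.HasInlineInfo()) {
+ *     InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+ *   }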
+ */
+template <typename T>
+class CodeInfo {
+ public:
+  explicit CodeInfo(MemoryRegion region) : region_(region) {}
+
+  StackMap<T> GetStackMapAt(size_t i) const {
+    size_t size = StackMapSize();
+    return StackMap<T>(GetStackMaps().Subregion(i * size, size));
+  }
+
+  uint32_t GetStackMaskSize() const {
+    return region_.Load<uint32_t>(kStackMaskSizeOffset);
+  }
+
+  void SetStackMaskSize(uint32_t size) {
+    region_.Store<uint32_t>(kStackMaskSizeOffset, size);
+  }
+
+  size_t GetNumberOfStackMaps() const {
+    return region_.Load<uint32_t>(kNumberOfStackMapsOffset);
+  }
+
+  void SetNumberOfStackMaps(uint32_t number_of_stack_maps) {
+    region_.Store<uint32_t>(kNumberOfStackMapsOffset, number_of_stack_maps);
+  }
+
+  size_t StackMapSize() const {
+    return StackMap<T>::kFixedSize + GetStackMaskSize();
+  }
+
+  DexRegisterMap GetDexRegisterMapOf(StackMap<T> stack_map, uint32_t number_of_dex_registers) {
+    uint32_t offset = stack_map.GetDexRegisterMapOffset();
+    return DexRegisterMap(region_.Subregion(offset,
+        DexRegisterMap::kFixedSize + number_of_dex_registers * DexRegisterMap::SingleEntrySize()));
+  }
+
+  InlineInfo GetInlineInfoOf(StackMap<T> stack_map) {
+    uint32_t offset = stack_map.GetInlineDescriptorOffset();
+    uint8_t depth = region_.Load<uint8_t>(offset);
+    return InlineInfo(region_.Subregion(offset,
+        InlineInfo::kFixedSize + depth * InlineInfo::SingleEntrySize()));
+  }
+
+  StackMap<T> GetStackMapForDexPc(uint32_t dex_pc) {
+    for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+      StackMap<T> stack_map = GetStackMapAt(i);
+      if (stack_map.GetDexPc() == dex_pc) {
+        return stack_map;
+      }
+    }
+    LOG(FATAL) << "Unreachable";
+    return StackMap<T>(MemoryRegion());
+  }
+
+  StackMap<T> GetStackMapForNativePc(T native_pc) {
+    // TODO: stack maps are sorted by native pc, we can do a binary search.
+    for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+      StackMap<T> stack_map = GetStackMapAt(i);
+      if (stack_map.GetNativePc() == native_pc) {
+        return stack_map;
+      }
+    }
+    LOG(FATAL) << "Unreachable";
+    return StackMap<T>(MemoryRegion());
+  }
+
+ private:
+  static constexpr int kNumberOfStackMapsOffset = 0;
+  static constexpr int kStackMaskSizeOffset = kNumberOfStackMapsOffset + sizeof(uint32_t);
+  static constexpr int kFixedSize = kStackMaskSizeOffset + sizeof(uint32_t);
+
+  MemoryRegion GetStackMaps() const {
+    return region_.size() == 0
+        ? MemoryRegion()
+        : region_.Subregion(kFixedSize, StackMapSize() * GetNumberOfStackMaps());
+  }
+
+  MemoryRegion region_;
+  template<typename U> friend class StackMapStream;
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_STACK_MAP_H_
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 8151464..ddba708 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1972,10 +1972,13 @@
   return result;
 }
 
+// Note: this visitor may return with a method set, but dex_pc_ being DexFile::kDexNoIndex. This is
+//       so that we don't abort in a special situation (thin-locked monitor) when dumping the Java
+//       stack.
 struct CurrentMethodVisitor FINAL : public StackVisitor {
-  CurrentMethodVisitor(Thread* thread, Context* context)
+  CurrentMethodVisitor(Thread* thread, Context* context, bool fail_on_error)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : StackVisitor(thread, context), this_object_(nullptr), method_(nullptr), dex_pc_(0) {}
+      : StackVisitor(thread, context), this_object_(nullptr), method_(nullptr), dex_pc_(0),
+        fail_on_error_(fail_on_error) {}
   bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     mirror::ArtMethod* m = GetMethod();
     if (m->IsRuntimeMethod()) {
@@ -1986,16 +1989,17 @@
       this_object_ = GetThisObject();
     }
     method_ = m;
-    dex_pc_ = GetDexPc();
+    dex_pc_ = GetDexPc(fail_on_error_);
     return false;
   }
   mirror::Object* this_object_;
   mirror::ArtMethod* method_;
   uint32_t dex_pc_;
+  const bool fail_on_error_;
 };
 
 mirror::ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc) const {
-  CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr);
+  CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr, false);
   visitor.WalkStack(false);
   if (dex_pc != nullptr) {
     *dex_pc = visitor.dex_pc_;
@@ -2005,7 +2009,7 @@
 
 ThrowLocation Thread::GetCurrentLocationForThrow() {
   Context* context = GetLongJumpContext();
-  CurrentMethodVisitor visitor(this, context);
+  CurrentMethodVisitor visitor(this, context, true);
   visitor.WalkStack(false);
   ReleaseLongJumpContext(context);
   return ThrowLocation(visitor.this_object_, visitor.method_, visitor.dex_pc_);
diff --git a/test/114-ParallelGC/src/Main.java b/test/114-ParallelGC/src/Main.java
index fb110bd..2285872 100644
--- a/test/114-ParallelGC/src/Main.java
+++ b/test/114-ParallelGC/src/Main.java
@@ -39,7 +39,7 @@
 
     public void run() {
         List l = new ArrayList();
-        for (int i = 0; i < 500; i++) {
+        for (int i = 0; i < 400; i++) {
             l.add(new ArrayList(i));
         }
     }
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index ac47da6..78493dc 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -112,8 +112,7 @@
 
 # Tests that need more than 2MB of RAM or are running into other corner cases in GC stress related
 # to OOMEs.
-TEST_ART_BROKEN_GCSTRESS_RUN_TESTS := \
-  114-ParallelGC
+TEST_ART_BROKEN_GCSTRESS_RUN_TESTS :=
 
 ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,-relocate))
 ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,-no-prebuild))