Merge "Disable stack mprotects when running on valgrind" into lmp-mr1-dev
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 07c1f1e..8f3e1ef 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -173,6 +173,14 @@
   -DNVALGRIND \
   -Wno-unused-value
 
+
+ifdef ART_IMT_SIZE
+  art_cflags += -DIMT_SIZE=$(ART_IMT_SIZE)
+else
+  # Default is 64
+  art_cflags += -DIMT_SIZE=64
+endif
+
 ifeq ($(ART_SMALL_MODE),true)
   art_cflags += -DART_SMALL_MODE=1
 endif
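
The new flag surfaces in C++ as a plain -D macro with a hard failure if it ever goes missing; a minimal sketch of the consuming side (it mirrors the runtime/mirror/class.h hunk further down):

    #include <cstddef>

    #ifndef IMT_SIZE
    #error IMT_SIZE not defined  // Build flag missing: fail loudly at compile time.
    #endif

    // Interface method table size, fixed at compile time (default 64).
    static constexpr std::size_t kImtSize = IMT_SIZE;
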
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index b59ab13..00245c4 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -146,7 +146,7 @@
   CHECK(code != nullptr);
   const byte* base = reinterpret_cast<const byte*>(code);  // Base of data points at code.
   base -= kPointerSize;  // Move backward so that code_offset != 0.
-  uint32_t  code_offset = kPointerSize;
+  uint32_t code_offset = kPointerSize;
   return OatFile::OatMethod(base, code_offset);
 }
 
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 6aee563..2aafb74 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -762,8 +762,10 @@
       } else {
         DCHECK(cur_block->fall_through == NullBasicBlockId);
         DCHECK(cur_block->taken == NullBasicBlockId);
-        // Unreachable instruction, mark for no continuation.
+        // Unreachable instruction, mark for no continuation and end basic block.
         flags &= ~Instruction::kContinue;
+        FindBlock(current_offset_ + width, /* split */ false, /* create */ true,
+                  /* immed_pred_block_p */ NULL);
       }
     } else {
       cur_block->AppendMIR(insn);
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index d5c9327..f268123 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -900,7 +900,11 @@
       r_method = TargetReg(kArg2, kRef);
       LoadCurrMethodDirect(r_method);
     }
-    LoadRefDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
+    // Method to declaring class.
+    LoadRefDisp(r_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+                TargetReg(kArg0, kRef), kNotVolatile);
+    // Declaring class to dex cache strings.
+    LoadRefDisp(TargetReg(kArg0, kRef), mirror::Class::DexCacheStringsOffset().Int32Value(),
                 TargetReg(kArg0, kRef), kNotVolatile);
 
     // Might call out to helper, which will return resolved string in kRet0
@@ -955,8 +959,14 @@
       RegLocation rl_method = LoadCurrMethod();
       RegStorage res_reg = AllocTempRef();
       RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
-      LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg,
+
+      // Method to declaring class.
+      LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+                  res_reg, kNotVolatile);
+      // Declaring class to dex cache strings.
+      LoadRefDisp(res_reg, mirror::Class::DexCacheStringsOffset().Int32Value(), res_reg,
                   kNotVolatile);
+
       LoadRefDisp(res_reg, offset_of_string, rl_result.reg, kNotVolatile);
       StoreValue(rl_dest, rl_result);
     }
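
Both hunks replace the old single load through the method with a two-step chase; expressed with the runtime accessors this change introduces (a sketch, not the generated code):

    // Was: method->GetDexCacheStrings()->Get(string_idx)
    // Now: go through the declaring class first (one extra dependent load).
    mirror::Class* declaring_class = method->GetDeclaringClass();
    mirror::String* s = declaring_class->GetDexCacheStrings()->Get(string_idx);
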
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index dedbf8d..a20592c 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -980,7 +980,7 @@
   if (obj->IsArtMethod()) {
     // Size without pointer fields since we don't want to overrun the buffer if target art method
     // is 32 bits but source is 64 bits.
-    n = mirror::ArtMethod::SizeWithoutPointerFields();
+    n = mirror::ArtMethod::SizeWithoutPointerFields(sizeof(void*));
   } else {
     n = obj->SizeOf();
   }
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 945048e..bd7bbc6 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -317,6 +317,9 @@
       if (UNLIKELY(&lhs->GetVmapTable() != &rhs->GetVmapTable())) {
         return &lhs->GetVmapTable() < &rhs->GetVmapTable();
       }
+      if (UNLIKELY(&lhs->GetGcMap() != &rhs->GetGcMap())) {
+        return &lhs->GetGcMap() < &rhs->GetGcMap();
+      }
       return false;
     }
   };
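
The new comparison adds one more key to a lexicographic tie-break chain; the shape of the pattern with plain keys, as a sketch (names hypothetical):

    struct Entry { const void* vmap; const void* gc_map; };

    // Compare by the first differing key; equal elements compare "not less",
    // which keeps the ordering strict-weak as std::sort and std::map require.
    bool Less(const Entry& lhs, const Entry& rhs) {
      if (lhs.vmap != rhs.vmap) return lhs.vmap < rhs.vmap;
      if (lhs.gc_map != rhs.gc_map) return lhs.gc_map < rhs.gc_map;
      return false;
    }
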
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 701f44f..98fe0fa 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -46,7 +46,7 @@
 #define METHOD_DEX_CACHE_METHODS_OFFSET 12
 
 // Verified by object_test.
-#define METHOD_QUICK_CODE_OFFSET_32 48
+#define METHOD_QUICK_CODE_OFFSET_32 44
 #define METHOD_QUICK_CODE_OFFSET_64 56
 #define METHOD_PORTABLE_CODE_OFFSET_32 56
 #define METHOD_PORTABLE_CODE_OFFSET_64 72
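
The 32-bit offset drops by exactly the size of the removed dex_cache_strings_ field, a 4-byte HeapReference; checking the arithmetic at compile time, as a sketch:

    #include <cstdint>

    // Old offset 48 minus one 4-byte HeapReference<ObjectArray<String>> slot.
    static_assert(48 - sizeof(uint32_t) == 44,
                  "quick code offset moves down by one reference field");
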
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index d05f7af..c1af0fb 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -58,9 +58,9 @@
 
 inline mirror::String* ClassLinker::ResolveString(uint32_t string_idx,
                                                   mirror::ArtMethod* referrer) {
-  mirror::String* resolved_string = referrer->GetDexCacheStrings()->Get(string_idx);
+  mirror::Class* declaring_class = referrer->GetDeclaringClass();
+  mirror::String* resolved_string = declaring_class->GetDexCacheStrings()->Get(string_idx);
   if (UNLIKELY(resolved_string == NULL)) {
-    mirror::Class* declaring_class = referrer->GetDeclaringClass();
     StackHandleScope<1> hs(Thread::Current());
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
     const DexFile& dex_file = *dex_cache->GetDexFile();
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 3d9bbb6..c6df340 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2715,6 +2715,7 @@
 
   klass->SetDexClassDefIndex(dex_file.GetIndexForClassDef(dex_class_def));
   klass->SetDexTypeIndex(dex_class_def.class_idx_);
+  klass->SetDexCacheStrings(klass->GetDexCache()->GetStrings());
 
   const byte* class_data = dex_file.GetClassData(dex_class_def);
   if (class_data == nullptr) {
@@ -2863,7 +2864,6 @@
   dst->SetDeclaringClass(klass.Get());
   dst->SetCodeItemOffset(it.GetMethodCodeItemOffset());
 
-  dst->SetDexCacheStrings(klass->GetDexCache()->GetStrings());
   dst->SetDexCacheResolvedMethods(klass->GetDexCache()->GetResolvedMethods());
   dst->SetDexCacheResolvedTypes(klass->GetDexCache()->GetResolvedTypes());
 
@@ -4000,7 +4000,6 @@
 
   // The proxy method doesn't have its own dex cache or dex file and so it steals those of its
  // interface prototype. The exceptions to this are Constructors and the Class of the Proxy itself.
-  CHECK_EQ(prototype->GetDexCacheStrings(), method->GetDexCacheStrings());
   CHECK(prototype->HasSameDexCacheResolvedMethods(method.Get()));
   CHECK(prototype->HasSameDexCacheResolvedTypes(method.Get()));
   CHECK_EQ(prototype->GetDexMethodIndex(), method->GetDexMethodIndex());
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 384a2bf..928780a 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -164,11 +164,8 @@
     EXPECT_TRUE(method->GetName() != nullptr);
     EXPECT_TRUE(method->GetSignature() != Signature::NoSignature());
 
-    EXPECT_TRUE(method->GetDexCacheStrings() != nullptr);
     EXPECT_TRUE(method->HasDexCacheResolvedMethods());
     EXPECT_TRUE(method->HasDexCacheResolvedTypes());
-    EXPECT_EQ(method->GetDeclaringClass()->GetDexCache()->GetStrings(),
-              method->GetDexCacheStrings());
     EXPECT_TRUE(method->HasSameDexCacheResolvedMethods(
         method->GetDeclaringClass()->GetDexCache()->GetResolvedMethods()));
     EXPECT_TRUE(method->HasSameDexCacheResolvedTypes(
@@ -205,6 +202,8 @@
     EXPECT_FALSE(klass->IsArrayClass());
     EXPECT_TRUE(klass->GetComponentType() == NULL);
     EXPECT_TRUE(klass->IsInSamePackage(klass.Get()));
+    EXPECT_TRUE(klass->GetDexCacheStrings() != nullptr);
+    EXPECT_EQ(klass->GetDexCacheStrings(), klass->GetDexCache()->GetStrings());
     std::string temp2;
     EXPECT_TRUE(mirror::Class::IsInSamePackage(klass->GetDescriptor(&temp),
                                                klass->GetDescriptor(&temp2)));
@@ -392,7 +391,8 @@
 
     bool error = false;
 
-    if (!klass->IsClassClass() && !is_static) {
+    // ArtMethod has a different size due to the padding field.
+    if (!klass->IsArtMethodClass() && !klass->IsClassClass() && !is_static) {
       size_t expected_size = is_static ? klass->GetClassSize(): klass->GetObjectSize();
       if (sizeof(T) != expected_size) {
         LOG(ERROR) << "Class size mismatch:"
@@ -489,7 +489,6 @@
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, declaring_class_),                      "declaringClass"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_methods_),           "dexCacheResolvedMethods"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_types_),             "dexCacheResolvedTypes"));
-    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_strings_),                    "dexCacheStrings"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_code_item_offset_),           "dexCodeItemOffset"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_method_index_),               "dexMethodIndex"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, method_index_),                   "methodIndex"));
@@ -504,6 +503,7 @@
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, clinit_thread_id_),              "clinitThreadId"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, component_type_),                "componentType"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_cache_),                     "dexCache"));
+    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_cache_strings_),             "dexCacheStrings"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_class_def_idx_),             "dexClassDefIndex"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_type_idx_),                  "dexTypeIndex"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, direct_methods_),                "directMethods"));
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 5bb9abd..8212d44 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -991,6 +991,22 @@
   Trim();
 }
 
+class TrimIndirectReferenceTableClosure : public Closure {
+ public:
+  explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
+  }
+  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+    ATRACE_BEGIN("Trimming reference table");
+    thread->GetJniEnv()->locals.Trim();
+    ATRACE_END();
+    barrier_->Pass(Thread::Current());
+  }
+
+ private:
+  Barrier* const barrier_;
+};
+
+
 void Heap::Trim() {
   Thread* self = Thread::Current();
   {
@@ -1012,6 +1028,22 @@
     WaitForGcToCompleteLocked(kGcCauseTrim, self);
     collector_type_running_ = kCollectorTypeHeapTrim;
   }
+  // Trim reference tables.
+  {
+    ScopedObjectAccess soa(self);
+    JavaVMExt* vm = soa.Vm();
+    // Trim globals indirect reference table.
+    {
+      WriterMutexLock mu(self, vm->globals_lock);
+      vm->globals.Trim();
+    }
+    // Trim locals indirect reference tables.
+    Barrier barrier(0);
+    TrimIndirectReferenceTableClosure closure(&barrier);
+    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
+    size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
+    barrier.Increment(self, barrier_count);
+  }
   uint64_t start_ns = NanoTime();
   // Trim the managed spaces.
   uint64_t total_alloc_space_allocated = 0;
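
The locals trim runs through the runtime's checkpoint machinery: RunCheckpoint installs the closure on every thread and reports how many will execute it, and the barrier blocks until each one has checked in. The skeleton of that pattern, annotated (same calls as the hunk above):

    Barrier barrier(0);
    TrimIndirectReferenceTableClosure closure(&barrier);
    // Ask every thread to run the closure; returns the number of threads
    // that will do so at their next checkpoint.
    size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
    // Each thread calls barrier_->Pass() from Run(); block here until all
    // barrier_count of them have.
    barrier.Increment(self, barrier_count);
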
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 2b1a257..c5526bb 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -159,13 +159,12 @@
   DCHECK(table_ != NULL);
   DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
 
-  int idx = ExtractIndex(iref);
-
   if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid &&
       Thread::Current()->HandleScopeContains(reinterpret_cast<jobject>(iref))) {
     LOG(WARNING) << "Attempt to remove local handle scope entry from IRT, ignoring";
     return true;
   }
+  const int idx = ExtractIndex(iref);
   if (idx < bottomIndex) {
     // Wrong segment.
     LOG(WARNING) << "Attempt to remove index outside index area (" << idx
@@ -233,6 +232,13 @@
   return true;
 }
 
+void IndirectReferenceTable::Trim() {
+  const size_t top_index = Capacity();
+  auto* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
+  uint8_t* release_end = table_mem_map_->End();
+  madvise(release_start, release_end - release_start, MADV_DONTNEED);
+}
+
 void IndirectReferenceTable::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
                                         RootType root_type) {
   for (auto ref : *this) {
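
Trim() keeps the mapping but returns the physical pages behind the unused tail to the kernel; the same idiom in a self-contained form (hypothetical helper, standard POSIX madvise):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    // Release the backing pages of [first_unused, map_end). The range stays
    // mapped; anonymous memory reads back zero-filled if touched again.
    void ReleaseTailPages(uint8_t* first_unused, uint8_t* map_end, size_t page_size) {
      uintptr_t start =
          (reinterpret_cast<uintptr_t>(first_unused) + page_size - 1) & ~(page_size - 1);
      if (start < reinterpret_cast<uintptr_t>(map_end)) {
        madvise(reinterpret_cast<void*>(start),
                reinterpret_cast<uintptr_t>(map_end) - start, MADV_DONTNEED);
      }
    }
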
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 5a178ea..51b2fcf 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -344,6 +344,9 @@
     return Offset(OFFSETOF_MEMBER(IndirectReferenceTable, segment_state_));
   }
 
+  // Release pages past the end of the table that may have previously held references.
+  void Trim() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   // Extract the table index from an indirect reference.
   static uint32_t ExtractIndex(IndirectRef iref) {
diff --git a/runtime/method_helper-inl.h b/runtime/method_helper-inl.h
index 9af835f..15340df 100644
--- a/runtime/method_helper-inl.h
+++ b/runtime/method_helper-inl.h
@@ -64,7 +64,8 @@
 
 inline mirror::String* MethodHelper::ResolveString(uint32_t string_idx) {
   mirror::ArtMethod* method = GetMethod();
-  mirror::String* s = method->GetDexCacheStrings()->Get(string_idx);
+  mirror::Class* declaring_class = method->GetDeclaringClass();
+  mirror::String* s = declaring_class->GetDexCacheStrings()->Get(string_idx);
   if (UNLIKELY(s == nullptr)) {
     StackHandleScope<1> hs(Thread::Current());
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index e70a48e..b28ea4d 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -83,11 +83,6 @@
   return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_method_index_));
 }
 
-inline ObjectArray<String>* ArtMethod::GetDexCacheStrings() {
-  return GetFieldObject<ObjectArray<String>>(
-      OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_strings_));
-}
-
 inline ObjectArray<ArtMethod>* ArtMethod::GetDexCacheResolvedMethods() {
   return GetFieldObject<ObjectArray<ArtMethod>>(
       OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_methods_));
@@ -551,11 +546,6 @@
   return interface_method;
 }
 
-inline void ArtMethod::SetDexCacheStrings(ObjectArray<String>* new_dex_cache_strings) {
-  SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_strings_),
-                        new_dex_cache_strings);
-}
-
 inline void ArtMethod::SetDexCacheResolvedMethods(ObjectArray<ArtMethod>* new_dex_cache_methods) {
   SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_methods_),
                         new_dex_cache_methods);
@@ -569,7 +559,11 @@
 inline void ArtMethod::CheckObjectSizeEqualsMirrorSize() {
   // Using the default, check the class object size to make sure it matches the size of the
   // object.
-  DCHECK_EQ(GetClass()->GetObjectSize(), sizeof(*this));
+  size_t this_size = sizeof(*this);
+#ifdef ART_METHOD_HAS_PADDING_FIELD_ON_64_BIT
+  this_size += sizeof(void*) - sizeof(uint32_t);
+#endif
+  DCHECK_EQ(GetClass()->GetObjectSize(), this_size);
 }
 
 
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 1fa42c6..7066cf6 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -41,6 +41,8 @@
 typedef void (EntryPointFromInterpreter)(Thread* self, MethodHelper& mh,
     const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame, JValue* result);
 
+#define ART_METHOD_HAS_PADDING_FIELD_ON_64_BIT
+
 // C++ mirror of java.lang.reflect.ArtMethod.
 class MANAGED ArtMethod FINAL : public Object {
  public:
@@ -198,14 +200,6 @@
     SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_method_index_), new_idx);
   }
 
-  ObjectArray<String>* GetDexCacheStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void SetDexCacheStrings(ObjectArray<String>* new_dex_cache_strings)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  static MemberOffset DexCacheStringsOffset() {
-    return OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_strings_);
-  }
-
   static MemberOffset DexCacheResolvedMethodsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_methods_);
   }
@@ -390,10 +384,6 @@
   const uint8_t* GetNativeGcMap(const void* code_pointer, size_t pointer_size)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // When building the oat need a convenient place to stuff the offset of the native GC map.
-  // void SetOatNativeGcMapOffset(uint32_t gc_map_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  // uint32_t GetOatNativeGcMapOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
   template <bool kCheckFrameSize = true>
   uint32_t GetFrameSizeInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     uint32_t result = GetQuickFrameInfo().FrameSizeInBytes();
@@ -427,17 +417,17 @@
   void UnregisterNative(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static MemberOffset EntryPointFromInterpreterOffset(size_t pointer_size) {
-    return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+    return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
         PtrSizedFields, entry_point_from_interpreter_) / sizeof(void*) * pointer_size);
   }
 
   static MemberOffset EntryPointFromJniOffset(size_t pointer_size) {
-    return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+    return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
         PtrSizedFields, entry_point_from_jni_) / sizeof(void*) * pointer_size);
   }
 
   static MemberOffset EntryPointFromQuickCompiledCodeOffset(size_t pointer_size) {
-    return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+    return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
         PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*) * pointer_size);
   }
 
@@ -549,13 +539,19 @@
 
   ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  static size_t SizeWithoutPointerFields() {
-    return sizeof(ArtMethod) - sizeof(PtrSizedFields);
+  static size_t SizeWithoutPointerFields(size_t pointer_size) {
+    size_t total = sizeof(ArtMethod) - sizeof(PtrSizedFields);
+#ifdef ART_METHOD_HAS_PADDING_FIELD_ON_64_BIT
+    // Add 4 bytes if 64-bit, otherwise 0.
+    total += pointer_size - sizeof(uint32_t);
+#endif
+    return total;
   }
 
   // Size of an instance of java.lang.reflect.ArtMethod not including its value array.
   static size_t InstanceSize(size_t pointer_size) {
-    return SizeWithoutPointerFields() + (sizeof(PtrSizedFields) / sizeof(void*)) * pointer_size;
+    return SizeWithoutPointerFields(pointer_size) +
+        (sizeof(PtrSizedFields) / sizeof(void*)) * pointer_size;
   }
 
  protected:
@@ -569,9 +565,6 @@
   // Short cuts to declaring_class_->dex_cache_ member for fast compiled code access.
   HeapReference<ObjectArray<Class>> dex_cache_resolved_types_;
 
-  // Short cuts to declaring_class_->dex_cache_ member for fast compiled code access.
-  HeapReference<ObjectArray<String>> dex_cache_strings_;
-
   // Access flags; low 16 bits are defined by spec.
   uint32_t access_flags_;
 
@@ -590,7 +583,7 @@
   // ifTable.
   uint32_t method_index_;
 
-  // Add alignment word here if necessary.
+  // Fake padding field gets inserted here.
 
   // Must be the last fields in the method.
   struct PACKED(4) PtrSizedFields {
@@ -623,8 +616,13 @@
   ALWAYS_INLINE ObjectArray<Class>* GetDexCacheResolvedTypes()
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  static size_t PtrSizedFieldsOffset() {
-    return OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_);
+  static size_t PtrSizedFieldsOffset(size_t pointer_size) {
+    size_t offset = OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_);
+#ifdef ART_METHOD_HAS_PADDING_FIELD_ON_64_BIT
+    // Add 4 bytes if 64-bit, otherwise 0.
+    offset += pointer_size - sizeof(uint32_t);
+#endif
+    return offset;
   }
 
   friend struct art::ArtMethodOffsets;  // for verifying offset information
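
The pointer_size - sizeof(uint32_t) term is the alignment word the compiler inserts before ptr_sized_fields_ on 64-bit targets and nothing on 32-bit ones; the arithmetic worked through both cases, as a sketch:

    #include <cstddef>
    #include <cstdint>

    // Fields above ptr_sized_fields_ end on a 4-byte boundary, so a 64-bit
    // target pads to the next 8-byte boundary; a 32-bit target needs nothing.
    constexpr std::size_t Padding(std::size_t pointer_size) {
      return pointer_size - sizeof(uint32_t);
    }
    static_assert(Padding(8) == 4, "64-bit targets gain one alignment word");
    static_assert(Padding(4) == 0, "32-bit targets are unchanged");
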
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 70ab5c8..beb2f72 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -772,6 +772,14 @@
   }
 }
 
+inline void Class::SetDexCacheStrings(ObjectArray<String>* new_dex_cache_strings) {
+  SetFieldObject<false>(DexCacheStringsOffset(), new_dex_cache_strings);
+}
+
+inline ObjectArray<String>* Class::GetDexCacheStrings() {
+  return GetFieldObject<ObjectArray<String>>(DexCacheStringsOffset());
+}
+
 }  // namespace mirror
 }  // namespace art
 
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 63aa675..7ef4239 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -62,6 +62,10 @@
 #define CLASS_OFFSET_FROM_CLZ(rshift) \
     MemberOffset((static_cast<int>(rshift) * CLASS_OFFSET_ALIGNMENT))
 
+#ifndef IMT_SIZE
+#error IMT_SIZE not defined
+#endif
+
 namespace art {
 
 struct ClassOffsets;
@@ -83,7 +87,7 @@
   // Interface method table size. Increasing this value reduces the chance of two interface methods
   // colliding in the interface method table but increases the size of classes that implement
   // (non-marker) interfaces.
-  static constexpr size_t kImtSize = 64;
+  static constexpr size_t kImtSize = IMT_SIZE;
 
   // imtable entry embedded in class object.
   struct MANAGED ImTableEntry {
@@ -1017,6 +1021,13 @@
   bool GetSlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void SetSlowPath(bool enabled) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  ObjectArray<String>* GetDexCacheStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetDexCacheStrings(ObjectArray<String>* new_dex_cache_strings)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static MemberOffset DexCacheStringsOffset() {
+    return OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_strings_);
+  }
+
   // Used to initialize a class in the allocation code path to ensure it is guarded by a StoreStore
   // fence.
   class InitializeClassVisitor {
@@ -1062,6 +1073,9 @@
   // runtime such as arrays and primitive classes).
   HeapReference<DexCache> dex_cache_;
 
+  // Short cuts to dex_cache_ member for fast compiled code access.
+  HeapReference<ObjectArray<String>> dex_cache_strings_;
+
   // static, private, and <init> methods
   HeapReference<ObjectArray<ArtMethod>> direct_methods_;
 
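
kImtSize trades memory per class against the odds of two interface methods landing in the same slot (collisions fall back to a slower conflict-resolution path). A sketch of the slot selection, assuming the usual modulo scheme:

    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t kImtSize = 64;  // Or IMT_SIZE from the build flag.

    // Hypothetical illustration: indices congruent mod kImtSize collide.
    std::size_t ImtSlot(uint32_t dex_method_index) {
      return dex_method_index % kImtSize;
    }
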
diff --git a/runtime/oat.cc b/runtime/oat.cc
index a8fd250..34c1362 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -23,7 +23,7 @@
 namespace art {
 
 const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '4', '4', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '4', '5', '\0' };
 
 static size_t ComputeOatHeaderSize(const SafeMap<std::string, std::string>* variable_data) {
   size_t estimate = 0U;
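
The version bump matters because the ArtMethod and Class field layouts baked into compiled oat files changed; old files must be rejected rather than misread. A sketch of the header check (not the runtime's exact code):

    #include <cstdint>
    #include <cstring>

    // Compare the version bytes stored in an oat file against the current
    // constant; any mismatch means the file predates this layout change.
    bool OatVersionMatches(const uint8_t stored[4], const uint8_t current[4]) {
      return std::memcmp(stored, current, 4) == 0;
    }
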