Use arenas for the verifier

The improvements come from arena-allocating the verifier's data
structures: register lines, reg types, and reg type descriptors are now
placed in arenas instead of being individually heap-allocated.

Also some minor cleanup; fixed a double-space formatting error in
StringPiece.
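
For readers unfamiliar with the pattern, below is a minimal,
self-contained sketch of arena allocation in standard C++. ArenaSketch
and RegisterLineSketch are illustrative stand-ins, not ART's actual
ArenaStack/ScopedArenaAllocator/RegisterLine classes (those appear in
the diff below, where chunks are additionally recycled across method
verifications via the runtime's ArenaPool); the sketch only shows why
the per-object malloc/free traffic disappears.

  #include <cstddef>
  #include <cstdint>
  #include <memory>
  #include <new>
  #include <vector>

  // Toy bump allocator: allocations are carved out of large chunks and
  // all memory is released at once when the arena dies, so per-object
  // free() calls (and the _int_malloc/_int_free time visible in the
  // perf numbers below) disappear.
  class ArenaSketch {
   public:
    // Assumes size <= kChunkSize; fine for small verifier objects.
    void* Alloc(size_t size) {
      size = (size + 7) & ~size_t{7};  // Keep 8-byte alignment.
      if (chunks_.empty() || pos_ + size > kChunkSize) {
        chunks_.push_back(std::make_unique<uint8_t[]>(kChunkSize));
        pos_ = 0;
      }
      void* result = chunks_.back().get() + pos_;
      pos_ += size;
      return result;
    }
    // No per-object Free(): ~ArenaSketch() drops every chunk at once.

   private:
    static constexpr size_t kChunkSize = 64 * 1024;
    std::vector<std::unique_ptr<uint8_t[]>> chunks_;
    size_t pos_ = 0;
  };

  // Trivially-destructible stand-in for a verifier register line.
  struct RegisterLineSketch {
    explicit RegisterLineSketch(uint16_t regs) : num_regs(regs) {}
    uint16_t num_regs;
  };

  int main() {
    ArenaSketch arena;
    std::vector<RegisterLineSketch*> lines;  // One per interesting pc.
    for (int i = 0; i < 1000; ++i) {
      lines.push_back(new (arena.Alloc(sizeof(RegisterLineSketch)))
                          RegisterLineSketch(16));
    }
    return 0;  // Arena teardown frees all lines in a handful of frees.
  }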

========================================
Before:
========================================
compile GmsCore 50x on host -j1:
real  3m5.510s
user  2m48.139s
sys 0m11.753s

50 random APKs:
real  1m18.157s
user  1m8.167s
sys 0m8.071s

RAM (GmsCore):
(threads: 1) arena alloc=0B java alloc=27MB native alloc=64MB free=792KB
(threads: 4) arena alloc=0B java alloc=27MB native alloc=64MB free=397KB

========================================
After:
========================================
compile GmsCore 50x on host -j1:
real  2m31.455s
user  2m14.784s
sys 0m10.888s

50 random APKs:
Speed up from arena allocation:
real  1m12.094s
user  1m2.325s
sys 0m7.872s

Speed up from arenas + CreateFindClassDefIndex + unordered_map for DexFileVerifier:
real  1m8.654s
user  0m58.955s
sys 0m7.777s
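
The CreateFindClassDefIndex / unordered_map changes referenced in that
last measurement live in DexFile/DexFileVerifier and are not part of
this diff. As a rough illustration of the idea only (ClassDefSketch and
the function below are hypothetical, not ART's API): build the
type-index -> class_def-index map once, so each FindClassDef-style
query becomes a hash lookup instead of a linear scan of the class_defs
table.

  #include <cstdint>
  #include <unordered_map>
  #include <vector>

  // Hypothetical stand-in for one dex class_def entry.
  struct ClassDefSketch {
    uint16_t class_idx;  // Type index of the class being defined.
  };

  // One O(n) pass builds the index; each later lookup is O(1) instead
  // of an O(n) rescan of class_defs.
  std::unordered_map<uint16_t, uint32_t> BuildClassDefIndex(
      const std::vector<ClassDefSketch>& class_defs) {
    std::unordered_map<uint16_t, uint32_t> index;
    index.reserve(class_defs.size());
    for (uint32_t i = 0; i < class_defs.size(); ++i) {
      index.emplace(class_defs[i].class_idx, i);
    }
    return index;
  }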

RAM (GmsCore):
(threads: 1) arena alloc=1049KB java alloc=27MB native alloc=65MB free=538KB
(threads: 4) arena alloc=4MB java alloc=27MB native alloc=69MB free=650KB

Key perf results on host (before -> after):
_int_malloc: 3.60% -> 1.39%
malloc: 1.48% -> 0.65%
_int_free: 1.59% -> 0.61%
RegTypeCache::FromClass: 1.47% -> 0.86%
malloc_consolidate: 0.89% -> 0.63%

Real speed up is ~22% on GmsCore (3m5.5s -> 2m31.5s) and ~14% on the
set of 50 random APKs (1m18.2s -> 1m8.7s).

Bug: 10921004

Change-Id: If13d4ab4284a176f93a26a412549b6b1149dfb16
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 1704688..71afa0f 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -94,6 +94,8 @@
   "CodeGen      ",
   "ParallelMove ",
   "GraphChecker ",
+  "LSE          ",
+  "Verifier     ",
 };
 
 template <bool kCount>
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 7747bdf..ace6c38 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -107,6 +107,7 @@
   kArenaAllocParallelMoveResolver,
   kArenaAllocGraphChecker,
   kArenaAllocLSE,
+  kArenaAllocVerifier,
   kNumArenaAllocKinds
 };
 
diff --git a/runtime/base/stringpiece.h b/runtime/base/stringpiece.h
index 9c83cf5..46743e9 100644
--- a/runtime/base/stringpiece.h
+++ b/runtime/base/stringpiece.h
@@ -165,7 +165,7 @@
   // Pointer to char data, not necessarily zero terminated.
   const char* ptr_;
   // Length of data.
-  size_type   length_;
+  size_type length_;
 };
 
 // This large function is defined inline so that in a fairly common case where
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 48a12e5..2871f76 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -248,7 +248,7 @@
 
   // VRegA
   bool HasVRegA() const;
-  int32_t VRegA() const;
+  ALWAYS_INLINE int32_t VRegA() const;
 
   int8_t VRegA_10t() const {
     return VRegA_10t(Fetch16(0));
diff --git a/runtime/verifier/method_verifier-inl.h b/runtime/verifier/method_verifier-inl.h
index 2d9fd53..f52d011 100644
--- a/runtime/verifier/method_verifier-inl.h
+++ b/runtime/verifier/method_verifier-inl.h
@@ -38,6 +38,10 @@
   return insn_flags_[index];
 }
 
+inline InstructionFlags& MethodVerifier::GetInstructionFlags(size_t index) {
+  return insn_flags_[index];
+}
+
 inline mirror::ClassLoader* MethodVerifier::GetClassLoader() {
   return class_loader_.Get();
 }
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 4051a1c..e1d4160 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -58,12 +58,14 @@
 // On VLOG(verifier), should we dump the whole state when we run into a hard failure?
 static constexpr bool kDumpRegLinesOnHardFailureIfVLOG = true;
 
+PcToRegisterLineTable::PcToRegisterLineTable(ScopedArenaAllocator& arena)
+    : register_lines_(arena.Adapter(kArenaAllocVerifier)) {}
+
 void PcToRegisterLineTable::Init(RegisterTrackingMode mode, InstructionFlags* flags,
                                  uint32_t insns_size, uint16_t registers_size,
                                  MethodVerifier* verifier) {
   DCHECK_GT(insns_size, 0U);
-  register_lines_.reset(new RegisterLine*[insns_size]());
-  size_ = insns_size;
+  register_lines_.resize(insns_size);
   for (uint32_t i = 0; i < insns_size; i++) {
     bool interesting = false;
     switch (mode) {
@@ -80,19 +82,12 @@
         break;
     }
     if (interesting) {
-      register_lines_[i] = RegisterLine::Create(registers_size, verifier);
+      register_lines_[i].reset(RegisterLine::Create(registers_size, verifier));
     }
   }
 }
 
-PcToRegisterLineTable::~PcToRegisterLineTable() {
-  for (size_t i = 0; i < size_; i++) {
-    delete register_lines_[i];
-    if (kIsDebugBuild) {
-      register_lines_[i] = nullptr;
-    }
-  }
-}
+PcToRegisterLineTable::~PcToRegisterLineTable() {}
 
 // Note: returns true on failure.
 ALWAYS_INLINE static inline bool FailOrAbort(MethodVerifier* verifier, bool condition,
@@ -398,7 +393,10 @@
                                bool need_precise_constants, bool verify_to_dump,
                                bool allow_thread_suspension)
     : self_(self),
-      reg_types_(can_load_classes),
+      arena_stack_(Runtime::Current()->GetArenaPool()),
+      arena_(&arena_stack_),
+      reg_types_(can_load_classes, arena_),
+      reg_table_(arena_),
       work_insn_idx_(DexFile::kDexNoIndex),
       dex_method_idx_(dex_method_idx),
       mirror_method_(method),
@@ -702,7 +700,11 @@
   }
 
   // Allocate and initialize an array to hold instruction data.
-  insn_flags_.reset(new InstructionFlags[code_item_->insns_size_in_code_units_]());
+  insn_flags_.reset(arena_.AllocArray<InstructionFlags>(code_item_->insns_size_in_code_units_));
+  DCHECK(insn_flags_ != nullptr);
+  std::uninitialized_fill_n(insn_flags_.get(),
+                            code_item_->insns_size_in_code_units_,
+                            InstructionFlags());
   // Run through the instructions and see if the width checks out.
   bool result = ComputeWidthsAndCountOps();
   // Flag instructions guarded by a "try" block and check exception handlers.
@@ -848,7 +850,7 @@
         break;
     }
     size_t inst_size = inst->SizeInCodeUnits();
-    insn_flags_[dex_pc].SetIsOpcode();
+    GetInstructionFlags(dex_pc).SetIsOpcode();
     dex_pc += inst_size;
     inst = inst->RelativeAt(inst_size);
   }
@@ -881,7 +883,7 @@
                                         << " endAddr=" << end << " (size=" << insns_size << ")";
       return false;
     }
-    if (!insn_flags_[start].IsOpcode()) {
+    if (!GetInstructionFlags(start).IsOpcode()) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD)
           << "'try' block starts inside an instruction (" << start << ")";
       return false;
@@ -889,7 +891,7 @@
     uint32_t dex_pc = start;
     const Instruction* inst = Instruction::At(code_item_->insns_ + dex_pc);
     while (dex_pc < end) {
-      insn_flags_[dex_pc].SetInTry();
+      GetInstructionFlags(dex_pc).SetInTry();
       size_t insn_size = inst->SizeInCodeUnits();
       dex_pc += insn_size;
       inst = inst->RelativeAt(insn_size);
@@ -903,7 +905,7 @@
     CatchHandlerIterator iterator(handlers_ptr);
     for (; iterator.HasNext(); iterator.Next()) {
       uint32_t dex_pc= iterator.GetHandlerAddress();
-      if (!insn_flags_[dex_pc].IsOpcode()) {
+      if (!GetInstructionFlags(dex_pc).IsOpcode()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD)
             << "exception handler starts at bad address (" << dex_pc << ")";
         return false;
@@ -913,7 +915,7 @@
             << "exception handler begins with move-result* (" << dex_pc << ")";
         return false;
       }
-      insn_flags_[dex_pc].SetBranchTarget();
+      GetInstructionFlags(dex_pc).SetBranchTarget();
       // Ensure exception types are resolved so that they don't need resolution to be delivered,
       // unresolved exception types will be ignored by exception delivery
       if (iterator.GetHandlerTypeIndex() != DexFile::kDexNoIndex16) {
@@ -935,8 +937,8 @@
   const Instruction* inst = Instruction::At(code_item_->insns_);
 
   /* Flag the start of the method as a branch target, and a GC point due to stack overflow errors */
-  insn_flags_[0].SetBranchTarget();
-  insn_flags_[0].SetCompileTimeInfoPoint();
+  GetInstructionFlags(0).SetBranchTarget();
+  GetInstructionFlags(0).SetCompileTimeInfoPoint();
 
   uint32_t insns_size = code_item_->insns_size_in_code_units_;
   for (uint32_t dex_pc = 0; dex_pc < insns_size;) {
@@ -948,18 +950,18 @@
     // All invoke points are marked as "Throw" points already.
     // We are relying on this to also count all the invokes as interesting.
     if (inst->IsBranch()) {
-      insn_flags_[dex_pc].SetCompileTimeInfoPoint();
+      GetInstructionFlags(dex_pc).SetCompileTimeInfoPoint();
       // The compiler also needs safepoints for fall-through to loop heads.
       // Such a loop head must be a target of a branch.
       int32_t offset = 0;
       bool cond, self_ok;
       bool target_ok = GetBranchOffset(dex_pc, &offset, &cond, &self_ok);
       DCHECK(target_ok);
-      insn_flags_[dex_pc + offset].SetCompileTimeInfoPoint();
+      GetInstructionFlags(dex_pc + offset).SetCompileTimeInfoPoint();
     } else if (inst->IsSwitch() || inst->IsThrow()) {
-      insn_flags_[dex_pc].SetCompileTimeInfoPoint();
+      GetInstructionFlags(dex_pc).SetCompileTimeInfoPoint();
     } else if (inst->IsReturn()) {
-      insn_flags_[dex_pc].SetCompileTimeInfoPointAndReturn();
+      GetInstructionFlags(dex_pc).SetCompileTimeInfoPointAndReturn();
     }
     dex_pc += inst->SizeInCodeUnits();
     inst = inst->Next();
@@ -1202,7 +1204,7 @@
   }
   // Make sure the array-data is marked as an opcode. This ensures that it was reached when
   // traversing the code item linearly. It is an approximation for a by-spec padding value.
-  if (!insn_flags_[cur_offset + array_data_offset].IsOpcode()) {
+  if (!GetInstructionFlags(cur_offset + array_data_offset).IsOpcode()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array data table at " << cur_offset
                                       << ", data offset " << array_data_offset
                                       << " not correctly visited, probably bad padding.";
@@ -1245,13 +1247,13 @@
   int32_t abs_offset = cur_offset + offset;
   if (abs_offset < 0 ||
       (uint32_t) abs_offset >= insn_count ||
-      !insn_flags_[abs_offset].IsOpcode()) {
+      !GetInstructionFlags(abs_offset).IsOpcode()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid branch target " << offset << " (-> "
                                       << reinterpret_cast<void*>(abs_offset) << ") at "
                                       << reinterpret_cast<void*>(cur_offset);
     return false;
   }
-  insn_flags_[abs_offset].SetBranchTarget();
+  GetInstructionFlags(abs_offset).SetBranchTarget();
   return true;
 }
 
@@ -1315,7 +1317,7 @@
   }
   // Make sure the switch data is marked as an opcode. This ensures that it was reached when
   // traversing the code item linearly. It is an approximation for a by-spec padding value.
-  if (!insn_flags_[cur_offset + switch_offset].IsOpcode()) {
+  if (!GetInstructionFlags(cur_offset + switch_offset).IsOpcode()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "switch table at " << cur_offset
                                       << ", switch offset " << switch_offset
                                       << " not correctly visited, probably bad padding.";
@@ -1387,14 +1389,14 @@
     int32_t abs_offset = cur_offset + offset;
     if (abs_offset < 0 ||
         abs_offset >= static_cast<int32_t>(insn_count) ||
-        !insn_flags_[abs_offset].IsOpcode()) {
+        !GetInstructionFlags(abs_offset).IsOpcode()) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch target " << offset
                                         << " (-> " << reinterpret_cast<void*>(abs_offset) << ") at "
                                         << reinterpret_cast<void*>(cur_offset)
                                         << "[" << targ << "]";
       return false;
     }
-    insn_flags_[abs_offset].SetBranchTarget();
+    GetInstructionFlags(abs_offset).SetBranchTarget();
   }
   return true;
 }
@@ -1435,7 +1437,6 @@
                   registers_size,
                   this);
 
-
   work_line_.reset(RegisterLine::Create(registers_size, this));
   saved_line_.reset(RegisterLine::Create(registers_size, this));
 
@@ -1491,7 +1492,7 @@
       vios->Stream() << reg_line->Dump(this) << "\n";
     }
     vios->Stream()
-        << StringPrintf("0x%04zx", dex_pc) << ": " << insn_flags_[dex_pc].ToString() << " ";
+        << StringPrintf("0x%04zx", dex_pc) << ": " << GetInstructionFlags(dex_pc).ToString() << " ";
     const bool kDumpHexOfInstruction = false;
     if (kDumpHexOfInstruction) {
       vios->Stream() << inst->DumpHex(5) << " ";
@@ -1677,7 +1678,7 @@
   const uint32_t insns_size = code_item_->insns_size_in_code_units_;
 
   /* Begin by marking the first instruction as "changed". */
-  insn_flags_[0].SetChanged();
+  GetInstructionFlags(0).SetChanged();
   uint32_t start_guess = 0;
 
   /* Continue until no instructions are marked "changed". */
@@ -1688,7 +1689,7 @@
     // Find the first marked one. Use "start_guess" as a way to find one quickly.
     uint32_t insn_idx = start_guess;
     for (; insn_idx < insns_size; insn_idx++) {
-      if (insn_flags_[insn_idx].IsChanged())
+      if (GetInstructionFlags(insn_idx).IsChanged())
         break;
     }
     if (insn_idx == insns_size) {
@@ -1708,7 +1709,7 @@
     // situation where we have a stray "changed" flag set on an instruction that isn't a branch
     // target.
     work_insn_idx_ = insn_idx;
-    if (insn_flags_[insn_idx].IsBranchTarget()) {
+    if (GetInstructionFlags(insn_idx).IsBranchTarget()) {
       work_line_->CopyFromLine(reg_table_.GetLine(insn_idx));
     } else if (kIsDebugBuild) {
       /*
@@ -1734,8 +1735,8 @@
       return false;
     }
     /* Clear "changed" and mark as visited. */
-    insn_flags_[insn_idx].SetVisited();
-    insn_flags_[insn_idx].ClearChanged();
+    GetInstructionFlags(insn_idx).SetVisited();
+    GetInstructionFlags(insn_idx).ClearChanged();
   }
 
   if (gDebugVerify) {
@@ -1762,10 +1763,10 @@
            (insns[insn_idx + 1] == Instruction::kPackedSwitchSignature ||
             insns[insn_idx + 1] == Instruction::kSparseSwitchSignature ||
             insns[insn_idx + 1] == Instruction::kArrayDataSignature))) {
-        insn_flags_[insn_idx].SetVisited();
+        GetInstructionFlags(insn_idx).SetVisited();
       }
 
-      if (!insn_flags_[insn_idx].IsVisited()) {
+      if (!GetInstructionFlags(insn_idx).IsVisited()) {
         if (dead_start < 0)
           dead_start = insn_idx;
       } else if (dead_start >= 0) {
@@ -1895,8 +1896,8 @@
   // We need to ensure the work line is consistent while performing validation. When we spot a
   // peephole pattern we compute a new line for either the fallthrough instruction or the
   // branch target.
-  std::unique_ptr<RegisterLine> branch_line;
-  std::unique_ptr<RegisterLine> fallthrough_line;
+  ArenaUniquePtr<RegisterLine> branch_line;
+  ArenaUniquePtr<RegisterLine> fallthrough_line;
 
   switch (inst->Opcode()) {
     case Instruction::NOP:
@@ -2144,9 +2145,9 @@
       work_line_->PushMonitor(this, inst->VRegA_11x(), work_insn_idx_);
       // Check whether the previous instruction is a move-object with vAA as a source, creating
       // untracked lock aliasing.
-      if (0 != work_insn_idx_ && !insn_flags_[work_insn_idx_].IsBranchTarget()) {
+      if (0 != work_insn_idx_ && !GetInstructionFlags(work_insn_idx_).IsBranchTarget()) {
         uint32_t prev_idx = work_insn_idx_ - 1;
-        while (0 != prev_idx && !insn_flags_[prev_idx].IsOpcode()) {
+        while (0 != prev_idx && !GetInstructionFlags(prev_idx).IsOpcode()) {
           prev_idx--;
         }
         const Instruction* prev_inst = Instruction::At(code_item_->insns_ + prev_idx);
@@ -2427,10 +2428,10 @@
       uint32_t instance_of_idx = 0;
       if (0 != work_insn_idx_) {
         instance_of_idx = work_insn_idx_ - 1;
-        while (0 != instance_of_idx && !insn_flags_[instance_of_idx].IsOpcode()) {
+        while (0 != instance_of_idx && !GetInstructionFlags(instance_of_idx).IsOpcode()) {
           instance_of_idx--;
         }
-        if (FailOrAbort(this, insn_flags_[instance_of_idx].IsOpcode(),
+        if (FailOrAbort(this, GetInstructionFlags(instance_of_idx).IsOpcode(),
                         "Unable to get previous instruction of if-eqz/if-nez for work index ",
                         work_insn_idx_)) {
           break;
@@ -2486,15 +2487,15 @@
           update_line->SetRegisterType<LockOp::kKeep>(this,
                                                       instance_of_inst->VRegB_22c(),
                                                       cast_type);
-          if (!insn_flags_[instance_of_idx].IsBranchTarget() && 0 != instance_of_idx) {
+          if (!GetInstructionFlags(instance_of_idx).IsBranchTarget() && 0 != instance_of_idx) {
             // See if instance-of was preceded by a move-object operation, common due to the small
             // register encoding space of instance-of, and propagate type information to the source
             // of the move-object.
             uint32_t move_idx = instance_of_idx - 1;
-            while (0 != move_idx && !insn_flags_[move_idx].IsOpcode()) {
+            while (0 != move_idx && !GetInstructionFlags(move_idx).IsOpcode()) {
               move_idx--;
             }
-            if (FailOrAbort(this, insn_flags_[move_idx].IsOpcode(),
+            if (FailOrAbort(this, GetInstructionFlags(move_idx).IsOpcode(),
                             "Unable to get previous instruction of if-eqz/if-nez for work index ",
                             work_insn_idx_)) {
               break;
@@ -2786,8 +2787,7 @@
         work_line_->MarkRefsAsInitialized(this, this_type, this_reg, work_insn_idx_);
       }
       if (return_type == nullptr) {
-        return_type = &reg_types_.FromDescriptor(GetClassLoader(), return_type_descriptor,
-                                                 false);
+        return_type = &reg_types_.FromDescriptor(GetClassLoader(), return_type_descriptor, false);
       }
       if (!return_type->IsLowHalf()) {
         work_line_->SetResultRegisterType(this, *return_type);
@@ -2860,7 +2860,7 @@
         uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
         const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
         uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
-        descriptor =  dex_file_->StringByTypeIdx(return_type_idx);
+        descriptor = dex_file_->StringByTypeIdx(return_type_idx);
       } else {
         descriptor = abs_method->GetReturnTypeDescriptor();
       }
@@ -3309,7 +3309,7 @@
       return false;
     }
     /* update branch target, set "changed" if appropriate */
-    if (nullptr != branch_line.get()) {
+    if (nullptr != branch_line) {
       if (!UpdateRegisters(work_insn_idx_ + branch_target, branch_line.get(), false)) {
         return false;
       }
@@ -3364,7 +3364,7 @@
    * Handle instructions that can throw and that are sitting in a "try" block. (If they're not in a
    * "try" block when they throw, control transfers out of the method.)
    */
-  if ((opcode_flags & Instruction::kThrow) != 0 && insn_flags_[work_insn_idx_].IsInTry()) {
+  if ((opcode_flags & Instruction::kThrow) != 0 && GetInstructionFlags(work_insn_idx_).IsInTry()) {
     bool has_catch_all_handler = false;
     CatchHandlerIterator iterator(*code_item_, work_insn_idx_);
 
@@ -3434,11 +3434,11 @@
     if (!CheckNotMoveException(code_item_->insns_, next_insn_idx)) {
       return false;
     }
-    if (nullptr != fallthrough_line.get()) {
+    if (nullptr != fallthrough_line) {
       // Make workline consistent with fallthrough computed from peephole optimization.
       work_line_->CopyFromLine(fallthrough_line.get());
     }
-    if (insn_flags_[next_insn_idx].IsReturn()) {
+    if (GetInstructionFlags(next_insn_idx).IsReturn()) {
       // For returns we only care about the operand to the return, all other registers are dead.
       const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn_idx);
       AdjustReturnLine(this, ret_inst, work_line_.get());
@@ -3456,7 +3456,7 @@
        * We're not recording register data for the next instruction, so we don't know what the
        * prior state was. We have to assume that something has changed and re-evaluate it.
        */
-      insn_flags_[next_insn_idx].SetChanged();
+      GetInstructionFlags(next_insn_idx).SetChanged();
     }
   }
 
@@ -3480,7 +3480,7 @@
   }
 
   DCHECK_LT(*start_guess, code_item_->insns_size_in_code_units_);
-  DCHECK(insn_flags_[*start_guess].IsOpcode());
+  DCHECK(GetInstructionFlags(*start_guess).IsOpcode());
 
   if (have_pending_runtime_throw_failure_) {
     have_any_pending_runtime_throw_failure_ = true;
@@ -3491,30 +3491,55 @@
   return true;
 }  // NOLINT(readability/fn_size)
 
+void MethodVerifier::UninstantiableError(const char* descriptor) {
+  Fail(VerifyError::VERIFY_ERROR_NO_CLASS) << "Could not create precise reference for "
+                                           << "non-instantiable klass " << descriptor;
+}
+
+inline bool MethodVerifier::IsInstantiableOrPrimitive(mirror::Class* klass) {
+  return klass->IsInstantiable() || klass->IsPrimitive();
+}
+
 const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
-  const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
-  const RegType& referrer = GetDeclaringClass();
   mirror::Class* klass = dex_cache_->GetResolvedType(class_idx);
-  const RegType& result = klass != nullptr ?
-      FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes()) :
-      reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
-  if (result.IsConflict()) {
-    Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "accessing broken descriptor '" << descriptor
-        << "' in " << referrer;
-    return result;
+  const RegType* result = nullptr;
+  if (klass != nullptr) {
+    bool precise = klass->CannotBeAssignedFromOtherTypes();
+    if (precise && !IsInstantiableOrPrimitive(klass)) {
+      const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+      UninstantiableError(descriptor);
+      precise = false;
+    }
+    result = reg_types_.FindClass(klass, precise);
+    if (result == nullptr) {
+      const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+      result = reg_types_.InsertClass(descriptor, klass, precise);
+    }
+  } else {
+    const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+    result = &reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
   }
-  if (klass == nullptr && !result.IsUnresolvedTypes()) {
-    dex_cache_->SetResolvedType(class_idx, result.GetClass());
+  DCHECK(result != nullptr);
+  if (result->IsConflict()) {
+    const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+    Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "accessing broken descriptor '" << descriptor
+        << "' in " << GetDeclaringClass();
+    return *result;
+  }
+  if (klass == nullptr && !result->IsUnresolvedTypes()) {
+    dex_cache_->SetResolvedType(class_idx, result->GetClass());
   }
   // Check if access is allowed. Unresolved types use xxxWithAccessCheck to
   // check at runtime if access is allowed and so pass here. If result is
   // primitive, skip the access check.
-  if (result.IsNonZeroReferenceTypes() && !result.IsUnresolvedTypes() &&
-      !referrer.IsUnresolvedTypes() && !referrer.CanAccess(result)) {
-    Fail(VERIFY_ERROR_ACCESS_CLASS) << "illegal class access: '"
-                                    << referrer << "' -> '" << result << "'";
+  if (result->IsNonZeroReferenceTypes() && !result->IsUnresolvedTypes()) {
+    const RegType& referrer = GetDeclaringClass();
+    if (!referrer.IsUnresolvedTypes() && !referrer.CanAccess(*result)) {
+      Fail(VERIFY_ERROR_ACCESS_CLASS) << "illegal class access: '"
+                                      << referrer << "' -> '" << *result << "'";
+    }
   }
-  return result;
+  return *result;
 }
 
 const RegType& MethodVerifier::GetCaughtExceptionType() {
@@ -3720,9 +3745,10 @@
       } else {
         const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
         const uint16_t class_idx = dex_file_->GetMethodId(method_idx).class_idx_;
-        res_method_class = &reg_types_.FromDescriptor(GetClassLoader(),
-                                                      dex_file_->StringByTypeIdx(class_idx),
-                                                      false);
+        res_method_class = &reg_types_.FromDescriptor(
+            GetClassLoader(),
+            dex_file_->StringByTypeIdx(class_idx),
+            false);
       }
       if (!res_method_class->IsAssignableFrom(actual_arg_type)) {
         Fail(actual_arg_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS:
@@ -4476,14 +4502,16 @@
         field->GetType<false>();
 
     if (field_type_class != nullptr) {
-      field_type = &FromClass(field->GetTypeDescriptor(), field_type_class,
+      field_type = &FromClass(field->GetTypeDescriptor(),
+                              field_type_class,
                               field_type_class->CannotBeAssignedFromOtherTypes());
     } else {
       Thread* self = Thread::Current();
       DCHECK(!can_load_classes_ || self->IsExceptionPending());
       self->ClearException();
       field_type = &reg_types_.FromDescriptor(field->GetDeclaringClass()->GetClassLoader(),
-                                              field->GetTypeDescriptor(), false);
+                                              field->GetTypeDescriptor(),
+                                              false);
     }
     if (field_type == nullptr) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field type from " << inst->Name();
@@ -4604,14 +4632,14 @@
                                      bool update_merge_line) {
   bool changed = true;
   RegisterLine* target_line = reg_table_.GetLine(next_insn);
-  if (!insn_flags_[next_insn].IsVisitedOrChanged()) {
+  if (!GetInstructionFlags(next_insn).IsVisitedOrChanged()) {
     /*
      * We haven't processed this instruction before, and we haven't touched the registers here, so
      * there's nothing to "merge". Copy the registers over and mark it as changed. (This is the
      * only way a register can transition out of "unknown", so this is not just an optimization.)
      */
     target_line->CopyFromLine(merge_line);
-    if (insn_flags_[next_insn].IsReturn()) {
+    if (GetInstructionFlags(next_insn).IsReturn()) {
       // Verify that the monitor stack is empty on return.
       merge_line->VerifyMonitorStackEmpty(this);
 
@@ -4621,10 +4649,9 @@
       AdjustReturnLine(this, ret_inst, target_line);
     }
   } else {
-    std::unique_ptr<RegisterLine> copy(gDebugVerify ?
-                                 RegisterLine::Create(target_line->NumRegs(), this) :
-                                 nullptr);
+    ArenaUniquePtr<RegisterLine> copy;
     if (gDebugVerify) {
+      copy.reset(RegisterLine::Create(target_line->NumRegs(), this));
       copy->CopyFromLine(target_line);
     }
     changed = target_line->MergeRegisters(this, merge_line);
@@ -4643,13 +4670,13 @@
     }
   }
   if (changed) {
-    insn_flags_[next_insn].SetChanged();
+    GetInstructionFlags(next_insn).SetChanged();
   }
   return true;
 }
 
 InstructionFlags* MethodVerifier::CurrentInsnFlags() {
-  return &insn_flags_[work_insn_idx_];
+  return &GetInstructionFlags(work_insn_idx_);
 }
 
 const RegType& MethodVerifier::GetMethodReturnType() {
@@ -4685,8 +4712,7 @@
         = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
     if (mirror_method_ != nullptr) {
       mirror::Class* klass = mirror_method_->GetDeclaringClass();
-      declaring_class_ = &FromClass(descriptor, klass,
-                                    klass->CannotBeAssignedFromOtherTypes());
+      declaring_class_ = &FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes());
     } else {
       declaring_class_ = &reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
     }
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index ba694b7..7b51d6e 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -21,7 +21,10 @@
 #include <sstream>
 #include <vector>
 
+#include "base/arena_allocator.h"
 #include "base/macros.h"
+#include "base/scoped_arena_containers.h"
+#include "base/stl_util.h"
 #include "dex_file.h"
 #include "handle.h"
 #include "instruction_flags.h"
@@ -107,7 +110,7 @@
 // execution of that instruction.
 class PcToRegisterLineTable {
  public:
-  PcToRegisterLineTable() : size_(0) {}
+  explicit PcToRegisterLineTable(ScopedArenaAllocator& arena);
   ~PcToRegisterLineTable();
 
   // Initialize the RegisterTable. Every instruction address can have a different set of information
@@ -116,14 +119,12 @@
   void Init(RegisterTrackingMode mode, InstructionFlags* flags, uint32_t insns_size,
             uint16_t registers_size, MethodVerifier* verifier);
 
-  RegisterLine* GetLine(size_t idx) {
-    DCHECK_LT(idx, size_);
-    return register_lines_[idx];
+  RegisterLine* GetLine(size_t idx) const {
+    return register_lines_[idx].get();
   }
 
  private:
-  std::unique_ptr<RegisterLine*[]> register_lines_;
-  size_t size_;
+  ScopedArenaVector<ArenaUniquePtr<RegisterLine>> register_lines_;
 
   DISALLOW_COPY_AND_ASSIGN(PcToRegisterLineTable);
 };
@@ -240,7 +241,8 @@
   // Accessors used by the compiler via CompilerCallback
   const DexFile::CodeItem* CodeItem() const;
   RegisterLine* GetRegLine(uint32_t dex_pc);
-  const InstructionFlags& GetInstructionFlags(size_t index) const;
+  ALWAYS_INLINE const InstructionFlags& GetInstructionFlags(size_t index) const;
+  ALWAYS_INLINE InstructionFlags& GetInstructionFlags(size_t index);
   mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
   MethodReference GetMethodReference() const;
@@ -275,7 +277,14 @@
     return IsConstructor() && !IsStatic();
   }
 
+  ScopedArenaAllocator& GetArena() {
+    return arena_;
+  }
+
  private:
+  void UninstantiableError(const char* descriptor);
+  static bool IsInstantiableOrPrimitive(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+
   // Is the method being verified a constructor? See the comment on the field.
   bool IsConstructor() const {
     return is_constructor_;
@@ -687,19 +696,23 @@
   // The thread we're verifying on.
   Thread* const self_;
 
+  // Arena allocator.
+  ArenaStack arena_stack_;
+  ScopedArenaAllocator arena_;
+
   RegTypeCache reg_types_;
 
   PcToRegisterLineTable reg_table_;
 
   // Storage for the register status we're currently working on.
-  std::unique_ptr<RegisterLine> work_line_;
+  ArenaUniquePtr<RegisterLine> work_line_;
 
   // The address of the instruction we're currently working on, note that this is in 2 byte
   // quantities
   uint32_t work_insn_idx_;
 
   // Storage for the register status we're saving for later.
-  std::unique_ptr<RegisterLine> saved_line_;
+  ArenaUniquePtr<RegisterLine> saved_line_;
 
   const uint32_t dex_method_idx_;  // The method we're working on.
   // Its object representation if known.
@@ -715,7 +728,8 @@
   const DexFile::CodeItem* const code_item_;  // The code item containing the code for the method.
   const RegType* declaring_class_;  // Lazily computed reg type of the method's declaring class.
   // Instruction widths and flags, one entry per code unit.
-  std::unique_ptr<InstructionFlags[]> insn_flags_;
+  // Owned, but not unique_ptr since insn_flags_ are allocated in arenas.
+  ArenaUniquePtr<InstructionFlags[]> insn_flags_;
   // The dex PC of a FindLocksAtDexPc request, -1 otherwise.
   uint32_t interesting_dex_pc_;
   // The container into which FindLocksAtDexPc should write the registers containing held locks,
diff --git a/runtime/verifier/reg_type-inl.h b/runtime/verifier/reg_type-inl.h
index f445132..11a53e5 100644
--- a/runtime/verifier/reg_type-inl.h
+++ b/runtime/verifier/reg_type-inl.h
@@ -20,6 +20,7 @@
 #include "reg_type.h"
 
 #include "base/casts.h"
+#include "base/scoped_arena_allocator.h"
 #include "mirror/class.h"
 
 namespace art {
@@ -180,6 +181,10 @@
   return instance_;
 }
 
+inline void* RegType::operator new(size_t size, ScopedArenaAllocator* arena) {
+  return arena->Alloc(size, kArenaAllocMisc);
+}
+
 }  // namespace verifier
 }  // namespace art
 
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index b86a4c8..16cab03 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -16,6 +16,7 @@
 
 #include "reg_type-inl.h"
 
+#include "base/arena_bit_vector.h"
 #include "base/bit_vector-inl.h"
 #include "base/casts.h"
 #include "class_linker-inl.h"
@@ -46,20 +47,17 @@
 const DoubleHiType* DoubleHiType::instance_ = nullptr;
 const IntegerType* IntegerType::instance_ = nullptr;
 
-PrimitiveType::PrimitiveType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-    SHARED_REQUIRES(Locks::mutator_lock_)
+PrimitiveType::PrimitiveType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
     : RegType(klass, descriptor, cache_id) {
   CHECK(klass != nullptr);
   CHECK(!descriptor.empty());
 }
 
-Cat1Type::Cat1Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-    SHARED_REQUIRES(Locks::mutator_lock_)
+Cat1Type::Cat1Type(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
     : PrimitiveType(klass, descriptor, cache_id) {
 }
 
-Cat2Type::Cat2Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-    SHARED_REQUIRES(Locks::mutator_lock_)
+Cat2Type::Cat2Type(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
     : PrimitiveType(klass, descriptor, cache_id) {
 }
 
@@ -121,11 +119,11 @@
 }
 
 std::string IntegerType::Dump() const {
-    return "Integer";
+  return "Integer";
 }
 
 const DoubleHiType* DoubleHiType::CreateInstance(mirror::Class* klass,
-                                                 const std::string& descriptor,
+                                                 const StringPiece& descriptor,
                                                  uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new DoubleHiType(klass, descriptor, cache_id);
@@ -140,7 +138,7 @@
 }
 
 const DoubleLoType* DoubleLoType::CreateInstance(mirror::Class* klass,
-                                                 const std::string& descriptor,
+                                                 const StringPiece& descriptor,
                                                  uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new DoubleLoType(klass, descriptor, cache_id);
@@ -154,14 +152,14 @@
   }
 }
 
-const LongLoType* LongLoType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const LongLoType* LongLoType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                              uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new LongLoType(klass, descriptor, cache_id);
   return instance_;
 }
 
-const LongHiType* LongHiType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const LongHiType* LongHiType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                              uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new LongHiType(klass, descriptor, cache_id);
@@ -182,7 +180,7 @@
   }
 }
 
-const FloatType* FloatType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const FloatType* FloatType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                            uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new FloatType(klass, descriptor, cache_id);
@@ -196,7 +194,7 @@
   }
 }
 
-const CharType* CharType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const CharType* CharType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                          uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new CharType(klass, descriptor, cache_id);
@@ -210,7 +208,7 @@
   }
 }
 
-const ShortType* ShortType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const ShortType* ShortType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                            uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new ShortType(klass, descriptor, cache_id);
@@ -224,7 +222,7 @@
   }
 }
 
-const ByteType* ByteType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const ByteType* ByteType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                          uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new ByteType(klass, descriptor, cache_id);
@@ -238,7 +236,7 @@
   }
 }
 
-const IntegerType* IntegerType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const IntegerType* IntegerType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                                uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new IntegerType(klass, descriptor, cache_id);
@@ -253,7 +251,7 @@
 }
 
 const ConflictType* ConflictType::CreateInstance(mirror::Class* klass,
-                                                 const std::string& descriptor,
+                                                 const StringPiece& descriptor,
                                                  uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new ConflictType(klass, descriptor, cache_id);
@@ -267,7 +265,7 @@
   }
 }
 
-const BooleanType* BooleanType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const BooleanType* BooleanType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                          uint16_t cache_id) {
   CHECK(BooleanType::instance_ == nullptr);
   instance_ = new BooleanType(klass, descriptor, cache_id);
@@ -286,7 +284,7 @@
 }
 
 const UndefinedType* UndefinedType::CreateInstance(mirror::Class* klass,
-                                                   const std::string& descriptor,
+                                                   const StringPiece& descriptor,
                                                    uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new UndefinedType(klass, descriptor, cache_id);
@@ -300,7 +298,7 @@
   }
 }
 
-PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const std::string& descriptor,
+PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const StringPiece& descriptor,
                                            uint16_t cache_id)
     : RegType(klass, descriptor, cache_id) {
   // Note: no check for IsInstantiable() here. We may produce this in case an InstantiationError
@@ -335,14 +333,14 @@
 
 std::string UnresolvedReferenceType::Dump() const {
   std::stringstream result;
-  result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor().c_str());
+  result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor().as_string().c_str());
   return result.str();
 }
 
 std::string UnresolvedUninitializedRefType::Dump() const {
   std::stringstream result;
   result << "Unresolved And Uninitialized Reference" << ": "
-      << PrettyDescriptor(GetDescriptor().c_str())
+      << PrettyDescriptor(GetDescriptor().as_string().c_str())
       << " Allocation PC: " << GetAllocationPc();
   return result.str();
 }
@@ -350,7 +348,7 @@
 std::string UnresolvedUninitializedThisRefType::Dump() const {
   std::stringstream result;
   result << "Unresolved And Uninitialized This Reference"
-      << PrettyDescriptor(GetDescriptor().c_str());
+      << PrettyDescriptor(GetDescriptor().as_string().c_str());
   return result.str();
 }
 
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 2834a9a..80b751c 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -22,9 +22,11 @@
 #include <set>
 #include <string>
 
+#include "base/arena_object.h"
 #include "base/bit_vector.h"
 #include "base/macros.h"
 #include "base/mutex.h"
+#include "base/stringpiece.h"
 #include "gc_root.h"
 #include "handle_scope.h"
 #include "object_callbacks.h"
@@ -35,6 +37,9 @@
 class Class;
 }  // namespace mirror
 
+class ArenaBitVector;
+class ScopedArenaAllocator;
+
 namespace verifier {
 
 class RegTypeCache;
@@ -173,7 +178,7 @@
   bool IsJavaLangObjectArray() const
       SHARED_REQUIRES(Locks::mutator_lock_);
   bool IsInstantiableTypes() const SHARED_REQUIRES(Locks::mutator_lock_);
-  const std::string& GetDescriptor() const {
+  const StringPiece& GetDescriptor() const {
     DCHECK(HasClass() ||
            (IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
             !IsUnresolvedSuperClass()));
@@ -274,10 +279,20 @@
   void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) const
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  static void* operator new(size_t size) noexcept {
+    return ::operator new(size);
+  }
+
+  static void* operator new(size_t size, ArenaAllocator* arena) = delete;
+  static void* operator new(size_t size, ScopedArenaAllocator* arena);
+
  protected:
-  RegType(mirror::Class* klass, const std::string& descriptor,
+  RegType(mirror::Class* klass,
+          const StringPiece& descriptor,
           uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
-      : descriptor_(descriptor), klass_(klass), cache_id_(cache_id) {
+      : descriptor_(descriptor),
+        klass_(klass),
+        cache_id_(cache_id) {
     if (kIsDebugBuild) {
       CheckInvariants();
     }
@@ -285,9 +300,8 @@
 
   void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const std::string descriptor_;
-  mutable GcRoot<mirror::Class>
-      klass_;  // Non-const only due to moving classes.
+  const StringPiece descriptor_;
+  mutable GcRoot<mirror::Class> klass_;  // Non-const only due to moving classes.
   const uint16_t cache_id_;
 
   friend class RegTypeCache;
@@ -311,7 +325,7 @@
 
   // Create the singleton instance.
   static const ConflictType* CreateInstance(mirror::Class* klass,
-                                            const std::string& descriptor,
+                                            const StringPiece& descriptor,
                                             uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -319,7 +333,7 @@
   static void Destroy();
 
  private:
-  ConflictType(mirror::Class* klass, const std::string& descriptor,
+  ConflictType(mirror::Class* klass, const StringPiece& descriptor,
                uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : RegType(klass, descriptor, cache_id) {}
 
@@ -340,7 +354,7 @@
 
   // Create the singleton instance.
   static const UndefinedType* CreateInstance(mirror::Class* klass,
-                                             const std::string& descriptor,
+                                             const StringPiece& descriptor,
                                              uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -348,7 +362,7 @@
   static void Destroy();
 
  private:
-  UndefinedType(mirror::Class* klass, const std::string& descriptor,
+  UndefinedType(mirror::Class* klass, const StringPiece& descriptor,
                 uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : RegType(klass, descriptor, cache_id) {}
 
@@ -357,7 +371,7 @@
 
 class PrimitiveType : public RegType {
  public:
-  PrimitiveType(mirror::Class* klass, const std::string& descriptor,
+  PrimitiveType(mirror::Class* klass, const StringPiece& descriptor,
                 uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool HasClassVirtual() const OVERRIDE { return true; }
@@ -365,7 +379,7 @@
 
 class Cat1Type : public PrimitiveType {
  public:
-  Cat1Type(mirror::Class* klass, const std::string& descriptor,
+  Cat1Type(mirror::Class* klass, const StringPiece& descriptor,
            uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
@@ -374,14 +388,14 @@
   bool IsInteger() const OVERRIDE { return true; }
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const IntegerType* CreateInstance(mirror::Class* klass,
-                                           const std::string& descriptor,
+                                           const StringPiece& descriptor,
                                            uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const IntegerType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  IntegerType(mirror::Class* klass, const std::string& descriptor,
+  IntegerType(mirror::Class* klass, const StringPiece& descriptor,
               uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const IntegerType* instance_;
@@ -392,14 +406,14 @@
   bool IsBoolean() const OVERRIDE { return true; }
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const BooleanType* CreateInstance(mirror::Class* klass,
-                                           const std::string& descriptor,
+                                           const StringPiece& descriptor,
                                            uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const BooleanType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  BooleanType(mirror::Class* klass, const std::string& descriptor,
+  BooleanType(mirror::Class* klass, const StringPiece& descriptor,
               uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
 
@@ -411,14 +425,14 @@
   bool IsByte() const OVERRIDE { return true; }
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const ByteType* CreateInstance(mirror::Class* klass,
-                                        const std::string& descriptor,
+                                        const StringPiece& descriptor,
                                         uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const ByteType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  ByteType(mirror::Class* klass, const std::string& descriptor,
+  ByteType(mirror::Class* klass, const StringPiece& descriptor,
            uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const ByteType* instance_;
@@ -429,14 +443,14 @@
   bool IsShort() const OVERRIDE { return true; }
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const ShortType* CreateInstance(mirror::Class* klass,
-                                         const std::string& descriptor,
+                                         const StringPiece& descriptor,
                                          uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const ShortType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  ShortType(mirror::Class* klass, const std::string& descriptor,
+  ShortType(mirror::Class* klass, const StringPiece& descriptor,
             uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const ShortType* instance_;
@@ -447,14 +461,14 @@
   bool IsChar() const OVERRIDE { return true; }
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const CharType* CreateInstance(mirror::Class* klass,
-                                        const std::string& descriptor,
+                                        const StringPiece& descriptor,
                                         uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const CharType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  CharType(mirror::Class* klass, const std::string& descriptor,
+  CharType(mirror::Class* klass, const StringPiece& descriptor,
            uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const CharType* instance_;
@@ -465,14 +479,14 @@
   bool IsFloat() const OVERRIDE { return true; }
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const FloatType* CreateInstance(mirror::Class* klass,
-                                         const std::string& descriptor,
+                                         const StringPiece& descriptor,
                                          uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const FloatType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  FloatType(mirror::Class* klass, const std::string& descriptor,
+  FloatType(mirror::Class* klass, const StringPiece& descriptor,
             uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const FloatType* instance_;
@@ -480,7 +494,7 @@
 
 class Cat2Type : public PrimitiveType {
  public:
-  Cat2Type(mirror::Class* klass, const std::string& descriptor,
+  Cat2Type(mirror::Class* klass, const StringPiece& descriptor,
            uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
@@ -490,14 +504,14 @@
   bool IsLongLo() const OVERRIDE { return true; }
   bool IsLong() const OVERRIDE { return true; }
   static const LongLoType* CreateInstance(mirror::Class* klass,
-                                          const std::string& descriptor,
+                                          const StringPiece& descriptor,
                                           uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const LongLoType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  LongLoType(mirror::Class* klass, const std::string& descriptor,
+  LongLoType(mirror::Class* klass, const StringPiece& descriptor,
              uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {}
   static const LongLoType* instance_;
@@ -508,14 +522,14 @@
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   bool IsLongHi() const OVERRIDE { return true; }
   static const LongHiType* CreateInstance(mirror::Class* klass,
-                                          const std::string& descriptor,
+                                          const StringPiece& descriptor,
                                           uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const LongHiType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  LongHiType(mirror::Class* klass, const std::string& descriptor,
+  LongHiType(mirror::Class* klass, const StringPiece& descriptor,
              uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {}
   static const LongHiType* instance_;
@@ -527,14 +541,14 @@
   bool IsDoubleLo() const OVERRIDE { return true; }
   bool IsDouble() const OVERRIDE { return true; }
   static const DoubleLoType* CreateInstance(mirror::Class* klass,
-                                            const std::string& descriptor,
+                                            const StringPiece& descriptor,
                                             uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const DoubleLoType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  DoubleLoType(mirror::Class* klass, const std::string& descriptor,
+  DoubleLoType(mirror::Class* klass, const StringPiece& descriptor,
                uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {}
   static const DoubleLoType* instance_;
@@ -545,14 +559,14 @@
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   virtual bool IsDoubleHi() const OVERRIDE { return true; }
   static const DoubleHiType* CreateInstance(mirror::Class* klass,
-                                      const std::string& descriptor,
+                                      const StringPiece& descriptor,
                                       uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const DoubleHiType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  DoubleHiType(mirror::Class* klass, const std::string& descriptor,
+  DoubleHiType(mirror::Class* klass, const StringPiece& descriptor,
                uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {}
   static const DoubleHiType* instance_;
@@ -677,7 +691,7 @@
 // instructions and must be passed to a constructor.
 class UninitializedType : public RegType {
  public:
-  UninitializedType(mirror::Class* klass, const std::string& descriptor,
+  UninitializedType(mirror::Class* klass, const StringPiece& descriptor,
                     uint32_t allocation_pc, uint16_t cache_id)
       : RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {}
 
@@ -697,7 +711,7 @@
 class UninitializedReferenceType FINAL : public UninitializedType {
  public:
   UninitializedReferenceType(mirror::Class* klass,
-                             const std::string& descriptor,
+                             const StringPiece& descriptor,
                              uint32_t allocation_pc, uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_)
       : UninitializedType(klass, descriptor, allocation_pc, cache_id) {}
@@ -713,7 +727,7 @@
 // constructor.
 class UnresolvedUninitializedRefType FINAL : public UninitializedType {
  public:
-  UnresolvedUninitializedRefType(const std::string& descriptor,
+  UnresolvedUninitializedRefType(const StringPiece& descriptor,
                                  uint32_t allocation_pc, uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_)
       : UninitializedType(nullptr, descriptor, allocation_pc, cache_id) {
@@ -737,7 +751,7 @@
 class UninitializedThisReferenceType FINAL : public UninitializedType {
  public:
   UninitializedThisReferenceType(mirror::Class* klass,
-                                 const std::string& descriptor,
+                                 const StringPiece& descriptor,
                                  uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_)
       : UninitializedType(klass, descriptor, 0, cache_id) {
@@ -758,7 +772,7 @@
 
 class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
  public:
-  UnresolvedUninitializedThisRefType(const std::string& descriptor,
+  UnresolvedUninitializedThisRefType(const StringPiece& descriptor,
                                      uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_)
       : UninitializedType(nullptr, descriptor, 0, cache_id) {
@@ -781,7 +795,7 @@
 // sub-class.
 class ReferenceType FINAL : public RegType {
  public:
-  ReferenceType(mirror::Class* klass, const std::string& descriptor,
+  ReferenceType(mirror::Class* klass, const StringPiece& descriptor,
                 uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : RegType(klass, descriptor, cache_id) {}
 
@@ -799,7 +813,7 @@
 // type.
 class PreciseReferenceType FINAL : public RegType {
  public:
-  PreciseReferenceType(mirror::Class* klass, const std::string& descriptor,
+  PreciseReferenceType(mirror::Class* klass, const StringPiece& descriptor,
                        uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -815,7 +829,7 @@
 // Common parent of unresolved types.
 class UnresolvedType : public RegType {
  public:
-  UnresolvedType(const std::string& descriptor, uint16_t cache_id)
+  UnresolvedType(const StringPiece& descriptor, uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_)
       : RegType(nullptr, descriptor, cache_id) {}
 
@@ -827,7 +841,7 @@
 // of this type must be conservative.
 class UnresolvedReferenceType FINAL : public UnresolvedType {
  public:
-  UnresolvedReferenceType(const std::string& descriptor, uint16_t cache_id)
+  UnresolvedReferenceType(const StringPiece& descriptor, uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_)
       : UnresolvedType(descriptor, cache_id) {
     if (kIsDebugBuild) {
@@ -882,8 +896,10 @@
 class UnresolvedMergedType FINAL : public UnresolvedType {
  public:
   // Note: the constructor will copy the unresolved BitVector, not use it directly.
-  UnresolvedMergedType(const RegType& resolved, const BitVector& unresolved,
-                       const RegTypeCache* reg_type_cache, uint16_t cache_id)
+  UnresolvedMergedType(const RegType& resolved,
+                       const BitVector& unresolved,
+                       const RegTypeCache* reg_type_cache,
+                       uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   // The resolved part. See description below.
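
The constructor changes above trade std::string for StringPiece because the
descriptor's characters now live in the verifier's arena; each RegType keeps a
cheap pointer-plus-length view instead of its own heap copy. A minimal sketch
of the idea, using std::string_view as a stand-in for ART's StringPiece (the
names below are illustrative, not the real classes):

#include <cstdint>
#include <string_view>

// Sketch only: a RegType stand-in holding a non-owning descriptor view. The
// view stays valid because the arena that owns the characters outlives it.
class SketchRegType {
 public:
  SketchRegType(std::string_view descriptor, uint16_t cache_id)
      : descriptor_(descriptor), cache_id_(cache_id) {}

  std::string_view GetDescriptor() const { return descriptor_; }

 private:
  const std::string_view descriptor_;  // pointer + length, no per-entry copy
  const uint16_t cache_id_;
};
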
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index b6f253b..68af62e 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -118,6 +118,18 @@
   }
 }
 
+template <class RegTypeType>
+inline RegTypeType& RegTypeCache::AddEntry(RegTypeType* new_entry) {
+  DCHECK(new_entry != nullptr);
+  entries_.push_back(new_entry);
+  if (new_entry->HasClass()) {
+    mirror::Class* klass = new_entry->GetClass();
+    DCHECK(!klass->IsPrimitive());
+    klass_entries_.push_back(std::make_pair(GcRoot<mirror::Class>(klass), new_entry));
+  }
+  return *new_entry;
+}
+
 }  // namespace verifier
 }  // namespace art
 #endif  // ART_RUNTIME_VERIFIER_REG_TYPE_CACHE_INL_H_
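
The templated AddEntry above does double bookkeeping: every new type is
appended to entries_, and types with a resolved class are also recorded in
klass_entries_ so FindClass can scan only class-backed entries rather than the
whole cache. A self-contained sketch of that pattern; Klass and Entry are
simplified stand-ins, not the ART types:

#include <utility>
#include <vector>

struct Klass;  // stand-in for mirror::Class

struct Entry {  // stand-in for RegType
  Klass* klass = nullptr;  // null for unresolved and constant entries
  bool HasClass() const { return klass != nullptr; }
};

class SketchCache {
 public:
  template <typename EntryType>
  EntryType& AddEntry(EntryType* new_entry) {
    entries_.push_back(new_entry);
    // Only class-backed entries enter the side table used by FindClass.
    if (new_entry->HasClass()) {
      klass_entries_.emplace_back(new_entry->klass, new_entry);
    }
    return *new_entry;
  }

 private:
  std::vector<const Entry*> entries_;
  std::vector<std::pair<Klass*, const Entry*>> klass_entries_;
};
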
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index bb756e9..71ed4a2 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -16,7 +16,9 @@
 
 #include "reg_type_cache-inl.h"
 
+#include "base/arena_bit_vector.h"
 #include "base/casts.h"
+#include "base/scoped_arena_allocator.h"
 #include "base/stl_util.h"
 #include "class_linker-inl.h"
 #include "dex_file-inl.h"
@@ -29,9 +31,10 @@
 
 bool RegTypeCache::primitive_initialized_ = false;
 uint16_t RegTypeCache::primitive_count_ = 0;
-const PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
+const PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant -
+                                                               kMinSmallConstant + 1];
 
-static bool MatchingPrecisionForClass(const RegType* entry, bool precise)
+ALWAYS_INLINE static inline bool MatchingPrecisionForClass(const RegType* entry, bool precise)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   if (entry->IsPreciseReference() == precise) {
     // We were or weren't looking for a precise reference and we found what we need.
@@ -67,7 +70,8 @@
   DCHECK_EQ(entries_.size(), primitive_count_);
 }
 
-const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
+const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader,
+                                            const char* descriptor,
                                             bool precise) {
   DCHECK(RegTypeCache::primitive_initialized_);
   if (descriptor[1] == '\0') {
@@ -159,13 +163,20 @@
   return klass;
 }
 
-const RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
+StringPiece RegTypeCache::AddString(const StringPiece& string_piece) {
+  char* ptr = arena_.AllocArray<char>(string_piece.length());
+  memcpy(ptr, string_piece.data(), string_piece.length());
+  return StringPiece(ptr, string_piece.length());
+}
+
+const RegType& RegTypeCache::From(mirror::ClassLoader* loader,
+                                  const char* descriptor,
                                   bool precise) {
+  StringPiece sp_descriptor(descriptor);
   // Try looking up the class in the cache first. We use a StringPiece to avoid continual strlen
   // operations on the descriptor.
-  StringPiece descriptor_sp(descriptor);
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
-    if (MatchDescriptor(i, descriptor_sp, precise)) {
+    if (MatchDescriptor(i, sp_descriptor, precise)) {
       return *(entries_[i]);
     }
   }
@@ -186,12 +197,11 @@
     if (klass->CannotBeAssignedFromOtherTypes() || precise) {
       DCHECK(!(klass->IsAbstract()) || klass->IsArrayClass());
       DCHECK(!klass->IsInterface());
-      entry = new PreciseReferenceType(klass, descriptor_sp.as_string(), entries_.size());
+      entry = new (&arena_) PreciseReferenceType(klass, AddString(sp_descriptor), entries_.size());
     } else {
-      entry = new ReferenceType(klass, descriptor_sp.as_string(), entries_.size());
+      entry = new (&arena_) ReferenceType(klass, AddString(sp_descriptor), entries_.size());
     }
-    AddEntry(entry);
-    return *entry;
+    return AddEntry(entry);
   } else {  // Class not resolved.
     // We tried loading the class and failed, this might get an exception raised
     // so we want to clear it before we go on.
@@ -202,9 +212,8 @@
       DCHECK(!Thread::Current()->IsExceptionPending());
     }
     if (IsValidDescriptor(descriptor)) {
-      RegType* entry = new UnresolvedReferenceType(descriptor_sp.as_string(), entries_.size());
-      AddEntry(entry);
-      return *entry;
+      return AddEntry(
+          new (&arena_) UnresolvedReferenceType(AddString(sp_descriptor), entries_.size()));
     } else {
       // The descriptor is broken return the unknown type as there's nothing sensible that
       // could be done at runtime
@@ -213,50 +222,65 @@
   }
 }
 
-const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
+const RegType* RegTypeCache::FindClass(mirror::Class* klass, bool precise) const {
   DCHECK(klass != nullptr);
   if (klass->IsPrimitive()) {
     // Note: precise isn't used for primitive classes. A char is assignable to an int. All
     // primitive classes are final.
-    return RegTypeFromPrimitiveType(klass->GetPrimitiveType());
-  } else {
-    // Look for the reference in the list of entries to have.
-    for (size_t i = primitive_count_; i < entries_.size(); i++) {
-      const RegType* cur_entry = entries_[i];
-      if (cur_entry->klass_.Read() == klass && MatchingPrecisionForClass(cur_entry, precise)) {
-        return *cur_entry;
+    return &RegTypeFromPrimitiveType(klass->GetPrimitiveType());
+  }
+  for (auto& pair : klass_entries_) {
+    mirror::Class* const reg_klass = pair.first.Read();
+    if (reg_klass == klass) {
+      const RegType* reg_type = pair.second;
+      if (MatchingPrecisionForClass(reg_type, precise)) {
+        return reg_type;
       }
     }
-    // No reference to the class was found, create new reference.
-    RegType* entry;
-    if (precise) {
-      entry = new PreciseReferenceType(klass, descriptor, entries_.size());
-    } else {
-      entry = new ReferenceType(klass, descriptor, entries_.size());
-    }
-    AddEntry(entry);
-    return *entry;
   }
+  return nullptr;
 }
 
-RegTypeCache::RegTypeCache(bool can_load_classes) : can_load_classes_(can_load_classes) {
+const RegType* RegTypeCache::InsertClass(const StringPiece& descriptor,
+                                         mirror::Class* klass,
+                                         bool precise) {
+  // No existing reference to the class was found; create a new one.
+  DCHECK(FindClass(klass, precise) == nullptr);
+  RegType* const reg_type = precise
+      ? static_cast<RegType*>(
+          new (&arena_) PreciseReferenceType(klass, descriptor, entries_.size()))
+      : new (&arena_) ReferenceType(klass, descriptor, entries_.size());
+  return &AddEntry(reg_type);
+}
+
+const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
+  DCHECK(klass != nullptr);
+  const RegType* reg_type = FindClass(klass, precise);
+  if (reg_type == nullptr) {
+    reg_type = InsertClass(AddString(StringPiece(descriptor)), klass, precise);
+  }
+  return *reg_type;
+}
+
+RegTypeCache::RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena)
+    : entries_(arena.Adapter(kArenaAllocVerifier)),
+      klass_entries_(arena.Adapter(kArenaAllocVerifier)),
+      can_load_classes_(can_load_classes),
+      arena_(arena) {
   if (kIsDebugBuild) {
     Thread::Current()->AssertThreadSuspensionIsAllowable(gAborting == 0);
   }
-  entries_.reserve(64);
+  // The klass_entries_ array does not have primitives or small constants.
+  static constexpr size_t kNumReserveEntries = 32;
+  klass_entries_.reserve(kNumReserveEntries);
+  // We want to have room for additional entries after inserting primitives and small
+  // constants.
+  entries_.reserve(kNumReserveEntries + kNumPrimitivesAndSmallConstants);
   FillPrimitiveAndSmallConstantTypes();
 }
 
 RegTypeCache::~RegTypeCache() {
-  CHECK_LE(primitive_count_, entries_.size());
-  // Delete only the non primitive types.
-  if (entries_.size() == kNumPrimitivesAndSmallConstants) {
-    // All entries are from the global pool, nothing to delete.
-    return;
-  }
-  std::vector<const RegType*>::iterator non_primitive_begin = entries_.begin();
-  std::advance(non_primitive_begin, kNumPrimitivesAndSmallConstants);
-  STLDeleteContainerPointers(non_primitive_begin, entries_.end());
+  DCHECK_LE(primitive_count_, entries_.size());
 }
 
 void RegTypeCache::ShutDown() {
@@ -318,9 +342,9 @@
 }
 
 const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegType& right) {
-  BitVector types(1,                                    // Allocate at least a word.
-                  true,                                 // Is expandable.
-                  Allocator::GetMallocAllocator());     // TODO: Arenas in the verifier.
+  ArenaBitVector types(&arena_,
+                       kDefaultArenaBitVectorBytes * kBitsPerByte,  // Allocate at least 8 bytes.
+                       true);                                       // Is expandable.
   const RegType* left_resolved;
   if (left.IsUnresolvedMergedReference()) {
     const UnresolvedMergedType* left_merge = down_cast<const UnresolvedMergedType*>(&left);
@@ -361,20 +385,15 @@
       const BitVector& unresolved_part = cmp_type->GetUnresolvedTypes();
       // Use SameBitsSet. "types" is expandable to allow merging in the components, but the
       // BitVector in the final RegType will be made non-expandable.
-      if (&resolved_part == &resolved_parts_merged &&
-              types.SameBitsSet(&unresolved_part)) {
+      if (&resolved_part == &resolved_parts_merged && types.SameBitsSet(&unresolved_part)) {
         return *cur_entry;
       }
     }
   }
-
-  // Create entry.
-  RegType* entry = new UnresolvedMergedType(resolved_parts_merged,
-                                            types,
-                                            this,
-                                            entries_.size());
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(new (&arena_) UnresolvedMergedType(resolved_parts_merged,
+                                                     types,
+                                                     this,
+                                                     entries_.size()));
 }
 
 const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
@@ -391,14 +410,12 @@
       }
     }
   }
-  RegType* entry = new UnresolvedSuperClass(child.GetId(), this, entries_.size());
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(new (&arena_) UnresolvedSuperClass(child.GetId(), this, entries_.size()));
 }
 
 const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
   UninitializedType* entry = nullptr;
-  const std::string& descriptor(type.GetDescriptor());
+  const StringPiece& descriptor(type.GetDescriptor());
   if (type.IsUnresolvedTypes()) {
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
       const RegType* cur_entry = entries_[i];
@@ -409,7 +426,9 @@
         return *down_cast<const UnresolvedUninitializedRefType*>(cur_entry);
       }
     }
-    entry = new UnresolvedUninitializedRefType(descriptor, allocation_pc, entries_.size());
+    entry = new (&arena_) UnresolvedUninitializedRefType(descriptor,
+                                                         allocation_pc,
+                                                         entries_.size());
   } else {
     mirror::Class* klass = type.GetClass();
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
@@ -421,17 +440,19 @@
         return *down_cast<const UninitializedReferenceType*>(cur_entry);
       }
     }
-    entry = new UninitializedReferenceType(klass, descriptor, allocation_pc, entries_.size());
+    entry = new (&arena_) UninitializedReferenceType(klass,
+                                                     descriptor,
+                                                     allocation_pc,
+                                                     entries_.size());
   }
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(entry);
 }
 
 const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
   RegType* entry;
 
   if (uninit_type.IsUnresolvedTypes()) {
-    const std::string& descriptor(uninit_type.GetDescriptor());
+    const StringPiece& descriptor(uninit_type.GetDescriptor());
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
       const RegType* cur_entry = entries_[i];
       if (cur_entry->IsUnresolvedReference() &&
@@ -439,7 +460,7 @@
         return *cur_entry;
       }
     }
-    entry = new UnresolvedReferenceType(descriptor, entries_.size());
+    entry = new (&arena_) UnresolvedReferenceType(descriptor, entries_.size());
   } else {
     mirror::Class* klass = uninit_type.GetClass();
     if (uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) {
@@ -450,7 +471,7 @@
           return *cur_entry;
         }
       }
-      entry = new ReferenceType(klass, "", entries_.size());
+      entry = new (&arena_) ReferenceType(klass, "", entries_.size());
     } else if (!klass->IsPrimitive()) {
       // We're uninitialized because of allocation, look or create a precise type as allocations
       // may only create objects of that type.
@@ -469,18 +490,19 @@
           return *cur_entry;
         }
       }
-      entry = new PreciseReferenceType(klass, uninit_type.GetDescriptor(), entries_.size());
+      entry = new (&arena_) PreciseReferenceType(klass,
+                                                 uninit_type.GetDescriptor(),
+                                                 entries_.size());
     } else {
       return Conflict();
     }
   }
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(entry);
 }
 
 const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
   UninitializedType* entry;
-  const std::string& descriptor(type.GetDescriptor());
+  const StringPiece& descriptor(type.GetDescriptor());
   if (type.IsUnresolvedTypes()) {
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
       const RegType* cur_entry = entries_[i];
@@ -489,7 +511,7 @@
         return *down_cast<const UninitializedType*>(cur_entry);
       }
     }
-    entry = new UnresolvedUninitializedThisRefType(descriptor, entries_.size());
+    entry = new (&arena_) UnresolvedUninitializedThisRefType(descriptor, entries_.size());
   } else {
     mirror::Class* klass = type.GetClass();
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
@@ -498,10 +520,9 @@
         return *down_cast<const UninitializedType*>(cur_entry);
       }
     }
-    entry = new UninitializedThisReferenceType(klass, descriptor, entries_.size());
+    entry = new (&arena_) UninitializedThisReferenceType(klass, descriptor, entries_.size());
   }
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(entry);
 }
 
 const ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
@@ -515,12 +536,11 @@
   }
   ConstantType* entry;
   if (precise) {
-    entry = new PreciseConstType(value, entries_.size());
+    entry = new (&arena_) PreciseConstType(value, entries_.size());
   } else {
-    entry = new ImpreciseConstType(value, entries_.size());
+    entry = new (&arena_) ImpreciseConstType(value, entries_.size());
   }
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(entry);
 }
 
 const ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
@@ -533,12 +553,11 @@
   }
   ConstantType* entry;
   if (precise) {
-    entry = new PreciseConstLoType(value, entries_.size());
+    entry = new (&arena_) PreciseConstLoType(value, entries_.size());
   } else {
-    entry = new ImpreciseConstLoType(value, entries_.size());
+    entry = new (&arena_) ImpreciseConstLoType(value, entries_.size());
   }
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(entry);
 }
 
 const ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
@@ -551,32 +570,30 @@
   }
   ConstantType* entry;
   if (precise) {
-    entry = new PreciseConstHiType(value, entries_.size());
+    entry = new (&arena_) PreciseConstHiType(value, entries_.size());
   } else {
-    entry = new ImpreciseConstHiType(value, entries_.size());
+    entry = new (&arena_) ImpreciseConstHiType(value, entries_.size());
   }
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(entry);
 }
 
 const RegType& RegTypeCache::GetComponentType(const RegType& array, mirror::ClassLoader* loader) {
   if (!array.IsArrayTypes()) {
     return Conflict();
   } else if (array.IsUnresolvedTypes()) {
-    const std::string& descriptor(array.GetDescriptor());
-    const std::string component(descriptor.substr(1, descriptor.size() - 1));
-    return FromDescriptor(loader, component.c_str(), false);
+    const std::string descriptor(array.GetDescriptor().as_string());
+    return FromDescriptor(loader, descriptor.c_str() + 1, false);
   } else {
     mirror::Class* klass = array.GetClass()->GetComponentType();
     std::string temp;
+    const char* descriptor = klass->GetDescriptor(&temp);
     if (klass->IsErroneous()) {
       // Arrays may have erroneous component types, use unresolved in that case.
       // We assume that the primitive classes are not erroneous, so we know it is a
       // reference type.
-      return FromDescriptor(loader, klass->GetDescriptor(&temp), false);
+      return FromDescriptor(loader, descriptor, false);
     } else {
-      return FromClass(klass->GetDescriptor(&temp), klass,
-                       klass->CannotBeAssignedFromOtherTypes());
+      return FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes());
     }
   }
 }
@@ -618,10 +635,10 @@
   for (size_t i = primitive_count_; i < entries_.size(); ++i) {
     entries_[i]->VisitRoots(visitor, root_info);
   }
-}
-
-void RegTypeCache::AddEntry(RegType* new_entry) {
-  entries_.push_back(new_entry);
+  for (auto& pair : klass_entries_) {
+    GcRoot<mirror::Class>& root = pair.first;
+    root.VisitRoot(visitor, root_info);
+  }
 }
 
 }  // namespace verifier
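
Two ideas in this file are worth spelling out. AddString interns descriptor
bytes into arena memory so the returned view outlives the caller's buffer, and
the destructor shrinks to a DCHECK because every entry is placement-allocated
in the arena and released with it in bulk. A sketch of the interning step,
with BumpArena as a hypothetical stand-in for ScopedArenaAllocator:

#include <cstddef>
#include <cstring>
#include <string_view>
#include <vector>

// Hypothetical bump arena: individual allocations are never freed; all
// memory is released at once when the arena is destroyed.
class BumpArena {
 public:
  char* AllocArray(size_t n) {
    chunks_.emplace_back(n);
    return chunks_.back().data();
  }

 private:
  std::vector<std::vector<char>> chunks_;
};

// Sketch of AddString: copy the bytes into arena-owned storage and hand back
// a view of the copy, so cached types never point at a caller's temporary.
std::string_view AddString(BumpArena& arena, std::string_view s) {
  if (s.empty()) {
    return std::string_view();
  }
  char* ptr = arena.AllocArray(s.length());
  std::memcpy(ptr, s.data(), s.length());
  return std::string_view(ptr, s.length());
}
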
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 93948a1..6f9a04e 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -19,6 +19,7 @@
 
 #include "base/casts.h"
 #include "base/macros.h"
+#include "base/scoped_arena_containers.h"
 #include "object_callbacks.h"
 #include "reg_type.h"
 #include "runtime.h"
@@ -31,15 +32,19 @@
   class Class;
   class ClassLoader;
 }  // namespace mirror
+class ScopedArenaAllocator;
 class StringPiece;
 
 namespace verifier {
 
 class RegType;
 
+// Use 8 bytes since that is the default arena allocator alignment.
+static constexpr size_t kDefaultArenaBitVectorBytes = 8;
+
 class RegTypeCache {
  public:
-  explicit RegTypeCache(bool can_load_classes);
+  explicit RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena);
   ~RegTypeCache();
   static void Init() SHARED_REQUIRES(Locks::mutator_lock_) {
     if (!RegTypeCache::primitive_initialized_) {
@@ -53,6 +58,13 @@
   const art::verifier::RegType& GetFromId(uint16_t id) const;
   const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
       SHARED_REQUIRES(Locks::mutator_lock_);
+  // Find an existing RegType for the given class and precision, returning null if not found.
+  const RegType* FindClass(mirror::Class* klass, bool precise) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  // Insert a new class with the specified descriptor; it must not already be in the cache.
+  const RegType* InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool precise)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  // Get or insert a reg type for a descriptor, klass, and precision.
   const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
       SHARED_REQUIRES(Locks::mutator_lock_);
   const ConstantType& FromCat1Const(int32_t value, bool precise)
@@ -150,7 +162,13 @@
   const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void AddEntry(RegType* new_entry);
+  // Returns the passed in RegType.
+  template <class RegTypeType>
+  RegTypeType& AddEntry(RegTypeType* new_entry) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Copy a string piece into arena-backed storage so that it stays live for the lifetime
+  // of the verifier.
+  StringPiece AddString(const StringPiece& string_piece);
 
   template <class Type>
   static const Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
@@ -160,7 +178,8 @@
   // A quick look up for popular small constants.
   static constexpr int32_t kMinSmallConstant = -1;
   static constexpr int32_t kMaxSmallConstant = 4;
-  static const PreciseConstType* small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
+  static const PreciseConstType* small_precise_constants_[kMaxSmallConstant -
+                                                          kMinSmallConstant + 1];
 
   static constexpr size_t kNumPrimitivesAndSmallConstants =
       12 + (kMaxSmallConstant - kMinSmallConstant + 1);
@@ -172,11 +191,17 @@
   static uint16_t primitive_count_;
 
   // The actual storage for the RegTypes.
-  std::vector<const RegType*> entries_;
+  ScopedArenaVector<const RegType*> entries_;
+
+  // Fast lookup for quickly finding entries that have a matching class.
+  ScopedArenaVector<std::pair<GcRoot<mirror::Class>, const RegType*>> klass_entries_;
 
   // Whether or not we're allowed to load classes.
   const bool can_load_classes_;
 
+  // Arena allocator backing the cache entries; not owned.
+  ScopedArenaAllocator& arena_;
+
   DISALLOW_COPY_AND_ASSIGN(RegTypeCache);
 };
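
The `new (&arena_) SomeType(...)` spelling used throughout the .cc changes
relies on a placement operator new that carves object storage out of the
arena; entries are never deleted individually. A sketch of the operator pair
under the same hypothetical BumpArena stand-in (the real ART plumbing differs
in detail):

#include <cstddef>
#include <cstdint>
#include <new>
#include <vector>

class BumpArena {  // hypothetical stand-in for ScopedArenaAllocator
 public:
  void* Alloc(size_t bytes) {
    blocks_.emplace_back(bytes);
    return blocks_.back().data();  // suitably aligned; freed in bulk
  }

 private:
  std::vector<std::vector<uint8_t>> blocks_;
};

class ArenaAllocated {
 public:
  // Enables `new (&arena) Derived(...)` for any subclass.
  static void* operator new(size_t size, BumpArena* arena) {
    return arena->Alloc(size);
  }
  // Matching placement delete: called only if a constructor throws.
  static void operator delete(void*, BumpArena*) {}
  // Regular delete is a no-op; the arena owns the memory.
  static void operator delete(void*) {}
};

struct SketchType : ArenaAllocated {
  explicit SketchType(int id) : id_(id) {}
  int id_;
};

// Usage: BumpArena arena; SketchType* t = new (&arena) SketchType(42);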
 
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 971b1f5..22ac7e4 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -20,6 +20,7 @@
 
 #include "base/bit_vector.h"
 #include "base/casts.h"
+#include "base/scoped_arena_allocator.h"
 #include "common_runtime_test.h"
 #include "reg_type_cache-inl.h"
 #include "reg_type-inl.h"
@@ -29,12 +30,23 @@
 namespace art {
 namespace verifier {
 
-class RegTypeTest : public CommonRuntimeTest {};
+class BaseRegTypeTest : public CommonRuntimeTest {
+ public:
+  void PostRuntimeCreate() OVERRIDE {
+    stack.reset(new ArenaStack(Runtime::Current()->GetArenaPool()));
+    allocator.reset(new ScopedArenaAllocator(stack.get()));
+  }
+
+  std::unique_ptr<ArenaStack> stack;
+  std::unique_ptr<ScopedArenaAllocator> allocator;
+};
+
+class RegTypeTest : public BaseRegTypeTest {};
 
 TEST_F(RegTypeTest, ConstLoHi) {
   // Tests creating primitive types types.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
   const RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
   const RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
@@ -56,7 +68,7 @@
 
 TEST_F(RegTypeTest, Pairs) {
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   int64_t val = static_cast<int32_t>(1234);
   const RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
   const RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
@@ -80,7 +92,7 @@
 
 TEST_F(RegTypeTest, Primitives) {
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
 
   const RegType& bool_reg_type = cache.Boolean();
   EXPECT_FALSE(bool_reg_type.IsUndefined());
@@ -347,13 +359,13 @@
   EXPECT_TRUE(double_reg_type.HasClass());
 }
 
-class RegTypeReferenceTest : public CommonRuntimeTest {};
+class RegTypeReferenceTest : public BaseRegTypeTest {};
 
 TEST_F(RegTypeReferenceTest, JavalangObjectImprecise) {
   // Tests matching precisions. A reference type that was created precise doesn't
   // match the one that is imprecise.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& imprecise_obj = cache.JavaLangObject(false);
   const RegType& precise_obj = cache.JavaLangObject(true);
   const RegType& precise_obj_2 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true);
@@ -368,7 +380,7 @@
   // Tests creating unresolved types. Miss for the first time asking the cache and
   // a hit second time.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
   EXPECT_TRUE(ref_type_0.IsNonZeroReferenceTypes());
@@ -384,7 +396,7 @@
 TEST_F(RegTypeReferenceTest, UnresolvedUnintializedType) {
   // Tests creating types uninitialized types from unresolved types.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
   const RegType& ref_type = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
@@ -406,7 +418,7 @@
 TEST_F(RegTypeReferenceTest, Dump) {
   // Tests types for proper Dump messages.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& unresolved_ref = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   const RegType& unresolved_ref_another = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExistEither;", true);
   const RegType& resolved_ref = cache.JavaLangString();
@@ -431,7 +443,7 @@
   // Hit the second time. Then check for the same effect when using
   // The JavaLangObject method instead of FromDescriptor. String class is final.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& ref_type = cache.JavaLangString();
   const RegType& ref_type_2 = cache.JavaLangString();
   const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/String;", true);
@@ -451,7 +463,7 @@
   // Hit the second time. Then I am checking for the same effect when using
   // The JavaLangObject method instead of FromDescriptor. Object Class in not final.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& ref_type = cache.JavaLangObject(true);
   const RegType& ref_type_2 = cache.JavaLangObject(true);
   const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true);
@@ -464,7 +476,7 @@
   // Tests merging logic
   // String and object , LUB is object.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache_new(true);
+  RegTypeCache cache_new(true, *allocator);
   const RegType& string = cache_new.JavaLangString();
   const RegType& Object = cache_new.JavaLangObject(true);
   EXPECT_TRUE(string.Merge(Object, &cache_new).IsJavaLangObject());
@@ -487,7 +499,7 @@
 TEST_F(RegTypeTest, MergingFloat) {
   // Testing merging logic with float and float constants.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache_new(true);
+  RegTypeCache cache_new(true, *allocator);
 
   constexpr int32_t kTestConstantValue = 10;
   const RegType& float_type = cache_new.Float();
@@ -518,7 +530,7 @@
 TEST_F(RegTypeTest, MergingLong) {
   // Testing merging logic with long and long constants.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache_new(true);
+  RegTypeCache cache_new(true, *allocator);
 
   constexpr int32_t kTestConstantValue = 10;
   const RegType& long_lo_type = cache_new.LongLo();
@@ -572,7 +584,7 @@
 TEST_F(RegTypeTest, MergingDouble) {
   // Testing merging logic with double and double constants.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache_new(true);
+  RegTypeCache cache_new(true, *allocator);
 
   constexpr int32_t kTestConstantValue = 10;
   const RegType& double_lo_type = cache_new.DoubleLo();
@@ -626,7 +638,7 @@
 TEST_F(RegTypeTest, ConstPrecision) {
   // Tests creating primitive types types.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache_new(true);
+  RegTypeCache cache_new(true, *allocator);
   const RegType& imprecise_const = cache_new.FromCat1Const(10, false);
   const RegType& precise_const = cache_new.FromCat1Const(10, true);
 
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 1df2428..57fb701 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -182,6 +182,21 @@
   }
 }
 
+inline RegisterLine* RegisterLine::Create(size_t num_regs, MethodVerifier* verifier) {
+  void* memory = verifier->GetArena().Alloc(OFFSETOF_MEMBER(RegisterLine, line_) +
+                                                (num_regs * sizeof(uint16_t)));
+  return new (memory) RegisterLine(num_regs, verifier);
+}
+
+inline RegisterLine::RegisterLine(size_t num_regs, MethodVerifier* verifier)
+    : num_regs_(num_regs),
+      monitors_(verifier->GetArena().Adapter(kArenaAllocVerifier)),
+      reg_to_lock_depths_(std::less<uint32_t>(), verifier->GetArena().Adapter(kArenaAllocVerifier)),
+      this_initialized_(false) {
+  std::uninitialized_fill_n(line_, num_regs_, 0u);
+  SetResultTypeToUnknown(verifier);
+}
+
 }  // namespace verifier
 }  // namespace art
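
RegisterLine ends in a trailing array, so Create sizes one allocation as
header plus num_regs uint16_t slots and placement-constructs into it; in the
patch the block comes from the verifier's arena, so there is no matching
delete. A compact sketch of the idiom using standard offsetof in place of
OFFSETOF_MEMBER and plain heap memory in place of the arena:

#include <cstddef>
#include <cstdint>
#include <memory>
#include <new>

class Line {
 public:
  static Line* Create(size_t num_regs) {
    // Size = offset of the trailing array + one slot per register.
    void* memory = ::operator new(offsetof(Line, line_) +
                                  num_regs * sizeof(uint16_t));
    return new (memory) Line(num_regs);
  }

  uint16_t* regs() { return line_; }

 private:
  explicit Line(size_t num_regs) : num_regs_(num_regs) {
    std::uninitialized_fill_n(line_, num_regs_, static_cast<uint16_t>(0u));
  }

  const size_t num_regs_;
  uint16_t line_[1];  // really num_regs_ slots; sized by Create()
};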
 
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index f48b1e1..37343b5 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -412,12 +412,9 @@
   }
 }
 
-// Check whether there is another register in the search map that is locked the same way as the
-// register in the src map. This establishes an alias.
-static bool FindLockAliasedRegister(
-    uint32_t src,
-    const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& src_map,
-    const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& search_map) {
+bool FindLockAliasedRegister(uint32_t src,
+                             const RegisterLine::RegToLockDepthsMap& src_map,
+                             const RegisterLine::RegToLockDepthsMap& search_map) {
   auto it = src_map.find(src);
   if (it == src_map.end()) {
     // "Not locked" is trivially aliased.
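
The hunk above only retypes the parameters; the function body is truncated
here. A reconstruction of the aliasing idea under the assumption, taken from
the header comment, that each map value is a bitmask of monitor-stack indices:
some other register must hold exactly the same set of lock depths. This is a
sketch of the concept, not the exact ART function:

#include <cstdint>
#include <map>

using RegToLockDepthsMap = std::map<uint32_t, uint32_t>;

bool FindLockAliasedRegisterSketch(uint32_t src,
                                   const RegToLockDepthsMap& src_map,
                                   const RegToLockDepthsMap& search_map) {
  auto it = src_map.find(src);
  if (it == src_map.end()) {
    return true;  // "Not locked" is trivially aliased.
  }
  const uint32_t src_lock_depths = it->second;
  // An alias is any other register locked in exactly the same way.
  for (const auto& pair : search_map) {
    if (pair.first != src && pair.second == src_lock_depths) {
      return true;
    }
  }
  return false;
}
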
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 46db1c6..b2f5555 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -20,6 +20,7 @@
 #include <memory>
 #include <vector>
 
+#include "base/scoped_arena_containers.h"
 #include "safe_map.h"
 
 namespace art {
@@ -58,11 +59,11 @@
 // stack of entered monitors (identified by code unit offset).
 class RegisterLine {
  public:
-  static RegisterLine* Create(size_t num_regs, MethodVerifier* verifier) {
-    void* memory = operator new(sizeof(RegisterLine) + (num_regs * sizeof(uint16_t)));
-    RegisterLine* rl = new (memory) RegisterLine(num_regs, verifier);
-    return rl;
-  }
+  // A map from register to a bit vector of indices into the monitors_ stack.
+  using RegToLockDepthsMap = ScopedArenaSafeMap<uint32_t, uint32_t>;
+
+  // Create a register line of num_regs registers.
+  static RegisterLine* Create(size_t num_regs, MethodVerifier* verifier);
 
   // Implement category-1 "move" instructions. Copy a 32-bit value from "vsrc" to "vdst".
   void CopyRegister1(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc, TypeCategory cat)
@@ -311,11 +312,11 @@
   // Write a bit at each register location that holds a reference.
   void WriteReferenceBitMap(MethodVerifier* verifier, std::vector<uint8_t>* data, size_t max_bytes);
 
-  size_t GetMonitorEnterCount() {
+  size_t GetMonitorEnterCount() const {
     return monitors_.size();
   }
 
-  uint32_t GetMonitorEnterDexPc(size_t i) {
+  uint32_t GetMonitorEnterDexPc(size_t i) const {
     return monitors_[i];
   }
 
@@ -375,11 +376,7 @@
     reg_to_lock_depths_.erase(reg);
   }
 
-  RegisterLine(size_t num_regs, MethodVerifier* verifier)
-      : num_regs_(num_regs), this_initialized_(false) {
-    memset(&line_, 0, num_regs_ * sizeof(uint16_t));
-    SetResultTypeToUnknown(verifier);
-  }
+  RegisterLine(size_t num_regs, MethodVerifier* verifier);
 
   // Storage for the result register's type, valid after an invocation.
   uint16_t result_[2];
@@ -388,17 +385,18 @@
   const uint32_t num_regs_;
 
   // A stack of monitor enter locations.
-  std::vector<uint32_t, TrackingAllocator<uint32_t, kAllocatorTagVerifier>> monitors_;
+  ScopedArenaVector<uint32_t> monitors_;
+
   // A map from register to a bit vector of indices into the monitors_ stack. As we pop the monitor
   // stack we verify that monitor-enter/exit are correctly nested. That is, if there was a
   // monitor-enter on v5 and then on v6, we expect the monitor-exit to be on v6 then on v5.
-  AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier> reg_to_lock_depths_;
+  RegToLockDepthsMap reg_to_lock_depths_;
 
   // Whether "this" initialization (a constructor supercall) has happened.
   bool this_initialized_;
 
   // An array of RegType Ids associated with each dex register.
-  uint16_t line_[0];
+  uint16_t line_[1];
 
   DISALLOW_COPY_AND_ASSIGN(RegisterLine);
 };
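
monitors_ and reg_to_lock_depths_ move to ScopedArenaVector and
ScopedArenaSafeMap, i.e. standard containers whose allocator bump-allocates
from the arena, so popping the verifier's arena frame frees every node at
once. A minimal stand-in for what Adapter() hands those containers might look
like this (sketch; the real ScopedArenaAllocatorAdapter also carries the
kArenaAllocVerifier kind tag and debug tracking):

#include <cstddef>
#include <cstdint>
#include <vector>

class BumpArena {  // hypothetical stand-in for ScopedArenaAllocator
 public:
  void* Alloc(size_t bytes) {
    blocks_.emplace_back(bytes);
    return blocks_.back().data();
  }

 private:
  std::vector<std::vector<uint8_t>> blocks_;  // released together
};

// Minimal C++ allocator routing container storage into the arena.
template <typename T>
struct ArenaStdAllocator {
  using value_type = T;

  explicit ArenaStdAllocator(BumpArena* a) : arena(a) {}
  template <typename U>
  ArenaStdAllocator(const ArenaStdAllocator<U>& other) : arena(other.arena) {}

  T* allocate(size_t n) {
    return static_cast<T*>(arena->Alloc(n * sizeof(T)));
  }
  void deallocate(T*, size_t) {}  // no-op: the arena frees in bulk

  BumpArena* arena;
};

template <typename T, typename U>
bool operator==(const ArenaStdAllocator<T>& a, const ArenaStdAllocator<U>& b) {
  return a.arena == b.arena;
}
template <typename T, typename U>
bool operator!=(const ArenaStdAllocator<T>& a, const ArenaStdAllocator<U>& b) {
  return !(a == b);
}

// Arena-backed analogue of the monitors_ stack:
// BumpArena arena;
// std::vector<uint32_t, ArenaStdAllocator<uint32_t>> monitors(
//     ArenaStdAllocator<uint32_t>(&arena));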