Quick: Rewrite type inference pass.

Use method signatures, field types, and types embedded in dex
insns for type inference. Perform the type inference in two
phases: first a simple pass that records all types implied
by individual insns, and then an iterative pass that propagates
those types further via phi, move, if-cc and aget/aput insns.
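
For example, in

    const/4 v0, 0x0
    move v1, v0
    add-float v2, v1, v1

the simple pass marks the uses and the def of the add-float as
float, and the iterative pass then propagates the float type
back through the move to v1 and v0.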

Bug: 19419671
Change-Id: Id38579d48a44fc5eadd13780afb6d370093056f9
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index e0f0ae5..7d76795 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -191,6 +191,7 @@
   compiler/dex/mir_graph_test.cc \
   compiler/dex/mir_optimization_test.cc \
   compiler/dex/quick/quick_cfi_test.cc \
+  compiler/dex/type_inference_test.cc \
   compiler/dwarf/dwarf_test.cc \
   compiler/driver/compiler_driver_test.cc \
   compiler/elf_writer_test.cc \
@@ -227,6 +228,7 @@
   compiler/utils/arena_allocator_test.cc \
   compiler/utils/dedupe_set_test.cc \
   compiler/utils/swap_space_test.cc \
+  compiler/utils/test_dex_file_builder_test.cc \
   compiler/utils/arm/managed_register_arm_test.cc \
   compiler/utils/arm64/managed_register_arm64_test.cc \
   compiler/utils/x86/managed_register_x86_test.cc \
diff --git a/compiler/Android.mk b/compiler/Android.mk
index ac95abd..0ad77b4 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -23,6 +23,7 @@
 	dex/global_value_numbering.cc \
 	dex/gvn_dead_code_elimination.cc \
 	dex/local_value_numbering.cc \
+	dex/type_inference.cc \
 	dex/quick/arm/assemble_arm.cc \
 	dex/quick/arm/call_arm.cc \
 	dex/quick/arm/fp_arm.cc \
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index b4559ef..c538d0b 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -15,7 +15,6 @@
  */
 
 #include "base/logging.h"
-#include "dataflow_iterator.h"
 #include "dataflow_iterator-inl.h"
 #include "dex/mir_field_info.h"
 #include "global_value_numbering.h"
@@ -260,10 +259,8 @@
       mir->ssa_rep = &ssa_reps_[i];
       mir->ssa_rep->num_uses = def->num_uses;
       mir->ssa_rep->uses = const_cast<int32_t*>(def->uses);  // Not modified by LVN.
-      mir->ssa_rep->fp_use = nullptr;  // Not used by LVN.
       mir->ssa_rep->num_defs = def->num_defs;
       mir->ssa_rep->defs = const_cast<int32_t*>(def->defs);  // Not modified by LVN.
-      mir->ssa_rep->fp_def = nullptr;  // Not used by LVN.
       mir->dalvikInsn.opcode = def->opcode;
       mir->offset = i;  // LVN uses offset only for debug output
       mir->optimization_flags = 0u;
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
index ec12221..d7f36f7 100644
--- a/compiler/dex/gvn_dead_code_elimination.cc
+++ b/compiler/dex/gvn_dead_code_elimination.cc
@@ -478,7 +478,7 @@
       mir->dalvikInsn.opcode - Instruction::ADD_INT_2ADDR +  Instruction::ADD_INT);
 }
 
-MIR* GvnDeadCodeElimination::CreatePhi(int s_reg, bool fp) {
+MIR* GvnDeadCodeElimination::CreatePhi(int s_reg) {
   int v_reg = mir_graph_->SRegToVReg(s_reg);
   MIR* phi = mir_graph_->NewMIR();
   phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
@@ -491,11 +491,9 @@
 
   mir_graph_->AllocateSSADefData(phi, 1);
   phi->ssa_rep->defs[0] = s_reg;
-  phi->ssa_rep->fp_def[0] = fp;
 
   size_t num_uses = bb_->predecessors.size();
   mir_graph_->AllocateSSAUseData(phi, num_uses);
-  std::fill_n(phi->ssa_rep->fp_use, num_uses, fp);
   size_t idx = 0u;
   for (BasicBlockId pred_id : bb_->predecessors) {
     BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
@@ -523,14 +521,12 @@
   // defining MIR for that dalvik reg, the preserved values must come from its predecessors
   // and we need to create a new Phi (a degenerate Phi if there's only a single predecessor).
   if (def_change == kNPos) {
-    bool fp = mir_to_kill->ssa_rep->fp_def[0];
     if (wide) {
       DCHECK_EQ(new_s_reg + 1, mir_to_kill->ssa_rep->defs[1]);
-      DCHECK_EQ(fp, mir_to_kill->ssa_rep->fp_def[1]);
       DCHECK_EQ(mir_graph_->SRegToVReg(new_s_reg) + 1, mir_graph_->SRegToVReg(new_s_reg + 1));
-      CreatePhi(new_s_reg + 1, fp);  // High word Phi.
+      CreatePhi(new_s_reg + 1);  // High word Phi.
     }
-    return CreatePhi(new_s_reg, fp);
+    return CreatePhi(new_s_reg);
   } else {
     DCHECK_LT(def_change, last_change);
     DCHECK_LE(last_change, vreg_chains_.NumMIRs());
diff --git a/compiler/dex/gvn_dead_code_elimination.h b/compiler/dex/gvn_dead_code_elimination.h
index 9a19f29..f2378f2 100644
--- a/compiler/dex/gvn_dead_code_elimination.h
+++ b/compiler/dex/gvn_dead_code_elimination.h
@@ -128,7 +128,7 @@
   void KillMIR(MIRData* data);
   static void KillMIR(MIR* mir);
   static void ChangeBinOp2AddrToPlainBinOp(MIR* mir);
-  MIR* CreatePhi(int s_reg, bool fp);
+  MIR* CreatePhi(int s_reg);
   MIR* RenameSRegDefOrCreatePhi(uint16_t def_change, uint16_t last_change, MIR* mir_to_kill);
 
   // Update state variables going backwards through a MIR.
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index 566527a..0393410 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -158,10 +158,8 @@
       mir->ssa_rep = &ssa_reps_[i];
       mir->ssa_rep->num_uses = def->num_uses;
       mir->ssa_rep->uses = const_cast<int32_t*>(def->uses);  // Not modified by LVN.
-      mir->ssa_rep->fp_use = nullptr;  // Not used by LVN.
       mir->ssa_rep->num_defs = def->num_defs;
       mir->ssa_rep->defs = const_cast<int32_t*>(def->defs);  // Not modified by LVN.
-      mir->ssa_rep->fp_def = nullptr;  // Not used by LVN.
       mir->dalvikInsn.opcode = def->opcode;
       mir->offset = i;  // LVN uses offset only for debug output
       mir->optimization_flags = 0u;
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 2a920a4..eaaf540 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -123,7 +123,7 @@
   DF_UA | DF_NULL_CHK_A | DF_REF_A,
 
   // 1F CHK_CAST vAA, type@BBBB
-  DF_UA | DF_REF_A | DF_UMS,
+  DF_UA | DF_REF_A | DF_CHK_CAST | DF_UMS,
 
   // 20 INSTANCE_OF vA, vB, type@CCCC
   DF_DA | DF_UB | DF_CORE_A | DF_REF_B | DF_UMS,
@@ -159,10 +159,10 @@
   DF_NOP,
 
   // 2B PACKED_SWITCH vAA, +BBBBBBBB
-  DF_UA,
+  DF_UA | DF_CORE_A,
 
   // 2C SPARSE_SWITCH vAA, +BBBBBBBB
-  DF_UA,
+  DF_UA | DF_CORE_A,
 
   // 2D CMPL_FLOAT vAA, vBB, vCC
   DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
@@ -180,22 +180,22 @@
   DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
 
   // 32 IF_EQ vA, vB, +CCCC
-  DF_UA | DF_UB,
+  DF_UA | DF_UB | DF_SAME_TYPE_AB,
 
   // 33 IF_NE vA, vB, +CCCC
-  DF_UA | DF_UB,
+  DF_UA | DF_UB | DF_SAME_TYPE_AB,
 
   // 34 IF_LT vA, vB, +CCCC
-  DF_UA | DF_UB,
+  DF_UA | DF_UB | DF_SAME_TYPE_AB,
 
   // 35 IF_GE vA, vB, +CCCC
-  DF_UA | DF_UB,
+  DF_UA | DF_UB | DF_SAME_TYPE_AB,
 
   // 36 IF_GT vA, vB, +CCCC
-  DF_UA | DF_UB,
+  DF_UA | DF_UB | DF_SAME_TYPE_AB,
 
   // 37 IF_LE vA, vB, +CCCC
-  DF_UA | DF_UB,
+  DF_UA | DF_UB | DF_SAME_TYPE_AB,
 
   // 38 IF_EQZ vAA, +BBBB
   DF_UA,
@@ -1080,8 +1080,6 @@
 
   if (mir->ssa_rep->num_uses_allocated < num_uses) {
     mir->ssa_rep->uses = arena_->AllocArray<int32_t>(num_uses, kArenaAllocDFInfo);
-    // NOTE: will be filled in during type & size inference pass
-    mir->ssa_rep->fp_use = arena_->AllocArray<bool>(num_uses, kArenaAllocDFInfo);
   }
 }
 
@@ -1090,7 +1088,6 @@
 
   if (mir->ssa_rep->num_defs_allocated < num_defs) {
     mir->ssa_rep->defs = arena_->AllocArray<int32_t>(num_defs, kArenaAllocDFInfo);
-    mir->ssa_rep->fp_def = arena_->AllocArray<bool>(num_defs, kArenaAllocDFInfo);
   }
 }
 
@@ -1287,35 +1284,27 @@
     if (df_attributes & DF_HAS_USES) {
       num_uses = 0;
       if (df_attributes & DF_UA) {
-        mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
         HandleSSAUse(mir->ssa_rep->uses, d_insn->vA, num_uses++);
         if (df_attributes & DF_A_WIDE) {
-          mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
           HandleSSAUse(mir->ssa_rep->uses, d_insn->vA+1, num_uses++);
         }
       }
       if (df_attributes & DF_UB) {
-        mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
         HandleSSAUse(mir->ssa_rep->uses, d_insn->vB, num_uses++);
         if (df_attributes & DF_B_WIDE) {
-          mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
           HandleSSAUse(mir->ssa_rep->uses, d_insn->vB+1, num_uses++);
         }
       }
       if (df_attributes & DF_UC) {
-        mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
         HandleSSAUse(mir->ssa_rep->uses, d_insn->vC, num_uses++);
         if (df_attributes & DF_C_WIDE) {
-          mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
           HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+1, num_uses++);
         }
       }
     }
     if (df_attributes & DF_HAS_DEFS) {
-      mir->ssa_rep->fp_def[0] = df_attributes & DF_FP_A;
       HandleSSADef(mir->ssa_rep->defs, d_insn->vA, 0);
       if (df_attributes & DF_A_WIDE) {
-        mir->ssa_rep->fp_def[1] = df_attributes & DF_FP_A;
         HandleSSADef(mir->ssa_rep->defs, d_insn->vA+1, 1);
       }
     }
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index ca56958..11773e7 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -179,6 +179,7 @@
   friend class GlobalValueNumberingTest;
   friend class GvnDeadCodeEliminationTest;
   friend class LocalValueNumberingTest;
+  friend class TypeInferenceTest;
 };
 
 class MirSFieldLoweringInfo : public MirFieldInfo {
@@ -254,6 +255,7 @@
   friend class GlobalValueNumberingTest;
   friend class GvnDeadCodeEliminationTest;
   friend class LocalValueNumberingTest;
+  friend class TypeInferenceTest;
 };
 
 }  // namespace art
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 4d34038..7d0729f 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -695,9 +695,10 @@
   current_method_ = m_units_.size();
   current_offset_ = 0;
   // TODO: will need to snapshot stack image and use that as the mir context identification.
-  m_units_.push_back(new DexCompilationUnit(cu_, class_loader, Runtime::Current()->GetClassLinker(),
-                     dex_file, current_code_item_, class_def_idx, method_idx, access_flags,
-                     cu_->compiler_driver->GetVerifiedMethod(&dex_file, method_idx)));
+  m_units_.push_back(new (arena_) DexCompilationUnit(
+      cu_, class_loader, Runtime::Current()->GetClassLinker(), dex_file,
+      current_code_item_, class_def_idx, method_idx, access_flags,
+      cu_->compiler_driver->GetVerifiedMethod(&dex_file, method_idx)));
   const uint16_t* code_ptr = current_code_item_->insns_;
   const uint16_t* code_end =
       current_code_item_->insns_ + current_code_item_->insns_size_in_code_units_;
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 85b1344..d7e4dd9 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -39,6 +39,7 @@
 class GlobalValueNumbering;
 class GvnDeadCodeElimination;
 class PassManager;
+class TypeInference;
 
 // Forward declaration.
 class MIRGraph;
@@ -64,6 +65,7 @@
   kNullTransferSrc0,     // Object copy src[0] -> dst.
   kNullTransferSrcN,     // Phi null check state transfer.
   kRangeCheckC,          // Range check of C.
+  kCheckCastA,           // Check cast of A.
   kFPA,
   kFPB,
   kFPC,
@@ -73,6 +75,7 @@
   kRefA,
   kRefB,
   kRefC,
+  kSameTypeAB,           // A and B have the same type but it can be core/ref/fp (IF_cc).
   kUsesMethodStar,       // Implicit use of Method*.
   kUsesIField,           // Accesses an instance field (IGET/IPUT).
   kUsesSField,           // Accesses a static field (SGET/SPUT).
@@ -101,6 +104,7 @@
 #define DF_NULL_TRANSFER_0      (UINT64_C(1) << kNullTransferSrc0)
 #define DF_NULL_TRANSFER_N      (UINT64_C(1) << kNullTransferSrcN)
 #define DF_RANGE_CHK_C          (UINT64_C(1) << kRangeCheckC)
+#define DF_CHK_CAST             (UINT64_C(1) << kCheckCastA)
 #define DF_FP_A                 (UINT64_C(1) << kFPA)
 #define DF_FP_B                 (UINT64_C(1) << kFPB)
 #define DF_FP_C                 (UINT64_C(1) << kFPC)
@@ -110,6 +114,7 @@
 #define DF_REF_A                (UINT64_C(1) << kRefA)
 #define DF_REF_B                (UINT64_C(1) << kRefB)
 #define DF_REF_C                (UINT64_C(1) << kRefC)
+#define DF_SAME_TYPE_AB         (UINT64_C(1) << kSameTypeAB)
 #define DF_UMS                  (UINT64_C(1) << kUsesMethodStar)
 #define DF_IFIELD               (UINT64_C(1) << kUsesIField)
 #define DF_SFIELD               (UINT64_C(1) << kUsesSField)
@@ -217,13 +222,11 @@
  */
 struct SSARepresentation {
   int32_t* uses;
-  bool* fp_use;
   int32_t* defs;
-  bool* fp_def;
-  int16_t num_uses_allocated;
-  int16_t num_defs_allocated;
-  int16_t num_uses;
-  int16_t num_defs;
+  uint16_t num_uses_allocated;
+  uint16_t num_defs_allocated;
+  uint16_t num_uses;
+  uint16_t num_defs;
 
   static uint32_t GetStartUseIndex(Instruction::Code opcode);
 };
@@ -334,7 +337,8 @@
     // SGET/SPUT lowering info index, points to MIRGraph::sfield_lowering_infos_. Due to limit on
     // the number of code points (64K) and size of SGET/SPUT insn (2), this will never exceed 32K.
     uint32_t sfield_lowering_info;
-    // INVOKE data index, points to MIRGraph::method_lowering_infos_.
+    // INVOKE data index, points to MIRGraph::method_lowering_infos_. Also used for inlined
+    // CONST and MOVE insns (with MIR_CALLEE) to remember the invoke for type inference.
     uint32_t method_lowering_info;
   } meta;
 
@@ -647,6 +651,10 @@
    */
   void DumpCFG(const char* dir_prefix, bool all_blocks, const char* suffix = nullptr);
 
+  bool HasCheckCast() const {
+    return (merged_df_flags_ & DF_CHK_CAST) != 0u;
+  }
+
   bool HasFieldAccess() const {
     return (merged_df_flags_ & (DF_IFIELD | DF_SFIELD)) != 0u;
   }
@@ -691,8 +699,16 @@
   void DoCacheMethodLoweringInfo();
 
   const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) const {
-    DCHECK_LT(mir->meta.method_lowering_info, method_lowering_infos_.size());
-    return method_lowering_infos_[mir->meta.method_lowering_info];
+    return GetMethodLoweringInfo(mir->meta.method_lowering_info);
+  }
+
+  const MirMethodLoweringInfo& GetMethodLoweringInfo(uint32_t lowering_info) const {
+    DCHECK_LT(lowering_info, method_lowering_infos_.size());
+    return method_lowering_infos_[lowering_info];
+  }
+
+  size_t GetMethodLoweringInfoCount() const {
+    return method_lowering_infos_.size();
   }
 
   void ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput);
@@ -1073,7 +1089,9 @@
   bool EliminateNullChecksGate();
   bool EliminateNullChecks(BasicBlock* bb);
   void EliminateNullChecksEnd();
+  void InferTypesStart();
   bool InferTypes(BasicBlock* bb);
+  void InferTypesEnd();
   bool EliminateClassInitChecksGate();
   bool EliminateClassInitChecks(BasicBlock* bb);
   void EliminateClassInitChecksEnd();
@@ -1100,34 +1118,6 @@
     return temp_.gvn.sfield_ids[mir->meta.sfield_lowering_info];
   }
 
-  /*
-   * Type inference handling helpers.  Because Dalvik's bytecode is not fully typed,
-   * we have to do some work to figure out the sreg type.  For some operations it is
-   * clear based on the opcode (i.e. ADD_FLOAT v0, v1, v2), but for others (MOVE), we
-   * may never know the "real" type.
-   *
-   * We perform the type inference operation by using an iterative  walk over
-   * the graph, propagating types "defined" by typed opcodes to uses and defs in
-   * non-typed opcodes (such as MOVE).  The Setxx(index) helpers are used to set defined
-   * types on typed opcodes (such as ADD_INT).  The Setxx(index, is_xx) form is used to
-   * propagate types through non-typed opcodes such as PHI and MOVE.  The is_xx flag
-   * tells whether our guess of the type is based on a previously typed definition.
-   * If so, the defined type takes precedence.  Note that it's possible to have the same sreg
-   * show multiple defined types because dx treats constants as untyped bit patterns.
-   * The return value of the Setxx() helpers says whether or not the Setxx() action changed
-   * the current guess, and is used to know when to terminate the iterative walk.
-   */
-  bool SetFp(int index, bool is_fp);
-  bool SetFp(int index);
-  bool SetCore(int index, bool is_core);
-  bool SetCore(int index);
-  bool SetRef(int index, bool is_ref);
-  bool SetRef(int index);
-  bool SetWide(int index, bool is_wide);
-  bool SetWide(int index);
-  bool SetHigh(int index, bool is_high);
-  bool SetHigh(int index);
-
   bool PuntToInterpreter() {
     return punt_to_interpreter_;
   }
@@ -1252,7 +1242,6 @@
   static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];
 
   void HandleSSADef(int* defs, int dalvik_reg, int reg_index);
-  bool InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed);
 
  protected:
   int FindCommonParent(int block1, int block2);
@@ -1399,6 +1388,7 @@
       ArenaBitVector* work_live_vregs;
       ArenaBitVector** def_block_matrix;  // num_vregs x num_blocks_.
       ArenaBitVector** phi_node_blocks;  // num_vregs x num_blocks_.
+      TypeInference* ti;
     } ssa;
     // Global value numbering.
     struct {
@@ -1458,6 +1448,7 @@
   friend class GvnDeadCodeEliminationTest;
   friend class LocalValueNumberingTest;
   friend class TopologicalSortOrderTest;
+  friend class TypeInferenceTest;
   friend class QuickCFITest;
 };
 
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index 7230c46..3706012 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -232,6 +232,7 @@
   int stats_flags_;
 
   friend class MirOptimizationTest;
+  friend class TypeInferenceTest;
 };
 
 }  // namespace art
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 9d7b4b4..546e67a 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -25,6 +25,7 @@
 #include "gvn_dead_code_elimination.h"
 #include "local_value_numbering.h"
 #include "mir_field_info.h"
+#include "type_inference.h"
 #include "quick/dex_file_method_inliner.h"
 #include "quick/dex_file_to_method_inliner_map.h"
 #include "stack.h"
@@ -574,7 +575,6 @@
               // Copy the SSA information that is relevant.
               mir_next->ssa_rep->num_uses = mir->ssa_rep->num_uses;
               mir_next->ssa_rep->uses = mir->ssa_rep->uses;
-              mir_next->ssa_rep->fp_use = mir->ssa_rep->fp_use;
               mir_next->ssa_rep->num_defs = 0;
               mir->ssa_rep->num_uses = 0;
               mir->ssa_rep->num_defs = 0;
@@ -668,16 +668,7 @@
                 mir->ssa_rep->uses = src_ssa;
                 mir->ssa_rep->num_uses = 3;
               }
-              mir->ssa_rep->num_defs = 1;
-              mir->ssa_rep->defs = arena_->AllocArray<int32_t>(1, kArenaAllocDFInfo);
-              mir->ssa_rep->fp_def = arena_->AllocArray<bool>(1, kArenaAllocDFInfo);
-              mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
-              // Match type of uses to def.
-              mir->ssa_rep->fp_use = arena_->AllocArray<bool>(mir->ssa_rep->num_uses,
-                                                              kArenaAllocDFInfo);
-              for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
-                mir->ssa_rep->fp_use[i] = mir->ssa_rep->fp_def[0];
-              }
+              AllocateSSADefData(mir, 1);
               /*
                * There is usually a Phi node in the join block for our two cases.  If the
                * Phi node only contains our two cases as input, we will use the result
@@ -1131,23 +1122,26 @@
   }
 }
 
+void MIRGraph::InferTypesStart() {
+  DCHECK(temp_scoped_alloc_ != nullptr);
+  temp_.ssa.ti = new (temp_scoped_alloc_.get()) TypeInference(this, temp_scoped_alloc_.get());
+}
+
 /*
  * Perform type and size inference for a basic block.
  */
 bool MIRGraph::InferTypes(BasicBlock* bb) {
   if (bb->data_flow_info == nullptr) return false;
 
-  bool infer_changed = false;
-  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-    if (mir->ssa_rep == NULL) {
-        continue;
-    }
+  DCHECK(temp_.ssa.ti != nullptr);
+  return temp_.ssa.ti->Apply(bb);
+}
 
-    // Propagate type info.
-    infer_changed = InferTypeAndSize(bb, mir, infer_changed);
-  }
-
-  return infer_changed;
+void MIRGraph::InferTypesEnd() {
+  DCHECK(temp_.ssa.ti != nullptr);
+  temp_.ssa.ti->Finish();
+  delete temp_.ssa.ti;
+  temp_.ssa.ti = nullptr;
 }
 
 bool MIRGraph::EliminateClassInitChecksGate() {
diff --git a/compiler/dex/pass_driver_me_post_opt.cc b/compiler/dex/pass_driver_me_post_opt.cc
index a8b8a54..b35bc3d 100644
--- a/compiler/dex/pass_driver_me_post_opt.cc
+++ b/compiler/dex/pass_driver_me_post_opt.cc
@@ -41,7 +41,7 @@
   pass_manager->AddPass(new SSAConversion);
   pass_manager->AddPass(new PhiNodeOperands);
   pass_manager->AddPass(new PerformInitRegLocations);
-  pass_manager->AddPass(new TypeInference);
+  pass_manager->AddPass(new TypeInferencePass);
   pass_manager->AddPass(new FinishSSATransformation);
 }
 
diff --git a/compiler/dex/post_opt_passes.h b/compiler/dex/post_opt_passes.h
index 1ab8625..e9fa0eb 100644
--- a/compiler/dex/post_opt_passes.h
+++ b/compiler/dex/post_opt_passes.h
@@ -263,12 +263,19 @@
 };
 
 /**
- * @class TypeInference
+ * @class TypeInferencePass
  * @brief Type inference pass.
  */
-class TypeInference : public PassMEMirSsaRep {
+class TypeInferencePass : public PassMEMirSsaRep {
  public:
-  TypeInference() : PassMEMirSsaRep("TypeInference", kRepeatingPreOrderDFSTraversal) {
+  TypeInferencePass() : PassMEMirSsaRep("TypeInference", kRepeatingPreOrderDFSTraversal) {
+  }
+
+  void Start(PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    c_unit->mir_graph->InferTypesStart();
   }
 
   bool Worker(PassDataHolder* data) const {
@@ -280,6 +287,13 @@
     DCHECK(bb != nullptr);
     return c_unit->mir_graph->InferTypes(bb);
   }
+
+  void End(PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    c_unit->mir_graph->InferTypesEnd();
+  }
 };
 
 /**
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 818112d..ca31dbf 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -753,6 +753,7 @@
   insn->dalvikInsn.opcode = Instruction::CONST;
   insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
   insn->dalvikInsn.vB = method.d.data;
+  insn->meta.method_lowering_info = invoke->meta.method_lowering_info;  // Preserve type info.
   bb->InsertMIRAfter(move_result, insn);
   return true;
 }
@@ -791,6 +792,7 @@
   insn->dalvikInsn.opcode = opcode;
   insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
   insn->dalvikInsn.vB = arg;
+  insn->meta.method_lowering_info = invoke->meta.method_lowering_info;  // Preserve type info.
   bb->InsertMIRAfter(move_result, insn);
   return true;
 }
@@ -913,6 +915,7 @@
     }
     move->dalvikInsn.vA = move_result->dalvikInsn.vA;
     move->dalvikInsn.vB = return_reg;
+    move->meta.method_lowering_info = invoke->meta.method_lowering_info;  // Preserve type info.
     bb->InsertMIRAfter(insn, move);
   }
   return true;
diff --git a/compiler/dex/type_inference.cc b/compiler/dex/type_inference.cc
new file mode 100644
index 0000000..84cd69a
--- /dev/null
+++ b/compiler/dex/type_inference.cc
@@ -0,0 +1,1064 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "type_inference.h"
+
+#include "base/bit_vector-inl.h"
+#include "compiler_ir.h"
+#include "dataflow_iterator-inl.h"
+#include "dex_flags.h"
+#include "dex_file-inl.h"
+#include "driver/dex_compilation_unit.h"
+#include "mir_field_info.h"
+#include "mir_graph.h"
+#include "mir_method_info.h"
+
+namespace art {
+
+inline TypeInference::Type TypeInference::Type::ArrayType(uint32_t array_depth, Type nested_type) {
+  DCHECK_NE(array_depth, 0u);
+  return Type(kFlagNarrow | kFlagRef | kFlagLowWord | (array_depth << kBitArrayDepthStart) |
+              ((nested_type.raw_bits_ & kMaskWideAndType) << kArrayTypeShift));
+}
+
+inline TypeInference::Type TypeInference::Type::ArrayTypeFromComponent(Type component_type) {
+  if (component_type.ArrayDepth() == 0u) {
+    return ArrayType(1u, component_type);
+  }
+  if (UNLIKELY(component_type.ArrayDepth() == kMaxArrayDepth)) {
+    return component_type;
+  }
+  return Type(component_type.raw_bits_ + (1u << kBitArrayDepthStart));  // array_depth + 1u;
+}
+
+TypeInference::Type TypeInference::Type::ShortyType(char shorty) {
+  switch (shorty) {
+    case 'L':
+      return Type(kFlagLowWord | kFlagNarrow | kFlagRef);
+    case 'D':
+      return Type(kFlagLowWord | kFlagWide | kFlagFp);
+    case 'J':
+      return Type(kFlagLowWord | kFlagWide | kFlagCore);
+    case 'F':
+      return Type(kFlagLowWord | kFlagNarrow | kFlagFp);
+    default:
+      DCHECK(shorty == 'I' || shorty == 'S' || shorty == 'C' || shorty == 'B' || shorty == 'Z');
+      return Type(kFlagLowWord | kFlagNarrow | kFlagCore);
+  }
+}
+
+TypeInference::Type TypeInference::Type::DexType(const DexFile* dex_file, uint32_t type_idx) {
+  const char* desc = dex_file->GetTypeDescriptor(dex_file->GetTypeId(type_idx));
+  if (UNLIKELY(desc[0] == 'V')) {
+    return Unknown();
+  } else if (UNLIKELY(desc[0] == '[')) {
+    size_t array_depth = 0u;
+    while (*desc == '[') {
+      ++array_depth;
+      ++desc;
+    }
+    if (UNLIKELY(array_depth > kMaxArrayDepth)) {
+      LOG(WARNING) << "Array depth exceeds " << kMaxArrayDepth << ": " << array_depth
+          << " in dex file " << dex_file->GetLocation() << " type index " << type_idx;
+      array_depth = kMaxArrayDepth;
+    }
+    Type shorty_result = Type::ShortyType(desc[0]);
+    return ArrayType(array_depth, shorty_result);
+  } else {
+    return ShortyType(desc[0]);
+  }
+}
+
+bool TypeInference::Type::MergeArrayConflict(Type src_type) {
+  DCHECK(Ref());
+  DCHECK_NE(ArrayDepth(), src_type.ArrayDepth());
+  DCHECK_GE(std::min(ArrayDepth(), src_type.ArrayDepth()), 1u);
+  bool size_conflict =
+      (ArrayDepth() == 1u && (raw_bits_ & kFlagArrayWide) != 0u) ||
+      (src_type.ArrayDepth() == 1u && (src_type.raw_bits_ & kFlagArrayWide) != 0u);
+  // Mark all three array type bits so that merging any other type bits will not change this type.
+  return Copy(Type((raw_bits_ & kMaskNonArray) |
+                   (1u << kBitArrayDepthStart) | kFlagArrayCore | kFlagArrayRef | kFlagArrayFp |
+                   kFlagArrayNarrow | (size_conflict ? kFlagArrayWide : 0u)));
+}
+
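+// Both merges combine the non-array flags; they differ on arrays: MergeStrong()
+// adopts src_type's array type when this type has none, while MergeWeak() keeps
+// the current type in that case and ignores the array type of null sources,
+// making it suitable for merging alternatives such as Phi and if-cc inputs.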
+bool TypeInference::Type::MergeStrong(Type src_type) {
+  bool changed = MergeNonArrayFlags(src_type);
+  if (src_type.ArrayDepth() != 0u) {
+    if (ArrayDepth() == 0u) {
+      DCHECK_EQ(raw_bits_ & ~kMaskNonArray, 0u);
+      DCHECK_NE(src_type.raw_bits_ & kFlagRef, 0u);
+      raw_bits_ |= src_type.raw_bits_ & (~kMaskNonArray | kFlagRef);
+      changed = true;
+    } else if (ArrayDepth() == src_type.ArrayDepth()) {
+      changed |= MergeBits(src_type, kMaskArrayWideAndType);
+    } else if (src_type.ArrayDepth() == 1u &&
+        (((src_type.raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+         ((src_type.raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+      // Source type is [L or [? but current type is at least [[, preserve it.
+    } else if (ArrayDepth() == 1u &&
+        (((raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+         ((raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+      // Overwrite [? or [L with the source array type which is at least [[.
+      raw_bits_ = (raw_bits_ & kMaskNonArray) | (src_type.raw_bits_ & ~kMaskNonArray);
+      changed = true;
+    } else {
+      // Mark the array value type with conflict - both ref and fp.
+      changed |= MergeArrayConflict(src_type);
+    }
+  }
+  return changed;
+}
+
+bool TypeInference::Type::MergeWeak(Type src_type) {
+  bool changed = MergeNonArrayFlags(src_type);
+  if (src_type.ArrayDepth() != 0u && src_type.NonNull()) {
+    DCHECK_NE(src_type.ArrayDepth(), 0u);
+    if (ArrayDepth() == 0u) {
+      DCHECK_EQ(raw_bits_ & ~kMaskNonArray, 0u);
+      // Preserve current type.
+    } else if (ArrayDepth() == src_type.ArrayDepth()) {
+      changed |= MergeBits(src_type, kMaskArrayWideAndType);
+    } else if (src_type.ArrayDepth() == 1u &&
+        (((src_type.raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+         ((src_type.raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+      // Source type is [L or [? but current type is at least [[, preserve it.
+    } else if (ArrayDepth() == 1u &&
+        (((raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+         ((raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+      // We have [? or [L. If it's [?, upgrade to [L as the source array type is at least [[.
+      changed |= MergeBits(ObjectArrayType(), kMaskArrayWideAndType);
+    } else {
+      // Mark the array value type with conflict - both ref and fp.
+      changed |= MergeArrayConflict(src_type);
+    }
+  }
+  return changed;
+}
+
+TypeInference::CheckCastData::CheckCastData(MIRGraph* mir_graph, ScopedArenaAllocator* alloc)
+    : mir_graph_(mir_graph),
+      alloc_(alloc),
+      num_blocks_(mir_graph->GetNumBlocks()),
+      num_sregs_(mir_graph->GetNumSSARegs()),
+      check_cast_map_(std::less<MIR*>(), alloc->Adapter()),
+      split_sreg_data_(std::less<int32_t>(), alloc->Adapter()) {
+}
+
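+// A CHECK_CAST does not define a new SSA reg, so to track the narrowed type after
+// the cast we allocate an extra "modified" SSA reg per check-cast and record for
+// each basic block which modified reg is live on entry and exit; AddPseudoPhis()
+// inserts pseudo-phis where differently typed versions of the same SSA reg merge.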
+void TypeInference::CheckCastData::AddCheckCast(MIR* check_cast, Type type) {
+  DCHECK_EQ(check_cast->dalvikInsn.opcode, Instruction::CHECK_CAST);
+  type.CheckPureRef();
+  int32_t extra_s_reg = static_cast<int32_t>(num_sregs_);
+  num_sregs_ += 1;
+  check_cast_map_.Put(check_cast, CheckCastMapValue{extra_s_reg, type});  // NOLINT
+  int32_t s_reg = check_cast->ssa_rep->uses[0];
+  auto lb = split_sreg_data_.lower_bound(s_reg);
+  if (lb == split_sreg_data_.end() || split_sreg_data_.key_comp()(s_reg, lb->first)) {
+    SplitSRegData split_s_reg_data = {
+        0,
+        alloc_->AllocArray<int32_t>(num_blocks_, kArenaAllocMisc),
+        alloc_->AllocArray<int32_t>(num_blocks_, kArenaAllocMisc),
+        new (alloc_) ArenaBitVector(alloc_, num_blocks_, false)
+    };
+    std::fill_n(split_s_reg_data.starting_mod_s_reg, num_blocks_, INVALID_SREG);
+    std::fill_n(split_s_reg_data.ending_mod_s_reg, num_blocks_, INVALID_SREG);
+    split_s_reg_data.def_phi_blocks_->ClearAllBits();
+    BasicBlock* def_bb = FindDefBlock(check_cast);
+    split_s_reg_data.ending_mod_s_reg[def_bb->id] = s_reg;
+    split_s_reg_data.def_phi_blocks_->SetBit(def_bb->id);
+    lb = split_sreg_data_.PutBefore(lb, s_reg, split_s_reg_data);
+  }
+  lb->second.ending_mod_s_reg[check_cast->bb] = extra_s_reg;
+  lb->second.def_phi_blocks_->SetBit(check_cast->bb);
+}
+
+void TypeInference::CheckCastData::AddPseudoPhis() {
+  // Look for pseudo-phis where a split SSA reg merges with a differently typed version
+  // and initialize all starting_mod_s_reg.
+  DCHECK(!split_sreg_data_.empty());
+  ArenaBitVector* phi_blocks = new (alloc_) ArenaBitVector(alloc_, num_blocks_, false);
+
+  for (auto& entry : split_sreg_data_) {
+    SplitSRegData& data = entry.second;
+
+    // Find pseudo-phi nodes: compute the iterated dominance frontier of the def blocks.
+    phi_blocks->ClearAllBits();
+    ArenaBitVector* input_blocks = data.def_phi_blocks_;
+    do {
+      for (uint32_t idx : input_blocks->Indexes()) {
+        BasicBlock* def_bb = mir_graph_->GetBasicBlock(idx);
+        if (def_bb->dom_frontier != nullptr) {
+          phi_blocks->Union(def_bb->dom_frontier);
+        }
+      }
+    } while (input_blocks->Union(phi_blocks));
+
+    // Find live pseudo-phis. Make sure they're merging the same SSA reg.
+    data.def_phi_blocks_->ClearAllBits();
+    int32_t s_reg = entry.first;
+    int v_reg = mir_graph_->SRegToVReg(s_reg);
+    for (uint32_t phi_bb_id : phi_blocks->Indexes()) {
+      BasicBlock* phi_bb = mir_graph_->GetBasicBlock(phi_bb_id);
+      DCHECK(phi_bb != nullptr);
+      DCHECK(phi_bb->data_flow_info != nullptr);
+      DCHECK(phi_bb->data_flow_info->live_in_v != nullptr);
+      if (IsSRegLiveAtStart(phi_bb, v_reg, s_reg)) {
+        int32_t extra_s_reg = static_cast<int32_t>(num_sregs_);
+        num_sregs_ += 1;
+        data.starting_mod_s_reg[phi_bb_id] = extra_s_reg;
+        data.def_phi_blocks_->SetBit(phi_bb_id);
+      }
+    }
+
+    // SSA rename for s_reg.
+    TopologicalSortIterator iter(mir_graph_);
+    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+      if (bb->data_flow_info == nullptr || bb->block_type == kEntryBlock) {
+        continue;
+      }
+      BasicBlockId bb_id = bb->id;
+      if (data.def_phi_blocks_->IsBitSet(bb_id)) {
+        DCHECK_NE(data.starting_mod_s_reg[bb_id], INVALID_SREG);
+      } else {
+        DCHECK_EQ(data.starting_mod_s_reg[bb_id], INVALID_SREG);
+        if (IsSRegLiveAtStart(bb, v_reg, s_reg)) {
+          // The earliest predecessor must have been processed already.
+          BasicBlock* pred_bb = FindTopologicallyEarliestPredecessor(bb);
+          int32_t mod_s_reg = data.ending_mod_s_reg[pred_bb->id];
+          data.starting_mod_s_reg[bb_id] = (mod_s_reg != INVALID_SREG) ? mod_s_reg : s_reg;
+        } else if (data.ending_mod_s_reg[bb_id] != INVALID_SREG) {
+          // Start the original defining block with s_reg.
+          data.starting_mod_s_reg[bb_id] = s_reg;
+        }
+      }
+      if (data.ending_mod_s_reg[bb_id] == INVALID_SREG) {
+        // If the block doesn't define the modified SSA reg, it propagates the starting type.
+        data.ending_mod_s_reg[bb_id] = data.starting_mod_s_reg[bb_id];
+      }
+    }
+  }
+}
+
+void TypeInference::CheckCastData::InitializeCheckCastSRegs(Type* sregs) const {
+  for (const auto& entry : check_cast_map_) {
+    DCHECK_LT(static_cast<size_t>(entry.second.modified_s_reg), num_sregs_);
+    sregs[entry.second.modified_s_reg] = entry.second.type.AsNonNull();
+  }
+}
+
+void TypeInference::CheckCastData::MergeCheckCastConflicts(Type* sregs) const {
+  for (const auto& entry : check_cast_map_) {
+    DCHECK_LT(static_cast<size_t>(entry.second.modified_s_reg), num_sregs_);
+    sregs[entry.first->ssa_rep->uses[0]].MergeNonArrayFlags(
+        sregs[entry.second.modified_s_reg].AsNull());
+  }
+}
+
+void TypeInference::CheckCastData::MarkPseudoPhiBlocks(uint64_t* bb_df_attrs) const {
+  for (auto& entry : split_sreg_data_) {
+    for (uint32_t bb_id : entry.second.def_phi_blocks_->Indexes()) {
+      bb_df_attrs[bb_id] |= DF_NULL_TRANSFER_N;
+    }
+  }
+}
+
+void TypeInference::CheckCastData::Start(BasicBlock* bb) {
+  for (auto& entry : split_sreg_data_) {
+    entry.second.current_mod_s_reg = entry.second.starting_mod_s_reg[bb->id];
+  }
+}
+
+bool TypeInference::CheckCastData::ProcessPseudoPhis(BasicBlock* bb, Type* sregs) {
+  bool changed = false;
+  for (auto& entry : split_sreg_data_) {
+    DCHECK_EQ(entry.second.current_mod_s_reg, entry.second.starting_mod_s_reg[bb->id]);
+    if (entry.second.def_phi_blocks_->IsBitSet(bb->id)) {
+      int32_t* ending_mod_s_reg = entry.second.ending_mod_s_reg;
+      Type merged_type = sregs[entry.second.current_mod_s_reg];
+      for (BasicBlockId pred_id : bb->predecessors) {
+        DCHECK_LT(static_cast<size_t>(ending_mod_s_reg[pred_id]), num_sregs_);
+        merged_type.MergeWeak(sregs[ending_mod_s_reg[pred_id]]);
+      }
+      if (UNLIKELY(!merged_type.IsDefined())) {
+        // This can happen during an initial merge of a loop head if the original def is
+        // actually an untyped null. (All other definitions are typed using the check-cast.)
+      } else if (merged_type.Wide()) {
+        // Ignore the pseudo-phi, just remember that there's a size mismatch.
+        sregs[entry.second.current_mod_s_reg].MarkSizeConflict();
+      } else {
+        DCHECK(merged_type.Narrow() && merged_type.LowWord() && !merged_type.HighWord());
+        // Propagate both down (fully) and up (without the "non-null" flag).
+        changed |= sregs[entry.second.current_mod_s_reg].Copy(merged_type);
+        merged_type = merged_type.AsNull();
+        for (BasicBlockId pred_id : bb->predecessors) {
+          DCHECK_LT(static_cast<size_t>(ending_mod_s_reg[pred_id]), num_sregs_);
+          sregs[ending_mod_s_reg[pred_id]].MergeStrong(merged_type);
+        }
+      }
+    }
+  }
+  return changed;
+}
+
+void TypeInference::CheckCastData::ProcessCheckCast(MIR* mir) {
+  auto mir_it = check_cast_map_.find(mir);
+  DCHECK(mir_it != check_cast_map_.end());
+  auto sreg_it = split_sreg_data_.find(mir->ssa_rep->uses[0]);
+  DCHECK(sreg_it != split_sreg_data_.end());
+  sreg_it->second.current_mod_s_reg = mir_it->second.modified_s_reg;
+}
+
+TypeInference::SplitSRegData* TypeInference::CheckCastData::GetSplitSRegData(int32_t s_reg) {
+  auto it = split_sreg_data_.find(s_reg);
+  return (it == split_sreg_data_.end()) ? nullptr : &it->second;
+}
+
+BasicBlock* TypeInference::CheckCastData::FindDefBlock(MIR* check_cast) {
+  // Find the initial definition of the SSA reg used by the check-cast.
+  DCHECK_EQ(check_cast->dalvikInsn.opcode, Instruction::CHECK_CAST);
+  int32_t s_reg = check_cast->ssa_rep->uses[0];
+  if (mir_graph_->IsInVReg(s_reg)) {
+    return mir_graph_->GetEntryBlock();
+  }
+  int v_reg = mir_graph_->SRegToVReg(s_reg);
+  BasicBlock* bb = mir_graph_->GetBasicBlock(check_cast->bb);
+  DCHECK(bb != nullptr);
+  while (true) {
+    // Find the earliest predecessor in the topological sort order to ensure we don't
+    // go in a loop.
+    BasicBlock* pred_bb = FindTopologicallyEarliestPredecessor(bb);
+    DCHECK(pred_bb != nullptr);
+    DCHECK(pred_bb->data_flow_info != nullptr);
+    DCHECK(pred_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+    if (pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] != s_reg) {
+      // The s_reg was not valid at the end of pred_bb, so it must have been defined in bb.
+      return bb;
+    }
+    bb = pred_bb;
+  }
+}
+
+BasicBlock* TypeInference::CheckCastData::FindTopologicallyEarliestPredecessor(BasicBlock* bb) {
+  DCHECK(!bb->predecessors.empty());
+  const auto& indexes = mir_graph_->GetTopologicalSortOrderIndexes();
+  DCHECK_LT(bb->id, indexes.size());
+  size_t best_idx = indexes[bb->id];
+  BasicBlockId best_id = NullBasicBlockId;
+  for (BasicBlockId pred_id : bb->predecessors) {
+    DCHECK_LT(pred_id, indexes.size());
+    if (best_idx > indexes[pred_id]) {
+      best_idx = indexes[pred_id];
+      best_id = pred_id;
+    }
+  }
+  // There must be at least one predecessor earlier than the bb.
+  DCHECK_LT(best_idx, indexes[bb->id]);
+  return mir_graph_->GetBasicBlock(best_id);
+}
+
+bool TypeInference::CheckCastData::IsSRegLiveAtStart(BasicBlock* bb, int v_reg, int32_t s_reg) {
+  DCHECK_EQ(v_reg, mir_graph_->SRegToVReg(s_reg));
+  DCHECK(bb != nullptr);
+  DCHECK(bb->data_flow_info != nullptr);
+  DCHECK(bb->data_flow_info->live_in_v != nullptr);
+  if (!bb->data_flow_info->live_in_v->IsBitSet(v_reg)) {
+    return false;
+  }
+  for (BasicBlockId pred_id : bb->predecessors) {
+    BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
+    DCHECK(pred_bb != nullptr);
+    DCHECK(pred_bb->data_flow_info != nullptr);
+    DCHECK(pred_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+    if (pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] != s_reg) {
+      return false;
+    }
+  }
+  return true;
+}
+
+TypeInference::TypeInference(MIRGraph* mir_graph, ScopedArenaAllocator* alloc)
+    : mir_graph_(mir_graph),
+      cu_(mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit()),
+      check_cast_data_(!mir_graph->HasCheckCast() ? nullptr :
+          InitializeCheckCastData(mir_graph, alloc)),
+      num_sregs_(
+          check_cast_data_ != nullptr ? check_cast_data_->NumSRegs() : mir_graph->GetNumSSARegs()),
+      ifields_(mir_graph->GetIFieldLoweringInfoCount() == 0u ? nullptr :
+          PrepareIFieldTypes(cu_->dex_file, mir_graph, alloc)),
+      sfields_(mir_graph->GetSFieldLoweringInfoCount() == 0u ? nullptr :
+          PrepareSFieldTypes(cu_->dex_file, mir_graph, alloc)),
+      signatures_(mir_graph->GetMethodLoweringInfoCount() == 0u ? nullptr :
+          PrepareSignatures(cu_->dex_file, mir_graph, alloc)),
+      current_method_signature_(
+          Signature(cu_->dex_file, cu_->method_idx, (cu_->access_flags & kAccStatic) != 0, alloc)),
+      sregs_(alloc->AllocArray<Type>(num_sregs_, kArenaAllocMisc)),
+      bb_df_attrs_(alloc->AllocArray<uint64_t>(mir_graph->GetNumBlocks(), kArenaAllocDFInfo)) {
+  InitializeSRegs();
+}
+
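+// Worker for the iterative propagation phase: processes a single basic block and
+// returns true if any SSA reg type changed, so the repeating pre-order traversal
+// knows to iterate again.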
+bool TypeInference::Apply(BasicBlock* bb) {
+  bool changed = false;
+  uint64_t bb_df_attrs = bb_df_attrs_[bb->id];
+  if (bb_df_attrs != 0u) {
+    if (UNLIKELY(check_cast_data_ != nullptr)) {
+      check_cast_data_->Start(bb);
+      if (bb_df_attrs & DF_NULL_TRANSFER_N) {
+        changed |= check_cast_data_->ProcessPseudoPhis(bb, sregs_);
+      }
+    }
+    MIR* mir = bb->first_mir_insn;
+    MIR* main_mirs_end = ((bb_df_attrs & DF_SAME_TYPE_AB) != 0u) ? bb->last_mir_insn : nullptr;
+    for (; mir != main_mirs_end && static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi;
+        mir = mir->next) {
+      // Special-case handling for Phi comes first because we have 2 Phis instead of a wide one.
+      // At least one input must have been previously processed. Look for the first
+      // occurrence of a high_word or low_word flag to determine the type.
+      size_t num_uses = mir->ssa_rep->num_uses;
+      const int32_t* uses = mir->ssa_rep->uses;
+      const int32_t* defs = mir->ssa_rep->defs;
+      DCHECK_EQ(bb->predecessors.size(), num_uses);
+      Type merged_type = sregs_[defs[0]];
+      for (size_t pred_idx = 0; pred_idx != num_uses; ++pred_idx) {
+        int32_t input_mod_s_reg = PhiInputModifiedSReg(uses[pred_idx], bb, pred_idx);
+        merged_type.MergeWeak(sregs_[input_mod_s_reg]);
+      }
+      if (UNLIKELY(!merged_type.IsDefined())) {
+        // No change
+      } else if (merged_type.HighWord()) {
+        // Ignore the high word phi, just remember if there's a size mismatch.
+        if (UNLIKELY(merged_type.LowWord())) {
+          sregs_[defs[0]].MarkSizeConflict();
+        }
+      } else {
+        // Propagate both down (fully) and up (without the "non-null" flag).
+        changed |= sregs_[defs[0]].Copy(merged_type);
+        merged_type = merged_type.AsNull();
+        for (size_t pred_idx = 0; pred_idx != num_uses; ++pred_idx) {
+          int32_t input_mod_s_reg = PhiInputModifiedSReg(uses[pred_idx], bb, pred_idx);
+          changed |= UpdateSRegFromLowWordType(input_mod_s_reg, merged_type);
+        }
+      }
+    }
+
+    // Propagate types with MOVEs and AGETs, process CHECK_CASTs for modified SSA reg tracking.
+    for (; mir != main_mirs_end; mir = mir->next) {
+      uint64_t attrs = MIRGraph::GetDataFlowAttributes(mir);
+      size_t num_uses = mir->ssa_rep->num_uses;
+      const int32_t* uses = mir->ssa_rep->uses;
+      const int32_t* defs = mir->ssa_rep->defs;
+
+      // Special handling for moves. Propagate type both ways.
+      if ((attrs & DF_IS_MOVE) != 0) {
+        int32_t used_mod_s_reg = ModifiedSReg(uses[0]);
+        int32_t defd_mod_s_reg = defs[0];
+
+        // The "non-null" flag is propagated only downwards from actual definitions and it's
+        // not initially marked for moves, so used sreg must be marked before defined sreg.
+        // The only exception is an inlined move where we know the type from the original invoke.
+        DCHECK(sregs_[used_mod_s_reg].NonNull() || !sregs_[defd_mod_s_reg].NonNull() ||
+               (mir->optimization_flags & MIR_CALLEE) != 0);
+        changed |= UpdateSRegFromLowWordType(used_mod_s_reg, sregs_[defd_mod_s_reg].AsNull());
+
+        // The value is the same, so either both registers are null or no register is.
+        // In any case we can safely propagate the array type down.
+        changed |= UpdateSRegFromLowWordType(defd_mod_s_reg, sregs_[used_mod_s_reg]);
+        if (UNLIKELY((attrs & DF_REF_A) == 0 && sregs_[used_mod_s_reg].Ref())) {
+          // Mark type conflict: move instead of move-object.
+          sregs_[used_mod_s_reg].MarkTypeConflict();
+        }
+        continue;
+      }
+
+      // Handle AGET/APUT.
+      if ((attrs & DF_HAS_RANGE_CHKS) != 0) {
+        int32_t base_mod_s_reg = ModifiedSReg(uses[num_uses - 2u]);
+        int32_t mod_s_reg = (attrs & DF_DA) != 0 ? defs[0] : ModifiedSReg(uses[0]);
+        DCHECK_NE(sregs_[base_mod_s_reg].ArrayDepth(), 0u);
+        if (!sregs_[base_mod_s_reg].NonNull()) {
+          // If the base is null, don't propagate anything. All that we could determine
+          // has already been merged in the previous stage.
+        } else {
+          changed |= UpdateSRegFromLowWordType(mod_s_reg, sregs_[base_mod_s_reg].ComponentType());
+          Type array_type = Type::ArrayTypeFromComponent(sregs_[mod_s_reg]);
+          if ((attrs & DF_DA) != 0) {
+            changed |= sregs_[base_mod_s_reg].MergeStrong(array_type);
+          } else {
+            changed |= sregs_[base_mod_s_reg].MergeWeak(array_type);
+          }
+        }
+        if (UNLIKELY((attrs & DF_REF_A) == 0 && sregs_[mod_s_reg].Ref())) {
+          // Mark type conflict: aget/aput instead of aget/aput-object.
+          sregs_[mod_s_reg].MarkTypeConflict();
+        }
+        continue;
+      }
+
+      // Special-case handling for check-cast to advance modified SSA reg.
+      if (UNLIKELY((attrs & DF_CHK_CAST) != 0)) {
+        DCHECK(check_cast_data_ != nullptr);
+        check_cast_data_->ProcessCheckCast(mir);
+      }
+    }
+
+    // Propagate types for IF_cc if present.
+    if (mir != nullptr) {
+      DCHECK(mir == bb->last_mir_insn);
+      DCHECK(mir->next == nullptr);
+      DCHECK_NE(MIRGraph::GetDataFlowAttributes(mir) & DF_SAME_TYPE_AB, 0u);
+      DCHECK_EQ(mir->ssa_rep->num_uses, 2u);
+      const int32_t* uses = mir->ssa_rep->uses;
+      int32_t mod_s_reg0 = ModifiedSReg(uses[0]);
+      int32_t mod_s_reg1 = ModifiedSReg(uses[1]);
+      changed |= sregs_[mod_s_reg0].MergeWeak(sregs_[mod_s_reg1].AsNull());
+      changed |= sregs_[mod_s_reg1].MergeWeak(sregs_[mod_s_reg0].AsNull());
+    }
+  }
+  return changed;
+}
+
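+// Copies the inferred types into the MIRGraph's reg locations and resolves the two
+// conflict cases: a size conflict punts the method to the interpreter, a type
+// conflict disables register promotion.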
+void TypeInference::Finish() {
+  if (UNLIKELY(check_cast_data_ != nullptr)) {
+    check_cast_data_->MergeCheckCastConflicts(sregs_);
+  }
+
+  size_t num_sregs = mir_graph_->GetNumSSARegs();  // Without the extra SSA regs.
+  for (size_t s_reg = 0; s_reg != num_sregs; ++s_reg) {
+    if (sregs_[s_reg].SizeConflict()) {
+      /*
+       * The dex bytecode definition does not explicitly outlaw using the same virtual
+       * register in both a 32-bit and a 64-bit pair context.  However, dx
+       * does not generate this pattern (at least recently).  Further, in the next revision of
+       * dex, we will forbid this.  To support the few cases in the wild, detect this pattern
+       * and punt to the interpreter.
+       */
+      LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
+                   << " has size conflict block for sreg " << s_reg
+                   << ", punting to interpreter.";
+      mir_graph_->SetPuntToInterpreter(true);
+      return;
+    }
+  }
+
+  size_t conflict_s_reg = 0;
+  bool type_conflict = false;
+  for (size_t s_reg = 0; s_reg != num_sregs; ++s_reg) {
+    Type type = sregs_[s_reg];
+    RegLocation* loc = &mir_graph_->reg_location_[s_reg];
+    loc->wide = type.Wide();
+    loc->defined = type.IsDefined();
+    loc->fp = type.Fp();
+    loc->core = type.Core();
+    loc->ref = type.Ref();
+    loc->high_word = type.HighWord();
+    if (UNLIKELY(type.TypeConflict())) {
+      type_conflict = true;
+      conflict_s_reg = s_reg;
+    }
+  }
+
+  if (type_conflict) {
+    /*
+     * We don't normally expect to see a Dalvik register definition used both as a
+     * floating point and core value, though technically it could happen with constants.
+     * Until we have proper typing, detect this situation and disable register promotion
+     * (which relies on the distinction between core and fp usages).
+     */
+    LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
+                 << " has type conflict block for sreg " << conflict_s_reg
+                 << ", disabling register promotion.";
+    cu_->disable_opt |= (1 << kPromoteRegs);
+  }
+}
+
+TypeInference::Type TypeInference::FieldType(const DexFile* dex_file, uint32_t field_idx) {
+  uint32_t type_idx = dex_file->GetFieldId(field_idx).type_idx_;
+  Type result = Type::DexType(dex_file, type_idx);
+  return result;
+}
+
+TypeInference::Type* TypeInference::PrepareIFieldTypes(const DexFile* dex_file,
+                                                       MIRGraph* mir_graph,
+                                                       ScopedArenaAllocator* alloc) {
+  size_t count = mir_graph->GetIFieldLoweringInfoCount();
+  Type* ifields = alloc->AllocArray<Type>(count, kArenaAllocDFInfo);
+  for (uint32_t i = 0u; i != count; ++i) {
+    // NOTE: Quickened field accesses have invalid FieldIndex() but they are always resolved.
+    const MirFieldInfo& info = mir_graph->GetIFieldLoweringInfo(i);
+    const DexFile* current_dex_file = info.IsResolved() ? info.DeclaringDexFile() : dex_file;
+    uint32_t field_idx = info.IsResolved() ? info.DeclaringFieldIndex() : info.FieldIndex();
+    ifields[i] = FieldType(current_dex_file, field_idx);
+    DCHECK_EQ(info.MemAccessType() == kDexMemAccessWide, ifields[i].Wide());
+    DCHECK_EQ(info.MemAccessType() == kDexMemAccessObject, ifields[i].Ref());
+  }
+  return ifields;
+}
+
+TypeInference::Type* TypeInference::PrepareSFieldTypes(const DexFile* dex_file,
+                                                       MIRGraph* mir_graph,
+                                                       ScopedArenaAllocator* alloc) {
+  size_t count = mir_graph->GetSFieldLoweringInfoCount();
+  Type* sfields = alloc->AllocArray<Type>(count, kArenaAllocDFInfo);
+  for (uint32_t i = 0u; i != count; ++i) {
+    // FieldIndex() is always valid for static fields (no quickened instructions).
+    sfields[i] = FieldType(dex_file, mir_graph->GetSFieldLoweringInfo(i).FieldIndex());
+  }
+  return sfields;
+}
+
+TypeInference::MethodSignature TypeInference::Signature(const DexFile* dex_file,
+                                                        uint32_t method_idx,
+                                                        bool is_static,
+                                                        ScopedArenaAllocator* alloc) {
+  const DexFile::MethodId& method_id = dex_file->GetMethodId(method_idx);
+  const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
+  Type return_type = Type::DexType(dex_file, proto_id.return_type_idx_);
+  const DexFile::TypeList* type_list = dex_file->GetProtoParameters(proto_id);
+  size_t this_size = (is_static ? 0u : 1u);
+  size_t param_size = ((type_list != nullptr) ? type_list->Size() : 0u);
+  size_t size = this_size + param_size;
+  Type* param_types = (size != 0u) ? alloc->AllocArray<Type>(size, kArenaAllocDFInfo) : nullptr;
+  if (!is_static) {
+    param_types[0] = Type::DexType(dex_file, method_id.class_idx_);
+  }
+  for (size_t i = 0; i != param_size; ++i)  {
+    uint32_t type_idx = type_list->GetTypeItem(i).type_idx_;
+    param_types[this_size + i] = Type::DexType(dex_file, type_idx);
+  }
+  return MethodSignature{ return_type, size, param_types };  // NOLINT
+}
+
+TypeInference::MethodSignature* TypeInference::PrepareSignatures(const DexFile* dex_file,
+                                                                 MIRGraph* mir_graph,
+                                                                 ScopedArenaAllocator* alloc) {
+  size_t count = mir_graph->GetMethodLoweringInfoCount();
+  MethodSignature* signatures = alloc->AllocArray<MethodSignature>(count, kArenaAllocDFInfo);
+  for (uint32_t i = 0u; i != count; ++i) {
+    // NOTE: Quickened invokes have invalid MethodIndex() but they are always resolved.
+    const MirMethodInfo& info = mir_graph->GetMethodLoweringInfo(i);
+    uint32_t method_idx = info.IsResolved() ? info.DeclaringMethodIndex() : info.MethodIndex();
+    const DexFile* current_dex_file = info.IsResolved() ? info.DeclaringDexFile() : dex_file;
+    signatures[i] = Signature(current_dex_file, method_idx, info.IsStatic(), alloc);
+  }
+  return signatures;
+}
+
+TypeInference::CheckCastData* TypeInference::InitializeCheckCastData(MIRGraph* mir_graph,
+                                                                     ScopedArenaAllocator* alloc) {
+  if (!mir_graph->HasCheckCast()) {
+    return nullptr;
+  }
+
+  CheckCastData* data = nullptr;
+  const DexFile* dex_file = nullptr;
+  PreOrderDfsIterator iter(mir_graph);
+  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+      if (mir->dalvikInsn.opcode == Instruction::CHECK_CAST) {
+        if (data == nullptr) {
+          data = new (alloc) CheckCastData(mir_graph, alloc);
+          dex_file = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit()->dex_file;
+        }
+        Type type = Type::DexType(dex_file, mir->dalvikInsn.vB);
+        data->AddCheckCast(mir, type);
+      }
+    }
+  }
+  if (data != nullptr) {
+    data->AddPseudoPhis();
+  }
+  return data;
+}
+
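+// The simple phase: seed SSA reg types implied by individual insns (method
+// parameters, check-cast types, inlined const/move results, new-instance/new-array
+// and other typed insns) before the iterative propagation phase.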
+void TypeInference::InitializeSRegs() {
+  std::fill_n(sregs_, num_sregs_, Type::Unknown());
+
+  // Initialize parameter SSA regs at method entry.
+  int32_t entry_param_s_reg = mir_graph_->GetFirstInVR();
+  for (size_t i = 0, size = current_method_signature_.num_params; i != size; ++i)  {
+    Type param_type = current_method_signature_.param_types[i].AsNonNull();
+    sregs_[entry_param_s_reg] = param_type;
+    entry_param_s_reg += param_type.Wide() ? 2 : 1;
+  }
+  DCHECK_EQ(static_cast<uint32_t>(entry_param_s_reg),
+            mir_graph_->GetFirstInVR() + mir_graph_->GetNumOfInVRs());
+
+  // Initialize check-cast types.
+  if (UNLIKELY(check_cast_data_ != nullptr)) {
+    check_cast_data_->InitializeCheckCastSRegs(sregs_);
+  }
+
+  // Initialize well-known SSA register definition types. Merge inferred types
+  // upwards where a single merge is enough (INVOKE arguments and return type,
+  // RETURN type, IPUT/SPUT source type).
+  // NOTE: Using topological sort order to make sure the definition comes before
+  // any upward merging. This allows simple assignment of the defined types
+  // instead of MergeStrong().
+  TopologicalSortIterator iter(mir_graph_);
+  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+    uint64_t bb_df_attrs = 0u;
+    if (UNLIKELY(check_cast_data_ != nullptr)) {
+      check_cast_data_->Start(bb);
+    }
+    // Ignore pseudo-phis; we're not setting types for SSA regs that depend on them in this pass.
+    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+      uint64_t attrs = MIRGraph::GetDataFlowAttributes(mir);
+      bb_df_attrs |= attrs;
+
+      const uint32_t num_uses = mir->ssa_rep->num_uses;
+      const int32_t* uses = mir->ssa_rep->uses;
+      const int32_t* defs = mir->ssa_rep->defs;
+
+      uint16_t opcode = mir->dalvikInsn.opcode;
+      switch (opcode) {
+        case Instruction::CONST_4:
+        case Instruction::CONST_16:
+        case Instruction::CONST:
+        case Instruction::CONST_HIGH16:
+        case Instruction::CONST_WIDE_16:
+        case Instruction::CONST_WIDE_32:
+        case Instruction::CONST_WIDE:
+        case Instruction::CONST_WIDE_HIGH16:
+        case Instruction::MOVE:
+        case Instruction::MOVE_FROM16:
+        case Instruction::MOVE_16:
+        case Instruction::MOVE_WIDE:
+        case Instruction::MOVE_WIDE_FROM16:
+        case Instruction::MOVE_WIDE_16:
+        case Instruction::MOVE_OBJECT:
+        case Instruction::MOVE_OBJECT_FROM16:
+        case Instruction::MOVE_OBJECT_16:
+          if ((mir->optimization_flags & MIR_CALLEE) != 0) {
+            // Inlined const/move keeps method_lowering_info for type inference.
+            DCHECK_LT(mir->meta.method_lowering_info, mir_graph_->GetMethodLoweringInfoCount());
+            Type return_type = signatures_[mir->meta.method_lowering_info].return_type;
+            DCHECK(return_type.IsDefined());  // Method return type can't be void.
+            sregs_[defs[0]] = return_type.AsNonNull();
+            if (return_type.Wide()) {
+              DCHECK_EQ(defs[0] + 1, defs[1]);
+              sregs_[defs[1]] = return_type.ToHighWord();
+            }
+            break;
+          }
+          FALLTHROUGH_INTENDED;
+        case kMirOpPhi:
+          // These cannot be determined in this simple pass and will be processed later.
+          break;
+
+        case Instruction::MOVE_RESULT:
+        case Instruction::MOVE_RESULT_WIDE:
+        case Instruction::MOVE_RESULT_OBJECT:
+          // Nothing to do; handled with invoke-* or filled-new-array/-range.
+          break;
+        case Instruction::MOVE_EXCEPTION:
+          // NOTE: We can never catch an array.
+          sregs_[defs[0]] = Type::NonArrayRefType().AsNonNull();
+          break;
+        case Instruction::CONST_STRING:
+        case Instruction::CONST_STRING_JUMBO:
+        case Instruction::CONST_CLASS:
+          sregs_[defs[0]] = Type::NonArrayRefType().AsNonNull();
+          break;
+        case Instruction::CHECK_CAST:
+          DCHECK(check_cast_data_ != nullptr);
+          check_cast_data_->ProcessCheckCast(mir);
+          break;
+        case Instruction::ARRAY_LENGTH:
+          sregs_[ModifiedSReg(uses[0])].MergeStrong(Type::UnknownArrayType());
+          break;
+        case Instruction::NEW_INSTANCE:
+          sregs_[defs[0]] = Type::DexType(cu_->dex_file, mir->dalvikInsn.vB).AsNonNull();
+          DCHECK(sregs_[defs[0]].Ref());
+          DCHECK_EQ(sregs_[defs[0]].ArrayDepth(), 0u);
+          break;
+        case Instruction::NEW_ARRAY:
+          sregs_[defs[0]] = Type::DexType(cu_->dex_file, mir->dalvikInsn.vC).AsNonNull();
+          DCHECK(sregs_[defs[0]].Ref());
+          DCHECK_NE(sregs_[defs[0]].ArrayDepth(), 0u);
+          break;
+        case Instruction::FILLED_NEW_ARRAY:
+        case Instruction::FILLED_NEW_ARRAY_RANGE: {
+          Type array_type = Type::DexType(cu_->dex_file, mir->dalvikInsn.vB);
+          array_type.CheckPureRef();  // Previously checked by the method verifier.
+          DCHECK_NE(array_type.ArrayDepth(), 0u);
+          Type component_type = array_type.ComponentType();
+          DCHECK(!component_type.Wide());
+          MIR* move_result_mir = mir_graph_->FindMoveResult(bb, mir);
+          if (move_result_mir != nullptr) {
+            DCHECK_EQ(move_result_mir->dalvikInsn.opcode, Instruction::MOVE_RESULT_OBJECT);
+            sregs_[move_result_mir->ssa_rep->defs[0]] = array_type.AsNonNull();
+          }
+          DCHECK_EQ(num_uses, mir->dalvikInsn.vA);
+          for (size_t next = 0u; next != num_uses; ++next) {
+            int32_t input_mod_s_reg = ModifiedSReg(uses[next]);
+            sregs_[input_mod_s_reg].MergeStrong(component_type);
+          }
+          break;
+        }
+        case Instruction::INVOKE_VIRTUAL:
+        case Instruction::INVOKE_SUPER:
+        case Instruction::INVOKE_DIRECT:
+        case Instruction::INVOKE_STATIC:
+        case Instruction::INVOKE_INTERFACE:
+        case Instruction::INVOKE_VIRTUAL_RANGE:
+        case Instruction::INVOKE_SUPER_RANGE:
+        case Instruction::INVOKE_DIRECT_RANGE:
+        case Instruction::INVOKE_STATIC_RANGE:
+        case Instruction::INVOKE_INTERFACE_RANGE:
+        case Instruction::INVOKE_VIRTUAL_QUICK:
+        case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
+          const MethodSignature* signature = &signatures_[mir->meta.method_lowering_info];
+          MIR* move_result_mir = mir_graph_->FindMoveResult(bb, mir);
+          if (move_result_mir != nullptr) {
+            Type return_type = signature->return_type;
+            sregs_[move_result_mir->ssa_rep->defs[0]] = return_type.AsNonNull();
+            if (return_type.Wide()) {
+              DCHECK_EQ(move_result_mir->ssa_rep->defs[0] + 1, move_result_mir->ssa_rep->defs[1]);
+              sregs_[move_result_mir->ssa_rep->defs[1]] = return_type.ToHighWord();
+            }
+          }
+          size_t next = 0u;
+          for (size_t i = 0, size = signature->num_params; i != size; ++i) {
+            Type param_type = signature->param_types[i];
+            int32_t param_s_reg = ModifiedSReg(uses[next]);
+            DCHECK(!param_type.Wide() || uses[next] + 1 == uses[next + 1]);
+            UpdateSRegFromLowWordType(param_s_reg, param_type);
+            next += param_type.Wide() ? 2 : 1;
+          }
+          DCHECK_EQ(next, num_uses);
+          DCHECK_EQ(next, mir->dalvikInsn.vA);
+          break;
+        }
+
+        case Instruction::RETURN_WIDE:
+          DCHECK(current_method_signature_.return_type.Wide());
+          DCHECK_EQ(uses[0] + 1, uses[1]);
+          DCHECK_EQ(ModifiedSReg(uses[0]), uses[0]);
+          FALLTHROUGH_INTENDED;
+        case Instruction::RETURN:
+        case Instruction::RETURN_OBJECT: {
+          int32_t mod_s_reg = ModifiedSReg(uses[0]);
+          UpdateSRegFromLowWordType(mod_s_reg, current_method_signature_.return_type);
+          break;
+        }
+
+        // NOTE: For AGET/APUT we set only the array type. The operand type is set
+        // below based on the data flow attributes.
+        case Instruction::AGET:
+        case Instruction::APUT:
+          sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::NarrowArrayType());
+          break;
+        case Instruction::AGET_WIDE:
+        case Instruction::APUT_WIDE:
+          sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::WideArrayType());
+          break;
+        case Instruction::AGET_OBJECT:
+          sregs_[defs[0]] = sregs_[defs[0]].AsNonNull();
+          FALLTHROUGH_INTENDED;
+        case Instruction::APUT_OBJECT:
+          sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::ObjectArrayType());
+          break;
+        case Instruction::AGET_BOOLEAN:
+        case Instruction::APUT_BOOLEAN:
+        case Instruction::AGET_BYTE:
+        case Instruction::APUT_BYTE:
+        case Instruction::AGET_CHAR:
+        case Instruction::APUT_CHAR:
+        case Instruction::AGET_SHORT:
+        case Instruction::APUT_SHORT:
+          sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::NarrowCoreArrayType());
+          break;
+
+        case Instruction::IGET_WIDE:
+        case Instruction::IGET_WIDE_QUICK:
+          DCHECK_EQ(defs[0] + 1, defs[1]);
+          DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
+          sregs_[defs[1]] = ifields_[mir->meta.ifield_lowering_info].ToHighWord();
+          FALLTHROUGH_INTENDED;
+        case Instruction::IGET:
+        case Instruction::IGET_OBJECT:
+        case Instruction::IGET_BOOLEAN:
+        case Instruction::IGET_BYTE:
+        case Instruction::IGET_CHAR:
+        case Instruction::IGET_SHORT:
+        case Instruction::IGET_QUICK:
+        case Instruction::IGET_OBJECT_QUICK:
+        case Instruction::IGET_BOOLEAN_QUICK:
+        case Instruction::IGET_BYTE_QUICK:
+        case Instruction::IGET_CHAR_QUICK:
+        case Instruction::IGET_SHORT_QUICK:
+          DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
+          sregs_[defs[0]] = ifields_[mir->meta.ifield_lowering_info].AsNonNull();
+          break;
+        case Instruction::IPUT_WIDE:
+        case Instruction::IPUT_WIDE_QUICK:
+          DCHECK_EQ(uses[0] + 1, uses[1]);
+          FALLTHROUGH_INTENDED;
+        case Instruction::IPUT:
+        case Instruction::IPUT_OBJECT:
+        case Instruction::IPUT_BOOLEAN:
+        case Instruction::IPUT_BYTE:
+        case Instruction::IPUT_CHAR:
+        case Instruction::IPUT_SHORT:
+        case Instruction::IPUT_QUICK:
+        case Instruction::IPUT_OBJECT_QUICK:
+        case Instruction::IPUT_BOOLEAN_QUICK:
+        case Instruction::IPUT_BYTE_QUICK:
+        case Instruction::IPUT_CHAR_QUICK:
+        case Instruction::IPUT_SHORT_QUICK:
+          DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
+          UpdateSRegFromLowWordType(ModifiedSReg(uses[0]),
+                                    ifields_[mir->meta.ifield_lowering_info]);
+          break;
+        case Instruction::SGET_WIDE:
+          DCHECK_EQ(defs[0] + 1, defs[1]);
+          DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
+          sregs_[defs[1]] = sfields_[mir->meta.sfield_lowering_info].ToHighWord();
+          FALLTHROUGH_INTENDED;
+        case Instruction::SGET:
+        case Instruction::SGET_OBJECT:
+        case Instruction::SGET_BOOLEAN:
+        case Instruction::SGET_BYTE:
+        case Instruction::SGET_CHAR:
+        case Instruction::SGET_SHORT:
+          DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
+          sregs_[defs[0]] = sfields_[mir->meta.sfield_lowering_info].AsNonNull();
+          break;
+        case Instruction::SPUT_WIDE:
+          DCHECK_EQ(uses[0] + 1, uses[1]);
+          FALLTHROUGH_INTENDED;
+        case Instruction::SPUT:
+        case Instruction::SPUT_OBJECT:
+        case Instruction::SPUT_BOOLEAN:
+        case Instruction::SPUT_BYTE:
+        case Instruction::SPUT_CHAR:
+        case Instruction::SPUT_SHORT:
+          DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
+          UpdateSRegFromLowWordType(ModifiedSReg(uses[0]),
+                                    sfields_[mir->meta.sfield_lowering_info]);
+          break;
+
+        default:
+          // No invokes or reference definitions here.
+          DCHECK_EQ(attrs & (DF_FORMAT_35C | DF_FORMAT_3RC), 0u);
+          DCHECK_NE(attrs & (DF_DA | DF_REF_A), (DF_DA | DF_REF_A));
+          break;
+      }
+
+      if ((attrs & DF_NULL_TRANSFER_N) != 0) {
+        // Don't process Phis at this stage.
+        continue;
+      }
+
+      // Handle defs
+      if (attrs & DF_DA) {
+        int32_t s_reg = defs[0];
+        sregs_[s_reg].SetLowWord();
+        if (attrs & DF_FP_A) {
+          sregs_[s_reg].SetFp();
+        }
+        if (attrs & DF_CORE_A) {
+          sregs_[s_reg].SetCore();
+        }
+        if (attrs & DF_REF_A) {
+          sregs_[s_reg].SetRef();
+        }
+        if (attrs & DF_A_WIDE) {
+          sregs_[s_reg].SetWide();
+          DCHECK_EQ(s_reg + 1, ModifiedSReg(defs[1]));
+          sregs_[s_reg + 1].MergeHighWord(sregs_[s_reg]);
+        } else {
+          sregs_[s_reg].SetNarrow();
+        }
+      }
+
+      // Handle uses
+      size_t next = 0;
+  #define PROCESS(REG)                                                        \
+      if (attrs & DF_U##REG) {                                                \
+        int32_t mod_s_reg = ModifiedSReg(uses[next]);                         \
+        sregs_[mod_s_reg].SetLowWord();                                       \
+        if (attrs & DF_FP_##REG) {                                            \
+          sregs_[mod_s_reg].SetFp();                                          \
+        }                                                                     \
+        if (attrs & DF_CORE_##REG) {                                          \
+          sregs_[mod_s_reg].SetCore();                                        \
+        }                                                                     \
+        if (attrs & DF_REF_##REG) {                                           \
+          sregs_[mod_s_reg].SetRef();                                         \
+        }                                                                     \
+        if (attrs & DF_##REG##_WIDE) {                                        \
+          sregs_[mod_s_reg].SetWide();                                        \
+          DCHECK_EQ(mod_s_reg + 1, ModifiedSReg(uses[next + 1]));             \
+          sregs_[mod_s_reg + 1].SetWide();                                    \
+          sregs_[mod_s_reg + 1].MergeHighWord(sregs_[mod_s_reg]);             \
+          next += 2;                                                          \
+        } else {                                                              \
+          sregs_[mod_s_reg].SetNarrow();                                      \
+          next++;                                                             \
+        }                                                                     \
+      }
+      PROCESS(A)
+      PROCESS(B)
+      PROCESS(C)
+  #undef PROCESS
+      DCHECK(next == mir->ssa_rep->num_uses || (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC)) != 0);
+    }
+    // Record relevant attributes.
+    bb_df_attrs_[bb->id] = bb_df_attrs &
+        (DF_NULL_TRANSFER_N | DF_CHK_CAST | DF_IS_MOVE | DF_HAS_RANGE_CHKS | DF_SAME_TYPE_AB);
+  }
+
+  if (UNLIKELY(check_cast_data_ != nullptr)) {
+    check_cast_data_->MarkPseudoPhiBlocks(bb_df_attrs_);
+  }
+}
+
+int32_t TypeInference::ModifiedSReg(int32_t s_reg) {
+  if (UNLIKELY(check_cast_data_ != nullptr)) {
+    SplitSRegData* split_data = check_cast_data_->GetSplitSRegData(s_reg);
+    if (UNLIKELY(split_data != nullptr)) {
+      DCHECK_NE(split_data->current_mod_s_reg, INVALID_SREG);
+      return split_data->current_mod_s_reg;
+    }
+  }
+  return s_reg;
+}
+
+int32_t TypeInference::PhiInputModifiedSReg(int32_t s_reg, BasicBlock* bb, size_t pred_idx) {
+  DCHECK_LT(pred_idx, bb->predecessors.size());
+  if (UNLIKELY(check_cast_data_ != nullptr)) {
+    SplitSRegData* split_data = check_cast_data_->GetSplitSRegData(s_reg);
+    if (UNLIKELY(split_data != nullptr)) {
+      return split_data->ending_mod_s_reg[bb->predecessors[pred_idx]];
+    }
+  }
+  return s_reg;
+}
+
+bool TypeInference::UpdateSRegFromLowWordType(int32_t mod_s_reg, Type low_word_type) {
+  DCHECK(low_word_type.LowWord());
+  bool changed = sregs_[mod_s_reg].MergeStrong(low_word_type);
+  if (!sregs_[mod_s_reg].Narrow()) {  // Wide with no narrow conflict.
+    DCHECK(!low_word_type.Narrow());
+    DCHECK_LT(mod_s_reg, mir_graph_->GetNumSSARegs());  // Original SSA reg.
+    changed |= sregs_[mod_s_reg + 1].MergeHighWord(sregs_[mod_s_reg]);
+  }
+  return changed;
+}
+
+}  // namespace art
diff --git a/compiler/dex/type_inference.h b/compiler/dex/type_inference.h
new file mode 100644
index 0000000..c9b29bf
--- /dev/null
+++ b/compiler/dex/type_inference.h
@@ -0,0 +1,443 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_TYPE_INFERENCE_H_
+#define ART_COMPILER_DEX_TYPE_INFERENCE_H_
+
+#include "base/logging.h"
+#include "base/arena_object.h"
+#include "base/scoped_arena_containers.h"
+
+namespace art {
+
+class ArenaBitVector;
+class BasicBlock;
+struct CompilationUnit;
+class DexFile;
+class MirFieldInfo;
+class MirMethodInfo;
+class MIR;
+class MIRGraph;
+
+/**
+ * @brief Determine the type of SSA registers.
+ *
+ * @details
+ * Because Dalvik's bytecode is not fully typed, we have to do some work to figure
+ * out the sreg type. For some operations it is clear from the opcode (e.g.
+ * ADD_FLOAT v0, v1, v2), but for others (MOVE), we may never know the "real" type.
+ *
+ * We perform the type inference operation in two phases:
+ *   1. First, we make one pass over all insns in the topological sort order and
+ *      extract known type information from all insns for their defs and uses.
+ *   2. Then we repeatedly go through the graph to process insns that can propagate
+ *      types from inputs to outputs and vice versa. These insns are just the MOVEs,
+ *      AGET/APUTs, IF_ccs and Phis (including pseudo-Phis, see below).
+ *
+ * Since the main purpose is to determine the basic FP/core/reference type, we don't
+ * need to record the precise reference type; we only record the array type, which is
+ * needed to determine the result types of agets and the source types of aputs.
+ *
+ * One complication is the check-cast instruction, which effectively defines a new
+ * virtual register with a different type than the original sreg. We need to track
+ * these virtual sregs and insert pseudo-phis where they merge.
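+ *
+ * For illustration, a hypothetical sketch (virtual registers made up):
+ *
+ *     check-cast v0, [I   // After this insn, uses of v0 are known to be int[],
+ *                         // while a path that bypasses the cast still sees the
+ *                         // original type, so the join block needs a pseudo-phi.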
+ *
+ * Another problem is null references. The same zero constant can be used as a null
+ * of different reference types and moved around with move-object, which would
+ * normally be an ill-formed assignment. So we need to keep track of values that can
+ * be null and values that cannot.
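+ *
+ * For example (a hedged sketch; registers and the field are made up):
+ *
+ *     const/4 v0, 0x0          // v0 may be the int 0 or any typed null ref.
+ *     move-object v1, v0       // Legal only because v0 is tracked as null.
+ *     sput-object v0, LC;->s:Ljava/lang/String;  // v0 used as a null String.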
+ *
+ * Note that it's possible to have the same sreg show multiple defined types because dx
+ * treats constants as untyped bit patterns. We disable register promotion in that case.
+ */
+class TypeInference : public DeletableArenaObject<kArenaAllocMisc> {
+ public:
+  TypeInference(MIRGraph* mir_graph, ScopedArenaAllocator* alloc);
+
+  bool Apply(BasicBlock* bb);
+  void Finish();
+
+ private:
+  struct Type {
+    static Type Unknown() {
+      return Type(0u);
+    }
+
+    static Type NonArrayRefType() {
+      return Type(kFlagLowWord | kFlagNarrow | kFlagRef);
+    }
+
+    static Type ObjectArrayType() {
+      return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+                  (1u << kBitArrayDepthStart) | kFlagArrayNarrow | kFlagArrayRef);
+    }
+
+    static Type WideArrayType() {
+      // Core or FP unknown.
+      return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+                  (1u << kBitArrayDepthStart) | kFlagArrayWide);
+    }
+
+    static Type NarrowArrayType() {
+      // Core or FP unknown.
+      return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+                  (1u << kBitArrayDepthStart) | kFlagArrayNarrow);
+    }
+
+    static Type NarrowCoreArrayType() {
+      return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+                  (1u << kBitArrayDepthStart) | kFlagArrayNarrow | kFlagArrayCore);
+    }
+
+    static Type UnknownArrayType() {
+      return Type(kFlagNarrow | kFlagRef | kFlagLowWord | (1u << kBitArrayDepthStart));
+    }
+
+    static Type ArrayType(uint32_t array_depth, Type nested_type);
+    static Type ArrayTypeFromComponent(Type component_type);
+    static Type ShortyType(char shorty);
+    static Type DexType(const DexFile* dex_file, uint32_t type_idx);
+
+    bool IsDefined() const {
+      return raw_bits_ != 0u;
+    }
+
+    bool SizeConflict() const {
+      // NOTE: Ignore array element conflicts that don't propagate to direct conflicts.
+      return (Wide() && Narrow()) || (HighWord() && LowWord());
+    }
+
+    bool TypeConflict() const {
+      // NOTE: Ignore array element conflicts that don't propagate to direct conflicts.
+      return (raw_bits_ & kMaskType) != 0u && !IsPowerOfTwo(raw_bits_ & kMaskType);  // 2+ bits.
+    }
+
+    void MarkSizeConflict() {
+      SetBits(kFlagLowWord | kFlagHighWord);
+    }
+
+    void MarkTypeConflict() {
+      // Mark all three type bits so that merging any other type bits will not change this type.
+      SetBits(kFlagFp | kFlagCore | kFlagRef);
+    }
+
+    void CheckPureRef() const {
+      DCHECK_EQ(raw_bits_ & (kMaskWideAndType | kMaskWord), kFlagNarrow | kFlagRef | kFlagLowWord);
+    }
+
+    // If reference, don't treat as possible null and require precise type.
+    //
+    // References without this flag are allowed to have a type conflict and their
+    // type will not be propagated down. However, for simplicity we allow propagation
+    // of other flags up as it will affect only other null references; should those
+    // references be marked non-null later, we would have to do it anyway.
+    // NOTE: This is a negative "non-null" flag rather than a positive "is-null"
+    // to simplify merging together with other non-array flags.
+    bool NonNull() const {
+      return IsBitSet(kFlagNonNull);
+    }
+
+    bool Wide() const {
+      return IsBitSet(kFlagWide);
+    }
+
+    bool Narrow() const {
+      return IsBitSet(kFlagNarrow);
+    }
+
+    bool Fp() const {
+      return IsBitSet(kFlagFp);
+    }
+
+    bool Core() const {
+      return IsBitSet(kFlagCore);
+    }
+
+    bool Ref() const {
+      return IsBitSet(kFlagRef);
+    }
+
+    bool LowWord() const {
+      return IsBitSet(kFlagLowWord);
+    }
+
+    bool HighWord() const {
+      return IsBitSet(kFlagHighWord);
+    }
+
+    uint32_t ArrayDepth() const {
+      return raw_bits_ >> kBitArrayDepthStart;
+    }
+
+    Type NestedType() const {
+      DCHECK_NE(ArrayDepth(), 0u);
+      return Type(kFlagLowWord | ((raw_bits_ & kMaskArrayWideAndType) >> kArrayTypeShift));
+    }
+
+    Type ComponentType() const {
+      DCHECK_NE(ArrayDepth(), 0u);
+      Type temp(raw_bits_ - (1u << kBitArrayDepthStart));  // array_depth - 1u;
+      return (temp.ArrayDepth() != 0u) ? temp.AsNull() : NestedType();
+    }
+
+    void SetWide() {
+      SetBits(kFlagWide);
+    }
+
+    void SetNarrow() {
+      SetBits(kFlagNarrow);
+    }
+
+    void SetFp() {
+      SetBits(kFlagFp);
+    }
+
+    void SetCore() {
+      SetBits(kFlagCore);
+    }
+
+    void SetRef() {
+      SetBits(kFlagRef);
+    }
+
+    void SetLowWord() {
+      SetBits(kFlagLowWord);
+    }
+
+    void SetHighWord() {
+      SetBits(kFlagHighWord);
+    }
+
+    Type ToHighWord() const {
+      DCHECK_EQ(raw_bits_ & (kMaskWide | kMaskWord), kFlagWide | kFlagLowWord);
+      return Type(raw_bits_ ^ (kFlagLowWord | kFlagHighWord));
+    }
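+    // (Sketch: for a wide value held in sregs N and N+1, sregs_[N + 1] is
+    // expected to equal sregs_[N].ToHighWord() once types are propagated.)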
+
+    bool MergeHighWord(Type low_word_type) {
+      // NOTE: low_word_type may be also Narrow() or HighWord().
+      DCHECK(low_word_type.Wide() && low_word_type.LowWord());
+      return MergeBits(Type(low_word_type.raw_bits_ | kFlagHighWord),
+                       kMaskWideAndType | kFlagHighWord);
+    }
+
+    bool Copy(Type type) {
+      if (raw_bits_ != type.raw_bits_) {
+        raw_bits_ = type.raw_bits_;
+        return true;
+      }
+      return false;
+    }
+
+    // Merge non-array flags.
+    bool MergeNonArrayFlags(Type src_type) {
+      return MergeBits(src_type, kMaskNonArray);
+    }
+
+    // Merge array flags for conflict.
+    bool MergeArrayConflict(Type src_type);
+
+    // Merge all flags (strong merge).
+    bool MergeStrong(Type src_type);
+
+    // Merge all flags (weak merge).
+    bool MergeWeak(Type src_type);
+
+    // Get the same type but mark that it should not be treated as null.
+    Type AsNonNull() const {
+      return Type(raw_bits_ | kFlagNonNull);
+    }
+
+    // Get the same type but mark that it can be treated as null.
+    Type AsNull() const {
+      return Type(raw_bits_ & ~kFlagNonNull);
+    }
+
+   private:
+    enum FlagBits {
+      kBitNonNull = 0,
+      kBitWide,
+      kBitNarrow,
+      kBitFp,
+      kBitCore,
+      kBitRef,
+      kBitLowWord,
+      kBitHighWord,
+      kBitArrayWide,
+      kBitArrayNarrow,
+      kBitArrayFp,
+      kBitArrayCore,
+      kBitArrayRef,
+      kBitArrayDepthStart,
+    };
+    static constexpr size_t kArrayDepthBits = sizeof(uint32_t) * 8u - kBitArrayDepthStart;
+
+    static constexpr uint32_t kFlagNonNull = 1u << kBitNonNull;
+    static constexpr uint32_t kFlagWide = 1u << kBitWide;
+    static constexpr uint32_t kFlagNarrow = 1u << kBitNarrow;
+    static constexpr uint32_t kFlagFp = 1u << kBitFp;
+    static constexpr uint32_t kFlagCore = 1u << kBitCore;
+    static constexpr uint32_t kFlagRef = 1u << kBitRef;
+    static constexpr uint32_t kFlagLowWord = 1u << kBitLowWord;
+    static constexpr uint32_t kFlagHighWord = 1u << kBitHighWord;
+    static constexpr uint32_t kFlagArrayWide = 1u << kBitArrayWide;
+    static constexpr uint32_t kFlagArrayNarrow = 1u << kBitArrayNarrow;
+    static constexpr uint32_t kFlagArrayFp = 1u << kBitArrayFp;
+    static constexpr uint32_t kFlagArrayCore = 1u << kBitArrayCore;
+    static constexpr uint32_t kFlagArrayRef = 1u << kBitArrayRef;
+
+    static constexpr uint32_t kMaskWide = kFlagWide | kFlagNarrow;
+    static constexpr uint32_t kMaskType = kFlagFp | kFlagCore | kFlagRef;
+    static constexpr uint32_t kMaskWord = kFlagLowWord | kFlagHighWord;
+    static constexpr uint32_t kMaskArrayWide = kFlagArrayWide | kFlagArrayNarrow;
+    static constexpr uint32_t kMaskArrayType = kFlagArrayFp | kFlagArrayCore | kFlagArrayRef;
+    static constexpr uint32_t kMaskWideAndType = kMaskWide | kMaskType;
+    static constexpr uint32_t kMaskArrayWideAndType = kMaskArrayWide | kMaskArrayType;
+
+    static constexpr size_t kArrayTypeShift = kBitArrayWide - kBitWide;
+    static_assert(kArrayTypeShift == kBitArrayNarrow - kBitNarrow, "shift mismatch");
+    static_assert(kArrayTypeShift == kBitArrayFp - kBitFp, "shift mismatch");
+    static_assert(kArrayTypeShift == kBitArrayCore - kBitCore, "shift mismatch");
+    static_assert(kArrayTypeShift == kBitArrayRef - kBitRef, "shift mismatch");
+    static_assert((kMaskWide << kArrayTypeShift) == kMaskArrayWide, "shift mismatch");
+    static_assert((kMaskType << kArrayTypeShift) == kMaskArrayType, "shift mismatch");
+    static_assert((kMaskWideAndType << kArrayTypeShift) == kMaskArrayWideAndType, "shift mismatch");
+
+    static constexpr uint32_t kMaskArrayDepth = static_cast<uint32_t>(-1) << kBitArrayDepthStart;
+    static constexpr uint32_t kMaskNonArray = ~(kMaskArrayWideAndType | kMaskArrayDepth);
+
+    // The maximum representable array depth. If we exceed the maximum (which can happen
+    // only with an absurdly deep array type in a dex file, one that would presumably
+    // cause OOM while being resolved), we may report false conflicts.
+    static constexpr uint32_t kMaxArrayDepth = static_cast<uint32_t>(-1) >> kBitArrayDepthStart;
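+
+    // Encoding example (a sketch derived from the flags above): the descriptor
+    // "[[F" is a narrow reference of array depth 2 whose innermost element is a
+    // narrow float, i.e. roughly kFlagNarrow | kFlagRef | kFlagLowWord |
+    // (2u << kBitArrayDepthStart) | kFlagArrayFp | kFlagArrayNarrow.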
+
+    explicit Type(uint32_t raw_bits) : raw_bits_(raw_bits) { }
+
+    bool IsBitSet(uint32_t flag) const {
+      return (raw_bits_ & flag) != 0u;
+    }
+
+    void SetBits(uint32_t flags) {
+      raw_bits_ |= flags;
+    }
+
+    bool MergeBits(Type src_type, uint32_t mask) {
+      uint32_t new_bits = raw_bits_ | (src_type.raw_bits_ & mask);
+      if (new_bits != raw_bits_) {
+        raw_bits_ = new_bits;
+        return true;
+      }
+      return false;
+    }
+
+    uint32_t raw_bits_;
+  };
+
+  struct MethodSignature {
+    Type return_type;
+    size_t num_params;
+    Type* param_types;
+  };
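+
+  // Example (a sketch; the signature is hypothetical): for a virtual method
+  // "(JLjava/lang/Object;)D", return_type is wide fp, num_params == 3, and
+  // param_types holds { this (ref), long (wide core), Object (ref) }.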
+
+  struct SplitSRegData {
+    int32_t current_mod_s_reg;
+    int32_t* starting_mod_s_reg;        // Indexed by BasicBlock::id.
+    int32_t* ending_mod_s_reg;          // Indexed by BasicBlock::id.
+
+    // NOTE: Before AddPseudoPhis(), def_phi_blocks_ marks the blocks
+    // with check-casts and the block with the original SSA reg.
+    // After AddPseudoPhis(), it marks blocks with pseudo-phis.
+    ArenaBitVector* def_phi_blocks_;    // Indexed by BasicBlock::id.
+  };
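+
+  // Hedged example (block ids and sregs made up): if block 4 runs
+  // "check-cast v0" splitting sreg 5 into modified sreg 17, then
+  // ending_mod_s_reg[4] == 17 while a cast-free predecessor keeps 5, and the
+  // block merging the two paths gets a pseudo-phi defining a fresh mod sreg.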
+
+  class CheckCastData : public DeletableArenaObject<kArenaAllocMisc> {
+   public:
+    CheckCastData(MIRGraph* mir_graph, ScopedArenaAllocator* alloc);
+
+    size_t NumSRegs() const {
+      return num_sregs_;
+    }
+
+    void AddCheckCast(MIR* check_cast, Type type);
+    void AddPseudoPhis();
+    void InitializeCheckCastSRegs(Type* sregs) const;
+    void MergeCheckCastConflicts(Type* sregs) const;
+    void MarkPseudoPhiBlocks(uint64_t* bb_df_attrs) const;
+
+    void Start(BasicBlock* bb);
+    bool ProcessPseudoPhis(BasicBlock* bb, Type* sregs);
+    void ProcessCheckCast(MIR* mir);
+
+    SplitSRegData* GetSplitSRegData(int32_t s_reg);
+
+   private:
+    BasicBlock* FindDefBlock(MIR* check_cast);
+    BasicBlock* FindTopologicallyEarliestPredecessor(BasicBlock* bb);
+    bool IsSRegLiveAtStart(BasicBlock* bb, int v_reg, int32_t s_reg);
+
+    MIRGraph* const mir_graph_;
+    ScopedArenaAllocator* const alloc_;
+    const size_t num_blocks_;
+    size_t num_sregs_;
+
+    // Map check-cast mir to special sreg and type.
+    struct CheckCastMapValue {
+      int32_t modified_s_reg;
+      Type type;
+    };
+    ScopedArenaSafeMap<MIR*, CheckCastMapValue> check_cast_map_;
+    ScopedArenaSafeMap<int32_t, SplitSRegData> split_sreg_data_;
+  };
+
+  static Type FieldType(const DexFile* dex_file, uint32_t field_idx);
+  static Type* PrepareIFieldTypes(const DexFile* dex_file, MIRGraph* mir_graph,
+                                  ScopedArenaAllocator* alloc);
+  static Type* PrepareSFieldTypes(const DexFile* dex_file, MIRGraph* mir_graph,
+                                  ScopedArenaAllocator* alloc);
+  static MethodSignature Signature(const DexFile* dex_file, uint32_t method_idx, bool is_static,
+                                   ScopedArenaAllocator* alloc);
+  static MethodSignature* PrepareSignatures(const DexFile* dex_file, MIRGraph* mir_graph,
+                                            ScopedArenaAllocator* alloc);
+  static CheckCastData* InitializeCheckCastData(MIRGraph* mir_graph, ScopedArenaAllocator* alloc);
+
+  void InitializeSRegs();
+
+  int32_t ModifiedSReg(int32_t s_reg);
+  int32_t PhiInputModifiedSReg(int32_t s_reg, BasicBlock* bb, size_t pred_idx);
+
+  bool UpdateSRegFromLowWordType(int32_t mod_s_reg, Type low_word_type);
+
+  MIRGraph* const mir_graph_;
+  CompilationUnit* const cu_;
+
+  // The type inference propagates types also backwards but this must not happen across
+  // check-cast. So we need to effectively split an SSA reg into two at check-cast and
+  // keep track of the types separately.
+  std::unique_ptr<CheckCastData> check_cast_data_;
+
+  size_t num_sregs_;      // Number of SSA regs or modified SSA regs, see check-cast.
+  const Type* const ifields_;                 // Indexed by MIR::meta::ifield_lowering_info.
+  const Type* const sfields_;                 // Indexed by MIR::meta::sfield_lowering_info.
+  const MethodSignature* const signatures_;   // Indexed by MIR::meta::method_lowering_info.
+  const MethodSignature current_method_signature_;
+  Type* const sregs_;     // Indexed by SSA reg or modified SSA reg, see check-cast.
+  uint64_t* const bb_df_attrs_;               // Indexed by BasicBlock::id.
+
+  friend class TypeInferenceTest;
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_TYPE_INFERENCE_H_
diff --git a/compiler/dex/type_inference_test.cc b/compiler/dex/type_inference_test.cc
new file mode 100644
index 0000000..54b5747f
--- /dev/null
+++ b/compiler/dex/type_inference_test.cc
@@ -0,0 +1,2042 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/logging.h"
+#include "compiler_ir.h"
+#include "dataflow_iterator-inl.h"
+#include "dex_flags.h"
+#include "dex/mir_field_info.h"
+#include "dex/mir_graph.h"
+#include "driver/dex_compilation_unit.h"
+#include "gtest/gtest.h"
+#include "type_inference.h"
+#include "utils/test_dex_file_builder.h"
+
+namespace art {
+
+class TypeInferenceTest : public testing::Test {
+ protected:
+  struct TypeDef {
+    const char* descriptor;
+  };
+
+  struct FieldDef {
+    const char* class_descriptor;
+    const char* type;
+    const char* name;
+  };
+
+  struct MethodDef {
+    const char* class_descriptor;
+    const char* signature;
+    const char* name;
+    InvokeType type;
+  };
+
+  struct BBDef {
+    static constexpr size_t kMaxSuccessors = 4;
+    static constexpr size_t kMaxPredecessors = 4;
+
+    BBType type;
+    size_t num_successors;
+    BasicBlockId successors[kMaxSuccessors];
+    size_t num_predecessors;
+    BasicBlockId predecessors[kMaxPredecessors];
+  };
+
+  struct MIRDef {
+    static constexpr size_t kMaxSsaDefs = 2;
+    static constexpr size_t kMaxSsaUses = 4;
+
+    BasicBlockId bbid;
+    Instruction::Code opcode;
+    int64_t value;
+    uint32_t metadata;
+    size_t num_uses;
+    int32_t uses[kMaxSsaUses];
+    size_t num_defs;
+    int32_t defs[kMaxSsaDefs];
+  };
+
+#define DEF_SUCC0() \
+    0u, { }
+#define DEF_SUCC1(s1) \
+    1u, { s1 }
+#define DEF_SUCC2(s1, s2) \
+    2u, { s1, s2 }
+#define DEF_SUCC3(s1, s2, s3) \
+    3u, { s1, s2, s3 }
+#define DEF_SUCC4(s1, s2, s3, s4) \
+    4u, { s1, s2, s3, s4 }
+#define DEF_PRED0() \
+    0u, { }
+#define DEF_PRED1(p1) \
+    1u, { p1 }
+#define DEF_PRED2(p1, p2) \
+    2u, { p1, p2 }
+#define DEF_PRED3(p1, p2, p3) \
+    3u, { p1, p2, p3 }
+#define DEF_PRED4(p1, p2, p3, p4) \
+    4u, { p1, p2, p3, p4 }
+#define DEF_BB(type, succ, pred) \
+    { type, succ, pred }
+
+#define DEF_CONST(bb, opcode, reg, value) \
+    { bb, opcode, value, 0u, 0, { }, 1, { reg } }
+#define DEF_CONST_WIDE(bb, opcode, reg, value) \
+    { bb, opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_CONST_STRING(bb, opcode, reg, index) \
+    { bb, opcode, index, 0u, 0, { }, 1, { reg } }
+#define DEF_IGET(bb, opcode, reg, obj, field_info) \
+    { bb, opcode, 0u, field_info, 1, { obj }, 1, { reg } }
+#define DEF_IGET_WIDE(bb, opcode, reg, obj, field_info) \
+    { bb, opcode, 0u, field_info, 1, { obj }, 2, { reg, reg + 1 } }
+#define DEF_IPUT(bb, opcode, reg, obj, field_info) \
+    { bb, opcode, 0u, field_info, 2, { reg, obj }, 0, { } }
+#define DEF_IPUT_WIDE(bb, opcode, reg, obj, field_info) \
+    { bb, opcode, 0u, field_info, 3, { reg, reg + 1, obj }, 0, { } }
+#define DEF_SGET(bb, opcode, reg, field_info) \
+    { bb, opcode, 0u, field_info, 0, { }, 1, { reg } }
+#define DEF_SGET_WIDE(bb, opcode, reg, field_info) \
+    { bb, opcode, 0u, field_info, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_SPUT(bb, opcode, reg, field_info) \
+    { bb, opcode, 0u, field_info, 1, { reg }, 0, { } }
+#define DEF_SPUT_WIDE(bb, opcode, reg, field_info) \
+    { bb, opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
+#define DEF_AGET(bb, opcode, reg, obj, idx) \
+    { bb, opcode, 0u, 0u, 2, { obj, idx }, 1, { reg } }
+#define DEF_AGET_WIDE(bb, opcode, reg, obj, idx) \
+    { bb, opcode, 0u, 0u, 2, { obj, idx }, 2, { reg, reg + 1 } }
+#define DEF_APUT(bb, opcode, reg, obj, idx) \
+    { bb, opcode, 0u, 0u, 3, { reg, obj, idx }, 0, { } }
+#define DEF_APUT_WIDE(bb, opcode, reg, obj, idx) \
+    { bb, opcode, 0u, 0u, 4, { reg, reg + 1, obj, idx }, 0, { } }
+#define DEF_INVOKE0(bb, opcode, method_idx) \
+    { bb, opcode, 0u, method_idx, 0, { }, 0, { } }
+#define DEF_INVOKE1(bb, opcode, reg, method_idx) \
+    { bb, opcode, 0u, method_idx, 1, { reg }, 0, { } }
+#define DEF_INVOKE2(bb, opcode, reg1, reg2, method_idx) \
+    { bb, opcode, 0u, method_idx, 2, { reg1, reg2 }, 0, { } }
+#define DEF_IFZ(bb, opcode, reg) \
+    { bb, opcode, 0u, 0u, 1, { reg }, 0, { } }
+#define DEF_MOVE(bb, opcode, reg, src) \
+    { bb, opcode, 0u, 0u, 1, { src }, 1, { reg } }
+#define DEF_MOVE_WIDE(bb, opcode, reg, src) \
+    { bb, opcode, 0u, 0u, 2, { src, src + 1 }, 2, { reg, reg + 1 } }
+#define DEF_PHI2(bb, reg, src1, src2) \
+    { bb, static_cast<Instruction::Code>(kMirOpPhi), 0, 0u, 2u, { src1, src2 }, 1, { reg } }
+#define DEF_BINOP(bb, opcode, result, src1, src2) \
+    { bb, opcode, 0u, 0u, 2, { src1, src2 }, 1, { result } }
+#define DEF_UNOP(bb, opcode, result, src) DEF_MOVE(bb, opcode, result, src)
+#define DEF_NULOP(bb, opcode, result) DEF_CONST(bb, opcode, result, 0)
+#define DEF_NULOP_WIDE(bb, opcode, result) DEF_CONST_WIDE(bb, opcode, result, 0)
+#define DEF_CHECK_CAST(bb, opcode, reg, type) \
+    { bb, opcode, 0, type, 1, { reg }, 0, { } }
+#define DEF_NEW_ARRAY(bb, opcode, reg, length, type) \
+    { bb, opcode, 0, type, 1, { length }, 1, { reg } }
+
+  void AddTypes(const TypeDef* defs, size_t count) {
+    for (size_t i = 0; i != count; ++i) {
+      const TypeDef* def = &defs[i];
+      dex_file_builder_.AddType(def->descriptor);
+    }
+  }
+
+  template <size_t count>
+  void PrepareTypes(const TypeDef (&defs)[count]) {
+    type_defs_ = defs;
+    type_count_ = count;
+    AddTypes(defs, count);
+  }
+
+  void AddFields(const FieldDef* defs, size_t count) {
+    for (size_t i = 0; i != count; ++i) {
+      const FieldDef* def = &defs[i];
+      dex_file_builder_.AddField(def->class_descriptor, def->type, def->name);
+    }
+  }
+
+  template <size_t count>
+  void PrepareIFields(const FieldDef (&defs)[count]) {
+    ifield_defs_ = defs;
+    ifield_count_ = count;
+    AddFields(defs, count);
+  }
+
+  template <size_t count>
+  void PrepareSFields(const FieldDef (&defs)[count]) {
+    sfield_defs_ = defs;
+    sfield_count_ = count;
+    AddFields(defs, count);
+  }
+
+  void AddMethods(const MethodDef* defs, size_t count) {
+    for (size_t i = 0; i != count; ++i) {
+      const MethodDef* def = &defs[i];
+      dex_file_builder_.AddMethod(def->class_descriptor, def->signature, def->name);
+    }
+  }
+
+  template <size_t count>
+  void PrepareMethods(const MethodDef (&defs)[count]) {
+    method_defs_ = defs;
+    method_count_ = count;
+    AddMethods(defs, count);
+  }
+
+  DexMemAccessType AccessTypeForDescriptor(const char* descriptor) {
+    switch (descriptor[0]) {
+      case 'I':
+      case 'F':
+        return kDexMemAccessWord;
+      case 'J':
+      case 'D':
+        return kDexMemAccessWide;
+      case '[':
+      case 'L':
+        return kDexMemAccessObject;
+      case 'Z':
+        return kDexMemAccessBoolean;
+      case 'B':
+        return kDexMemAccessByte;
+      case 'C':
+        return kDexMemAccessChar;
+      case 'S':
+        return kDexMemAccessShort;
+      default:
+        LOG(FATAL) << "Bad descriptor: " << descriptor;
+        UNREACHABLE();
+    }
+  }
+
+  size_t CountIns(const std::string& test_method_signature, bool is_static) {
+    const char* sig = test_method_signature.c_str();
+    CHECK_EQ(sig[0], '(');
+    ++sig;
+    size_t result = is_static ? 0u : 1u;
+    while (*sig != ')') {
+      result += (AccessTypeForDescriptor(sig) == kDexMemAccessWide) ? 2u : 1u;
+      while (*sig == '[') {
+        ++sig;
+      }
+      if (*sig == 'L') {
+        do {
+          ++sig;
+          CHECK(*sig != '\0' && *sig != ')');
+        } while (*sig != ';');
+      }
+      ++sig;
+    }
+    return result;
+  }
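+  // Worked example (a sketch; the signature is hypothetical): for a non-static
+  // method "(J[Ljava/lang/Object;I)V", the result is 1 (this) + 2 (J) +
+  // 1 ([Ljava/lang/Object; is a single reference) + 1 (I) = 5 ins.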
+
+  void BuildDexFile(const std::string& test_method_signature, bool is_static) {
+    dex_file_builder_.AddMethod(kClassName, test_method_signature, kMethodName);
+    dex_file_ = dex_file_builder_.Build(kDexLocation);
+    cu_.dex_file = dex_file_.get();
+    cu_.method_idx = dex_file_builder_.GetMethodIdx(kClassName, test_method_signature, kMethodName);
+    cu_.access_flags = is_static ? kAccStatic : 0u;
+    cu_.mir_graph->m_units_.push_back(new (cu_.mir_graph->arena_) DexCompilationUnit(
+        &cu_, cu_.class_loader, cu_.class_linker, *cu_.dex_file, nullptr /* code_item not used */,
+        0u /* class_def_idx not used */, 0u /* method_index not used */,
+        cu_.access_flags, nullptr /* verified_method not used */));
+    cu_.mir_graph->current_method_ = 0u;
+    code_item_ = static_cast<DexFile::CodeItem*>(
+        cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
+
+    code_item_->ins_size_ = CountIns(test_method_signature, is_static);
+    code_item_->registers_size_ = kLocalVRs + code_item_->ins_size_;
+    cu_.mir_graph->current_code_item_ = code_item_;
+    cu_.mir_graph->num_ssa_regs_ = kMaxSsaRegs;
+
+    cu_.mir_graph->ifield_lowering_infos_.clear();
+    cu_.mir_graph->ifield_lowering_infos_.reserve(ifield_count_);
+    for (size_t i = 0u; i != ifield_count_; ++i) {
+      const FieldDef* def = &ifield_defs_[i];
+      uint32_t field_idx =
+          dex_file_builder_.GetFieldIdx(def->class_descriptor, def->type, def->name);
+      MirIFieldLoweringInfo field_info(field_idx, AccessTypeForDescriptor(def->type), false);
+      field_info.declaring_dex_file_ = cu_.dex_file;
+      field_info.declaring_field_idx_ = field_idx;
+      cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
+    }
+
+    cu_.mir_graph->sfield_lowering_infos_.clear();
+    cu_.mir_graph->sfield_lowering_infos_.reserve(sfield_count_);
+    for (size_t i = 0u; i != sfield_count_; ++i) {
+      const FieldDef* def = &sfield_defs_[i];
+      uint32_t field_idx =
+          dex_file_builder_.GetFieldIdx(def->class_descriptor, def->type, def->name);
+      MirSFieldLoweringInfo field_info(field_idx, AccessTypeForDescriptor(def->type));
+      field_info.declaring_dex_file_ = cu_.dex_file;
+      field_info.declaring_field_idx_ = field_idx;
+      cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
+    }
+
+    cu_.mir_graph->method_lowering_infos_.clear();
+    cu_.mir_graph->method_lowering_infos_.reserve(method_count_);
+    for (size_t i = 0u; i != method_count_; ++i) {
+      const MethodDef* def = &method_defs_[i];
+      uint32_t method_idx =
+          dex_file_builder_.GetMethodIdx(def->class_descriptor, def->signature, def->name);
+      MirMethodLoweringInfo method_info(method_idx, def->type, false);
+      method_info.declaring_dex_file_ = cu_.dex_file;
+      method_info.declaring_method_idx_ = method_idx;
+      cu_.mir_graph->method_lowering_infos_.push_back(method_info);
+    }
+  }
+
+  void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
+    cu_.mir_graph->block_id_map_.clear();
+    cu_.mir_graph->block_list_.clear();
+    ASSERT_LT(3u, count);  // null, entry, exit and at least one bytecode block.
+    ASSERT_EQ(kNullBlock, defs[0].type);
+    ASSERT_EQ(kEntryBlock, defs[1].type);
+    ASSERT_EQ(kExitBlock, defs[2].type);
+    for (size_t i = 0u; i != count; ++i) {
+      const BBDef* def = &defs[i];
+      BasicBlock* bb = cu_.mir_graph->CreateNewBB(def->type);
+      if (def->num_successors <= 2) {
+        bb->successor_block_list_type = kNotUsed;
+        bb->fall_through = (def->num_successors >= 1) ? def->successors[0] : 0u;
+        bb->taken = (def->num_successors >= 2) ? def->successors[1] : 0u;
+      } else {
+        bb->successor_block_list_type = kPackedSwitch;
+        bb->fall_through = 0u;
+        bb->taken = 0u;
+        bb->successor_blocks.reserve(def->num_successors);
+        for (size_t j = 0u; j != def->num_successors; ++j) {
+          SuccessorBlockInfo* successor_block_info =
+              static_cast<SuccessorBlockInfo*>(cu_.arena.Alloc(sizeof(SuccessorBlockInfo),
+                                                               kArenaAllocSuccessor));
+          successor_block_info->block = j;
+          successor_block_info->key = 0u;  // Not used by type inference.
+          bb->successor_blocks.push_back(successor_block_info);
+        }
+      }
+      bb->predecessors.assign(def->predecessors, def->predecessors + def->num_predecessors);
+      if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
+        bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
+            cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
+        bb->data_flow_info->live_in_v = live_in_v_;
+      }
+    }
+    ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
+    cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
+    ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
+    cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_[2];
+    ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
+  }
+
+  template <size_t count>
+  void PrepareBasicBlocks(const BBDef (&defs)[count]) {
+    DoPrepareBasicBlocks(defs, count);
+  }
+
+  void PrepareSingleBlock() {
+    static const BBDef bbs[] = {
+        DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+        DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+        DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(3)),
+        DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(1)),
+    };
+    PrepareBasicBlocks(bbs);
+  }
+
+  void PrepareDiamond() {
+    static const BBDef bbs[] = {
+        DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+        DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+        DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
+        DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),
+        DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
+        DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
+        DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)),
+    };
+    PrepareBasicBlocks(bbs);
+  }
+
+  void PrepareLoop() {
+    static const BBDef bbs[] = {
+        DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+        DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+        DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
+        DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
+        DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED2(3, 4)),  // "taken" loops to self.
+        DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
+    };
+    PrepareBasicBlocks(bbs);
+  }
+
+  void DoPrepareMIRs(const MIRDef* defs, size_t count) {
+    mir_count_ = count;
+    mirs_ = cu_.arena.AllocArray<MIR>(count, kArenaAllocMIR);
+    ssa_reps_.resize(count);
+    for (size_t i = 0u; i != count; ++i) {
+      const MIRDef* def = &defs[i];
+      MIR* mir = &mirs_[i];
+      ASSERT_LT(def->bbid, cu_.mir_graph->block_list_.size());
+      BasicBlock* bb = cu_.mir_graph->block_list_[def->bbid];
+      bb->AppendMIR(mir);
+      mir->dalvikInsn.opcode = def->opcode;
+      mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
+      mir->dalvikInsn.vB_wide = def->value;
+      if (IsInstructionIGetOrIPut(def->opcode)) {
+        ASSERT_LT(def->metadata, cu_.mir_graph->ifield_lowering_infos_.size());
+        mir->meta.ifield_lowering_info = def->metadata;
+        ASSERT_EQ(cu_.mir_graph->ifield_lowering_infos_[def->metadata].MemAccessType(),
+                  IGetOrIPutMemAccessType(def->opcode));
+        cu_.mir_graph->merged_df_flags_ |= DF_IFIELD;
+      } else if (IsInstructionSGetOrSPut(def->opcode)) {
+        ASSERT_LT(def->metadata, cu_.mir_graph->sfield_lowering_infos_.size());
+        mir->meta.sfield_lowering_info = def->metadata;
+        ASSERT_EQ(cu_.mir_graph->sfield_lowering_infos_[def->metadata].MemAccessType(),
+                  SGetOrSPutMemAccessType(def->opcode));
+        cu_.mir_graph->merged_df_flags_ |= DF_SFIELD;
+      } else if (IsInstructionInvoke(def->opcode)) {
+        ASSERT_LT(def->metadata, cu_.mir_graph->method_lowering_infos_.size());
+        mir->meta.method_lowering_info = def->metadata;
+        mir->dalvikInsn.vA = def->num_uses;
+        cu_.mir_graph->merged_df_flags_ |= DF_FORMAT_35C;
+      } else if (def->opcode == static_cast<Instruction::Code>(kMirOpPhi)) {
+        mir->meta.phi_incoming =
+            allocator_->AllocArray<BasicBlockId>(def->num_uses, kArenaAllocDFInfo);
+        ASSERT_EQ(def->num_uses, bb->predecessors.size());
+        std::copy(bb->predecessors.begin(), bb->predecessors.end(), mir->meta.phi_incoming);
+      } else if (def->opcode == Instruction::CHECK_CAST) {
+        ASSERT_LT(def->metadata, type_count_);
+        mir->dalvikInsn.vB = dex_file_builder_.GetTypeIdx(type_defs_[def->metadata].descriptor);
+        cu_.mir_graph->merged_df_flags_ |= DF_CHK_CAST;
+      } else if (def->opcode == Instruction::NEW_ARRAY) {
+        ASSERT_LT(def->metadata, type_count_);
+        mir->dalvikInsn.vC = dex_file_builder_.GetTypeIdx(type_defs_[def->metadata].descriptor);
+      }
+      mir->ssa_rep = &ssa_reps_[i];
+      mir->ssa_rep->num_uses = def->num_uses;
+      mir->ssa_rep->uses = const_cast<int32_t*>(def->uses);  // Not modified by type inference.
+      mir->ssa_rep->num_defs = def->num_defs;
+      mir->ssa_rep->defs = const_cast<int32_t*>(def->defs);  // Not modified by type inference.
+      mir->offset = i;  // Offset is used only for debug output.
+      mir->optimization_flags = 0u;
+    }
+    code_item_->insns_size_in_code_units_ = 2u * count;
+  }
+
+  template <size_t count>
+  void PrepareMIRs(const MIRDef (&defs)[count]) {
+    DoPrepareMIRs(defs, count);
+  }
+
+  // BasicBlockDataFlow::vreg_to_ssa_map_exit is used only for check-casts.
+  void AllocEndingVRegToSRegMaps() {
+    AllNodesIterator iterator(cu_.mir_graph.get());
+    for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
+      if (bb->data_flow_info != nullptr) {
+        if (bb->data_flow_info->vreg_to_ssa_map_exit == nullptr) {
+          size_t num_vregs = code_item_->registers_size_;
+          bb->data_flow_info->vreg_to_ssa_map_exit =
+              cu_.arena.AllocArray<int32_t>(num_vregs, kArenaAllocDFInfo);
+          std::fill_n(bb->data_flow_info->vreg_to_ssa_map_exit, num_vregs, INVALID_SREG);
+        }
+      }
+    }
+  }
+
+  template <size_t count>
+  void MapVRegToSReg(int vreg, int32_t sreg, const BasicBlockId (&bb_ids)[count]) {
+    AllocEndingVRegToSRegMaps();
+    for (BasicBlockId bb_id : bb_ids) {
+      BasicBlock* bb = cu_.mir_graph->GetBasicBlock(bb_id);
+      CHECK(bb != nullptr);
+      CHECK(bb->data_flow_info != nullptr);
+      CHECK(bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+      bb->data_flow_info->vreg_to_ssa_map_exit[vreg] = sreg;
+    }
+  }
+
+  void PerformTypeInference() {
+    cu_.mir_graph->SSATransformationStart();
+    cu_.mir_graph->ComputeDFSOrders();
+    cu_.mir_graph->ComputeDominators();
+    cu_.mir_graph->ComputeTopologicalSortOrder();
+    cu_.mir_graph->SSATransformationEnd();
+    ASSERT_TRUE(type_inference_ == nullptr);
+    type_inference_.reset(new (allocator_.get()) TypeInference(cu_.mir_graph.get(),
+                                                               allocator_.get()));
+    RepeatingPreOrderDfsIterator iter(cu_.mir_graph.get());
+    bool changed = false;
+    for (BasicBlock* bb = iter.Next(changed); bb != nullptr; bb = iter.Next(changed)) {
+      changed = type_inference_->Apply(bb);
+    }
+    type_inference_->Finish();
+  }
+
+  TypeInferenceTest()
+      : pool_(),
+        cu_(&pool_, kRuntimeISA, nullptr, nullptr),
+        mir_count_(0u),
+        mirs_(nullptr),
+        code_item_(nullptr),
+        ssa_reps_(),
+        allocator_(),
+        live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false, kBitMapMisc)),
+        type_defs_(nullptr),
+        type_count_(0u),
+        ifield_defs_(nullptr),
+        ifield_count_(0u),
+        sfield_defs_(nullptr),
+        sfield_count_(0u),
+        method_defs_(nullptr),
+        method_count_(0u),
+        dex_file_builder_(),
+        dex_file_(nullptr) {
+    cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
+    allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
+    // Bind all possible sregs to live vregs for test purposes.
+    live_in_v_->SetInitialBits(kMaxSsaRegs);
+    cu_.mir_graph->reg_location_ = static_cast<RegLocation*>(cu_.arena.Alloc(
+        kMaxSsaRegs * sizeof(cu_.mir_graph->reg_location_[0]), kArenaAllocRegAlloc));
+    cu_.mir_graph->ssa_base_vregs_.reserve(kMaxSsaRegs);
+    cu_.mir_graph->ssa_subscripts_.reserve(kMaxSsaRegs);
+    for (unsigned int i = 0; i < kMaxSsaRegs; i++) {
+      cu_.mir_graph->ssa_base_vregs_.push_back(i);
+      cu_.mir_graph->ssa_subscripts_.push_back(0);
+    }
+  }
+
+  enum ExpectFlags : uint32_t {
+    kExpectWide         = 0x0001u,
+    kExpectNarrow       = 0x0002u,
+    kExpectFp           = 0x0004u,
+    kExpectCore         = 0x0008u,
+    kExpectRef          = 0x0010u,
+    kExpectArrayWide    = 0x0020u,
+    kExpectArrayNarrow  = 0x0040u,
+    kExpectArrayFp      = 0x0080u,
+    kExpectArrayCore    = 0x0100u,
+    kExpectArrayRef     = 0x0200u,
+    kExpectNull         = 0x0400u,
+    kExpectHigh         = 0x0800u,  // Reserved for ExpectSRegType().
+  };
+
+  struct SRegExpectation {
+    uint32_t array_depth;
+    uint32_t flags;
+  };
+
+  void ExpectSRegType(int s_reg, const SRegExpectation& expectation, bool check_loc = true) {
+    uint32_t flags = expectation.flags;
+    uint32_t array_depth = expectation.array_depth;
+    TypeInference::Type type = type_inference_->sregs_[s_reg];
+
+    if (check_loc) {
+      RegLocation loc = cu_.mir_graph->reg_location_[s_reg];
+      EXPECT_EQ((flags & kExpectWide) != 0u, loc.wide) << s_reg;
+      EXPECT_EQ((flags & kExpectFp) != 0u, loc.fp) << s_reg;
+      EXPECT_EQ((flags & kExpectCore) != 0u, loc.core) << s_reg;
+      EXPECT_EQ((flags & kExpectRef) != 0u, loc.ref) << s_reg;
+      EXPECT_EQ((flags & kExpectHigh) != 0u, loc.high_word) << s_reg;
+    }
+
+    EXPECT_EQ((flags & kExpectWide) != 0u, type.Wide()) << s_reg;
+    EXPECT_EQ((flags & kExpectNarrow) != 0u, type.Narrow()) << s_reg;
+    EXPECT_EQ((flags & kExpectFp) != 0u, type.Fp()) << s_reg;
+    EXPECT_EQ((flags & kExpectCore) != 0u, type.Core()) << s_reg;
+    EXPECT_EQ((flags & kExpectRef) != 0u, type.Ref()) << s_reg;
+    EXPECT_EQ((flags & kExpectHigh) == 0u, type.LowWord()) << s_reg;
+    EXPECT_EQ((flags & kExpectHigh) != 0u, type.HighWord()) << s_reg;
+
+    if ((flags & kExpectRef) != 0u) {
+      EXPECT_EQ((flags & kExpectNull) != 0u, !type.NonNull()) << s_reg;
+    } else {
+      // Null should be checked only for references.
+      ASSERT_EQ((flags & kExpectNull), 0u);
+    }
+
+    ASSERT_EQ(array_depth, type.ArrayDepth()) << s_reg;
+    if (array_depth != 0u) {
+      ASSERT_NE((flags & kExpectRef), 0u);
+      TypeInference::Type nested_type = type.NestedType();
+      EXPECT_EQ((flags & kExpectArrayWide) != 0u, nested_type.Wide()) << s_reg;
+      EXPECT_EQ((flags & kExpectArrayNarrow) != 0u, nested_type.Narrow()) << s_reg;
+      EXPECT_EQ((flags & kExpectArrayFp) != 0u, nested_type.Fp()) << s_reg;
+      EXPECT_EQ((flags & kExpectArrayCore) != 0u, nested_type.Core()) << s_reg;
+      EXPECT_EQ((flags & kExpectArrayRef) != 0u, nested_type.Ref()) << s_reg;
+    }
+    if (!type.Narrow() && type.LowWord() &&
+        (expectation.flags & (kExpectWide | kExpectNarrow | kExpectHigh)) == kExpectWide) {
+      SRegExpectation high_expectation = { array_depth, flags | kExpectHigh };
+      ExpectSRegType(s_reg + 1, high_expectation);
+    }
+  }
+
+  void ExpectCore(int s_reg, bool core) {
+    EXPECT_EQ(core, type_inference_->sregs_[s_reg].Core());
+  }
+
+  void ExpectRef(int s_reg, bool ref) {
+    EXPECT_EQ(ref, type_inference_->sregs_[s_reg].Ref());
+  }
+
+  void ExpectArrayDepth(int s_reg, uint32_t array_depth) {
+    EXPECT_EQ(array_depth, type_inference_->sregs_[s_reg].ArrayDepth());
+  }
+
+  static constexpr size_t kMaxSsaRegs = 16384u;
+  static constexpr uint16_t kLocalVRs = 1000u;
+
+  static constexpr const char* kDexLocation = "TypeInferenceDexFile;";
+  static constexpr const char* kClassName = "LTypeInferenceTest;";
+  static constexpr const char* kMethodName = "test";
+
+  ArenaPool pool_;
+  CompilationUnit cu_;
+  size_t mir_count_;
+  MIR* mirs_;
+  DexFile::CodeItem* code_item_;
+  std::vector<SSARepresentation> ssa_reps_;
+  std::unique_ptr<ScopedArenaAllocator> allocator_;
+  std::unique_ptr<TypeInference> type_inference_;
+  ArenaBitVector* live_in_v_;
+
+  const TypeDef* type_defs_;
+  size_t type_count_;
+  const FieldDef* ifield_defs_;
+  size_t ifield_count_;
+  const FieldDef* sfield_defs_;
+  size_t sfield_count_;
+  const MethodDef* method_defs_;
+  size_t method_count_;
+
+  TestDexFileBuilder dex_file_builder_;
+  std::unique_ptr<const DexFile> dex_file_;
+};
+
+TEST_F(TypeInferenceTest, IGet) {
+  static const FieldDef ifields[] = {
+      { kClassName, "B", "byteField" },
+      { kClassName, "C", "charField" },
+      { kClassName, "D", "doubleField" },
+      { kClassName, "F", "floatField" },
+      { kClassName, "I", "intField" },
+      { kClassName, "J", "longField" },
+      { kClassName, "S", "shortField" },
+      { kClassName, "Z", "booleanField" },
+      { kClassName, "Ljava/lang/Object;", "objectField" },
+      { kClassName, "[Ljava/lang/Object;", "objectArrayField" },
+  };
+  constexpr uint32_t thiz = kLocalVRs;
+  static const MIRDef mirs[] = {
+      DEF_IGET(3u, Instruction::IGET_BYTE, 0u, thiz, 0u),
+      DEF_IGET(3u, Instruction::IGET_CHAR, 1u, thiz, 1u),
+      DEF_IGET_WIDE(3u, Instruction::IGET_WIDE, 2u, thiz, 2u),
+      DEF_IGET(3u, Instruction::IGET, 4u, thiz, 3u),
+      DEF_IGET(3u, Instruction::IGET, 5u, thiz, 4u),
+      DEF_IGET_WIDE(3u, Instruction::IGET_WIDE, 6u, thiz, 5u),
+      DEF_IGET(3u, Instruction::IGET_SHORT, 8u, thiz, 6u),
+      DEF_IGET(3u, Instruction::IGET_BOOLEAN, 9u, thiz, 7u),
+      DEF_IGET(3u, Instruction::IGET_OBJECT, 10u, thiz, 8u),
+      DEF_IGET(3u, Instruction::IGET_OBJECT, 11u, thiz, 9u),
+  };
+
+  PrepareIFields(ifields);
+  BuildDexFile("()V", false);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
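+      // One expectation for every MIR, checking its def.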
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectFp | kExpectWide },
+      { 0u, kExpectFp | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectWide },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+  };
+  static_assert(arraysize(expectations) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(expectations); ++i) {
+    EXPECT_EQ(mirs[i].opcode, mirs_[i].dalvikInsn.opcode);
+    ASSERT_LE(1u, mirs_[i].ssa_rep->num_defs);
+    ExpectSRegType(mirs_[i].ssa_rep->defs[0], expectations[i]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, SGet) {
+  static const FieldDef sfields[] = {
+      { kClassName, "B", "staticByteField" },
+      { kClassName, "C", "staticCharField" },
+      { kClassName, "D", "staticDoubleField" },
+      { kClassName, "F", "staticFloatField" },
+      { kClassName, "I", "staticIntField" },
+      { kClassName, "J", "staticLongField" },
+      { kClassName, "S", "staticShortField" },
+      { kClassName, "Z", "staticBooleanField" },
+      { kClassName, "Ljava/lang/Object;", "staticObjectField" },
+      { kClassName, "[Ljava/lang/Object;", "staticObjectArrayField" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_SGET(3u, Instruction::SGET_BYTE, 0u, 0u),
+      DEF_SGET(3u, Instruction::SGET_CHAR, 1u, 1u),
+      DEF_SGET_WIDE(3u, Instruction::SGET_WIDE, 2u, 2u),
+      DEF_SGET(3u, Instruction::SGET, 4u, 3u),
+      DEF_SGET(3u, Instruction::SGET, 5u, 4u),
+      DEF_SGET_WIDE(3u, Instruction::SGET_WIDE, 6u, 5u),
+      DEF_SGET(3u, Instruction::SGET_SHORT, 8u, 6u),
+      DEF_SGET(3u, Instruction::SGET_BOOLEAN, 9u, 7u),
+      DEF_SGET(3u, Instruction::SGET_OBJECT, 10u, 8u),
+      DEF_SGET(3u, Instruction::SGET_OBJECT, 11u, 9u),
+  };
+
+  PrepareSFields(sfields);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
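+      // One expectation for every MIR, checking its def.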
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectFp | kExpectWide },
+      { 0u, kExpectFp | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectWide },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+  };
+  static_assert(arraysize(expectations) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(expectations); ++i) {
+    EXPECT_EQ(mirs[i].opcode, mirs_[i].dalvikInsn.opcode);
+    ASSERT_LE(1u, mirs_[i].ssa_rep->num_defs);
+    ExpectSRegType(mirs_[i].ssa_rep->defs[0], expectations[i]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, IPut) {
+  static const FieldDef ifields[] = {
+      { kClassName, "B", "byteField" },
+      { kClassName, "C", "charField" },
+      { kClassName, "D", "doubleField" },
+      { kClassName, "F", "floatField" },
+      { kClassName, "I", "intField" },
+      { kClassName, "J", "longField" },
+      { kClassName, "S", "shortField" },
+      { kClassName, "Z", "booleanField" },
+      { kClassName, "Ljava/lang/Object;", "objectField" },
+      { kClassName, "[Ljava/lang/Object;", "objectArrayField" },
+  };
+  constexpr uint32_t thiz = kLocalVRs;
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_IPUT(3u, Instruction::IPUT_BYTE, 0u, thiz, 0u),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),
+      DEF_IPUT(3u, Instruction::IPUT_CHAR, 1u, thiz, 1u),
+      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 2u, 0),
+      DEF_IPUT_WIDE(3u, Instruction::IPUT_WIDE, 2u, thiz, 2u),
+      DEF_CONST(3u, Instruction::CONST, 4u, 0),
+      DEF_IPUT(3u, Instruction::IPUT, 4u, thiz, 3u),
+      DEF_CONST(3u, Instruction::CONST, 5u, 0),
+      DEF_IPUT(3u, Instruction::IPUT, 5u, thiz, 4u),
+      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 6u, 0),
+      DEF_IPUT_WIDE(3u, Instruction::IPUT_WIDE, 6u, thiz, 5u),
+      DEF_CONST(3u, Instruction::CONST, 8u, 0),
+      DEF_IPUT(3u, Instruction::IPUT_SHORT, 8u, thiz, 6u),
+      DEF_CONST(3u, Instruction::CONST, 9u, 0),
+      DEF_IPUT(3u, Instruction::IPUT_BOOLEAN, 9u, thiz, 7u),
+      DEF_CONST(3u, Instruction::CONST, 10u, 0),
+      DEF_IPUT(3u, Instruction::IPUT_OBJECT, 10u, thiz, 8u),
+      DEF_CONST(3u, Instruction::CONST, 11u, 0),
+      DEF_IPUT(3u, Instruction::IPUT_OBJECT, 11u, thiz, 9u),
+  };
+
+  PrepareIFields(ifields);
+  BuildDexFile("()V", false);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      // One expectation for every 2 MIRs.
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectFp | kExpectWide },
+      { 0u, kExpectFp | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectWide },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+  };
+  static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(expectations); ++i) {
+    EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+    EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+    ASSERT_LE(1u, mirs_[2 * i].ssa_rep->num_defs);
+    ExpectSRegType(mirs_[2 * i].ssa_rep->defs[0], expectations[i]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, SPut) {
+  static const FieldDef sfields[] = {
+      { kClassName, "B", "staticByteField" },
+      { kClassName, "C", "staticCharField" },
+      { kClassName, "D", "staticDoubleField" },
+      { kClassName, "F", "staticFloatField" },
+      { kClassName, "I", "staticIntField" },
+      { kClassName, "J", "staticLongField" },
+      { kClassName, "S", "staticShortField" },
+      { kClassName, "Z", "staticBooleanField" },
+      { kClassName, "Ljava/lang/Object;", "staticObjectField" },
+      { kClassName, "[Ljava/lang/Object;", "staticObjectArrayField" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_SPUT(3u, Instruction::SPUT_BYTE, 0u, 0u),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),
+      DEF_SPUT(3u, Instruction::SPUT_CHAR, 1u, 1u),
+      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 2u, 0),
+      DEF_SPUT_WIDE(3u, Instruction::SPUT_WIDE, 2u, 2u),
+      DEF_CONST(3u, Instruction::CONST, 4u, 0),
+      DEF_SPUT(3u, Instruction::SPUT, 4u, 3u),
+      DEF_CONST(3u, Instruction::CONST, 5u, 0),
+      DEF_SPUT(3u, Instruction::SPUT, 5u, 4u),
+      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 6u, 0),
+      DEF_SPUT_WIDE(3u, Instruction::SPUT_WIDE, 6u, 5u),
+      DEF_CONST(3u, Instruction::CONST, 8u, 0),
+      DEF_SPUT(3u, Instruction::SPUT_SHORT, 8u, 6u),
+      DEF_CONST(3u, Instruction::CONST, 9u, 0),
+      DEF_SPUT(3u, Instruction::SPUT_BOOLEAN, 9u, 7u),
+      DEF_CONST(3u, Instruction::CONST, 10u, 0),
+      DEF_SPUT(3u, Instruction::SPUT_OBJECT, 10u, 8u),
+      DEF_CONST(3u, Instruction::CONST, 11u, 0),
+      DEF_SPUT(3u, Instruction::SPUT_OBJECT, 11u, 9u),
+  };
+
+  PrepareSFields(sfields);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      // One expectation for every 2 MIRs.
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectFp | kExpectWide },
+      { 0u, kExpectFp | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectWide },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+  };
+  static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(expectations); ++i) {
+    EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+    EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+    ASSERT_LE(1u, mirs_[2 * i].ssa_rep->num_defs);
+    ExpectSRegType(mirs_[2 * i].ssa_rep->defs[0], expectations[i]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MethodReturnType) {
+  static const MethodDef methods[] = {
+      { kClassName, "()B", "byteFoo", kStatic },
+      { kClassName, "()C", "charFoo", kStatic },
+      { kClassName, "()D", "doubleFoo", kStatic },
+      { kClassName, "()F", "floatFoo", kStatic },
+      { kClassName, "()I", "intFoo", kStatic },
+      { kClassName, "()J", "longFoo", kStatic },
+      { kClassName, "()S", "shortFoo", kStatic },
+      { kClassName, "()Z", "booleanFoo", kStatic },
+      { kClassName, "()Ljava/lang/Object;", "objectFoo", kStatic },
+      { kClassName, "()[Ljava/lang/Object;", "objectArrayFoo", kStatic },
+  };
+  static const MIRDef mirs[] = {
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 0u),
+      DEF_NULOP(3u, Instruction::MOVE_RESULT, 0u),
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 1u),
+      DEF_NULOP(3u, Instruction::MOVE_RESULT, 1u),
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 2u),
+      DEF_NULOP_WIDE(3u, Instruction::MOVE_RESULT_WIDE, 2u),
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 3u),
+      DEF_NULOP(3u, Instruction::MOVE_RESULT, 4u),
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 4u),
+      DEF_NULOP(3u, Instruction::MOVE_RESULT, 5u),
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 5u),
+      DEF_NULOP_WIDE(3u, Instruction::MOVE_RESULT_WIDE, 6u),
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 6u),
+      DEF_NULOP(3u, Instruction::MOVE_RESULT, 8u),
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 7u),
+      DEF_NULOP(3u, Instruction::MOVE_RESULT, 9u),
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 8u),
+      DEF_NULOP(3u, Instruction::MOVE_RESULT_OBJECT, 10u),
+      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 9u),
+      DEF_NULOP(3u, Instruction::MOVE_RESULT_OBJECT, 11u),
+  };
+
+  PrepareMethods(methods);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      // One expectation for every 2 MIRs.
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectFp | kExpectWide },
+      { 0u, kExpectFp | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectWide },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+  };
+  static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(expectations); ++i) {
+    EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+    EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+    ASSERT_LE(1u, mirs_[2 * i + 1].ssa_rep->num_defs);
+    ExpectSRegType(mirs_[2 * i + 1].ssa_rep->defs[0], expectations[i]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MethodArgType) {
+  static const MethodDef methods[] = {
+      { kClassName, "(B)V", "fooByte", kStatic },
+      { kClassName, "(C)V", "fooChar", kStatic },
+      { kClassName, "(D)V", "fooDouble", kStatic },
+      { kClassName, "(F)V", "fooFloat", kStatic },
+      { kClassName, "(I)V", "fooInt", kStatic },
+      { kClassName, "(J)V", "fooLong", kStatic },
+      { kClassName, "(S)V", "fooShort", kStatic },
+      { kClassName, "(Z)V", "fooBoolean", kStatic },
+      { kClassName, "(Ljava/lang/Object;)V", "fooObject", kStatic },
+      { kClassName, "([Ljava/lang/Object;)V", "fooObjectArray", kStatic },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 0u, 0u),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),
+      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 1u, 1u),
+      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 2u, 0),
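+      // Wide arguments (D here, J below) occupy two s_regs, so the invoke consumes a pair.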
+      DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 2u, 3u, 2u),
+      DEF_CONST(3u, Instruction::CONST, 4u, 0),
+      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 4u, 3u),
+      DEF_CONST(3u, Instruction::CONST, 5u, 0),
+      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 5u, 4u),
+      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 6u, 0),
+      DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 6u, 7u, 5u),
+      DEF_CONST(3u, Instruction::CONST, 8u, 0),
+      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 8u, 6u),
+      DEF_CONST(3u, Instruction::CONST, 9u, 0),
+      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 9u, 7u),
+      DEF_CONST(3u, Instruction::CONST, 10u, 0),
+      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 10u, 8u),
+      DEF_CONST(3u, Instruction::CONST, 11u, 0),
+      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 11u, 9u),
+  };
+
+  PrepareMethods(methods);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      // One expectation for every 2 MIRs.
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectFp | kExpectWide },
+      { 0u, kExpectFp | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectWide },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+  };
+  static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(expectations); ++i) {
+    EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+    EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+    ASSERT_LE(1u, mirs_[2 * i].ssa_rep->num_defs);
+    ExpectSRegType(mirs_[2 * i].ssa_rep->defs[0], expectations[i]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut1) {
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),  // Object[] array
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // value; can't even determine whether core or fp.
+      DEF_CONST(3u, Instruction::CONST, 2u, 0),  // index
+      DEF_APUT(3u, Instruction::APUT, 1u, 0u, 2u),
+  };
+
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayNarrow },
+      { 0u, kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut2) {
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),  // Object[] array
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // Object[] value
+      DEF_CONST(3u, Instruction::CONST, 2u, 0),  // index
+      DEF_APUT(3u, Instruction::APUT_OBJECT, 1u, 0u, 2u),
+  };
+
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut3) {
+  static const MIRDef mirs[] = {
+      // Either array1 or array2 could be Object[][], but there is no way to tell from the bytecode.
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),  // Object[] array1
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // Object[] array2
+      DEF_CONST(3u, Instruction::CONST, 2u, 0),  // index
+      DEF_APUT(3u, Instruction::APUT_OBJECT, 0u, 1u, 2u),
+      DEF_APUT(3u, Instruction::APUT_OBJECT, 1u, 0u, 2u),
+  };
+
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut4) {
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // index
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),  // Object[] array
+      DEF_CONST(3u, Instruction::CONST, 3u, 0),  // value; can't even determine whether core or fp.
+      DEF_APUT(3u, Instruction::APUT, 3u, 2u, 1u),
+  };
+
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayNarrow },
+      { 0u, kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut5) {
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // index
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),  // Object[] array
+      DEF_CONST(3u, Instruction::CONST, 3u, 0),  // Object[] value
+      DEF_APUT(3u, Instruction::APUT_OBJECT, 3u, 2u, 1u),
+  };
+
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut6) {
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // index
+      // Either array1 or array2 could be Object[][], but there is no way to tell from the bytecode.
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),  // Object[] array1
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 3u, 0u, 1u),  // Object[] array2
+      DEF_APUT(3u, Instruction::APUT_OBJECT, 2u, 3u, 1u),
+      DEF_APUT(3u, Instruction::APUT_OBJECT, 3u, 2u, 1u),
+  };
+
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, TwoNullObjectArraysInLoop) {
+  static const MIRDef mirs[] = {
+      // void foo() {
+      //   Object[] array1 = ((Object[][])null)[0];
+      //   Object[] array2 = ((Object[][])null)[0];
+      //   for (int i = 0; i != 3; ++i) {
+      //     Object[] a1 = null;  // One of these could be Object[][] but not both.
+      //     Object[] a2 = null;  // But they will be deduced as Object[].
+      //     try { a1[0] = a2; } catch (Throwable ignored) { }
+      //     try { a2[0] = a1; } catch (Throwable ignored) { }
+      //     array1 = a1;
+      //     array2 = a2;
+      //   }
+      // }
+      //
+      // Omitting the try-catch:
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),            // null
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),            // index
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),  // array1
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 3u, 0u, 1u),  // array2
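+      // Notation: "?" = unknown type, "L" = reference, "[X" = array with component type X.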
+      DEF_PHI2(4u, 4u, 2u, 8u),  // ? + [L -> [? gives [L (see array-length below)
+      DEF_PHI2(4u, 5u, 3u, 9u),  // ? + [L -> ? gives ?
+      DEF_AGET(4u, Instruction::AGET_OBJECT, 6u, 0u, 1u),  // a1
+      DEF_AGET(4u, Instruction::AGET_OBJECT, 7u, 0u, 1u),  // a2
+      DEF_APUT(4u, Instruction::APUT_OBJECT, 6u, 7u, 1u),
+      DEF_APUT(4u, Instruction::APUT_OBJECT, 7u, 6u, 1u),
+      DEF_MOVE(4u, Instruction::MOVE_OBJECT, 8u, 6u),
+      DEF_MOVE(4u, Instruction::MOVE_OBJECT, 9u, 7u),
+      DEF_UNOP(5u, Instruction::ARRAY_LENGTH, 10u, 4u),
+  };
+
+  BuildDexFile("()V", true);
+  PrepareLoop();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ArrayArrayFloat) {
+  static const MethodDef methods[] = {
+      { kClassName, "(F)V", "fooFloat", kStatic },
+  };
+  static const MIRDef mirs[] = {
+      // void foo() {
+      //   try {
+      //     float[][][] aaaf = null;
+      //     float[][] array = aaaf[0];  // Make sure array is treated as properly typed.
+      //     array[0][0] = 0.0f;      // const + aget-object[1] + aput
+      //     fooFloat(array[0][0]);   // aget-object[2] + aget + invoke
+      //     // invoke: signature => input is F.
+      //     // aget: output is F => base is [F (precise)
+      //     // aget-object[2]: output is [F => base is [[F (precise)
+      //     // aput: unknown input type => base is [?
+      //     // aget-object[1]: base is [[F => result is L or [F, merge with [? => result is [F
+      //     // aput (again): base is [F => result is F
+      //     // const: F determined by the aput reprocessing.
+      //   } catch (Throwable ignored) {
+      //   }
+      // }
+      //
+      // Omitting the try-catch:
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),             // 0
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),             // aaaf
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 1u, 0u),   // array = aaaf[0]
+      DEF_CONST(3u, Instruction::CONST, 3u, 0),             // 0.0f
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 4u, 2u, 0u),   // array[0]
+      DEF_APUT(3u, Instruction::APUT, 3u, 4u, 0u),          // array[0][0] = 0.0f
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 5u, 2u, 0u),   // array[0]
+      DEF_AGET(3u, Instruction::AGET, 6u, 5u, 0u),          // array[0][0]
+      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 6u, 0u),  // fooFloat(array[0][0])
+  };
+
+  PrepareMethods(methods);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 2u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+      { 0u, kExpectFp | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+      { 0u, kExpectFp | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCast1) {
+  static const TypeDef types[] = {
+      { "[I" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+      DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+      DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 0u),
+      // Pseudo-phi from [I and [I into L infers only L but not [.
+      DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
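+  // v2 gets a def in each of blocks 3-6, so the check-casts yield distinct SSA versions
+  // of s_reg 2 that are merged (pseudo-phi) in block 6.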
+  MapVRegToSReg(2, 2, v0_def_blocks);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCast2) {
+  static const TypeDef types[] = {
+      { "[I" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+      DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+      DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 0u),
+      // Pseudo-phi from [I and [I into [? infers [I.
+      DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 2u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
+  MapVRegToSReg(2, 2, v0_def_blocks);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCast3) {
+  static const TypeDef types[] = {
+      { "[I" },
+      { "[F" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+      DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+      DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 1u),
+      // Pseudo-phi from [I and [F into L correctly leaves it as L.
+      DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
+  MapVRegToSReg(2, 2, v0_def_blocks);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCastConflict1) {
+  static const TypeDef types[] = {
+      { "[I" },
+      { "[F" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+      DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+      DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 1u),
+      // Pseudo-phi from [I and [F into [? infers conflict [I/[F.
+      DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 2u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
+  MapVRegToSReg(2, 2, v0_def_blocks);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg], false);
+  }
+  // The conflict in the array element type wasn't propagated to an SSA reg.
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCastConflict2) {
+  static const TypeDef types[] = {
+      { "[I" },
+      { "[F" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+      DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+      DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 1u),
+      // Pseudo-phi from [I and [F into [? infers conflict [I/[F.
+      DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+      DEF_AGET(6u, Instruction::AGET, 4u, 2u, 1u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
+  MapVRegToSReg(2, 2, v0_def_blocks);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectFp | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg], false);
+  }
+  // Type conflict in an SSA reg, register promotion disabled.
+  EXPECT_NE(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Phi1) {
+  static const TypeDef types[] = {
+      { "[I" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 100),
+      DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+      DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 0u),
+      // Phi from [I and [I infers only L but not [.
+      DEF_PHI2(6u, 3u, 1u, 2u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Phi2) {
+  static const TypeDef types[] = {
+      { "[F" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 100),
+      DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+      DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 0u),
+      // Phi from [F and [F into [? infers [F.
+      DEF_PHI2(6u, 3u, 1u, 2u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 3u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Phi3) {
+  static const TypeDef types[] = {
+      { "[I" },
+      { "[F" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 100),
+      DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+      DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 1u),
+      // Phi from [I and [F infers L.
+      DEF_PHI2(6u, 3u, 1u, 2u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+      { 0u, kExpectRef | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Phi4) {
+  static const TypeDef types[] = {
+      { "[I" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 100),
+      DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+      DEF_CONST(5u, Instruction::CONST, 2u, 0),
+      // Pseudo-phi from [I and null infers L.
+      DEF_PHI2(6u, 3u, 1u, 2u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+      { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+      { 0u, kExpectRef | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, PhiConflict1) {
+  static const TypeDef types[] = {
+      { "[I" },
+      { "[F" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 100),
+      DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+      DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 1u),
+      // Pseudo-phi from [I and [F into [? infers conflict [I/[F (then propagated upwards).
+      DEF_PHI2(6u, 3u, 1u, 2u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 3u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg], false);
+  }
+  // The conflict in the array element type wasn't propagated to an SSA reg.
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, PhiConflict2) {
+  static const TypeDef types[] = {
+      { "[I" },
+      { "[F" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 100),
+      DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+      DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 1u),
+      // Pseudo-phi from [I and [F into [? infers conflict [I/[F (then propagated upwards).
+      DEF_PHI2(6u, 3u, 1u, 2u),
+      DEF_AGET(6u, Instruction::AGET, 4u, 3u, 0u),
+  };
+  PrepareTypes(types);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectFp | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg], false);
+  }
+  // Type conflict in an SSA reg, register promotion disabled.
+  EXPECT_NE(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Wide1) {
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // index
+      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),  // long[]
+      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 3u, 0),  // long
+      DEF_APUT_WIDE(3u, Instruction::APUT_WIDE, 3u, 2u, 1u),
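+      // The return-object below pins s_reg 2 to the "[J" return type from the signature.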
+      { 3u, Instruction::RETURN_OBJECT, 0, 0u, 1u, { 2u }, 0u, { } },
+  };
+
+  BuildDexFile("()[J", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+      { 0u, kExpectCore | kExpectWide },
+      // NOTE: High word checked implicitly for sreg = 3.
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg], false);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, WideSizeConflict1) {
+  static const MIRDef mirs[] = {
+      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 0u, 0),
+      DEF_MOVE(3u, Instruction::MOVE, 2u, 0u),
+  };
+
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectNarrow | kExpectWide },
+      { 0u, kExpectNarrow | kExpectWide },
+  };
+  ExpectSRegType(0u, expectations[0], false);
+  ExpectSRegType(2u, expectations[1], false);
+  EXPECT_TRUE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ArrayLongLength) {
+  static const FieldDef sfields[] = {
+      { kClassName, "[J", "arrayLongField" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(4u, Instruction::CONST, 0u, 0),
+      DEF_SGET(5u, Instruction::SGET_OBJECT, 1u, 0u),
+      DEF_PHI2(6u, 2u, 0u, 1u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 3u, 2u),
+      DEF_SGET(6u, Instruction::SGET_OBJECT, 4u, 0u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 5u, 4u),
+  };
+
+  PrepareSFields(sfields);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayCore | kExpectArrayWide },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ArrayArrayObjectLength) {
+  static const FieldDef sfields[] = {
+      { kClassName, "[[Ljava/lang/Object;", "arrayLongField" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(4u, Instruction::CONST, 0u, 0),
+      DEF_SGET(5u, Instruction::SGET_OBJECT, 1u, 0u),
+      DEF_PHI2(6u, 2u, 0u, 1u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 3u, 2u),
+      DEF_SGET(6u, Instruction::SGET_OBJECT, 4u, 0u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 5u, 4u),
+  };
+
+  PrepareSFields(sfields);
+  BuildDexFile("()V", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+      { 2u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 2u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, SGetAdd0SPut) {
+  static const FieldDef sfields[] = {
+      { kClassName, "I", "staticIntField" },
+  };
+  static const MIRDef mirs[] = {
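+      // sget -> add-int/lit8 +0 -> sput: the int type must flow through the arithmetic insn.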
+      DEF_SGET(3u, Instruction::SGET, 0u, 0u),
+      DEF_UNOP(3u, Instruction::ADD_INT_LIT8, 1u, 0u),  // +0
+      DEF_SPUT(3u, Instruction::SPUT, 1u, 0u),
+  };
+
+  PrepareSFields(sfields);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MoveObjectNull) {
+  static const MethodDef methods[] = {
+      { kClassName, "([I[D)V", "foo", kStatic },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_MOVE(3u, Instruction::MOVE_OBJECT, 1u, 0u),
+      DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 0u, 1u, 0u),
+  };
+
+  PrepareMethods(methods);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectation = {
+      1u,
+      kExpectRef | kExpectNarrow | kExpectNull |
+      kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow | kExpectArrayWide
+  };
+  ExpectSRegType(0u, expectation);
+  ExpectSRegType(1u, expectation);
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MoveNull1) {
+  static const MethodDef methods[] = {
+      { kClassName, "([I[D)V", "foo", kStatic },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_MOVE(3u, Instruction::MOVE, 1u, 0u),
+      DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 0u, 1u, 0u),
+  };
+
+  PrepareMethods(methods);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectation = {
+      1u,
+      kExpectCore | kExpectRef | kExpectFp | kExpectNarrow | kExpectNull |
+      kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow | kExpectArrayWide
+  };
+  ExpectSRegType(0u, expectation);
+  ExpectSRegType(1u, expectation);
+  // Type conflict using move instead of move-object for null, register promotion disabled.
+  EXPECT_NE(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MoveNull2) {
+  static const FieldDef sfields[] = {
+      { kClassName, "[F", "staticArrayArrayFloatField" },
+      { kClassName, "[I", "staticArrayIntField" },
+      { kClassName, "[[I", "staticArrayArrayIntField" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(4u, Instruction::CONST, 0u, 0),
+      DEF_MOVE(4u, Instruction::MOVE_OBJECT, 1u, 0u),
+      DEF_MOVE(4u, Instruction::MOVE_OBJECT, 2u, 1u),
+      DEF_SGET(5u, Instruction::SGET_OBJECT, 3u, 0u),
+      DEF_SGET(5u, Instruction::SGET_OBJECT, 4u, 1u),
+      DEF_SGET(5u, Instruction::SGET_OBJECT, 5u, 2u),
+      DEF_PHI2(6u, 6u, 0u, 3u),
+      DEF_PHI2(6u, 7u, 1u, 4u),
+      DEF_PHI2(6u, 8u, 2u, 5u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 9u, 6u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 10u, 7u),
+      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 11u, 8u),
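+      // The return-object below ties s_reg 8 to the "[[I" return type.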
+      { 6u, Instruction::RETURN_OBJECT, 0, 0u, 1u, { 8u }, 0u, { } },
+  };
+
+  PrepareSFields(sfields);
+  BuildDexFile("()[[I", true);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull |
+          kExpectArrayCore | kExpectArrayFp | kExpectArrayRef | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull |
+          kExpectArrayCore | kExpectArrayFp | kExpectArrayRef | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectNull |
+          kExpectArrayCore | kExpectArrayFp | kExpectArrayRef | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+      { 2u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+      { 2u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  // Type conflict in array type not propagated to actual register.
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ReuseNull1) {
+  static const FieldDef sfields[] = {
+      { kClassName, "[I", "staticArrayLongField" },
+      { kClassName, "[[F", "staticArrayArrayFloatField" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 0u),
+      DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 1u),
+  };
+
+  PrepareSFields(sfields);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectation = {
+      1u,
+      kExpectRef | kExpectNarrow | kExpectNull |
+      kExpectArrayCore | kExpectArrayRef | kExpectArrayFp | kExpectArrayNarrow
+  };
+  ExpectSRegType(0u, expectation);
+  // Type conflict in array type not propagated to actual register.
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ReuseNull2) {
+  static const FieldDef sfields[] = {
+      { kClassName, "[J", "staticArrayLongField" },
+      { kClassName, "[[F", "staticArrayArrayFloatField" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3u, Instruction::CONST, 0u, 0),
+      DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 0u),
+      DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 1u),
+  };
+
+  PrepareSFields(sfields);
+  BuildDexFile("()V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectation = {
+      1u,
+      kExpectRef | kExpectNarrow | kExpectNull |
+      kExpectArrayCore | kExpectArrayRef | kExpectArrayFp | kExpectArrayNarrow | kExpectArrayWide
+  };
+  ExpectSRegType(0u, expectation);
+  // Type conflict in array type not propagated to actual register.
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ArgIsNonNull) {
+  constexpr uint32_t thiz = kLocalVRs;
+  static const MIRDef mirs[] = {
+      DEF_MOVE(3u, Instruction::MOVE_OBJECT, 0u, thiz),
+  };
+
+  BuildDexFile("(Ljava/lang/Object;)V", true);
+  PrepareSingleBlock();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectation = {
+      0u,
+      kExpectRef | kExpectNarrow
+  };
+  ExpectSRegType(0u, expectation);
+  // No type conflict here; the plain reference argument keeps register promotion enabled.
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, IfCc) {
+  static const FieldDef sfields[] = {
+      { kClassName, "I", "intField" },
+  };
+  static const MIRDef mirs[] = {
+      DEF_SGET(3u, Instruction::SGET, 0u, 0u),
+      DEF_CONST(3u, Instruction::CONST, 1u, 0u),
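+      // if-eq links its two operands, so the const inherits the int type from the sget.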
+      { 3u, Instruction::IF_EQ, 0, 0u, 2, { 0u, 1u }, 0, { } },
+  };
+
+  PrepareSFields(sfields);
+  BuildDexFile("()V", false);
+  PrepareDiamond();
+  PrepareMIRs(mirs);
+  PerformTypeInference();
+
+  ASSERT_EQ(arraysize(mirs), mir_count_);
+  static const SRegExpectation expectations[] = {
+      { 0u, kExpectCore | kExpectNarrow },
+      { 0u, kExpectCore | kExpectNarrow },
+  };
+  for (size_t sreg = 0u; sreg != arraysize(expectations); ++sreg) {
+    ExpectSRegType(sreg, expectations[sreg]);
+  }
+  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+}  // namespace art
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index 2b78e38..e681bcf 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -23,400 +23,6 @@
 
 namespace art {
 
-bool MIRGraph::SetFp(int index, bool is_fp) {
-  bool change = false;
-  if (is_fp && !reg_location_[index].fp) {
-    reg_location_[index].fp = true;
-    reg_location_[index].defined = true;
-    change = true;
-  }
-  return change;
-}
-
-bool MIRGraph::SetFp(int index) {
-  bool change = false;
-  if (!reg_location_[index].fp) {
-    reg_location_[index].fp = true;
-    reg_location_[index].defined = true;
-    change = true;
-  }
-  return change;
-}
-
-bool MIRGraph::SetCore(int index, bool is_core) {
-  bool change = false;
-  if (is_core && !reg_location_[index].defined) {
-    reg_location_[index].core = true;
-    reg_location_[index].defined = true;
-    change = true;
-  }
-  return change;
-}
-
-bool MIRGraph::SetCore(int index) {
-  bool change = false;
-  if (!reg_location_[index].defined) {
-    reg_location_[index].core = true;
-    reg_location_[index].defined = true;
-    change = true;
-  }
-  return change;
-}
-
-bool MIRGraph::SetRef(int index, bool is_ref) {
-  bool change = false;
-  if (is_ref && !reg_location_[index].defined) {
-    reg_location_[index].ref = true;
-    reg_location_[index].defined = true;
-    change = true;
-  }
-  return change;
-}
-
-bool MIRGraph::SetRef(int index) {
-  bool change = false;
-  if (!reg_location_[index].defined) {
-    reg_location_[index].ref = true;
-    reg_location_[index].defined = true;
-    change = true;
-  }
-  return change;
-}
-
-bool MIRGraph::SetWide(int index, bool is_wide) {
-  bool change = false;
-  if (is_wide && !reg_location_[index].wide) {
-    reg_location_[index].wide = true;
-    change = true;
-  }
-  return change;
-}
-
-bool MIRGraph::SetWide(int index) {
-  bool change = false;
-  if (!reg_location_[index].wide) {
-    reg_location_[index].wide = true;
-    change = true;
-  }
-  return change;
-}
-
-bool MIRGraph::SetHigh(int index, bool is_high) {
-  bool change = false;
-  if (is_high && !reg_location_[index].high_word) {
-    reg_location_[index].high_word = true;
-    change = true;
-  }
-  return change;
-}
-
-bool MIRGraph::SetHigh(int index) {
-  bool change = false;
-  if (!reg_location_[index].high_word) {
-    reg_location_[index].high_word = true;
-    change = true;
-  }
-  return change;
-}
-
-
-/*
- * Infer types and sizes.  We don't need to track change on sizes,
- * as it doesn't propagate.  We're guaranteed at least one pass through
- * the cfg.
- */
-bool MIRGraph::InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed) {
-  SSARepresentation *ssa_rep = mir->ssa_rep;
-
-  /*
-   * The dex bytecode definition does not explicitly outlaw the definition of the same
-   * virtual register to be used in both a 32-bit and 64-bit pair context.  However, dx
-   * does not generate this pattern (at least recently).  Further, in the next revision of
-   * dex, we will forbid this.  To support the few cases in the wild, detect this pattern
-   * and punt to the interpreter.
-   */
-  bool type_mismatch = false;
-
-  if (ssa_rep) {
-    uint64_t attrs = GetDataFlowAttributes(mir);
-    const int* uses = ssa_rep->uses;
-    const int* defs = ssa_rep->defs;
-
-    // Handle defs
-    if (attrs & DF_DA) {
-      if (attrs & DF_CORE_A) {
-        changed |= SetCore(defs[0]);
-      }
-      if (attrs & DF_REF_A) {
-        changed |= SetRef(defs[0]);
-      }
-      if (attrs & DF_A_WIDE) {
-        reg_location_[defs[0]].wide = true;
-        reg_location_[defs[1]].wide = true;
-        reg_location_[defs[1]].high_word = true;
-        DCHECK_EQ(SRegToVReg(defs[0])+1,
-        SRegToVReg(defs[1]));
-      }
-    }
-
-
-    // Handles uses
-    int next = 0;
-    if (attrs & DF_UA) {
-      if (attrs & DF_CORE_A) {
-        changed |= SetCore(uses[next]);
-      }
-      if (attrs & DF_REF_A) {
-        changed |= SetRef(uses[next]);
-      }
-      if (attrs & DF_A_WIDE) {
-        reg_location_[uses[next]].wide = true;
-        reg_location_[uses[next + 1]].wide = true;
-        reg_location_[uses[next + 1]].high_word = true;
-        DCHECK_EQ(SRegToVReg(uses[next])+1,
-        SRegToVReg(uses[next + 1]));
-        next += 2;
-      } else {
-        type_mismatch |= reg_location_[uses[next]].wide;
-        next++;
-      }
-    }
-    if (attrs & DF_UB) {
-      if (attrs & DF_CORE_B) {
-        changed |= SetCore(uses[next]);
-      }
-      if (attrs & DF_REF_B) {
-        changed |= SetRef(uses[next]);
-      }
-      if (attrs & DF_B_WIDE) {
-        reg_location_[uses[next]].wide = true;
-        reg_location_[uses[next + 1]].wide = true;
-        reg_location_[uses[next + 1]].high_word = true;
-        DCHECK_EQ(SRegToVReg(uses[next])+1,
-                             SRegToVReg(uses[next + 1]));
-        next += 2;
-      } else {
-        type_mismatch |= reg_location_[uses[next]].wide;
-        next++;
-      }
-    }
-    if (attrs & DF_UC) {
-      if (attrs & DF_CORE_C) {
-        changed |= SetCore(uses[next]);
-      }
-      if (attrs & DF_REF_C) {
-        changed |= SetRef(uses[next]);
-      }
-      if (attrs & DF_C_WIDE) {
-        reg_location_[uses[next]].wide = true;
-        reg_location_[uses[next + 1]].wide = true;
-        reg_location_[uses[next + 1]].high_word = true;
-        DCHECK_EQ(SRegToVReg(uses[next])+1,
-        SRegToVReg(uses[next + 1]));
-      } else {
-        type_mismatch |= reg_location_[uses[next]].wide;
-      }
-    }
-
-    // Special-case return handling
-    if ((mir->dalvikInsn.opcode == Instruction::RETURN) ||
-        (mir->dalvikInsn.opcode == Instruction::RETURN_WIDE) ||
-        (mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT)) {
-      switch (cu_->shorty[0]) {
-          case 'I':
-            type_mismatch |= reg_location_[uses[0]].wide;
-            changed |= SetCore(uses[0]);
-            break;
-          case 'J':
-            changed |= SetCore(uses[0]);
-            changed |= SetCore(uses[1]);
-            reg_location_[uses[0]].wide = true;
-            reg_location_[uses[1]].wide = true;
-            reg_location_[uses[1]].high_word = true;
-            break;
-          case 'F':
-            type_mismatch |= reg_location_[uses[0]].wide;
-            changed |= SetFp(uses[0]);
-            break;
-          case 'D':
-            changed |= SetFp(uses[0]);
-            changed |= SetFp(uses[1]);
-            reg_location_[uses[0]].wide = true;
-            reg_location_[uses[1]].wide = true;
-            reg_location_[uses[1]].high_word = true;
-            break;
-          case 'L':
-            type_mismatch |= reg_location_[uses[0]].wide;
-            changed |= SetRef(uses[0]);
-            break;
-          default: break;
-      }
-    }
-
-    // Special-case handling for format 35c/3rc invokes
-    Instruction::Code opcode = mir->dalvikInsn.opcode;
-    int flags = MIR::DecodedInstruction::IsPseudoMirOp(opcode) ?
-                  0 : mir->dalvikInsn.FlagsOf();
-    if ((flags & Instruction::kInvoke) &&
-        (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
-      DCHECK_EQ(next, 0);
-      const auto& lowering_info = GetMethodLoweringInfo(mir);
-      const char* shorty = GetShortyFromMethodReference(lowering_info.GetTargetMethod());
-      // Handle result type if floating point
-      if ((shorty[0] == 'F') || (shorty[0] == 'D')) {
-        MIR* move_result_mir = FindMoveResult(bb, mir);
-        // Result might not be used at all, so no move-result
-        if (move_result_mir && (move_result_mir->dalvikInsn.opcode !=
-            Instruction::MOVE_RESULT_OBJECT)) {
-          SSARepresentation* tgt_rep = move_result_mir->ssa_rep;
-          DCHECK(tgt_rep != NULL);
-          tgt_rep->fp_def[0] = true;
-          changed |= SetFp(tgt_rep->defs[0]);
-          if (shorty[0] == 'D') {
-            tgt_rep->fp_def[1] = true;
-            changed |= SetFp(tgt_rep->defs[1]);
-          }
-        }
-      }
-      int num_uses = mir->dalvikInsn.vA;
-      // If this is a non-static invoke, mark implicit "this"
-      if (!IsInstructionInvokeStatic(mir->dalvikInsn.opcode)) {
-        reg_location_[uses[next]].defined = true;
-        reg_location_[uses[next]].ref = true;
-        type_mismatch |= reg_location_[uses[next]].wide;
-        next++;
-      }
-      uint32_t cpos = 1;
-      if (strlen(shorty) > 1) {
-        for (int i = next; i < num_uses;) {
-          DCHECK_LT(cpos, strlen(shorty));
-          switch (shorty[cpos++]) {
-            case 'D':
-              ssa_rep->fp_use[i] = true;
-              ssa_rep->fp_use[i+1] = true;
-              reg_location_[uses[i]].wide = true;
-              reg_location_[uses[i+1]].wide = true;
-              reg_location_[uses[i+1]].high_word = true;
-              DCHECK_EQ(SRegToVReg(uses[i])+1, SRegToVReg(uses[i+1]));
-              i++;
-              break;
-            case 'J':
-              reg_location_[uses[i]].wide = true;
-              reg_location_[uses[i+1]].wide = true;
-              reg_location_[uses[i+1]].high_word = true;
-              DCHECK_EQ(SRegToVReg(uses[i])+1, SRegToVReg(uses[i+1]));
-              changed |= SetCore(uses[i]);
-              i++;
-              break;
-            case 'F':
-              type_mismatch |= reg_location_[uses[i]].wide;
-              ssa_rep->fp_use[i] = true;
-              break;
-            case 'L':
-              type_mismatch |= reg_location_[uses[i]].wide;
-              changed |= SetRef(uses[i]);
-              break;
-            default:
-              type_mismatch |= reg_location_[uses[i]].wide;
-              changed |= SetCore(uses[i]);
-              break;
-          }
-          i++;
-        }
-      }
-    }
-
-    for (int i = 0; ssa_rep->fp_use && i< ssa_rep->num_uses; i++) {
-      if (ssa_rep->fp_use[i]) {
-        changed |= SetFp(uses[i]);
-      }
-    }
-    for (int i = 0; ssa_rep->fp_def && i< ssa_rep->num_defs; i++) {
-      if (ssa_rep->fp_def[i]) {
-        changed |= SetFp(defs[i]);
-      }
-    }
-    // Special-case handling for moves & Phi
-    if (attrs & (DF_IS_MOVE | DF_NULL_TRANSFER_N)) {
-      /*
-       * If any of our inputs or outputs is defined, set all.
-       * Some ugliness related to Phi nodes and wide values.
-       * The Phi set will include all low words or all high
-       * words, so we have to treat them specially.
-       */
-      bool is_phi = (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi);
-      RegLocation rl_temp = reg_location_[defs[0]];
-      bool defined_fp = rl_temp.defined && rl_temp.fp;
-      bool defined_core = rl_temp.defined && rl_temp.core;
-      bool defined_ref = rl_temp.defined && rl_temp.ref;
-      bool is_wide = rl_temp.wide || ((attrs & DF_A_WIDE) != 0);
-      bool is_high = is_phi && rl_temp.wide && rl_temp.high_word;
-      for (int i = 0; i < ssa_rep->num_uses; i++) {
-        rl_temp = reg_location_[uses[i]];
-        defined_fp |= rl_temp.defined && rl_temp.fp;
-        defined_core |= rl_temp.defined && rl_temp.core;
-        defined_ref |= rl_temp.defined && rl_temp.ref;
-        is_wide |= rl_temp.wide;
-        is_high |= is_phi && rl_temp.wide && rl_temp.high_word;
-      }
-      /*
-       * We don't normally expect to see a Dalvik register definition used both as a
-       * floating point and core value, though technically it could happen with constants.
-       * Until we have proper typing, detect this situation and disable register promotion
-       * (which relies on the distinction between core a fp usages).
-       */
-      if ((defined_fp && (defined_core | defined_ref)) &&
-          ((cu_->disable_opt & (1 << kPromoteRegs)) == 0)) {
-        LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
-                     << " op at block " << bb->id
-                     << " has both fp and core/ref uses for same def.";
-        cu_->disable_opt |= (1 << kPromoteRegs);
-      }
-      changed |= SetFp(defs[0], defined_fp);
-      changed |= SetCore(defs[0], defined_core);
-      changed |= SetRef(defs[0], defined_ref);
-      changed |= SetWide(defs[0], is_wide);
-      changed |= SetHigh(defs[0], is_high);
-      if (attrs & DF_A_WIDE) {
-        changed |= SetWide(defs[1]);
-        changed |= SetHigh(defs[1]);
-      }
-
-      bool has_ins = (GetNumOfInVRs() > 0);
-
-      for (int i = 0; i < ssa_rep->num_uses; i++) {
-        if (has_ins && IsInVReg(uses[i])) {
-          // NB: The SSA name for the first def of an in-reg will be the same as
-          // the reg's actual name.
-          if (!reg_location_[uses[i]].fp && defined_fp) {
-            // If we were about to infer that this first def of an in-reg is a float
-            // when it wasn't previously (because float/int is set during SSA initialization),
-            // do not allow this to happen.
-            continue;
-          }
-        }
-        changed |= SetFp(uses[i], defined_fp);
-        changed |= SetCore(uses[i], defined_core);
-        changed |= SetRef(uses[i], defined_ref);
-        changed |= SetWide(uses[i], is_wide);
-        changed |= SetHigh(uses[i], is_high);
-      }
-      if (attrs & DF_A_WIDE) {
-        DCHECK_EQ(ssa_rep->num_uses, 2);
-        changed |= SetWide(uses[1]);
-        changed |= SetHigh(uses[1]);
-      }
-    }
-  }
-  if (type_mismatch) {
-    LOG(WARNING) << "Deprecated dex type mismatch, interpreting "
-                 << PrettyMethod(cu_->method_idx, *cu_->dex_file);
-    LOG(INFO) << "@ 0x" << std::hex << mir->offset;
-    SetPuntToInterpreter(true);
-  }
-  return changed;
-}
-
 static const char* storage_name[] = {" Frame ", "PhysReg", " CompilerTemp "};
 
 void MIRGraph::DumpRegLocTable(RegLocation* table, int count) {
@@ -456,56 +62,6 @@
   loc[method_sreg].defined = true;
 
   reg_location_ = loc;
-
-  int num_regs = GetNumOfCodeVRs();
-
-  /* Add types of incoming arguments based on signature */
-  int num_ins = GetNumOfInVRs();
-  if (num_ins > 0) {
-    int s_reg = num_regs - num_ins;
-    if ((cu_->access_flags & kAccStatic) == 0) {
-      // For non-static, skip past "this"
-      reg_location_[s_reg].defined = true;
-      reg_location_[s_reg].ref = true;
-      s_reg++;
-    }
-    const char* shorty = cu_->shorty;
-    int shorty_len = strlen(shorty);
-    for (int i = 1; i < shorty_len; i++) {
-      switch (shorty[i]) {
-        case 'D':
-          reg_location_[s_reg].wide = true;
-          reg_location_[s_reg+1].high_word = true;
-          reg_location_[s_reg+1].fp = true;
-          DCHECK_EQ(SRegToVReg(s_reg)+1, SRegToVReg(s_reg+1));
-          reg_location_[s_reg].fp = true;
-          reg_location_[s_reg].defined = true;
-          s_reg++;
-          break;
-        case 'J':
-          reg_location_[s_reg].wide = true;
-          reg_location_[s_reg+1].high_word = true;
-          DCHECK_EQ(SRegToVReg(s_reg)+1, SRegToVReg(s_reg+1));
-          reg_location_[s_reg].core = true;
-          reg_location_[s_reg].defined = true;
-          s_reg++;
-          break;
-        case 'F':
-          reg_location_[s_reg].fp = true;
-          reg_location_[s_reg].defined = true;
-          break;
-        case 'L':
-          reg_location_[s_reg].ref = true;
-          reg_location_[s_reg].defined = true;
-          break;
-        default:
-          reg_location_[s_reg].core = true;
-          reg_location_[s_reg].defined = true;
-          break;
-        }
-        s_reg++;
-      }
-  }
 }
 
 /*
diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h
index 03ae489..3983006 100644
--- a/compiler/driver/dex_compilation_unit.h
+++ b/compiler/driver/dex_compilation_unit.h
@@ -21,6 +21,7 @@
 
 #include "dex_file.h"
 #include "jni.h"
+#include "base/arena_object.h"
 
 namespace art {
 namespace mirror {
@@ -31,7 +32,7 @@
 struct CompilationUnit;
 class VerifiedMethod;
 
-class DexCompilationUnit {
+class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
  public:
   explicit DexCompilationUnit(CompilationUnit* cu);
 
diff --git a/compiler/utils/test_dex_file_builder.h b/compiler/utils/test_dex_file_builder.h
new file mode 100644
index 0000000..ab039aa
--- /dev/null
+++ b/compiler/utils/test_dex_file_builder.h
@@ -0,0 +1,372 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_TEST_DEX_FILE_BUILDER_H_
+#define ART_COMPILER_UTILS_TEST_DEX_FILE_BUILDER_H_
+
+#include <algorithm>
+#include <cstring>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "dex_file.h"
+#include "utils.h"
+
+namespace art {
+
+class TestDexFileBuilder {
+ public:
+  TestDexFileBuilder()
+      : strings_(), types_(), fields_(), protos_(), methods_(), dex_file_data_() {
+  }
+
+  void AddString(const std::string& str) {
+    CHECK(dex_file_data_.empty());
+    auto it = strings_.emplace(str, IdxAndDataOffset()).first;
+    CHECK_LT(it->first.length(), 128u);  // Don't allow multi-byte length in uleb128.
+  }
+
+  void AddType(const std::string& descriptor) {
+    CHECK(dex_file_data_.empty());
+    AddString(descriptor);
+    types_.emplace(descriptor, 0u);
+  }
+
+  void AddField(const std::string& class_descriptor, const std::string& type,
+                const std::string& name) {
+    CHECK(dex_file_data_.empty());
+    AddType(class_descriptor);
+    AddType(type);
+    AddString(name);
+    FieldKey key = { class_descriptor, type, name };
+    fields_.emplace(key, 0u);
+  }
+
+  void AddMethod(const std::string& class_descriptor, const std::string& signature,
+                 const std::string& name) {
+    CHECK(dex_file_data_.empty());
+    AddType(class_descriptor);
+    AddString(name);
+
+    ProtoKey proto_key = CreateProtoKey(signature);
+    AddString(proto_key.shorty);
+    AddType(proto_key.return_type);
+    for (const auto& arg_type : proto_key.args) {
+      AddType(arg_type);
+    }
+    auto it = protos_.emplace(proto_key, IdxAndDataOffset()).first;
+    const ProtoKey* proto = &it->first;  // Valid as long as the element remains in protos_.
+
+    MethodKey method_key = {
+        class_descriptor, name, proto
+    };
+    methods_.emplace(method_key, 0u);
+  }
+
+  // NOTE: The builder holds the actual data, so it must live as long as the dex file.
+  std::unique_ptr<const DexFile> Build(const std::string& dex_location) {
+    CHECK(dex_file_data_.empty());
+    union {
+      uint8_t data[sizeof(DexFile::Header)];
+      uint64_t force_alignment;
+    } header_data;
+    std::memset(header_data.data, 0, sizeof(header_data.data));
+    DexFile::Header* header = reinterpret_cast<DexFile::Header*>(&header_data.data);
+    std::copy_n(DexFile::kDexMagic, 4u, header->magic_);
+    std::copy_n(DexFile::kDexMagicVersion, 4u, header->magic_ + 4u);
+    header->header_size_ = sizeof(DexFile::Header);  // Note: sizeof(header) would be the pointer size.
+    header->endian_tag_ = DexFile::kDexEndianConstant;
+    header->link_size_ = 0u;  // Unused.
+    header->link_off_ = 0u;  // Unused.
+    header->map_off_ = 0u;  // Unused.
+
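+    // First pass: assign indexes to all ids and lay out the data section (string
+    // data and proto type lists). The id sections follow the header in the order
+    // string_ids, type_ids, proto_ids, field_ids, method_ids.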
+    uint32_t data_section_size = 0u;
+
+    uint32_t string_ids_offset = sizeof(DexFile::Header);
+    uint32_t string_idx = 0u;
+    for (auto& entry : strings_) {
+      entry.second.idx = string_idx;
+      string_idx += 1u;
+      entry.second.data_offset = data_section_size;
+      data_section_size += entry.first.length() + 1u /* length */ + 1u /* null-terminator */;
+    }
+    header->string_ids_size_ = strings_.size();
+    header->string_ids_off_ = strings_.empty() ? 0u : string_ids_offset;
+
+    uint32_t type_ids_offset = string_ids_offset + strings_.size() * sizeof(DexFile::StringId);
+    uint32_t type_idx = 0u;
+    for (auto& entry : types_) {
+      entry.second = type_idx;
+      type_idx += 1u;
+    }
+    header->type_ids_size_ = types_.size();
+    header->type_ids_off_ = types_.empty() ? 0u : type_ids_offset;
+
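+    // Type lists for non-empty argument lists live in the data section, 4-byte aligned.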
+    uint32_t proto_ids_offset = type_ids_offset + types_.size() * sizeof(DexFile::TypeId);
+    uint32_t proto_idx = 0u;
+    for (auto& entry : protos_) {
+      entry.second.idx = proto_idx;
+      proto_idx += 1u;
+      size_t num_args = entry.first.args.size();
+      if (num_args != 0u) {
+        entry.second.data_offset = RoundUp(data_section_size, 4u);
+        data_section_size = entry.second.data_offset + 4u + num_args * sizeof(DexFile::TypeItem);
+      } else {
+        entry.second.data_offset = 0u;
+      }
+    }
+    header->proto_ids_size_ = protos_.size();
+    header->proto_ids_off_ = protos_.empty() ? 0u : proto_ids_offset;
+
+    uint32_t field_ids_offset = proto_ids_offset + protos_.size() * sizeof(DexFile::ProtoId);
+    uint32_t field_idx = 0u;
+    for (auto& entry : fields_) {
+      entry.second = field_idx;
+      field_idx += 1u;
+    }
+    header->field_ids_size_ = fields_.size();
+    header->field_ids_off_ = fields_.empty() ? 0u : field_ids_offset;
+
+    uint32_t method_ids_offset = field_ids_offset + fields_.size() * sizeof(DexFile::FieldId);
+    uint32_t method_idx = 0u;
+    for (auto& entry : methods_) {
+      entry.second = method_idx;
+      method_idx += 1u;
+    }
+    header->method_ids_size_ = methods_.size();
+    header->method_ids_off_ = methods_.empty() ? 0u : method_ids_offset;
+
+    // No class defs.
+    header->class_defs_size_ = 0u;
+    header->class_defs_off_ = 0u;
+
+    uint32_t data_section_offset = method_ids_offset + methods_.size() * sizeof(DexFile::MethodId);
+    header->data_size_ = data_section_size;
+    header->data_off_ = (data_section_size != 0u) ? data_section_offset : 0u;
+
+    uint32_t total_size = data_section_offset + data_section_size;
+
+    dex_file_data_.resize(total_size);
+    std::memcpy(&dex_file_data_[0], header_data.data, sizeof(DexFile::Header));
+
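+    // Second pass: write the id sections and the data they reference.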
+    for (const auto& entry : strings_) {
+      CHECK_LT(entry.first.size(), 128u);
+      uint32_t raw_offset = data_section_offset + entry.second.data_offset;
+      dex_file_data_[raw_offset] = static_cast<uint8_t>(entry.first.size());
+      std::memcpy(&dex_file_data_[raw_offset + 1], entry.first.c_str(), entry.first.size() + 1);
+      Write32(string_ids_offset + entry.second.idx * sizeof(DexFile::StringId), raw_offset);
+    }
+
+    for (const auto& entry : types_) {
+      Write32(type_ids_offset + entry.second * sizeof(DexFile::TypeId), GetStringIdx(entry.first));
+    }
+
+    for (const auto& entry : protos_) {
+      size_t num_args = entry.first.args.size();
+      uint32_t type_list_offset =
+          (num_args != 0u) ? data_section_offset + entry.second.data_offset : 0u;
+      uint32_t raw_offset = proto_ids_offset + entry.second.idx * sizeof(DexFile::ProtoId);
+      Write32(raw_offset + 0u, GetStringIdx(entry.first.shorty));
+      Write16(raw_offset + 4u, GetTypeIdx(entry.first.return_type));
+      Write32(raw_offset + 8u, type_list_offset);
+      if (num_args != 0u) {
+        CHECK_NE(entry.second.data_offset, 0u);
+        Write32(type_list_offset, num_args);
+        for (size_t i = 0; i != num_args; ++i) {
+          Write16(type_list_offset + 4u + i * sizeof(DexFile::TypeItem),
+                  GetTypeIdx(entry.first.args[i]));
+        }
+      }
+    }
+
+    for (const auto& entry : fields_) {
+      uint32_t raw_offset = field_ids_offset + entry.second * sizeof(DexFile::FieldId);
+      Write16(raw_offset + 0u, GetTypeIdx(entry.first.class_descriptor));
+      Write16(raw_offset + 2u, GetTypeIdx(entry.first.type));
+      Write32(raw_offset + 4u, GetStringIdx(entry.first.name));
+    }
+
+    for (const auto& entry : methods_) {
+      uint32_t raw_offset = method_ids_offset + entry.second * sizeof(DexFile::MethodId);
+      Write16(raw_offset + 0u, GetTypeIdx(entry.first.class_descriptor));
+      auto it = protos_.find(*entry.first.proto);
+      CHECK(it != protos_.end());
+      Write16(raw_offset + 2u, it->second.idx);
+      Write32(raw_offset + 4u, GetStringIdx(entry.first.name));
+    }
+
+    // Leave checksum and signature as zeros.
+
+    std::string error_msg;
+    std::unique_ptr<const DexFile> dex_file(DexFile::Open(
+        &dex_file_data_[0], dex_file_data_.size(), dex_location, 0u, nullptr, &error_msg));
+    CHECK(dex_file != nullptr) << error_msg;
+    return dex_file;  // Implicit move; an explicit std::move() here would inhibit copy elision.
+  }
+
+  uint32_t GetStringIdx(const std::string& str) {
+    auto it = strings_.find(str);
+    CHECK(it != strings_.end());
+    return it->second.idx;
+  }
+
+  uint32_t GetTypeIdx(const std::string& type) {
+    auto it = types_.find(type);
+    CHECK(it != types_.end());
+    return it->second;
+  }
+
+  uint32_t GetFieldIdx(const std::string& class_descriptor, const std::string& type,
+                       const std::string& name) {
+    FieldKey key = { class_descriptor, type, name };
+    auto it = fields_.find(key);
+    CHECK(it != fields_.end());
+    return it->second;
+  }
+
+  uint32_t GetMethodIdx(const std::string& class_descriptor, const std::string& signature,
+                        const std::string& name) {
+    ProtoKey proto_key = CreateProtoKey(signature);
+    MethodKey method_key = { class_descriptor, name, &proto_key };
+    auto it = methods_.find(method_key);
+    CHECK(it != methods_.end());
+    return it->second;
+  }
+
+ private:
+  struct IdxAndDataOffset {
+    uint32_t idx;
+    uint32_t data_offset;
+  };
+
+  struct FieldKey {
+    const std::string class_descriptor;
+    const std::string type;
+    const std::string name;
+  };
+  struct FieldKeyComparator {
+    bool operator()(const FieldKey& lhs, const FieldKey& rhs) const {
+      if (lhs.class_descriptor != rhs.class_descriptor) {
+        return lhs.class_descriptor < rhs.class_descriptor;
+      }
+      if (lhs.name != rhs.name) {
+        return lhs.name < rhs.name;
+      }
+      return lhs.type < rhs.type;
+    }
+  };
+
+  struct ProtoKey {
+    std::string shorty;
+    std::string return_type;
+    std::vector<std::string> args;
+  };
+  struct ProtoKeyComparator {
+    bool operator()(const ProtoKey& lhs, const ProtoKey& rhs) const {
+      if (lhs.return_type != rhs.return_type) {
+        return lhs.return_type < rhs.return_type;
+      }
+      size_t min_args = std::min(lhs.args.size(), rhs.args.size());
+      for (size_t i = 0; i != min_args; ++i) {
+        if (lhs.args[i] != rhs.args[i]) {
+          return lhs.args[i] < rhs.args[i];
+        }
+      }
+      return lhs.args.size() < rhs.args.size();
+    }
+  };
+
+  struct MethodKey {
+    std::string class_descriptor;
+    std::string name;
+    const ProtoKey* proto;
+  };
+  struct MethodKeyComparator {
+    bool operator()(const MethodKey& lhs, const MethodKey& rhs) const {
+      if (lhs.class_descriptor != rhs.class_descriptor) {
+        return lhs.class_descriptor < rhs.class_descriptor;
+      }
+      if (lhs.name != rhs.name) {
+        return lhs.name < rhs.name;
+      }
+      return ProtoKeyComparator()(*lhs.proto, *rhs.proto);
+    }
+  };
+
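+  // Parses a method signature such as "(I[JLjava/lang/Object;)V" into a ProtoKey,
+  // deriving the shorty; array and reference argument types both map to 'L'.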
+  ProtoKey CreateProtoKey(const std::string& signature) {
+    CHECK_EQ(signature[0], '(');
+    const char* args = signature.c_str() + 1;
+    const char* args_end = std::strchr(args, ')');
+    CHECK(args_end != nullptr);
+    const char* return_type = args_end + 1;
+
+    ProtoKey key = {
+        std::string() + ((*return_type == '[') ? 'L' : *return_type),
+        return_type,
+        std::vector<std::string>()
+    };
+    while (args != args_end) {
+      key.shorty += (*args == '[') ? 'L' : *args;
+      const char* arg_start = args;
+      while (*args == '[') {
+        ++args;
+      }
+      if (*args == 'L') {
+        do {
+          ++args;
+          CHECK_NE(args, args_end);
+        } while (*args != ';');
+      }
+      ++args;
+      key.args.emplace_back(arg_start, args);
+    }
+    return key;
+  }
+
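+  // The Write* helpers check that the target bytes are still zero, which catches
+  // overlapping writes caused by bad offset computations.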
+  void Write32(size_t offset, uint32_t value) {
+    CHECK_LE(offset + 4u, dex_file_data_.size());
+    CHECK_EQ(dex_file_data_[offset + 0], 0u);
+    CHECK_EQ(dex_file_data_[offset + 1], 0u);
+    CHECK_EQ(dex_file_data_[offset + 2], 0u);
+    CHECK_EQ(dex_file_data_[offset + 3], 0u);
+    dex_file_data_[offset + 0] = static_cast<uint8_t>(value >> 0);
+    dex_file_data_[offset + 1] = static_cast<uint8_t>(value >> 8);
+    dex_file_data_[offset + 2] = static_cast<uint8_t>(value >> 16);
+    dex_file_data_[offset + 3] = static_cast<uint8_t>(value >> 24);
+  }
+
+  void Write16(size_t offset, uint32_t value) {
+    CHECK_LE(value, 0xffffu);
+    CHECK_LE(offset + 2u, dex_file_data_.size());
+    CHECK_EQ(dex_file_data_[offset + 0], 0u);
+    CHECK_EQ(dex_file_data_[offset + 1], 0u);
+    dex_file_data_[offset + 0] = static_cast<uint8_t>(value >> 0);
+    dex_file_data_[offset + 1] = static_cast<uint8_t>(value >> 8);
+  }
+
+  std::map<std::string, IdxAndDataOffset> strings_;
+  std::map<std::string, uint32_t> types_;
+  std::map<FieldKey, uint32_t, FieldKeyComparator> fields_;
+  std::map<ProtoKey, IdxAndDataOffset, ProtoKeyComparator> protos_;
+  std::map<MethodKey, uint32_t, MethodKeyComparator> methods_;
+
+  std::vector<uint8_t> dex_file_data_;
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_TEST_DEX_FILE_BUILDER_H_
diff --git a/compiler/utils/test_dex_file_builder_test.cc b/compiler/utils/test_dex_file_builder_test.cc
new file mode 100644
index 0000000..ee6e35d
--- /dev/null
+++ b/compiler/utils/test_dex_file_builder_test.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "test_dex_file_builder.h"
+
+#include "dex_file-inl.h"
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(TestDexFileBuilderTest, SimpleTest) {
+  TestDexFileBuilder builder;
+  builder.AddString("Arbitrary string");
+  builder.AddType("Ljava/lang/Class;");
+  builder.AddField("LTestClass;", "[I", "intField");
+  builder.AddMethod("LTestClass;", "()I", "foo");
+  builder.AddMethod("LTestClass;", "(Ljava/lang/Object;[Ljava/lang/Object;)LTestClass;", "bar");
+  const char* dex_location = "TestDexFileBuilder/SimpleTest";
+  std::unique_ptr<const DexFile> dex_file(builder.Build(dex_location));
+  ASSERT_TRUE(dex_file != nullptr);
+  EXPECT_STREQ(dex_location, dex_file->GetLocation().c_str());
+
+  static const char* const expected_strings[] = {
+      "Arbitrary string",
+      "I",
+      "LLL",  // shorty
+      "LTestClass;",
+      "Ljava/lang/Class;",
+      "Ljava/lang/Object;",
+      "[I",
+      "[Ljava/lang/Object;",
+      "bar",
+      "foo",
+      "intField",
+  };
+  ASSERT_EQ(arraysize(expected_strings), dex_file->NumStringIds());
+  for (size_t i = 0; i != arraysize(expected_strings); ++i) {
+    EXPECT_STREQ(expected_strings[i], dex_file->GetStringData(dex_file->GetStringId(i))) << i;
+  }
+
+  static const char* const expected_types[] = {
+      "I",
+      "LTestClass;",
+      "Ljava/lang/Class;",
+      "Ljava/lang/Object;",
+      "[I",
+      "[Ljava/lang/Object;",
+  };
+  ASSERT_EQ(arraysize(expected_types), dex_file->NumTypeIds());
+  for (size_t i = 0; i != arraysize(expected_types); ++i) {
+    EXPECT_STREQ(expected_types[i], dex_file->GetTypeDescriptor(dex_file->GetTypeId(i))) << i;
+  }
+
+  ASSERT_EQ(1u, dex_file->NumFieldIds());
+  EXPECT_STREQ("[I TestClass.intField", PrettyField(0u, *dex_file).c_str());
+
+  ASSERT_EQ(2u, dex_file->NumProtoIds());
+  ASSERT_EQ(2u, dex_file->NumMethodIds());
+  EXPECT_STREQ("TestClass TestClass.bar(java.lang.Object, java.lang.Object[])",
+               PrettyMethod(0u, *dex_file).c_str());
+  EXPECT_STREQ("int TestClass.foo()",
+               PrettyMethod(1u, *dex_file).c_str());
+
+  EXPECT_EQ(0u, builder.GetStringIdx("Arbitrary string"));
+  EXPECT_EQ(2u, builder.GetTypeIdx("Ljava/lang/Class;"));
+  EXPECT_EQ(0u, builder.GetFieldIdx("LTestClass;", "[I", "intField"));
+  EXPECT_EQ(1u, builder.GetMethodIdx("LTestClass;", "()I", "foo"));
+  EXPECT_EQ(0u, builder.GetMethodIdx("LTestClass;", "(Ljava/lang/Object;[Ljava/lang/Object;)LTestClass;", "bar"));
+}
+
+}  // namespace art