Compiler: Spring cleaning

Significant restructuring of the Quick compiler to break out the
common frontend more cleanly.  Additional C++'ification.

The goal is to move from the monolithic structure of the old
JIT towards a more modular model in which components - in
particular the compiler backend - can be replaced.  This CL
focuses on moving MIR-related data from the CompilationUnit
struct into a new MIRGraph class.  The next CL will isolate all
LIR-related data and code down into the Quick backend.

This change will happen in multiple steps, and may look uglier
before it starts looking better.
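
For orientation, a minimal sketch of the ownership model this is
moving toward (mir_graph and cg are the fields actually added to
CompilationUnit in this CL; everything else abbreviated):

   // Sketch only: CompilationUnit keeps cross-cutting state and owns
   // the pieces; MIRGraph owns the MIR-level data; Codegen is the
   // replaceable backend.
   struct CompilationUnit {
     // ... driver, dex file and method fields common to all backends ...
     UniquePtr<MIRGraph> mir_graph;   // MIR container.
     UniquePtr<Codegen> cg;           // Target-specific codegen.
   };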

Among the changes:

   o Moved all MIR-related fields from CompilationUnit to the new
     MIRGraph class.

   o Moved the register promotion code into the Quick backend.

   o Deleted the GBC to LIR conversion code.

   o Replaced the old C-style function pointer dataflow analysis
     dispatcher with a basic block iterator class (sketched below).

   o Renamed some files to make the name more consistent with what
     the code actually does.

   o Added the foundation for future inlining support.

   o Stripped out the remains of the old fingerprinting mechanism.
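
The iterator mentioned above replaces a dispatch loop built on C
function pointers.  A rough sketch of its shape (the real interface
lives in the new dataflow_iterator.cc/.h and may differ):

   class DataflowIterator {
    public:
     explicit DataflowIterator(MIRGraph* mir_graph)
         : mir_graph_(mir_graph), idx_(0) {}
     BasicBlock* Next();    // NULL once the traversal is exhausted.
    private:
     MIRGraph* mir_graph_;
     int idx_;
   };

   // Typical use by an analysis pass:
   //   DataflowIterator iter(cu->mir_graph.get());
   //   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
   //     change |= DoDataflowStep(cu, bb);
   //   }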

Change-Id: I6c30facc642f8084b1c7b2075cf7014de387aa56
diff --git a/build/Android.libart-compiler.mk b/build/Android.libart-compiler.mk
index 8f0ccf8..389cc08 100644
--- a/build/Android.libart-compiler.mk
+++ b/build/Android.libart-compiler.mk
@@ -15,7 +15,7 @@
 #
 
 LIBART_COMPILER_SRC_FILES := \
-	src/compiler/dex/bb_opt.cc \
+	src/compiler/dex/local_value_numbering.cc \
 	src/compiler/dex/quick/arm/assemble_arm.cc \
 	src/compiler/dex/quick/arm/call_arm.cc \
 	src/compiler/dex/quick/arm/fp_arm.cc \
@@ -43,9 +43,12 @@
 	src/compiler/dex/quick/x86/utility_x86.cc \
 	src/compiler/dex/portable/mir_to_gbc.cc \
 	src/compiler/dex/compiler_utility.cc \
-	src/compiler/dex/dataflow.cc \
+	src/compiler/dex/mir_dataflow.cc \
+	src/compiler/dex/dataflow_iterator.cc \
+	src/compiler/dex/mir_optimization.cc \
 	src/compiler/dex/frontend.cc \
-	src/compiler/dex/ralloc.cc \
+	src/compiler/dex/mir_graph.cc \
+	src/compiler/dex/vreg_analysis.cc \
 	src/compiler/dex/ssa_transformation.cc \
 	src/compiler/dex/write_elf.cc \
 	src/compiler/driver/dex_compilation_unit.cc \
diff --git a/src/compiler/dex/compiler_internals.h b/src/compiler/dex/compiler_internals.h
index 8a44bb8..71792e0 100644
--- a/src/compiler/dex/compiler_internals.h
+++ b/src/compiler/dex/compiler_internals.h
@@ -26,13 +26,13 @@
 #include "class_linker.h"
 #include "quick/codegen.h"
 #include "compiler/driver/compiler_driver.h"
+#include "mir_graph.h"
 #include "compiler_ir.h"
 #include "compiler_utility.h"
 #include "frontend.h"
 #include "gc/card_table.h"
 #include "mirror/dex_cache.h"
 #include "monitor.h"
-#include "ralloc.h"
 #include "thread.h"
 #include "utils.h"
 
diff --git a/src/compiler/dex/compiler_ir.h b/src/compiler/dex/compiler_ir.h
index f8cdd34..caa23d7 100644
--- a/src/compiler/dex/compiler_ir.h
+++ b/src/compiler/dex/compiler_ir.h
@@ -33,6 +33,7 @@
 
 namespace art {
 
+// TODO: replace these macros
 #define SLOW_FIELD_PATH (cu->enable_debug & (1 << kDebugSlowFieldPath))
 #define SLOW_INVOKE_PATH (cu->enable_debug & (1 << kDebugSlowInvokePath))
 #define SLOW_STRING_PATH (cu->enable_debug & (1 << kDebugSlowStringPath))
@@ -62,7 +63,7 @@
   RegLocationType location:3;
   unsigned wide:1;
   unsigned defined:1;   // Do we know the type?
-  unsigned is_const:1;  // Constant, value in cu->constant_values[].
+  unsigned is_const:1;  // Constant, value in mir_graph->constant_values[].
   unsigned fp:1;        // Floating point?
   unsigned core:1;      // Non-floating point?
   unsigned ref:1;       // Something GC cares about.
@@ -215,6 +216,7 @@
   DecodedInstruction dalvikInsn;
   unsigned int width;
   unsigned int offset;
+  int m_unit_index;               // Index of the method from which this MIR was included.
   MIR* prev;
   MIR* next;
   SSARepresentation* ssa_rep;
@@ -227,7 +229,23 @@
   } meta;
 };
 
-struct BasicBlockDataFlow;
+struct BasicBlockDataFlow {
+  ArenaBitVector* use_v;
+  ArenaBitVector* def_v;
+  ArenaBitVector* live_in_v;
+  ArenaBitVector* phi_v;
+  int* vreg_to_ssa_map;
+  ArenaBitVector* ending_null_check_v;
+};
+
+struct SSARepresentation {
+  int num_uses;
+  int* uses;
+  bool* fp_use;
+  int num_defs;
+  int* defs;
+  bool* fp_def;
+};
 
 struct BasicBlock {
   int id;
@@ -273,14 +291,14 @@
 struct RegisterPool;
 struct ArenaMemBlock;
 struct Memstats;
+class MIRGraph;
 class Codegen;
 
 #define NOTVISITED (-1)
 
 struct CompilationUnit {
   CompilationUnit()
-    : num_blocks(0),
-      compiler_driver(NULL),
+    : compiler_driver(NULL),
       class_linker(NULL),
       dex_file(NULL),
       class_loader(NULL),
@@ -290,48 +308,14 @@
       access_flags(0),
       invoke_type(kDirect),
       shorty(NULL),
-      first_lir_insn(NULL),
-      last_lir_insn(NULL),
-      literal_list(NULL),
-      method_literal_list(NULL),
-      code_literal_list(NULL),
       disable_opt(0),
       enable_debug(0),
-      data_offset(0),
-      total_size(0),
-      assembler_status(kSuccess),
-      assembler_retries(0),
       verbose(false),
-      has_loop(false),
-      has_invoke(false),
-      qd_mode(false),
-      reg_pool(NULL),
+      gen_bitcode(false),
+      disable_dataflow(false),
       instruction_set(kNone),
-      num_ssa_regs(0),
-      ssa_base_vregs(NULL),
-      ssa_subscripts(NULL),
-      ssa_strings(NULL),
-      vreg_to_ssa_map(NULL),
-      ssa_last_defs(NULL),
-      is_constant_v(NULL),
-      must_flush_constant_v(NULL),
-      constant_values(NULL),
-      reg_location(NULL),
-      promotion_map(NULL),
-      method_sreg(0),
-      num_reachable_blocks(0),
       num_dalvik_registers(0),
-      entry_block(NULL),
-      exit_block(NULL),
-      cur_block(NULL),
-      i_dom_list(NULL),
-      try_block_addr(NULL),
-      def_block_matrix(NULL),
-      temp_block_v(NULL),
-      temp_dalvik_register_v(NULL),
-      temp_ssa_register_v(NULL),
-      temp_ssa_block_id_v(NULL),
-      block_label_list(NULL),
+      insns(NULL),
       num_ins(0),
       num_outs(0),
       num_regs(0),
@@ -339,22 +323,18 @@
       num_fp_spills(0),
       num_compiler_temps(0),
       frame_size(0),
-      core_spill_mask(0U),
-      fp_spill_mask(0U),
-      attrs(0U),
-      current_dalvik_offset(0),
-      insns(NULL),
-      insns_size(0U),
-      disable_dataflow(false),
-      def_count(0),
+      core_spill_mask(0),
+      fp_spill_mask(0),
+      attributes(0),
       compiler_flip_match(false),
       arena_head(NULL),
       current_arena(NULL),
       num_arena_blocks(0),
       mstats(NULL),
       checkstats(NULL),
-      gen_bitcode(false),
-      llvm_compilation_unit(NULL),
+      mir_graph(NULL),
+      cg(NULL),
+      live_sreg(0),
       llvm_info(NULL),
       context(NULL),
       module(NULL),
@@ -365,14 +345,24 @@
       entry_bb(NULL),
       entryTarget_bb(NULL),
       temp_name(0),
-#ifndef NDEBUG
-      live_sreg(0),
-#endif
-      opcode_count(NULL),
-      cg(NULL) {}
-
-  int num_blocks;
-  GrowableList block_list;
+      first_lir_insn(NULL),
+      last_lir_insn(NULL),
+      literal_list(NULL),
+      method_literal_list(NULL),
+      code_literal_list(NULL),
+      data_offset(0),
+      total_size(0),
+      reg_pool(NULL),
+      reg_location(NULL),
+      promotion_map(NULL),
+      method_sreg(0),
+      block_label_list(NULL),
+      current_dalvik_offset(0)
+  {}
+  /*
+   * Fields needed/generated by the common frontend and generally used
+   * throughout the compiler.
+   */
   CompilerDriver* compiler_driver;
   ClassLinker* class_linker;           // Linker to resolve fields and methods.
   const DexFile* dex_file;             // DexFile containing the method being compiled.
@@ -383,91 +373,21 @@
   uint32_t access_flags;               // compiling method's access flags.
   InvokeType invoke_type;              // compiling method's invocation type.
   const char* shorty;                  // compiling method's shorty.
-  LIR* first_lir_insn;
-  LIR* last_lir_insn;
-  LIR* literal_list;                   // Constants.
-  LIR* method_literal_list;            // Method literals requiring patching.
-  LIR* code_literal_list;              // Code literals requiring patching.
   uint32_t disable_opt;                // opt_control_vector flags.
   uint32_t enable_debug;               // debugControlVector flags.
-  int data_offset;                     // starting offset of literal pool.
-  int total_size;                      // header + code size.
-  AssemblerStatus assembler_status;    // Success or fix and retry.
-  int assembler_retries;
   std::vector<uint8_t> code_buffer;
-  /*
-   * Holds mapping from native PC to dex PC for safepoints where we may deoptimize.
-   * Native PC is on the return address of the safepointed operation.  Dex PC is for
-   * the instruction being executed at the safepoint.
-   */
-  std::vector<uint32_t> pc2dexMappingTable;
-  /*
-   * Holds mapping from Dex PC to native PC for catch entry points.  Native PC and Dex PC
-   * immediately preceed the instruction.
-   */
-  std::vector<uint32_t> dex2pcMappingTable;
+  bool verbose;
   std::vector<uint32_t> combined_mapping_table;
   std::vector<uint32_t> core_vmap_table;
   std::vector<uint32_t> fp_vmap_table;
   std::vector<uint8_t> native_gc_map;
-  std::vector<BasicBlock*> extended_basic_blocks;
-  bool verbose;
-  bool has_loop;                       // Contains a loop.
-  bool has_invoke;                     // Contains an invoke instruction.
-  bool qd_mode;                        // Compile for code size/compile time.
-  RegisterPool* reg_pool;
+  bool gen_bitcode;
+  bool disable_dataflow;               // Skip dataflow analysis if possible
   InstructionSet instruction_set;
-  // Number of total regs used in the whole cu after SSA transformation .
-  int num_ssa_regs;
-  // Map SSA reg i to the base virtual register/subscript.
-  GrowableList* ssa_base_vregs;
-  GrowableList* ssa_subscripts;
-  GrowableList* ssa_strings;
 
-  // Map original Dalvik virtual reg i to the current SSA name.
-  int* vreg_to_ssa_map;            // length == method->registers_size
-  int* ssa_last_defs;              // length == method->registers_size
-  ArenaBitVector* is_constant_v;   // length == num_ssa_reg
-  ArenaBitVector* must_flush_constant_v;   // length == num_ssa_reg
-  int* constant_values;            // length == num_ssa_reg
-
-  // Use counts of ssa names.
-  GrowableList use_counts;         // Weighted by nesting depth
-  GrowableList raw_use_counts;     // Not weighted
-
-  // Optimization support.
-  GrowableList loop_headers;
-
-  // Map SSA names to location.
-  RegLocation* reg_location;
-
-  // Keep track of Dalvik v_reg to physical register mappings.
-  PromotionMap* promotion_map;
-
-  // SSA name for Method*.
-  int method_sreg;
-  RegLocation method_loc;          // Describes location of method*.
-
-  int num_reachable_blocks;
+  // CLEANUP: much of this info is available elsewhere.  Go to the original source?
   int num_dalvik_registers;        // method->registers_size.
-  BasicBlock* entry_block;
-  BasicBlock* exit_block;
-  BasicBlock* cur_block;
-  GrowableList dfs_order;
-  GrowableList dfs_post_order;
-  GrowableList dom_post_order_traversal;
-  GrowableList throw_launchpads;
-  GrowableList suspend_launchpads;
-  GrowableList intrinsic_launchpads;
-  GrowableList compiler_temps;
-  int* i_dom_list;
-  ArenaBitVector* try_block_addr;
-  ArenaBitVector** def_block_matrix;    // num_dalvik_register x num_blocks.
-  ArenaBitVector* temp_block_v;
-  ArenaBitVector* temp_dalvik_register_v;
-  ArenaBitVector* temp_ssa_register_v;  // num_ssa_regs.
-  int* temp_ssa_block_id_v;             // working storage for Phi labels.
-  LIR* block_label_list;
+  const uint16_t* insns;
   /*
    * Frame layout details.
    * NOTE: for debug support it will be necessary to add a structure
@@ -483,27 +403,7 @@
   int frame_size;
   unsigned int core_spill_mask;
   unsigned int fp_spill_mask;
-  unsigned int attrs;
-  /*
-   * TODO: The code generation utilities don't have a built-in
-   * mechanism to propagate the original Dalvik opcode address to the
-   * associated generated instructions.  For the trace compiler, this wasn't
-   * necessary because the interpreter handled all throws and debugging
-   * requests.  For now we'll handle this by placing the Dalvik offset
-   * in the CompilationUnit struct before codegen for each instruction.
-   * The low-level LIR creation utilites will pull it from here.  Rework this.
-   */
-  int current_dalvik_offset;
-  GrowableList switch_tables;
-  GrowableList fill_array_data;
-  const uint16_t* insns;
-  uint32_t insns_size;
-  bool disable_dataflow; // Skip dataflow analysis if possible
-  SafeMap<unsigned int, BasicBlock*> block_map; // FindBlock lookup cache.
-  SafeMap<unsigned int, unsigned int> block_id_map; // Block collapse lookup cache.
-  SafeMap<unsigned int, LIR*> boundary_map; // boundary lookup cache.
-  int def_count;         // Used to estimate number of SSA names.
-
+  unsigned int attributes;
   // If non-empty, apply optimizer/debug flags only to matching methods.
   std::string compiler_method_match;
   // Flips sense of compiler_method_match - apply flags if doesn't match.
@@ -513,10 +413,21 @@
   int num_arena_blocks;
   Memstats* mstats;
   Checkstats* checkstats;
-  bool gen_bitcode;
+  UniquePtr<MIRGraph> mir_graph;   // MIR container.
+  UniquePtr<Codegen> cg;           // Target-specific codegen.
+  /*
+   * Sanity checking for the register temp tracking.  The same ssa
+   * name should never be associated with more than one temp register
+   * per instruction compilation.
+   */
+  int live_sreg;
 
   // Fields for Portable
   llvm::LlvmCompilationUnit* llvm_compilation_unit;
+  /*
+   * Fields needed by GBC creation.  Candidates for moving to a new
+   * MIR-to-LLVM-bitcode class.
+   */
   LLVMInfo* llvm_info;
   std::string symbol;
   ::llvm::LLVMContext* context;
@@ -531,109 +442,62 @@
   std::string bitcode_filename;
   GrowableList llvm_values;
   int32_t temp_name;
-  SafeMap< ::llvm::BasicBlock*, LIR*> block_to_label_map; // llvm bb -> LIR label.
-  SafeMap<int32_t, ::llvm::BasicBlock*> id_to_block_map; // block id -> llvm bb.
-  SafeMap< ::llvm::Value*, RegLocation> loc_map; // llvm Value to loc rec.
-  std::set< ::llvm::BasicBlock*> llvm_blocks;
-#ifndef NDEBUG
+  SafeMap<int32_t, ::llvm::BasicBlock*> id_to_block_map;  // block id -> llvm bb.
+
+  /*
+   * Fields needed by the Quick backend.  Candidates for moving to a new
+   * QuickBackend class.
+   */
+  LIR* first_lir_insn;
+  LIR* last_lir_insn;
+  LIR* literal_list;                   // Constants.
+  LIR* method_literal_list;            // Method literals requiring patching.
+  LIR* code_literal_list;              // Code literals requiring patching.
+  int data_offset;                     // starting offset of literal pool.
+  int total_size;                      // header + code size.
+  RegisterPool* reg_pool;
+  // Map SSA names to location.
+  RegLocation* reg_location;
+  // Keep track of Dalvik v_reg to physical register mappings.
+  PromotionMap* promotion_map;
+  // SSA name for Method*.
+  int method_sreg;
+  RegLocation method_loc;          // Describes location of method*.
+  GrowableList throw_launchpads;
+  GrowableList suspend_launchpads;
+  GrowableList intrinsic_launchpads;
+  GrowableList compiler_temps;
+  LIR* block_label_list;
   /*
-   * Sanity checking for the register temp tracking.  The same ssa
-   * name should never be associated with one temp register per
-   * instruction compilation.
+   * TODO: The code generation utilities don't have a built-in
+   * mechanism to propagate the original Dalvik opcode address to the
+   * associated generated instructions.  For the trace compiler, this wasn't
+   * necessary because the interpreter handled all throws and debugging
+   * requests.  For now we'll handle this by placing the Dalvik offset
+   * in the CompilationUnit struct before codegen for each instruction.
+   * The low-level LIR creation utilities will pull it from here.  Rework this.
    */
-  int live_sreg;
-#endif
-  std::set<uint32_t> catches;
-  int* opcode_count;    // Count Dalvik opcodes for tuning.
-  UniquePtr<Codegen> cg;
+  int current_dalvik_offset;
+  GrowableList switch_tables;
+  GrowableList fill_array_data;
+  SafeMap<unsigned int, unsigned int> block_id_map; // Block collapse lookup cache.
+  SafeMap<unsigned int, LIR*> boundary_map; // boundary lookup cache.
+  /*
+   * Holds mapping from native PC to dex PC for safepoints where we may deoptimize.
+   * Native PC is on the return address of the safepointed operation.  Dex PC is for
+   * the instruction being executed at the safepoint.
+   */
+  std::vector<uint32_t> pc2dexMappingTable;
+  /*
+   * Holds mapping from Dex PC to native PC for catch entry points.  Native PC and Dex PC
+   * immediately precede the instruction.
+   */
+  std::vector<uint32_t> dex2pcMappingTable;
 };
 
-struct SwitchTable {
-  int offset;
-  const uint16_t* table;      // Original dex table.
-  int vaddr;                  // Dalvik offset of switch opcode.
-  LIR* anchor;                // Reference instruction for relative offsets.
-  LIR** targets;              // Array of case targets.
-};
+// TODO: move this
+int SRegToVReg(const CompilationUnit* cu, int ssa_reg);
 
-struct FillArrayData {
-  int offset;
-  const uint16_t* table;      // Original dex table.
-  int size;
-  int vaddr;                  // Dalvik offset of FILL_ARRAY_DATA opcode.
-};
-
-#define MAX_PATTERN_LEN 5
-
-struct CodePattern {
-  const Instruction::Code opcodes[MAX_PATTERN_LEN];
-  const SpecialCaseHandler handler_code;
-};
-
-static const CodePattern special_patterns[] = {
-  {{Instruction::RETURN_VOID}, kNullMethod},
-  {{Instruction::CONST, Instruction::RETURN}, kConstFunction},
-  {{Instruction::CONST_4, Instruction::RETURN}, kConstFunction},
-  {{Instruction::CONST_4, Instruction::RETURN_OBJECT}, kConstFunction},
-  {{Instruction::CONST_16, Instruction::RETURN}, kConstFunction},
-  {{Instruction::IGET, Instruction:: RETURN}, kIGet},
-  {{Instruction::IGET_BOOLEAN, Instruction::RETURN}, kIGetBoolean},
-  {{Instruction::IGET_OBJECT, Instruction::RETURN_OBJECT}, kIGetObject},
-  {{Instruction::IGET_BYTE, Instruction::RETURN}, kIGetByte},
-  {{Instruction::IGET_CHAR, Instruction::RETURN}, kIGetChar},
-  {{Instruction::IGET_SHORT, Instruction::RETURN}, kIGetShort},
-  {{Instruction::IGET_WIDE, Instruction::RETURN_WIDE}, kIGetWide},
-  {{Instruction::IPUT, Instruction::RETURN_VOID}, kIPut},
-  {{Instruction::IPUT_BOOLEAN, Instruction::RETURN_VOID}, kIPutBoolean},
-  {{Instruction::IPUT_OBJECT, Instruction::RETURN_VOID}, kIPutObject},
-  {{Instruction::IPUT_BYTE, Instruction::RETURN_VOID}, kIPutByte},
-  {{Instruction::IPUT_CHAR, Instruction::RETURN_VOID}, kIPutChar},
-  {{Instruction::IPUT_SHORT, Instruction::RETURN_VOID}, kIPutShort},
-  {{Instruction::IPUT_WIDE, Instruction::RETURN_VOID}, kIPutWide},
-  {{Instruction::RETURN}, kIdentity},
-  {{Instruction::RETURN_OBJECT}, kIdentity},
-  {{Instruction::RETURN_WIDE}, kIdentity},
-};
-
-static inline bool IsConst(const CompilationUnit* cu, int32_t s_reg)
-{
-  return (IsBitSet(cu->is_constant_v, s_reg));
-}
-
-static inline bool IsConst(const CompilationUnit* cu, RegLocation loc)
-{
-  return (IsConst(cu, loc.orig_sreg));
-}
-
-static inline int32_t ConstantValue(const CompilationUnit* cu, RegLocation loc)
-{
-  DCHECK(IsConst(cu, loc));
-  return cu->constant_values[loc.orig_sreg];
-}
-
-static inline int32_t ConstantValue(const CompilationUnit* cu, int32_t s_reg)
-{
-  DCHECK(IsConst(cu, s_reg));
-  return cu->constant_values[s_reg];
-}
-
-static inline int64_t ConstantValueWide(const CompilationUnit* cu, RegLocation loc)
-{
-  DCHECK(IsConst(cu, loc));
-  return (static_cast<int64_t>(cu->constant_values[loc.orig_sreg + 1]) << 32) |
-      Low32Bits(static_cast<int64_t>(cu->constant_values[loc.orig_sreg]));
-}
-
-static inline bool IsConstantNullRef(const CompilationUnit* cu, RegLocation loc)
-{
-  return loc.ref && loc.is_const && (ConstantValue(cu, loc) == 0);
-}
-
-static inline bool MustFlushConstant(const CompilationUnit* cu, RegLocation loc)
-{
-  DCHECK(IsConst(cu, loc));
-  return IsBitSet(cu->must_flush_constant_v, loc.orig_sreg);
-}
 
 }  // namespace art
 
diff --git a/src/compiler/dex/compiler_utility.cc b/src/compiler/dex/compiler_utility.cc
index 9dc90ce..82a156d 100644
--- a/src/compiler/dex/compiler_utility.cc
+++ b/src/compiler/dex/compiler_utility.cc
@@ -203,6 +203,18 @@
 #endif
 }
 
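+/* Ensure a growable list has capacity for at least new_length elements */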
+void ReallocGrowableList(CompilationUnit* cu, GrowableList* g_list, size_t new_length)
+{
+  if (new_length > g_list->num_allocated) {
+    uintptr_t *new_array =
+        static_cast<uintptr_t*>(NewMem(cu, sizeof(uintptr_t) * new_length, true,
+                                       kAllocGrowableList));
+    memcpy(new_array, g_list->elem_list, sizeof(uintptr_t) * g_list->num_allocated);
+    g_list->num_allocated = new_length;
+    g_list->elem_list = new_array;
+  }
+}
+
 /* Expand the capacity of a growable list */
 static void ExpandGrowableList(CompilationUnit* cu, GrowableList* g_list)
 {
@@ -288,7 +300,7 @@
   if (total > (10 * 1024 * 1024)) {
     LOG(INFO) << "MEMUSAGE: " << total << " : "
         << PrettyMethod(cu->method_idx, *cu->dex_file);
-    LOG(INFO) << "insns_size: " << cu->insns_size;
+    LOG(INFO) << "insns_size: " << cu->code_item->insns_size_in_code_units_;
     if (cu->disable_dataflow) {
         LOG(INFO) << " ** Dataflow disabled ** ";
     }
@@ -330,10 +342,8 @@
 
   LOG(INFO) << "Compiling " << PrettyMethod(cu->method_idx, *cu->dex_file);
   LOG(INFO) << cu->insns << " insns";
-  LOG(INFO) << cu->num_blocks << " blocks in total";
-  GrowableListIterator iterator;
-
-  GrowableListIteratorInit(&cu->block_list, &iterator);
+  LOG(INFO) << cu->mir_graph->GetNumBlocks() << " blocks in total";
+  GrowableListIterator iterator = cu->mir_graph->GetBasicBlockIterator();
 
   while (true) {
     bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iterator));
@@ -797,4 +807,171 @@
   new_lir->next->prev = new_lir;
 }
 
+/* Turn method name into a legal Linux file name */
+void ReplaceSpecialChars(std::string& str)
+{
+  static const struct { const char before; const char after; } match[] =
+      {{'/','-'}, {';','#'}, {' ','#'}, {'$','+'},
+       {'(','@'}, {')','@'}, {'<','='}, {'>','='}};
+  for (unsigned int i = 0; i < sizeof(match)/sizeof(match[0]); i++) {
+    std::replace(str.begin(), str.end(), match[i].before, match[i].after);
+  }
+}
+
+std::string GetSSAName(const CompilationUnit* cu, int ssa_reg)
+{
+  return StringPrintf("v%d_%d", cu->mir_graph->SRegToVReg(ssa_reg),
+                      cu->mir_graph->GetSSASubscript(ssa_reg));
+}
+
+// Similar to GetSSAName, but if the ssa name represents an immediate, show that as well.
+std::string GetSSANameWithConst(const CompilationUnit* cu, int ssa_reg, bool singles_only)
+{
+  if (cu->reg_location == NULL) {
+    // Pre-SSA - just use the standard name
+    return GetSSAName(cu, ssa_reg);
+  }
+  if (cu->mir_graph->IsConst(cu->reg_location[ssa_reg])) {
+    if (!singles_only && cu->reg_location[ssa_reg].wide) {
+      return StringPrintf("v%d_%d#0x%llx", cu->mir_graph->SRegToVReg(ssa_reg),
+                          cu->mir_graph->GetSSASubscript(ssa_reg),
+                          cu->mir_graph->ConstantValueWide(cu->reg_location[ssa_reg]));
+    } else {
+      return StringPrintf("v%d_%d#0x%x", cu->mir_graph->SRegToVReg(ssa_reg),
+                          cu->mir_graph->GetSSASubscript(ssa_reg),
+                          cu->mir_graph->ConstantValue(cu->reg_location[ssa_reg]));
+    }
+  } else {
+    return StringPrintf("v%d_%d", cu->mir_graph->SRegToVReg(ssa_reg),
+                        cu->mir_graph->GetSSASubscript(ssa_reg));
+  }
+}
+
+char* GetDalvikDisassembly(CompilationUnit* cu, const MIR* mir)
+{
+  DecodedInstruction insn = mir->dalvikInsn;
+  std::string str;
+  int flags = 0;
+  int opcode = insn.opcode;
+  char* ret;
+  bool nop = false;
+  SSARepresentation* ssa_rep = mir->ssa_rep;
+  Instruction::Format dalvik_format = Instruction::k10x;  // Default to no-operand format
+  int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
+  int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;
+
+  // Handle special cases.
+  if ((opcode == kMirOpCheck) || (opcode == kMirOpCheckPart2)) {
+    str.append(extended_mir_op_names[opcode - kMirOpFirst]);
+    str.append(": ");
+    // Recover the original Dex instruction
+    insn = mir->meta.throw_insn->dalvikInsn;
+    ssa_rep = mir->meta.throw_insn->ssa_rep;
+    defs = ssa_rep->num_defs;
+    uses = ssa_rep->num_uses;
+    opcode = insn.opcode;
+  } else if (opcode == kMirOpNop) {
+    str.append("[");
+    insn.opcode = mir->meta.original_opcode;
+    opcode = mir->meta.original_opcode;
+    nop = true;
+  }
+
+  if (opcode >= kMirOpFirst) {
+    str.append(extended_mir_op_names[opcode - kMirOpFirst]);
+  } else {
+    dalvik_format = Instruction::FormatOf(insn.opcode);
+    flags = Instruction::FlagsOf(insn.opcode);
+    str.append(Instruction::Name(insn.opcode));
+  }
+
+  if (opcode == kMirOpPhi) {
+    int* incoming = reinterpret_cast<int*>(insn.vB);
+    str.append(StringPrintf(" %s = (%s",
+               GetSSANameWithConst(cu, ssa_rep->defs[0], true).c_str(),
+               GetSSANameWithConst(cu, ssa_rep->uses[0], true).c_str()));
+    str.append(StringPrintf(":%d",incoming[0]));
+    int i;
+    for (i = 1; i < uses; i++) {
+      str.append(StringPrintf(", %s:%d",
+                              GetSSANameWithConst(cu, ssa_rep->uses[i], true).c_str(),
+                              incoming[i]));
+    }
+    str.append(")");
+  } else if ((flags & Instruction::kBranch) != 0) {
+    // For branches, decode the instructions to print out the branch targets.
+    int offset = 0;
+    switch (dalvik_format) {
+      case Instruction::k21t:
+        str.append(StringPrintf(" %s,", GetSSANameWithConst(cu, ssa_rep->uses[0], false).c_str()));
+        offset = insn.vB;
+        break;
+      case Instruction::k22t:
+        str.append(StringPrintf(" %s, %s,", GetSSANameWithConst(cu, ssa_rep->uses[0], false).c_str(),
+                   GetSSANameWithConst(cu, ssa_rep->uses[1], false).c_str()));
+        offset = insn.vC;
+        break;
+      case Instruction::k10t:
+      case Instruction::k20t:
+      case Instruction::k30t:
+        offset = insn.vA;
+        break;
+      default:
+        LOG(FATAL) << "Unexpected branch format " << dalvik_format << " from " << insn.opcode;
+    }
+    str.append(StringPrintf(" 0x%x (%c%x)", mir->offset + offset,
+                            offset > 0 ? '+' : '-', offset > 0 ? offset : -offset));
+  } else {
+    // For invoke-style formats, treat wide regs as a pair of singles
+    bool show_singles = ((dalvik_format == Instruction::k35c) ||
+                         (dalvik_format == Instruction::k3rc));
+    if (defs != 0) {
+      str.append(StringPrintf(" %s", GetSSANameWithConst(cu, ssa_rep->defs[0], false).c_str()));
+      if (uses != 0) {
+        str.append(", ");
+      }
+    }
+    for (int i = 0; i < uses; i++) {
+      str.append(
+          StringPrintf(" %s", GetSSANameWithConst(cu, ssa_rep->uses[i], show_singles).c_str()));
+      if (!show_singles && (cu->reg_location != NULL) && cu->reg_location[i].wide) {
+        // For the listing, skip the high sreg.
+        i++;
+      }
+      if (i != (uses - 1)) {
+        str.append(",");
+      }
+    }
+    switch (dalvik_format) {
+      case Instruction::k11n: // Add one immediate from vB
+      case Instruction::k21s:
+      case Instruction::k31i:
+      case Instruction::k21h:
+        str.append(StringPrintf(", #%d", insn.vB));
+        break;
+      case Instruction::k51l: // Add one wide immediate
+        str.append(StringPrintf(", #%lld", insn.vB_wide));
+        break;
+      case Instruction::k21c: // One register, one string/type/method index
+      case Instruction::k31c:
+        str.append(StringPrintf(", index #%d", insn.vB));
+        break;
+      case Instruction::k22c: // Two registers, one string/type/method index
+        str.append(StringPrintf(", index #%d", insn.vC));
+        break;
+      case Instruction::k22s: // Add one immediate from vC
+      case Instruction::k22b:
+        str.append(StringPrintf(", #%d", insn.vC));
+        break;
+      default:
+        ; // Nothing left to print
+    }
+  }
+  if (nop) {
+    str.append("]--optimized away");
+  }
+  int length = str.length() + 1;
+  ret = static_cast<char*>(NewMem(cu, length, false, kAllocDFInfo));
+  strncpy(ret, str.c_str(), length);
+  return ret;
+}
 }  // namespace art
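
For concreteness, the listing strings produced by GetDalvikDisassembly
look roughly like the following (exact opcode spellings come from
extended_mir_op_names[] and Instruction::Name(), so these are
illustrative only):

   Phi v3_2 = (v3_0:1, v3_1:2)          <- kMirOpPhi: def = (use:pred, ...)
   IF_EQZ v2_1, 0x2f (+11)              <- branch with resolved target
   [MOVE v1_1, v0_1]--optimized away    <- kMirOpNop wrapping the original op
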
diff --git a/src/compiler/dex/compiler_utility.h b/src/compiler/dex/compiler_utility.h
index 582d32d..4201f5d 100644
--- a/src/compiler/dex/compiler_utility.h
+++ b/src/compiler/dex/compiler_utility.h
@@ -153,6 +153,7 @@
 
 void CompilerInitGrowableList(CompilationUnit* cu, GrowableList* g_list,
                               size_t init_length, oat_list_kind kind = kListMisc);
+void ReallocGrowableList(CompilationUnit* cu, GrowableList* g_list, size_t new_length);
 void InsertGrowableList(CompilationUnit* cu, GrowableList* g_list, uintptr_t elem);
 void DeleteGrowableList(GrowableList* g_list, uintptr_t elem);
 void GrowableListIteratorInit(GrowableList* g_list, GrowableListIterator* iterator);
@@ -191,6 +192,10 @@
 void AppendLIR(CompilationUnit *cu, LIR* lir);
 void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
 void InsertLIRAfter(LIR* current_lir, LIR* new_lir);
+void ReplaceSpecialChars(std::string& str);
+char* GetDalvikDisassembly(CompilationUnit* cu, const MIR* mir);
+std::string GetSSAName(const CompilationUnit* cu, int ssa_reg);
+std::string GetSSANameWithConst(const CompilationUnit* cu, int ssa_reg, bool singles_only);
 
 }  // namespace art
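
The new declarations above are used roughly as follows (the dump-file
naming is an assumption for illustration, not something this CL
establishes):

   std::string fname(PrettyMethod(cu->method_idx, *cu->dex_file));
   ReplaceSpecialChars(fname);  // e.g. "Foo.run:()V" becomes a legal file name
   fname = StringPrintf("/tmp/%s.dot", fname.c_str());  // hypothetical path

   // Per-MIR listing line for debugging dumps:
   LOG(INFO) << GetDalvikDisassembly(cu, mir);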
 
diff --git a/src/compiler/dex/dataflow.cc b/src/compiler/dex/dataflow.cc
deleted file mode 100644
index f0f177a..0000000
--- a/src/compiler/dex/dataflow.cc
+++ /dev/null
@@ -1,2571 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "compiler_internals.h"
-#include "dataflow.h"
-#include "bb_opt.h"
-
-namespace art {
-
-/*
- * Main table containing data flow attributes for each bytecode. The
- * first kNumPackedOpcodes entries are for Dalvik bytecode
- * instructions, where extended opcode at the MIR level are appended
- * afterwards.
- *
- * TODO - many optimization flags are incomplete - they will only limit the
- * scope of optimizations but will not cause mis-optimizations.
- */
-const int oat_data_flow_attributes[kMirOpLast] = {
-  // 00 NOP
-  DF_NOP,
-
-  // 01 MOVE vA, vB
-  DF_DA | DF_UB | DF_IS_MOVE,
-
-  // 02 MOVE_FROM16 vAA, vBBBB
-  DF_DA | DF_UB | DF_IS_MOVE,
-
-  // 03 MOVE_16 vAAAA, vBBBB
-  DF_DA | DF_UB | DF_IS_MOVE,
-
-  // 04 MOVE_WIDE vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
-
-  // 05 MOVE_WIDE_FROM16 vAA, vBBBB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
-
-  // 06 MOVE_WIDE_16 vAAAA, vBBBB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
-
-  // 07 MOVE_OBJECT vA, vB
-  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
-
-  // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
-  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
-
-  // 09 MOVE_OBJECT_16 vAAAA, vBBBB
-  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
-
-  // 0A MOVE_RESULT vAA
-  DF_DA,
-
-  // 0B MOVE_RESULT_WIDE vAA
-  DF_DA | DF_A_WIDE,
-
-  // 0C MOVE_RESULT_OBJECT vAA
-  DF_DA | DF_REF_A,
-
-  // 0D MOVE_EXCEPTION vAA
-  DF_DA | DF_REF_A,
-
-  // 0E RETURN_VOID
-  DF_NOP,
-
-  // 0F RETURN vAA
-  DF_UA,
-
-  // 10 RETURN_WIDE vAA
-  DF_UA | DF_A_WIDE,
-
-  // 11 RETURN_OBJECT vAA
-  DF_UA | DF_REF_A,
-
-  // 12 CONST_4 vA, #+B
-  DF_DA | DF_SETS_CONST,
-
-  // 13 CONST_16 vAA, #+BBBB
-  DF_DA | DF_SETS_CONST,
-
-  // 14 CONST vAA, #+BBBBBBBB
-  DF_DA | DF_SETS_CONST,
-
-  // 15 CONST_HIGH16 VAA, #+BBBB0000
-  DF_DA | DF_SETS_CONST,
-
-  // 16 CONST_WIDE_16 vAA, #+BBBB
-  DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
-  // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
-  DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
-  // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
-  DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
-  // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
-  DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
-  // 1A CONST_STRING vAA, string@BBBB
-  DF_DA | DF_REF_A,
-
-  // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
-  DF_DA | DF_REF_A,
-
-  // 1C CONST_CLASS vAA, type@BBBB
-  DF_DA | DF_REF_A,
-
-  // 1D MONITOR_ENTER vAA
-  DF_UA | DF_NULL_CHK_0 | DF_REF_A,
-
-  // 1E MONITOR_EXIT vAA
-  DF_UA | DF_NULL_CHK_0 | DF_REF_A,
-
-  // 1F CHK_CAST vAA, type@BBBB
-  DF_UA | DF_REF_A | DF_UMS,
-
-  // 20 INSTANCE_OF vA, vB, type@CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_REF_B | DF_UMS,
-
-  // 21 ARRAY_LENGTH vA, vB
-  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_A | DF_REF_B,
-
-  // 22 NEW_INSTANCE vAA, type@BBBB
-  DF_DA | DF_NON_NULL_DST | DF_REF_A | DF_UMS,
-
-  // 23 NEW_ARRAY vA, vB, type@CCCC
-  DF_DA | DF_UB | DF_NON_NULL_DST | DF_REF_A | DF_CORE_B | DF_UMS,
-
-  // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
-  DF_FORMAT_35C | DF_NON_NULL_RET | DF_UMS,
-
-  // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
-  DF_FORMAT_3RC | DF_NON_NULL_RET | DF_UMS,
-
-  // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
-  DF_UA | DF_REF_A | DF_UMS,
-
-  // 27 THROW vAA
-  DF_UA | DF_REF_A | DF_UMS,
-
-  // 28 GOTO
-  DF_NOP,
-
-  // 29 GOTO_16
-  DF_NOP,
-
-  // 2A GOTO_32
-  DF_NOP,
-
-  // 2B PACKED_SWITCH vAA, +BBBBBBBB
-  DF_UA,
-
-  // 2C SPARSE_SWITCH vAA, +BBBBBBBB
-  DF_UA,
-
-  // 2D CMPL_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
-
-  // 2E CMPG_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
-
-  // 2F CMPL_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
-
-  // 30 CMPG_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
-
-  // 31 CMP_LONG vAA, vBB, vCC
-  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 32 IF_EQ vA, vB, +CCCC
-  DF_UA | DF_UB,
-
-  // 33 IF_NE vA, vB, +CCCC
-  DF_UA | DF_UB,
-
-  // 34 IF_LT vA, vB, +CCCC
-  DF_UA | DF_UB,
-
-  // 35 IF_GE vA, vB, +CCCC
-  DF_UA | DF_UB,
-
-  // 36 IF_GT vA, vB, +CCCC
-  DF_UA | DF_UB,
-
-  // 37 IF_LE vA, vB, +CCCC
-  DF_UA | DF_UB,
-
-  // 38 IF_EQZ vAA, +BBBB
-  DF_UA,
-
-  // 39 IF_NEZ vAA, +BBBB
-  DF_UA,
-
-  // 3A IF_LTZ vAA, +BBBB
-  DF_UA,
-
-  // 3B IF_GEZ vAA, +BBBB
-  DF_UA,
-
-  // 3C IF_GTZ vAA, +BBBB
-  DF_UA,
-
-  // 3D IF_LEZ vAA, +BBBB
-  DF_UA,
-
-  // 3E UNUSED_3E
-  DF_NOP,
-
-  // 3F UNUSED_3F
-  DF_NOP,
-
-  // 40 UNUSED_40
-  DF_NOP,
-
-  // 41 UNUSED_41
-  DF_NOP,
-
-  // 42 UNUSED_42
-  DF_NOP,
-
-  // 43 UNUSED_43
-  DF_NOP,
-
-  // 44 AGET vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
-
-  // 45 AGET_WIDE vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
-
-  // 46 AGET_OBJECT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_A | DF_REF_B | DF_CORE_C,
-
-  // 47 AGET_BOOLEAN vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
-
-  // 48 AGET_BYTE vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
-
-  // 49 AGET_CHAR vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
-
-  // 4A AGET_SHORT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
-
-  // 4B APUT vAA, vBB, vCC
-  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
-
-  // 4C APUT_WIDE vAA, vBB, vCC
-  DF_UA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_2 | DF_RANGE_CHK_3 | DF_REF_B | DF_CORE_C,
-
-  // 4D APUT_OBJECT vAA, vBB, vCC
-  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_A | DF_REF_B | DF_CORE_C,
-
-  // 4E APUT_BOOLEAN vAA, vBB, vCC
-  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
-
-  // 4F APUT_BYTE vAA, vBB, vCC
-  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
-
-  // 50 APUT_CHAR vAA, vBB, vCC
-  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
-
-  // 51 APUT_SHORT vAA, vBB, vCC
-  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
-
-  // 52 IGET vA, vB, field@CCCC
-  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
-
-  // 53 IGET_WIDE vA, vB, field@CCCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
-
-  // 54 IGET_OBJECT vA, vB, field@CCCC
-  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_A | DF_REF_B,
-
-  // 55 IGET_BOOLEAN vA, vB, field@CCCC
-  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
-
-  // 56 IGET_BYTE vA, vB, field@CCCC
-  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
-
-  // 57 IGET_CHAR vA, vB, field@CCCC
-  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
-
-  // 58 IGET_SHORT vA, vB, field@CCCC
-  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
-
-  // 59 IPUT vA, vB, field@CCCC
-  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
-
-  // 5A IPUT_WIDE vA, vB, field@CCCC
-  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2 | DF_REF_B,
-
-  // 5B IPUT_OBJECT vA, vB, field@CCCC
-  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_A | DF_REF_B,
-
-  // 5C IPUT_BOOLEAN vA, vB, field@CCCC
-  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
-
-  // 5D IPUT_BYTE vA, vB, field@CCCC
-  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
-
-  // 5E IPUT_CHAR vA, vB, field@CCCC
-  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
-
-  // 5F IPUT_SHORT vA, vB, field@CCCC
-  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
-
-  // 60 SGET vAA, field@BBBB
-  DF_DA | DF_UMS,
-
-  // 61 SGET_WIDE vAA, field@BBBB
-  DF_DA | DF_A_WIDE | DF_UMS,
-
-  // 62 SGET_OBJECT vAA, field@BBBB
-  DF_DA | DF_REF_A | DF_UMS,
-
-  // 63 SGET_BOOLEAN vAA, field@BBBB
-  DF_DA | DF_UMS,
-
-  // 64 SGET_BYTE vAA, field@BBBB
-  DF_DA | DF_UMS,
-
-  // 65 SGET_CHAR vAA, field@BBBB
-  DF_DA | DF_UMS,
-
-  // 66 SGET_SHORT vAA, field@BBBB
-  DF_DA | DF_UMS,
-
-  // 67 SPUT vAA, field@BBBB
-  DF_UA | DF_UMS,
-
-  // 68 SPUT_WIDE vAA, field@BBBB
-  DF_UA | DF_A_WIDE | DF_UMS,
-
-  // 69 SPUT_OBJECT vAA, field@BBBB
-  DF_UA | DF_REF_A | DF_UMS,
-
-  // 6A SPUT_BOOLEAN vAA, field@BBBB
-  DF_UA | DF_UMS,
-
-  // 6B SPUT_BYTE vAA, field@BBBB
-  DF_UA | DF_UMS,
-
-  // 6C SPUT_CHAR vAA, field@BBBB
-  DF_UA | DF_UMS,
-
-  // 6D SPUT_SHORT vAA, field@BBBB
-  DF_UA | DF_UMS,
-
-  // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
-  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
-  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
-  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
-  DF_FORMAT_35C | DF_UMS,
-
-  // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
-  DF_FORMAT_35C | DF_UMS,
-
-  // 73 UNUSED_73
-  DF_NOP,
-
-  // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
-  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
-  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
-  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
-  DF_FORMAT_3RC | DF_UMS,
-
-  // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
-  DF_FORMAT_3RC | DF_UMS,
-
-  // 79 UNUSED_79
-  DF_NOP,
-
-  // 7A UNUSED_7A
-  DF_NOP,
-
-  // 7B NEG_INT vA, vB
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // 7C NOT_INT vA, vB
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // 7D NEG_LONG vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // 7E NOT_LONG vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // 7F NEG_FLOAT vA, vB
-  DF_DA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // 80 NEG_DOUBLE vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // 81 INT_TO_LONG vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // 82 INT_TO_FLOAT vA, vB
-  DF_DA | DF_UB | DF_FP_A | DF_CORE_B,
-
-  // 83 INT_TO_DOUBLE vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_CORE_B,
-
-  // 84 LONG_TO_INT vA, vB
-  DF_DA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // 85 LONG_TO_FLOAT vA, vB
-  DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
-
-  // 86 LONG_TO_DOUBLE vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
-
-  // 87 FLOAT_TO_INT vA, vB
-  DF_DA | DF_UB | DF_FP_B | DF_CORE_A,
-
-  // 88 FLOAT_TO_LONG vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_FP_B | DF_CORE_A,
-
-  // 89 FLOAT_TO_DOUBLE vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_FP_B,
-
-  // 8A DOUBLE_TO_INT vA, vB
-  DF_DA | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
-
-  // 8B DOUBLE_TO_LONG vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
-
-  // 8C DOUBLE_TO_FLOAT vA, vB
-  DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // 8D INT_TO_BYTE vA, vB
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // 8E INT_TO_CHAR vA, vB
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // 8F INT_TO_SHORT vA, vB
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // 90 ADD_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 91 SUB_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 92 MUL_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 93 DIV_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 94 REM_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 95 AND_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 96 OR_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 97 XOR_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 98 SHL_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 99 SHR_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 9A USHR_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 9B ADD_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 9C SUB_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 9D MUL_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 9E DIV_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 9F REM_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A0 AND_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A1 OR_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A2 XOR_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A3 SHL_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A4 SHR_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A5 USHR_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A6 ADD_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // A7 SUB_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // A8 MUL_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // A9 DIV_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // AA REM_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // AB ADD_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // AC SUB_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // AD MUL_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // AE DIV_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // AF REM_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // B0 ADD_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B1 SUB_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B2 MUL_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B3 DIV_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B4 REM_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B5 AND_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B6 OR_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B7 XOR_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B8 SHL_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B9 SHR_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // BA USHR_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // BB ADD_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // BC SUB_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // BD MUL_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // BE DIV_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // BF REM_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // C0 AND_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // C1 OR_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // C2 XOR_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // C3 SHL_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // C4 SHR_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // C5 USHR_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // C6 ADD_FLOAT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // C7 SUB_FLOAT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // C8 MUL_FLOAT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // C9 DIV_FLOAT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // CA REM_FLOAT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // CB ADD_DOUBLE_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // CC SUB_DOUBLE_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // CD MUL_DOUBLE_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // CE DIV_DOUBLE_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // CF REM_DOUBLE_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // D0 ADD_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D1 RSUB_INT vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D2 MUL_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D3 DIV_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D4 REM_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D5 AND_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D6 OR_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D7 XOR_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D8 ADD_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // DA MUL_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // DB DIV_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // DC REM_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // DD AND_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // DE OR_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // DF XOR_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // E0 SHL_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // E1 SHR_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // E2 USHR_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // E3 IGET_VOLATILE
-  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
-
-  // E4 IPUT_VOLATILE
-  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
-
-  // E5 SGET_VOLATILE
-  DF_DA | DF_UMS,
-
-  // E6 SPUT_VOLATILE
-  DF_UA | DF_UMS,
-
-  // E7 IGET_OBJECT_VOLATILE
-  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_A | DF_REF_B,
-
-  // E8 IGET_WIDE_VOLATILE
-  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
-
-  // E9 IPUT_WIDE_VOLATILE
-  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2 | DF_REF_B,
-
-  // EA SGET_WIDE_VOLATILE
-  DF_DA | DF_A_WIDE | DF_UMS,
-
-  // EB SPUT_WIDE_VOLATILE
-  DF_UA | DF_A_WIDE | DF_UMS,
-
-  // EC BREAKPOINT
-  DF_NOP,
-
-  // ED THROW_VERIFICATION_ERROR
-  DF_NOP | DF_UMS,
-
-  // EE EXECUTE_INLINE
-  DF_FORMAT_35C,
-
-  // EF EXECUTE_INLINE_RANGE
-  DF_FORMAT_3RC,
-
-  // F0 INVOKE_OBJECT_INIT_RANGE
-  DF_NOP | DF_NULL_CHK_0,
-
-  // F1 RETURN_VOID_BARRIER
-  DF_NOP,
-
-  // F2 IGET_QUICK
-  DF_DA | DF_UB | DF_NULL_CHK_0,
-
-  // F3 IGET_WIDE_QUICK
-  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0,
-
-  // F4 IGET_OBJECT_QUICK
-  DF_DA | DF_UB | DF_NULL_CHK_0,
-
-  // F5 IPUT_QUICK
-  DF_UA | DF_UB | DF_NULL_CHK_1,
-
-  // F6 IPUT_WIDE_QUICK
-  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2,
-
-  // F7 IPUT_OBJECT_QUICK
-  DF_UA | DF_UB | DF_NULL_CHK_1,
-
-  // F8 INVOKE_VIRTUAL_QUICK
-  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // F9 INVOKE_VIRTUAL_QUICK_RANGE
-  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // FA INVOKE_SUPER_QUICK
-  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // FB INVOKE_SUPER_QUICK_RANGE
-  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // FC IPUT_OBJECT_VOLATILE
-  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_A | DF_REF_B,
-
-  // FD SGET_OBJECT_VOLATILE
-  DF_DA | DF_REF_A | DF_UMS,
-
-  // FE SPUT_OBJECT_VOLATILE
-  DF_UA | DF_REF_A | DF_UMS,
-
-  // FF UNUSED_FF
-  DF_NOP,
-
-  // Beginning of extended MIR opcodes
-  // 100 MIR_PHI
-  DF_DA | DF_NULL_TRANSFER_N,
-
-  // 101 MIR_COPY
-  DF_DA | DF_UB | DF_IS_MOVE,
-
-  // 102 MIR_FUSED_CMPL_FLOAT
-  DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // 103 MIR_FUSED_CMPG_FLOAT
-  DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // 104 MIR_FUSED_CMPL_DOUBLE
-  DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // 105 MIR_FUSED_CMPG_DOUBLE
-  DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // 106 MIR_FUSED_CMP_LONG
-  DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // 107 MIR_NOP
-  DF_NOP,
-
-  // 108 MIR_NULL_CHECK
-  0,
-
-  // 109 MIR_RANGE_CHECK
-  0,
-
-  // 110 MIR_DIV_ZERO_CHECK
-  0,
-
-  // 111 MIR_CHECK
-  0,
-
-  // 112 MIR_CHECKPART2
-  0,
-
-  // 113 MIR_SELECT
-  DF_DA | DF_UB,
-};
-
-/* Return the base virtual register for a SSA name */
-int SRegToVReg(const CompilationUnit* cu, int ssa_reg)
-{
-  DCHECK_LT(ssa_reg, static_cast<int>(cu->ssa_base_vregs->num_used));
-  return GET_ELEM_N(cu->ssa_base_vregs, int, ssa_reg);
-}
-
-int SRegToSubscript(const CompilationUnit* cu, int ssa_reg)
-{
-  DCHECK(ssa_reg < static_cast<int>(cu->ssa_subscripts->num_used));
-  return GET_ELEM_N(cu->ssa_subscripts, int, ssa_reg);
-}
-
-static int GetSSAUseCount(CompilationUnit* cu, int s_reg)
-{
-  DCHECK(s_reg < static_cast<int>(cu->raw_use_counts.num_used));
-  return cu->raw_use_counts.elem_list[s_reg];
-}
-
-static std::string GetSSAName(const CompilationUnit* cu, int ssa_reg)
-{
-  return StringPrintf("v%d_%d", SRegToVReg(cu, ssa_reg), SRegToSubscript(cu, ssa_reg));
-}
-
-// Similar to GetSSAName, but if ssa name represents an immediate show that as well.
-static std::string GetSSANameWithConst(const CompilationUnit* cu, int ssa_reg, bool singles_only)
-{
-  if (cu->reg_location == NULL) {
-    // Pre-SSA - just use the standard name
-    return GetSSAName(cu, ssa_reg);
-  }
-  if (IsConst(cu, cu->reg_location[ssa_reg])) {
-    if (!singles_only && cu->reg_location[ssa_reg].wide) {
-      return StringPrintf("v%d_%d#0x%llx", SRegToVReg(cu, ssa_reg),
-                          SRegToSubscript(cu, ssa_reg),
-                          ConstantValueWide(cu, cu->reg_location[ssa_reg]));
-    } else {
-      return StringPrintf("v%d_%d#0x%x", SRegToVReg(cu, ssa_reg),
-                          SRegToSubscript(cu, ssa_reg),
-                          ConstantValue(cu, cu->reg_location[ssa_reg]));
-    }
-  } else {
-    return StringPrintf("v%d_%d", SRegToVReg(cu, ssa_reg), SRegToSubscript(cu, ssa_reg));
-  }
-}
-
-
-char* GetDalvikDisassembly(CompilationUnit* cu, const MIR* mir)
-{
-  DecodedInstruction insn = mir->dalvikInsn;
-  std::string str;
-  int flags = 0;
-  int opcode = insn.opcode;
-  char* ret;
-  bool nop = false;
-  SSARepresentation* ssa_rep = mir->ssa_rep;
-  Instruction::Format dalvik_format = Instruction::k10x;  // Default to no-operand format
-  int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
-  int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;
-
-  // Handle special cases.
-  if ((opcode == kMirOpCheck) || (opcode == kMirOpCheckPart2)) {
-    str.append(extended_mir_op_names[opcode - kMirOpFirst]);
-    str.append(": ");
-    // Recover the original Dex instruction
-    insn = mir->meta.throw_insn->dalvikInsn;
-    ssa_rep = mir->meta.throw_insn->ssa_rep;
-    defs = ssa_rep->num_defs;
-    uses = ssa_rep->num_uses;
-    opcode = insn.opcode;
-  } else if (opcode == kMirOpNop) {
-    str.append("[");
-    insn.opcode = mir->meta.original_opcode;
-    opcode = mir->meta.original_opcode;
-    nop = true;
-  }
-
-  if (opcode >= kMirOpFirst) {
-    str.append(extended_mir_op_names[opcode - kMirOpFirst]);
-  } else {
-    dalvik_format = Instruction::FormatOf(insn.opcode);
-    flags = Instruction::FlagsOf(insn.opcode);
-    str.append(Instruction::Name(insn.opcode));
-  }
-
-  if (opcode == kMirOpPhi) {
-    int* incoming = reinterpret_cast<int*>(insn.vB);
-    str.append(StringPrintf(" %s = (%s",
-               GetSSANameWithConst(cu, ssa_rep->defs[0], true).c_str(),
-               GetSSANameWithConst(cu, ssa_rep->uses[0], true).c_str()));
-    str.append(StringPrintf(":%d",incoming[0]));
-    int i;
-    for (i = 1; i < uses; i++) {
-      str.append(StringPrintf(", %s:%d",
-                              GetSSANameWithConst(cu, ssa_rep->uses[i], true).c_str(),
-                              incoming[i]));
-    }
-    str.append(")");
-  } else if (flags & Instruction::kBranch) {
-    // For branches, decode the instructions to print out the branch targets.
-    int offset = 0;
-    switch (dalvik_format) {
-      case Instruction::k21t:
-        str.append(StringPrintf(" %s,", GetSSANameWithConst(cu, ssa_rep->uses[0], false).c_str()));
-        offset = insn.vB;
-        break;
-      case Instruction::k22t:
-        str.append(StringPrintf(" %s, %s,", GetSSANameWithConst(cu, ssa_rep->uses[0], false).c_str(),
-                   GetSSANameWithConst(cu, ssa_rep->uses[1], false).c_str()));
-        offset = insn.vC;
-        break;
-      case Instruction::k10t:
-      case Instruction::k20t:
-      case Instruction::k30t:
-        offset = insn.vA;
-        break;
-      default:
-        LOG(FATAL) << "Unexpected branch format " << dalvik_format << " from " << insn.opcode;
-    }
-    str.append(StringPrintf(" 0x%x (%c%x)", mir->offset + offset,
-                            offset > 0 ? '+' : '-', offset > 0 ? offset : -offset));
-  } else {
-    // For invokes-style formats, treat wide regs as a pair of singles
-    bool show_singles = ((dalvik_format == Instruction::k35c) ||
-                         (dalvik_format == Instruction::k3rc));
-    if (defs != 0) {
-      str.append(StringPrintf(" %s", GetSSANameWithConst(cu, ssa_rep->defs[0], false).c_str()));
-      if (uses != 0) {
-        str.append(", ");
-      }
-    }
-    for (int i = 0; i < uses; i++) {
-      str.append(
-          StringPrintf(" %s", GetSSANameWithConst(cu, ssa_rep->uses[i], show_singles).c_str()));
-      if (!show_singles && (cu->reg_location != NULL) && cu->reg_location[i].wide) {
-        // For the listing, skip the high sreg.
-        i++;
-      }
-      if (i != (uses - 1)) {
-        str.append(",");
-      }
-    }
-    switch (dalvik_format) {
-      case Instruction::k11n: // Add one immediate from vB
-      case Instruction::k21s:
-      case Instruction::k31i:
-      case Instruction::k21h:
-        str.append(StringPrintf(", #%d", insn.vB));
-        break;
-      case Instruction::k51l: // Add one wide immediate
-        str.append(StringPrintf(", #%lld", insn.vB_wide));
-        break;
-      case Instruction::k21c: // One register, one string/type/method index
-      case Instruction::k31c:
-        str.append(StringPrintf(", index #%d", insn.vB));
-        break;
-      case Instruction::k22c: // Two registers, one string/type/method index
-        str.append(StringPrintf(", index #%d", insn.vC));
-        break;
-      case Instruction::k22s: // Add one immediate from vC
-      case Instruction::k22b:
-        str.append(StringPrintf(", #%d", insn.vC));
-        break;
-      default:
-        ; // Nothing left to print
-      }
-  }
-  if (nop) {
-    str.append("]--optimized away");
-  }
-  int length = str.length() + 1;
-  ret = static_cast<char*>(NewMem(cu, length, false, kAllocDFInfo));
-  strncpy(ret, str.c_str(), length);
-  return ret;
-}
-
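-// Sample rendering (hypothetical): an if-eqz at offset 0x10 whose target
-// lies six code units ahead prints as "if-eqz v2_0, 0x16 (+6)".
-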
-/* Any register that is used before being defined is considered live-in */
-static void HandleLiveInUse(CompilationUnit* cu, ArenaBitVector* use_v, ArenaBitVector* def_v,
-                            ArenaBitVector* live_in_v, int dalvik_reg_id)
-{
-  SetBit(cu, use_v, dalvik_reg_id);
-  if (!IsBitSet(def_v, dalvik_reg_id)) {
-    SetBit(cu, live_in_v, dalvik_reg_id);
-  }
-}
-
-/* Mark a reg as being defined */
-static void HandleDef(CompilationUnit* cu, ArenaBitVector* def_v, int dalvik_reg_id)
-{
-  SetBit(cu, def_v, dalvik_reg_id);
-}
-
-/*
- * Find out live-in variables for natural loops. Variables that are live-in in
- * the main loop body are considered to be defined in the entry block.
- */
-bool FindLocalLiveIn(CompilationUnit* cu, BasicBlock* bb)
-{
-  MIR* mir;
-  ArenaBitVector *use_v, *def_v, *live_in_v;
-
-  if (bb->data_flow_info == NULL) return false;
-
-  use_v = bb->data_flow_info->use_v =
-      AllocBitVector(cu, cu->num_dalvik_registers, false, kBitMapUse);
-  def_v = bb->data_flow_info->def_v =
-      AllocBitVector(cu, cu->num_dalvik_registers, false, kBitMapDef);
-  live_in_v = bb->data_flow_info->live_in_v =
-      AllocBitVector(cu, cu->num_dalvik_registers, false,
-                        kBitMapLiveIn);
-
-  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-    int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
-    DecodedInstruction *d_insn = &mir->dalvikInsn;
-
-    if (df_attributes & DF_HAS_USES) {
-      if (df_attributes & DF_UA) {
-        HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vA);
-        if (df_attributes & DF_A_WIDE) {
-          HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vA+1);
-        }
-      }
-      if (df_attributes & DF_UB) {
-        HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vB);
-        if (df_attributes & DF_B_WIDE) {
-          HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vB+1);
-        }
-      }
-      if (df_attributes & DF_UC) {
-        HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vC);
-        if (df_attributes & DF_C_WIDE) {
-          HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vC+1);
-        }
-      }
-    }
-    if (df_attributes & DF_FORMAT_35C) {
-      for (unsigned int i = 0; i < d_insn->vA; i++) {
-        HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->arg[i]);
-      }
-    }
-    if (df_attributes & DF_FORMAT_3RC) {
-      for (unsigned int i = 0; i < d_insn->vA; i++) {
-        HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vC+i);
-      }
-    }
-    if (df_attributes & DF_HAS_DEFS) {
-      HandleDef(cu, def_v, d_insn->vA);
-      if (df_attributes & DF_A_WIDE) {
-        HandleDef(cu, def_v, d_insn->vA+1);
-      }
-    }
-  }
-  return true;
-}
-
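-/*
- * Worked example (hypothetical block):
- *     add-int v0, v1, v2   // uses v1, v2; defines v0
- *     move    v1, v0       // uses v0; defines v1
- * gives use_v = {v0, v1, v2}, def_v = {v0, v1}, live_in_v = {v1, v2};
- * v0 is only used after its in-block definition, so it is not live-in.
- */
-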
-static int AddNewSReg(CompilationUnit* cu, int v_reg)
-{
-  // Compiler temps always have a subscript of 0
-  int subscript = (v_reg < 0) ? 0 : ++cu->ssa_last_defs[v_reg];
-  int ssa_reg = cu->num_ssa_regs++;
-  InsertGrowableList(cu, cu->ssa_base_vregs, v_reg);
-  InsertGrowableList(cu, cu->ssa_subscripts, subscript);
-  std::string ssa_name = GetSSAName(cu, ssa_reg);
-  char* name = static_cast<char*>(NewMem(cu, ssa_name.length() + 1, false, kAllocDFInfo));
-  strncpy(name, ssa_name.c_str(), ssa_name.length() + 1);
-  InsertGrowableList(cu, cu->ssa_strings, reinterpret_cast<uintptr_t>(name));
-  DCHECK_EQ(cu->ssa_base_vregs->num_used, cu->ssa_subscripts->num_used);
-  return ssa_reg;
-}
-
-/* Find out the latest SSA register for a given Dalvik register */
-static void HandleSSAUse(CompilationUnit* cu, int* uses, int dalvik_reg, int reg_index)
-{
-  DCHECK((dalvik_reg >= 0) && (dalvik_reg < cu->num_dalvik_registers));
-  uses[reg_index] = cu->vreg_to_ssa_map[dalvik_reg];
-}
-
-/* Set up a new SSA register for a given Dalvik register */
-static void HandleSSADef(CompilationUnit* cu, int* defs, int dalvik_reg, int reg_index)
-{
-  DCHECK((dalvik_reg >= 0) && (dalvik_reg < cu->num_dalvik_registers));
-  int ssa_reg = AddNewSReg(cu, dalvik_reg);
-  cu->vreg_to_ssa_map[dalvik_reg] = ssa_reg;
-  defs[reg_index] = ssa_reg;
-}
-
-/* Look up new SSA names for format_35c instructions */
-static void DataFlowSSAFormat35C(CompilationUnit* cu, MIR* mir)
-{
-  DecodedInstruction *d_insn = &mir->dalvikInsn;
-  int num_uses = d_insn->vA;
-  int i;
-
-  mir->ssa_rep->num_uses = num_uses;
-  mir->ssa_rep->uses = static_cast<int*>(NewMem(cu, sizeof(int) * num_uses, true, kAllocDFInfo));
-  // NOTE: will be filled in during type & size inference pass
-  mir->ssa_rep->fp_use = static_cast<bool*>(NewMem(cu, sizeof(bool) * num_uses, true,
-                                                 kAllocDFInfo));
-
-  for (i = 0; i < num_uses; i++) {
-    HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->arg[i], i);
-  }
-}
-
-/* Look up new SSA names for format_3rc instructions */
-static void DataFlowSSAFormat3RC(CompilationUnit* cu, MIR* mir)
-{
-  DecodedInstruction *d_insn = &mir->dalvikInsn;
-  int num_uses = d_insn->vA;
-  int i;
-
-  mir->ssa_rep->num_uses = num_uses;
-  mir->ssa_rep->uses = static_cast<int*>(NewMem(cu, sizeof(int) * num_uses, true, kAllocDFInfo));
-  // NOTE: will be filled in during type & size inference pass
-  mir->ssa_rep->fp_use = static_cast<bool*>(NewMem(cu, sizeof(bool) * num_uses, true,
-                                                 kAllocDFInfo));
-
-  for (i = 0; i < num_uses; i++) {
-    HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vC+i, i);
-  }
-}
-
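-// Illustrative case (hypothetical bytecode): for invoke-virtual {v4, v5, v6}
-// (format 35c), vA == 3 and arg[] holds {4, 5, 6}, so uses[] receives the
-// current SSA names of v4, v5 and v6; the /range flavor (format 3rc) walks
-// vC .. vC+vA-1 instead.
-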
-/* Entry function to convert a block into SSA representation */
-bool DoSSAConversion(CompilationUnit* cu, BasicBlock* bb)
-{
-  MIR* mir;
-
-  if (bb->data_flow_info == NULL) return false;
-
-  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-    mir->ssa_rep = static_cast<struct SSARepresentation *>(NewMem(cu, sizeof(SSARepresentation),
-                                                                 true, kAllocDFInfo));
-
-    int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
-
-    // If not a pseudo-op, note non-leaf or can-throw status.
-    if (static_cast<int>(mir->dalvikInsn.opcode) <
-        static_cast<int>(kNumPackedOpcodes)) {
-      int flags = Instruction::FlagsOf(mir->dalvikInsn.opcode);
-
-      if (flags & Instruction::kThrow) {
-        cu->attrs &= ~METHOD_IS_THROW_FREE;
-      }
-
-      if (flags & Instruction::kInvoke) {
-        cu->attrs &= ~METHOD_IS_LEAF;
-      }
-    }
-
-    int num_uses = 0;
-
-    if (df_attributes & DF_FORMAT_35C) {
-      DataFlowSSAFormat35C(cu, mir);
-      continue;
-    }
-
-    if (df_attributes & DF_FORMAT_3RC) {
-      DataFlowSSAFormat3RC(cu, mir);
-      continue;
-    }
-
-    if (df_attributes & DF_HAS_USES) {
-      if (df_attributes & DF_UA) {
-        num_uses++;
-        if (df_attributes & DF_A_WIDE) {
-          num_uses++;
-        }
-      }
-      if (df_attributes & DF_UB) {
-        num_uses++;
-        if (df_attributes & DF_B_WIDE) {
-          num_uses++;
-        }
-      }
-      if (df_attributes & DF_UC) {
-        num_uses++;
-        if (df_attributes & DF_C_WIDE) {
-          num_uses++;
-        }
-      }
-    }
-
-    if (num_uses) {
-      mir->ssa_rep->num_uses = num_uses;
-      mir->ssa_rep->uses = static_cast<int*>(NewMem(cu, sizeof(int) * num_uses, false,
-                                                   kAllocDFInfo));
-      mir->ssa_rep->fp_use = static_cast<bool*>(NewMem(cu, sizeof(bool) * num_uses, false,
-                                                     kAllocDFInfo));
-    }
-
-    int num_defs = 0;
-
-    if (df_attributes & DF_HAS_DEFS) {
-      num_defs++;
-      if (df_attributes & DF_A_WIDE) {
-        num_defs++;
-      }
-    }
-
-    if (num_defs) {
-      mir->ssa_rep->num_defs = num_defs;
-      mir->ssa_rep->defs = static_cast<int*>(NewMem(cu, sizeof(int) * num_defs, false,
-                                                   kAllocDFInfo));
-      mir->ssa_rep->fp_def = static_cast<bool*>(NewMem(cu, sizeof(bool) * num_defs, false,
-                                                     kAllocDFInfo));
-    }
-
-    DecodedInstruction *d_insn = &mir->dalvikInsn;
-
-    if (df_attributes & DF_HAS_USES) {
-      num_uses = 0;
-      if (df_attributes & DF_UA) {
-        mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
-        HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vA, num_uses++);
-        if (df_attributes & DF_A_WIDE) {
-          mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
-          HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vA+1, num_uses++);
-        }
-      }
-      if (df_attributes & DF_UB) {
-        mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
-        HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vB, num_uses++);
-        if (df_attributes & DF_B_WIDE) {
-          mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
-          HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vB+1, num_uses++);
-        }
-      }
-      if (df_attributes & DF_UC) {
-        mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
-        HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vC, num_uses++);
-        if (df_attributes & DF_C_WIDE) {
-          mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
-          HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vC+1, num_uses++);
-        }
-      }
-    }
-    if (df_attributes & DF_HAS_DEFS) {
-      mir->ssa_rep->fp_def[0] = df_attributes & DF_FP_A;
-      HandleSSADef(cu, mir->ssa_rep->defs, d_insn->vA, 0);
-      if (df_attributes & DF_A_WIDE) {
-        mir->ssa_rep->fp_def[1] = df_attributes & DF_FP_A;
-        HandleSSADef(cu, mir->ssa_rep->defs, d_insn->vA+1, 1);
-      }
-    }
-  }
-
-  if (!cu->disable_dataflow) {
-    /*
-     * Take a snapshot of Dalvik->SSA mapping at the end of each block. The
-     * input to PHI nodes can be derived from the snapshot of all
-     * predecessor blocks.
-     */
-    bb->data_flow_info->vreg_to_ssa_map =
-        static_cast<int*>(NewMem(cu, sizeof(int) * cu->num_dalvik_registers, false,
-                                 kAllocDFInfo));
-
-    memcpy(bb->data_flow_info->vreg_to_ssa_map, cu->vreg_to_ssa_map,
-           sizeof(int) * cu->num_dalvik_registers);
-  }
-  return true;
-}
-
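-/*
- * Sketch of the renaming effect (hypothetical two-register method):
- *     const/4 v0, #1        =>  defs = {v0_1}
- *     add-int v0, v0, v1    =>  uses = {v0_1, v1_0}, defs = {v0_2}
- * Each definition bumps ssa_last_defs[vreg] via AddNewSReg, while uses
- * read the current vreg_to_ssa_map[vreg] entry.
- */
-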
-/* Set up a constant value for opcodes that have the DF_SETS_CONST attribute */
-static void SetConstant(CompilationUnit* cu, int32_t ssa_reg, int value)
-{
-  SetBit(cu, cu->is_constant_v, ssa_reg);
-  cu->constant_values[ssa_reg] = value;
-}
-
-static void SetConstantWide(CompilationUnit* cu, int ssa_reg, int64_t value)
-{
-  SetBit(cu, cu->is_constant_v, ssa_reg);
-  cu->constant_values[ssa_reg] = Low32Bits(value);
-  cu->constant_values[ssa_reg + 1] = High32Bits(value);
-}
-
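-// For instance (hypothetical value), SetConstantWide(cu, s, 0x100000002LL)
-// stores Low32Bits() == 0x2 in constant_values[s] and High32Bits() == 0x1
-// in constant_values[s + 1], matching the paired-sreg layout of wide values.
-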
-bool DoConstantPropogation(CompilationUnit* cu, BasicBlock* bb)
-{
-  MIR* mir;
-  ArenaBitVector *is_constant_v = cu->is_constant_v;
-
-  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-    int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
-
-    DecodedInstruction *d_insn = &mir->dalvikInsn;
-
-    if (!(df_attributes & DF_HAS_DEFS)) continue;
-
-    /* Handle instructions that set up constants directly */
-    if (df_attributes & DF_SETS_CONST) {
-      if (df_attributes & DF_DA) {
-        int32_t vB = static_cast<int32_t>(d_insn->vB);
-        switch (d_insn->opcode) {
-          case Instruction::CONST_4:
-          case Instruction::CONST_16:
-          case Instruction::CONST:
-            SetConstant(cu, mir->ssa_rep->defs[0], vB);
-            break;
-          case Instruction::CONST_HIGH16:
-            SetConstant(cu, mir->ssa_rep->defs[0], vB << 16);
-            break;
-          case Instruction::CONST_WIDE_16:
-          case Instruction::CONST_WIDE_32:
-            SetConstantWide(cu, mir->ssa_rep->defs[0], static_cast<int64_t>(vB));
-            break;
-          case Instruction::CONST_WIDE:
-            SetConstantWide(cu, mir->ssa_rep->defs[0], d_insn->vB_wide);
-            break;
-          case Instruction::CONST_WIDE_HIGH16:
-            SetConstantWide(cu, mir->ssa_rep->defs[0], static_cast<int64_t>(vB) << 48);
-            break;
-          default:
-            break;
-        }
-      }
-      /* Handle moves of constant values between registers */
-    } else if (df_attributes & DF_IS_MOVE) {
-      int i;
-
-      for (i = 0; i < mir->ssa_rep->num_uses; i++) {
-        if (!IsBitSet(is_constant_v, mir->ssa_rep->uses[i])) break;
-      }
-      /* Move a register holding a constant to another register */
-      if (i == mir->ssa_rep->num_uses) {
-        SetConstant(cu, mir->ssa_rep->defs[0],
-                    cu->constant_values[mir->ssa_rep->uses[0]]);
-        if (df_attributes & DF_A_WIDE) {
-          SetConstant(cu, mir->ssa_rep->defs[1],
-                      cu->constant_values[mir->ssa_rep->uses[1]]);
-        }
-      }
-    } else if (df_attributes & DF_NULL_TRANSFER_N) {
-      /*
-       * Mark const sregs that appear in merges.  Need to flush those to home location.
-       * TUNING: instead of flushing on def, we could insert a flush on the appropriate
-       * edge[s].
-       */
-      DCHECK_EQ(static_cast<int32_t>(d_insn->opcode), kMirOpPhi);
-      for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
-        if (IsConst(cu, mir->ssa_rep->uses[i])) {
-          SetBit(cu, cu->must_flush_constant_v, mir->ssa_rep->uses[i]);
-        }
-      }
-    }
-  }
-  /* TODO: implement code to handle arithmetic operations */
-  return true;
-}
-
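-// Example of the direct-constant case above (hypothetical): a const/high16
-// with immediate 0x41 records (0x41 << 16) == 0x410000 for the def, while
-// const-wide/high16 shifts the immediate into bits 48..63 instead.
-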
-/* Set up the basic data structures for SSA conversion */
-void CompilerInitializeSSAConversion(CompilationUnit* cu)
-{
-  int i;
-  int num_dalvik_reg = cu->num_dalvik_registers;
-
-  cu->ssa_base_vregs =
-      static_cast<GrowableList*>(NewMem(cu, sizeof(GrowableList), false, kAllocDFInfo));
-  cu->ssa_subscripts =
-      static_cast<GrowableList*>(NewMem(cu, sizeof(GrowableList), false, kAllocDFInfo));
-  cu->ssa_strings =
-      static_cast<GrowableList*>(NewMem(cu, sizeof(GrowableList), false, kAllocDFInfo));
-  // Create the ssa mappings, estimating the max size
-  CompilerInitGrowableList(cu, cu->ssa_base_vregs,
-                      num_dalvik_reg + cu->def_count + 128,
-                      kListSSAtoDalvikMap);
-  CompilerInitGrowableList(cu, cu->ssa_subscripts,
-                      num_dalvik_reg + cu->def_count + 128,
-                      kListSSAtoDalvikMap);
-  CompilerInitGrowableList(cu, cu->ssa_strings,
-                      num_dalvik_reg + cu->def_count + 128,
-                      kListSSAtoDalvikMap);
-  /*
-   * Initial number of SSA registers is equal to the number of Dalvik
-   * registers.
-   */
-  cu->num_ssa_regs = num_dalvik_reg;
-
-  /*
-   * Initialize the SSA->Dalvik maps. For the first num_dalvik_reg elements,
-   * the subscript is 0, so base vreg i is simply paired with subscript 0.
-   */
-  for (i = 0; i < num_dalvik_reg; i++) {
-    InsertGrowableList(cu, cu->ssa_base_vregs, i);
-    InsertGrowableList(cu, cu->ssa_subscripts, 0);
-    std::string ssa_name = GetSSAName(cu, i);
-    char* name = static_cast<char*>(NewMem(cu, ssa_name.length() + 1, true, kAllocDFInfo));
-    strncpy(name, ssa_name.c_str(), ssa_name.length() + 1);
-    InsertGrowableList(cu, cu->ssa_strings, reinterpret_cast<uintptr_t>(name));
-  }
-
-  /*
-   * Initialize the DalvikToSSAMap map. There is one entry for each
-   * Dalvik register, and the SSA names for those are the same.
-   */
-  cu->vreg_to_ssa_map =
-      static_cast<int*>(NewMem(cu, sizeof(int) * num_dalvik_reg, false, kAllocDFInfo));
-  /* Keep track of the highest def for each Dalvik reg */
-  cu->ssa_last_defs =
-      static_cast<int*>(NewMem(cu, sizeof(int) * num_dalvik_reg, false, kAllocDFInfo));
-
-  for (i = 0; i < num_dalvik_reg; i++) {
-    cu->vreg_to_ssa_map[i] = i;
-    cu->ssa_last_defs[i] = 0;
-  }
-
-  /* Add ssa reg for Method* */
-  cu->method_sreg = AddNewSReg(cu, SSA_METHOD_BASEREG);
-
-  /*
-   * Allocate the BasicBlockDataFlow structure for the entry and code blocks
-   */
-  GrowableListIterator iterator;
-
-  GrowableListIteratorInit(&cu->block_list, &iterator);
-
-  while (true) {
-    BasicBlock* bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iterator));
-    if (bb == NULL) break;
-    if (bb->hidden) continue;
-    if (bb->block_type == kDalvikByteCode ||
-        bb->block_type == kEntryBlock ||
-        bb->block_type == kExitBlock) {
-      bb->data_flow_info = static_cast<BasicBlockDataFlow*>(NewMem(cu, sizeof(BasicBlockDataFlow),
-                                                                   true, kAllocDFInfo));
-    }
-  }
-}
-
-/* Clear the visited flag for each BB */
-bool ClearVisitedFlag(struct CompilationUnit* cu, struct BasicBlock* bb)
-{
-  bb->visited = false;
-  return true;
-}
-
-void DataFlowAnalysisDispatcher(CompilationUnit* cu,
-                                   bool (*func)(CompilationUnit*, BasicBlock*),
-                                   DataFlowAnalysisMode dfa_mode,
-                                   bool is_iterative)
-{
-  bool change = true;
-
-  while (change) {
-    change = false;
-
-    switch (dfa_mode) {
-      /* Scan all blocks and perform the operations specified in func */
-      case kAllNodes:
-        {
-          GrowableListIterator iterator;
-          GrowableListIteratorInit(&cu->block_list, &iterator);
-          while (true) {
-            BasicBlock* bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iterator));
-            if (bb == NULL) break;
-            if (bb->hidden) continue;
-            change |= (*func)(cu, bb);
-          }
-        }
-        break;
-      /* Scan reachable blocks and perform the ops specified in func. */
-      case kReachableNodes:
-        {
-          int num_reachable_blocks = cu->num_reachable_blocks;
-          int idx;
-          const GrowableList *block_list = &cu->block_list;
-
-          for (idx = 0; idx < num_reachable_blocks; idx++) {
-            int block_idx = cu->dfs_order.elem_list[idx];
-            BasicBlock* bb =
-                reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, block_idx));
-            change |= (*func)(cu, bb);
-          }
-        }
-        break;
-
-      /* Scan reachable blocks by pre-order dfs and invoke func on each. */
-      case kPreOrderDFSTraversal:
-        {
-          int num_reachable_blocks = cu->num_reachable_blocks;
-          int idx;
-          const GrowableList *block_list = &cu->block_list;
-
-          for (idx = 0; idx < num_reachable_blocks; idx++) {
-            int dfs_idx = cu->dfs_order.elem_list[idx];
-            BasicBlock* bb =
-                reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, dfs_idx));
-            change |= (*func)(cu, bb);
-          }
-        }
-        break;
-      /* Scan reachable blocks post-order dfs and invoke func on each. */
-      case kPostOrderDFSTraversal:
-        {
-          int num_reachable_blocks = cu->num_reachable_blocks;
-          int idx;
-          const GrowableList *block_list = &cu->block_list;
-
-          for (idx = num_reachable_blocks - 1; idx >= 0; idx--) {
-            int dfs_idx = cu->dfs_order.elem_list[idx];
-            BasicBlock* bb =
-                reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, dfs_idx));
-            change |= (*func)(cu, bb);
-          }
-        }
-        break;
-      /* Scan reachable post-order dom tree and invoke func on each. */
-      case kPostOrderDOMTraversal:
-        {
-          int num_reachable_blocks = cu->num_reachable_blocks;
-          int idx;
-          const GrowableList *block_list = &cu->block_list;
-
-          for (idx = 0; idx < num_reachable_blocks; idx++) {
-            int dom_idx = cu->dom_post_order_traversal.elem_list[idx];
-            BasicBlock* bb =
-                reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, dom_idx));
-            change |= (*func)(cu, bb);
-          }
-        }
-        break;
-      /* Scan reachable blocks reverse post-order dfs, invoke func on each */
-      case kReversePostOrderTraversal:
-        {
-          int num_reachable_blocks = cu->num_reachable_blocks;
-          int idx;
-          const GrowableList *block_list = &cu->block_list;
-
-          for (idx = num_reachable_blocks - 1; idx >= 0; idx--) {
-            int rev_idx = cu->dfs_post_order.elem_list[idx];
-            BasicBlock* bb =
-                reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, rev_idx));
-            change |= (*func)(cu, bb);
-          }
-        }
-        break;
-      default:
-        LOG(FATAL) << "Unknown traversal mode: " << dfa_mode;
-    }
-    /* If is_iterative is false, exit the loop after the first iteration */
-    change &= is_iterative;
-  }
-}
-
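-// Typical call shape (as used by the passes below): run a transfer function
-// to a fixed point over the pre-order DFS list; the outer loop exits once no
-// block reports a change, e.g.:
-//   DataFlowAnalysisDispatcher(cu, EliminateNullChecks,
-//                              kPreOrderDFSTraversal, true);
-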
-/* Advance to next strictly dominated MIR node in an extended basic block */
-static MIR* AdvanceMIR(CompilationUnit* cu, BasicBlock** p_bb, MIR* mir)
-{
-  BasicBlock* bb = *p_bb;
-  if (mir != NULL) {
-    mir = mir->next;
-    if (mir == NULL) {
-      bb = bb->fall_through;
-      if ((bb == NULL) || (Predecessors(bb) != 1)) {
-        mir = NULL;
-      } else {
-        *p_bb = bb;
-        mir = bb->first_mir_insn;
-      }
-    }
-  }
-  return mir;
-}
-
-/*
- * To be used at an invoke mir.  If the logically next mir node represents
- * a move-result, return it.  Else, return NULL.  If a move-result exists,
- * it is required to immediately follow the invoke with no intervening
- * opcodes or incoming arcs.  However, if the result of the invoke is not
- * used, a move-result may not be present.
- */
-MIR* FindMoveResult(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
-{
-  BasicBlock* tbb = bb;
-  mir = AdvanceMIR(cu, &tbb, mir);
-  while (mir != NULL) {
-    int opcode = mir->dalvikInsn.opcode;
-    if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
-        (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) ||
-        (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
-      break;
-    }
-    // Keep going if pseudo op, otherwise terminate
-    if (opcode < kNumPackedOpcodes) {
-      mir = NULL;
-    } else {
-      mir = AdvanceMIR(cu, &tbb, mir);
-    }
-  }
-  return mir;
-}
-
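-// Expected shape (hypothetical bytecode):
-//     invoke-static {v1}, LFoo;->bar(I)I
-//     move-result v0            <-- returned by FindMoveResult
-// Any intervening real (non-pseudo) opcode makes the search return NULL.
-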
-static BasicBlock* NextDominatedBlock(CompilationUnit* cu, BasicBlock* bb)
-{
-  if (bb->block_type == kDead) {
-    return NULL;
-  }
-  DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
-      || (bb->block_type == kExitBlock));
-  bb = bb->fall_through;
-  if (bb == NULL || (Predecessors(bb) != 1)) {
-    return NULL;
-  }
-  DCHECK((bb->block_type == kDalvikByteCode) || (bb->block_type == kExitBlock));
-  return bb;
-}
-
-static MIR* FindPhi(CompilationUnit* cu, BasicBlock* bb, int ssa_name)
-{
-  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-    if (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
-      for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
-        if (mir->ssa_rep->uses[i] == ssa_name) {
-          return mir;
-        }
-      }
-    }
-  }
-  return NULL;
-}
-
-static SelectInstructionKind SelectKind(MIR* mir)
-{
-  switch (mir->dalvikInsn.opcode) {
-    case Instruction::MOVE:
-    case Instruction::MOVE_OBJECT:
-    case Instruction::MOVE_16:
-    case Instruction::MOVE_OBJECT_16:
-    case Instruction::MOVE_FROM16:
-    case Instruction::MOVE_OBJECT_FROM16:
-      return kSelectMove;
-    case Instruction::CONST:
-    case Instruction::CONST_4:
-    case Instruction::CONST_16:
-      return kSelectConst;
-    case Instruction::GOTO:
-    case Instruction::GOTO_16:
-    case Instruction::GOTO_32:
-      return kSelectGoto;
-    default:
-      break;
-  }
-  return kSelectNone;
-}
-
-/* Do some MIR-level extended basic block optimizations */
-static bool BasicBlockOpt(CompilationUnit* cu, BasicBlock* bb)
-{
-  if (bb->block_type == kDead) {
-    return true;
-  }
-  int num_temps = 0;
-  BBOpt bb_opt(cu);
-  while (bb != NULL) {
-    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-      // TUNING: use the returned value number for CSE.
-      bb_opt.GetValueNumber(mir);
-      // Look for interesting opcodes, skip otherwise
-      Instruction::Code opcode = mir->dalvikInsn.opcode;
-      switch (opcode) {
-        case Instruction::CMPL_FLOAT:
-        case Instruction::CMPL_DOUBLE:
-        case Instruction::CMPG_FLOAT:
-        case Instruction::CMPG_DOUBLE:
-        case Instruction::CMP_LONG:
-          if (cu->gen_bitcode) {
-            // Bitcode doesn't allow this optimization.
-            break;
-          }
-          if (mir->next != NULL) {
-            MIR* mir_next = mir->next;
-            Instruction::Code br_opcode = mir_next->dalvikInsn.opcode;
-            ConditionCode ccode = kCondNv;
-            switch (br_opcode) {
-              case Instruction::IF_EQZ:
-                ccode = kCondEq;
-                break;
-              case Instruction::IF_NEZ:
-                ccode = kCondNe;
-                break;
-              case Instruction::IF_LTZ:
-                ccode = kCondLt;
-                break;
-              case Instruction::IF_GEZ:
-                ccode = kCondGe;
-                break;
-              case Instruction::IF_GTZ:
-                ccode = kCondGt;
-                break;
-              case Instruction::IF_LEZ:
-                ccode = kCondLe;
-                break;
-              default:
-                break;
-            }
-            // Make sure result of cmp is used by next insn and nowhere else
-            if ((ccode != kCondNv) &&
-                (mir->ssa_rep->defs[0] == mir_next->ssa_rep->uses[0]) &&
-                (GetSSAUseCount(cu, mir->ssa_rep->defs[0]) == 1)) {
-              mir_next->dalvikInsn.arg[0] = ccode;
-              switch (opcode) {
-                case Instruction::CMPL_FLOAT:
-                  mir_next->dalvikInsn.opcode =
-                      static_cast<Instruction::Code>(kMirOpFusedCmplFloat);
-                  break;
-                case Instruction::CMPL_DOUBLE:
-                  mir_next->dalvikInsn.opcode =
-                      static_cast<Instruction::Code>(kMirOpFusedCmplDouble);
-                  break;
-                case Instruction::CMPG_FLOAT:
-                  mir_next->dalvikInsn.opcode =
-                      static_cast<Instruction::Code>(kMirOpFusedCmpgFloat);
-                  break;
-                case Instruction::CMPG_DOUBLE:
-                  mir_next->dalvikInsn.opcode =
-                      static_cast<Instruction::Code>(kMirOpFusedCmpgDouble);
-                  break;
-                case Instruction::CMP_LONG:
-                  mir_next->dalvikInsn.opcode =
-                      static_cast<Instruction::Code>(kMirOpFusedCmpLong);
-                  break;
-                default: LOG(ERROR) << "Unexpected opcode: " << opcode;
-              }
-              mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
-              mir_next->ssa_rep->num_uses = mir->ssa_rep->num_uses;
-              mir_next->ssa_rep->uses = mir->ssa_rep->uses;
-              mir_next->ssa_rep->fp_use = mir->ssa_rep->fp_use;
-              mir_next->ssa_rep->num_defs = 0;
-              mir->ssa_rep->num_uses = 0;
-              mir->ssa_rep->num_defs = 0;
-            }
-          }
-          break;
-        case Instruction::GOTO:
-        case Instruction::GOTO_16:
-        case Instruction::GOTO_32:
-        case Instruction::IF_EQ:
-        case Instruction::IF_NE:
-        case Instruction::IF_LT:
-        case Instruction::IF_GE:
-        case Instruction::IF_GT:
-        case Instruction::IF_LE:
-        case Instruction::IF_EQZ:
-        case Instruction::IF_NEZ:
-        case Instruction::IF_LTZ:
-        case Instruction::IF_GEZ:
-        case Instruction::IF_GTZ:
-        case Instruction::IF_LEZ:
-          if (bb->taken->dominates_return) {
-            mir->optimization_flags |= MIR_IGNORE_SUSPEND_CHECK;
-            if (cu->verbose) {
-              LOG(INFO) << "Suppressed suspend check on branch to return at 0x" << std::hex << mir->offset;
-            }
-          }
-          break;
-        default:
-          break;
-      }
-      // Is this the select pattern?
-      // TODO: flesh out support for Mips and X86.  NOTE: llvm's select op doesn't quite work here.
-      // TUNING: expand to support IF_xx compare & branches
-      if (!cu->gen_bitcode && (cu->instruction_set == kThumb2) &&
-          ((mir->dalvikInsn.opcode == Instruction::IF_EQZ) ||
-          (mir->dalvikInsn.opcode == Instruction::IF_NEZ))) {
-        BasicBlock* ft = bb->fall_through;
-        DCHECK(ft != NULL);
-        BasicBlock* ft_ft = ft->fall_through;
-        BasicBlock* ft_tk = ft->taken;
-
-        BasicBlock* tk = bb->taken;
-        DCHECK(tk != NULL);
-        BasicBlock* tk_ft = tk->fall_through;
-        BasicBlock* tk_tk = tk->taken;
-
-        /*
-         * In the select pattern, the taken edge goes to a block that unconditionally
-         * transfers to the rejoin block and the fall_through edge goes to a block that
-         * unconditionally falls through to the rejoin block.
-         */
-        if ((tk_ft == NULL) && (ft_tk == NULL) && (tk_tk == ft_ft) &&
-            (Predecessors(tk) == 1) && (Predecessors(ft) == 1)) {
-          /*
-           * Okay - we have the basic diamond shape.  At the very least, we can eliminate the
-           * suspend check on the taken-taken branch back to the join point.
-           */
-          if (SelectKind(tk->last_mir_insn) == kSelectGoto) {
-            tk->last_mir_insn->optimization_flags |= MIR_IGNORE_SUSPEND_CHECK;
-          }
-          // Are the block bodies something we can handle?
-          if ((ft->first_mir_insn == ft->last_mir_insn) &&
-              (tk->first_mir_insn != tk->last_mir_insn) &&
-              (tk->first_mir_insn->next == tk->last_mir_insn) &&
-              ((SelectKind(ft->first_mir_insn) == kSelectMove) ||
-              (SelectKind(ft->first_mir_insn) == kSelectConst)) &&
-              (SelectKind(ft->first_mir_insn) == SelectKind(tk->first_mir_insn)) &&
-              (SelectKind(tk->last_mir_insn) == kSelectGoto)) {
-            // Almost there.  Are the instructions targeting the same vreg?
-            MIR* if_true = tk->first_mir_insn;
-            MIR* if_false = ft->first_mir_insn;
-            // It's possible that the target of the select isn't used - skip those (rare) cases.
-            MIR* phi = FindPhi(cu, tk_tk, if_true->ssa_rep->defs[0]);
-            if ((phi != NULL) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
-              /*
-               * We'll convert the IF_EQZ/IF_NEZ to a SELECT.  We need to find the
-               * Phi node in the merge block and delete it (while using the SSA name
-               * of the merge as the target of the SELECT).  Delete both taken and
-               * fallthrough blocks, and set fallthrough to merge block.
-               * NOTE: not updating other dataflow info (no longer used at this point).
-               * If this changes, need to update i_dom, etc. here (and in CombineBlocks).
-               */
-              if (opcode == Instruction::IF_NEZ) {
-                // Normalize.
-                MIR* tmp_mir = if_true;
-                if_true = if_false;
-                if_false = tmp_mir;
-              }
-              mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpSelect);
-              bool const_form = (SelectKind(if_true) == kSelectConst);
-              if (SelectKind(if_true) == kSelectMove) {
-                if (IsConst(cu, if_true->ssa_rep->uses[0]) &&
-                    IsConst(cu, if_false->ssa_rep->uses[0])) {
-                    const_form = true;
-                    if_true->dalvikInsn.vB = ConstantValue(cu, if_true->ssa_rep->uses[0]);
-                    if_false->dalvikInsn.vB = ConstantValue(cu, if_false->ssa_rep->uses[0]);
-                }
-              }
-              if (const_form) {
-                // "true" set val in vB
-                mir->dalvikInsn.vB = if_true->dalvikInsn.vB;
-                // "false" set val in vC
-                mir->dalvikInsn.vC = if_false->dalvikInsn.vB;
-              } else {
-                DCHECK_EQ(SelectKind(if_true), kSelectMove);
-                DCHECK_EQ(SelectKind(if_false), kSelectMove);
-                int* src_ssa = static_cast<int*>(NewMem(cu, sizeof(int) * 3, false,
-                                                 kAllocDFInfo));
-                src_ssa[0] = mir->ssa_rep->uses[0];
-                src_ssa[1] = if_true->ssa_rep->uses[0];
-                src_ssa[2] = if_false->ssa_rep->uses[0];
-                mir->ssa_rep->uses = src_ssa;
-                mir->ssa_rep->num_uses = 3;
-              }
-              mir->ssa_rep->num_defs = 1;
-              mir->ssa_rep->defs = static_cast<int*>(NewMem(cu, sizeof(int) * 1, false,
-                                                     kAllocDFInfo));
-              mir->ssa_rep->fp_def = static_cast<bool*>(NewMem(cu, sizeof(bool) * 1, false,
-                                                     kAllocDFInfo));
-              mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
-              /*
-               * There is usually a Phi node in the join block for our two cases.  If the
-               * Phi node only contains our two cases as input, we will use the result
-               * SSA name of the Phi node as our select result and delete the Phi.  If
-               * the Phi node has more than two operands, we will arbitrarily use the SSA
-               * name of the "true" path, delete the SSA name of the "false" path from the
-               * Phi node (and fix up the incoming arc list).
-               */
-              if (phi->ssa_rep->num_uses == 2) {
-                mir->ssa_rep->defs[0] = phi->ssa_rep->defs[0];
-                phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
-              } else {
-                int dead_def = if_false->ssa_rep->defs[0];
-                int live_def = if_true->ssa_rep->defs[0];
-                mir->ssa_rep->defs[0] = live_def;
-                int* incoming = reinterpret_cast<int*>(phi->dalvikInsn.vB);
-                for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
-                  if (phi->ssa_rep->uses[i] == live_def) {
-                    incoming[i] = bb->id;
-                  }
-                }
-                for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
-                  if (phi->ssa_rep->uses[i] == dead_def) {
-                    int last_slot = phi->ssa_rep->num_uses - 1;
-                    phi->ssa_rep->uses[i] = phi->ssa_rep->uses[last_slot];
-                    incoming[i] = incoming[last_slot];
-                  }
-                }
-              }
-              phi->ssa_rep->num_uses--;
-              bb->taken = NULL;
-              tk->block_type = kDead;
-              for (MIR* tmir = ft->first_mir_insn; tmir != NULL; tmir = tmir->next) {
-                tmir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
-              }
-            }
-          }
-        }
-      }
-    }
-    bb = NextDominatedBlock(cu, bb);
-  }
-
-  if (num_temps > cu->num_compiler_temps) {
-    cu->num_compiler_temps = num_temps;
-  }
-  return true;
-}
-
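-/*
- * Shape of the select pattern matched above (hypothetical CFG):
- *
- *              bb: if-eqz vA
- *              /            \
- *   tk: const vX, #1    ft: const vX, #0
- *              \            /
- *           join (tk_tk == ft_ft): Phi
- *
- * The branch becomes a single kMirOpSelect in bb, the join's Phi is pruned
- * or deleted, and tk is killed while ft's instructions are NOPed.
- */
-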
-static bool NullCheckEliminationInit(struct CompilationUnit* cu, struct BasicBlock* bb)
-{
-  if (bb->data_flow_info == NULL) return false;
-  bb->data_flow_info->ending_null_check_v =
-      AllocBitVector(cu, cu->num_ssa_regs, false, kBitMapNullCheck);
-  ClearAllBits(bb->data_flow_info->ending_null_check_v);
-  return true;
-}
-
-/* Collect stats on number of checks removed */
-static bool CountChecks(struct CompilationUnit* cu, struct BasicBlock* bb)
-{
-  if (bb->data_flow_info == NULL) return false;
-  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-    if (mir->ssa_rep == NULL) {
-      continue;
-    }
-    int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
-    if (df_attributes & DF_HAS_NULL_CHKS) {
-      cu->checkstats->null_checks++;
-      if (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) {
-        cu->checkstats->null_checks_eliminated++;
-      }
-    }
-    if (df_attributes & DF_HAS_RANGE_CHKS) {
-      cu->checkstats->range_checks++;
-      if (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) {
-        cu->checkstats->range_checks_eliminated++;
-      }
-    }
-  }
-  return false;
-}
-
-/* Try to make common case the fallthrough path */
-static bool LayoutBlocks(struct CompilationUnit* cu, struct BasicBlock* bb)
-{
-  // TODO: For now, just looking for direct throws.  Consider generalizing for profile feedback
-  if (!bb->explicit_throw) {
-    return false;
-  }
-  BasicBlock* walker = bb;
-  while (true) {
-    // Check termination conditions
-    if ((walker->block_type == kEntryBlock) || (Predecessors(walker) != 1)) {
-      break;
-    }
-    BasicBlock* prev = GET_ELEM_N(walker->predecessors, BasicBlock*, 0);
-    if (prev->conditional_branch) {
-      if (prev->fall_through == walker) {
-        // Already done - return
-        break;
-      }
-      DCHECK_EQ(walker, prev->taken);
-      // Got one.  Flip it and exit
-      Instruction::Code opcode = prev->last_mir_insn->dalvikInsn.opcode;
-      switch (opcode) {
-        case Instruction::IF_EQ: opcode = Instruction::IF_NE; break;
-        case Instruction::IF_NE: opcode = Instruction::IF_EQ; break;
-        case Instruction::IF_LT: opcode = Instruction::IF_GE; break;
-        case Instruction::IF_GE: opcode = Instruction::IF_LT; break;
-        case Instruction::IF_GT: opcode = Instruction::IF_LE; break;
-        case Instruction::IF_LE: opcode = Instruction::IF_GT; break;
-        case Instruction::IF_EQZ: opcode = Instruction::IF_NEZ; break;
-        case Instruction::IF_NEZ: opcode = Instruction::IF_EQZ; break;
-        case Instruction::IF_LTZ: opcode = Instruction::IF_GEZ; break;
-        case Instruction::IF_GEZ: opcode = Instruction::IF_LTZ; break;
-        case Instruction::IF_GTZ: opcode = Instruction::IF_LEZ; break;
-        case Instruction::IF_LEZ: opcode = Instruction::IF_GTZ; break;
-        default: LOG(FATAL) << "Unexpected opcode " << opcode;
-      }
-      prev->last_mir_insn->dalvikInsn.opcode = opcode;
-      BasicBlock* t_bb = prev->taken;
-      prev->taken = prev->fall_through;
-      prev->fall_through = t_bb;
-      break;
-    }
-    walker = prev;
-  }
-  return false;
-}
-
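-// Effect sketch (hypothetical): if the chain of blocks ending in an explicit
-// throw hangs off prev->taken via "if-ltz vA", the branch is rewritten to
-// "if-gez vA" and prev's taken/fall_through successors are swapped.
-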
-/* Combine any basic blocks terminated by instructions that we now know can't throw */
-static bool CombineBlocks(struct CompilationUnit* cu, struct BasicBlock* bb)
-{
-  // Loop here to allow combining a sequence of blocks
-  while (true) {
-    // Check termination conditions
-    if ((bb->first_mir_insn == NULL)
-        || (bb->data_flow_info == NULL)
-        || (bb->block_type == kExceptionHandling)
-        || (bb->block_type == kExitBlock)
-        || (bb->block_type == kDead)
-        || ((bb->taken == NULL) || (bb->taken->block_type != kExceptionHandling))
-        || (bb->successor_block_list.block_list_type != kNotUsed)
-        || (static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) != kMirOpCheck)) {
-      break;
-    }
-
-    // Test the kMirOpCheck instruction
-    MIR* mir = bb->last_mir_insn;
-    // Grab the attributes from the paired opcode
-    MIR* throw_insn = mir->meta.throw_insn;
-    int df_attributes = oat_data_flow_attributes[throw_insn->dalvikInsn.opcode];
-    bool can_combine = true;
-    if (df_attributes & DF_HAS_NULL_CHKS) {
-      can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0);
-    }
-    if (df_attributes & DF_HAS_RANGE_CHKS) {
-      can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0);
-    }
-    if (!can_combine) {
-      break;
-    }
-    // OK - got one.  Combine
-    BasicBlock* bb_next = bb->fall_through;
-    DCHECK(!bb_next->catch_entry);
-    DCHECK_EQ(Predecessors(bb_next), 1U);
-    MIR* t_mir = bb->last_mir_insn->prev;
-    // Overwrite the kMirOpCheck insn with the paired opcode
-    DCHECK_EQ(bb_next->first_mir_insn, throw_insn);
-    *bb->last_mir_insn = *throw_insn;
-    bb->last_mir_insn->prev = t_mir;
-    // Use the successor info from the next block
-    bb->successor_block_list = bb_next->successor_block_list;
-    // Use the ending block linkage from the next block
-    bb->fall_through = bb_next->fall_through;
-    bb->taken->block_type = kDead;  // Kill the unused exception block
-    bb->taken = bb_next->taken;
-    // Include the rest of the instructions
-    bb->last_mir_insn = bb_next->last_mir_insn;
-    /*
-     * If lower-half of pair of blocks to combine contained a return, move the flag
-     * to the newly combined block.
-     */
-    bb->terminated_by_return = bb_next->terminated_by_return;
-
-    /*
-     * NOTE: we aren't updating all dataflow info here.  Should either make sure this pass
-     * happens after uses of i_dominated, dom_frontier or update the dataflow info here.
-     */
-
-    // Kill bb_next and remap now-dead id to parent
-    bb_next->block_type = kDead;
-    cu->block_id_map.Overwrite(bb_next->id, bb->id);
-
-    // Now, loop back and see if we can keep going
-  }
-  return false;
-}
-
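-// Background for the pairing above: a potentially-throwing instruction is
-// split during block building into a kMirOpCheck that ends its block plus
-// the real instruction heading the fall-through block; once the checks are
-// proven redundant, the halves are re-fused by copying *throw_insn over the
-// check placeholder.
-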
-/* Eliminate unnecessary null checks for a basic block. */
-static bool EliminateNullChecks(struct CompilationUnit* cu, struct BasicBlock* bb)
-{
-  if (bb->data_flow_info == NULL) return false;
-
-  /*
-   * Set initial state.  Be conservative with catch
-   * blocks and start with no assumptions about null check
-   * status (except for "this").
-   */
-  if ((bb->block_type == kEntryBlock) || bb->catch_entry) {
-    ClearAllBits(cu->temp_ssa_register_v);
-    if ((cu->access_flags & kAccStatic) == 0) {
-      // If non-static method, mark "this" as non-null
-      int this_reg = cu->num_dalvik_registers - cu->num_ins;
-      SetBit(cu, cu->temp_ssa_register_v, this_reg);
-    }
-  } else {
-    // Starting state is the intersection of all incoming arcs
-    GrowableListIterator iter;
-    GrowableListIteratorInit(bb->predecessors, &iter);
-    BasicBlock* pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
-    DCHECK(pred_bb != NULL);
-    CopyBitVector(cu->temp_ssa_register_v,
-                     pred_bb->data_flow_info->ending_null_check_v);
-    while (true) {
-      pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
-      if (!pred_bb) break;
-      if ((pred_bb->data_flow_info == NULL) ||
-          (pred_bb->data_flow_info->ending_null_check_v == NULL)) {
-        continue;
-      }
-      IntersectBitVectors(cu->temp_ssa_register_v,
-                             cu->temp_ssa_register_v,
-                             pred_bb->data_flow_info->ending_null_check_v);
-    }
-  }
-
-  // Walk through the instructions in the block, updating as necessary
-  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-    if (mir->ssa_rep == NULL) {
-      continue;
-    }
-    int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
-
-    // Mark target of NEW* as non-null
-    if (df_attributes & DF_NON_NULL_DST) {
-      SetBit(cu, cu->temp_ssa_register_v, mir->ssa_rep->defs[0]);
-    }
-
-    // Mark non-null returns from invoke-style NEW*
-    if (df_attributes & DF_NON_NULL_RET) {
-      MIR* next_mir = mir->next;
-      // Next should be a MOVE_RESULT_OBJECT
-      if (next_mir &&
-          next_mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
-        // Mark as null checked
-        SetBit(cu, cu->temp_ssa_register_v, next_mir->ssa_rep->defs[0]);
-      } else {
-        if (next_mir) {
-          LOG(WARNING) << "Unexpected opcode following new: " << next_mir->dalvikInsn.opcode;
-        } else if (bb->fall_through) {
-          // Look in next basic block
-          struct BasicBlock* next_bb = bb->fall_through;
-          for (MIR* tmir = next_bb->first_mir_insn; tmir != NULL;
-               tmir = tmir->next) {
-            if (static_cast<int>(tmir->dalvikInsn.opcode) >= static_cast<int>(kMirOpFirst)) {
-              continue;
-            }
-            // First non-pseudo should be MOVE_RESULT_OBJECT
-            if (tmir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
-              // Mark as null checked
-              SetBit(cu, cu->temp_ssa_register_v, tmir->ssa_rep->defs[0]);
-            } else {
-              LOG(WARNING) << "Unexpected op after new: " << tmir->dalvikInsn.opcode;
-            }
-            break;
-          }
-        }
-      }
-    }
-
-    /*
-     * Propagate nullcheck state on register copies (including
-     * Phi pseudo copies).  For the latter, nullcheck state is
-     * the "and" of all the Phi's operands.
-     */
-    if (df_attributes & (DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)) {
-      int tgt_sreg = mir->ssa_rep->defs[0];
-      int operands = (df_attributes & DF_NULL_TRANSFER_0) ? 1 :
-          mir->ssa_rep->num_uses;
-      bool null_checked = true;
-      for (int i = 0; i < operands; i++) {
-        null_checked &= IsBitSet(cu->temp_ssa_register_v,
-                                 mir->ssa_rep->uses[i]);
-      }
-      if (null_checked) {
-        SetBit(cu, cu->temp_ssa_register_v, tgt_sreg);
-      }
-    }
-
-    // Already nullchecked?
-    if ((df_attributes & DF_HAS_NULL_CHKS) && !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
-      int src_idx;
-      if (df_attributes & DF_NULL_CHK_1) {
-        src_idx = 1;
-      } else if (df_attributes & DF_NULL_CHK_2) {
-        src_idx = 2;
-      } else {
-        src_idx = 0;
-      }
-      int src_sreg = mir->ssa_rep->uses[src_idx];
-      if (IsBitSet(cu->temp_ssa_register_v, src_sreg)) {
-        // Eliminate the null check
-        mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
-      } else {
-        // Mark s_reg as null-checked
-        SetBit(cu, cu->temp_ssa_register_v, src_sreg);
-      }
-    }
-  }
-
-  // Did anything change?
-  bool res = CompareBitVectors(bb->data_flow_info->ending_null_check_v,
-                                  cu->temp_ssa_register_v);
-  if (res) {
-    CopyBitVector(bb->data_flow_info->ending_null_check_v,
-                     cu->temp_ssa_register_v);
-  }
-  return res;
-}
-
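-// Worked example (hypothetical bytecode):
-//     iget v1, v0, LFoo;->x:I   // null check on v0; marks v0's sreg
-//     iget v2, v0, LFoo;->y:I   // bit already set: MIR_IGNORE_NULL_CHECK
-// Across blocks, the entry state is the intersection of all predecessors'
-// ending_null_check_v, so a check survives unless every path performed it.
-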
-void NullCheckElimination(CompilationUnit *cu)
-{
-  if (!(cu->disable_opt & (1 << kNullCheckElimination))) {
-    DCHECK(cu->temp_ssa_register_v != NULL);
-    DataFlowAnalysisDispatcher(cu, NullCheckEliminationInit, kAllNodes,
-                                  false /* is_iterative */);
-    DataFlowAnalysisDispatcher(cu, EliminateNullChecks,
-                                  kPreOrderDFSTraversal,
-                                  true /* is_iterative */);
-  }
-}
-
-void BasicBlockCombine(CompilationUnit* cu)
-{
-  DataFlowAnalysisDispatcher(cu, CombineBlocks, kPreOrderDFSTraversal, false);
-}
-
-void CodeLayout(CompilationUnit* cu)
-{
-  DataFlowAnalysisDispatcher(cu, LayoutBlocks, kAllNodes, false);
-}
-
-void DumpCheckStats(CompilationUnit *cu)
-{
-  Checkstats* stats =
-      static_cast<Checkstats*>(NewMem(cu, sizeof(Checkstats), true, kAllocDFInfo));
-  cu->checkstats = stats;
-  DataFlowAnalysisDispatcher(cu, CountChecks, kAllNodes, false /* is_iterative */);
-  if (stats->null_checks > 0) {
-    float eliminated = static_cast<float>(stats->null_checks_eliminated);
-    float checks = static_cast<float>(stats->null_checks);
-    LOG(INFO) << "Null Checks: " << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
-              << stats->null_checks_eliminated << " of " << stats->null_checks << " -> "
-              << (eliminated/checks) * 100.0 << "%";
-  }
-  if (stats->range_checks > 0) {
-    float eliminated = static_cast<float>(stats->range_checks_eliminated);
-    float checks = static_cast<float>(stats->range_checks);
-    LOG(INFO) << "Range Checks: " << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
-              << stats->range_checks_eliminated << " of " << stats->range_checks << " -> "
-              << (eliminated/checks) * 100.0 << "%";
-  }
-}
-
-bool BuildExtendedBBList(struct CompilationUnit* cu, struct BasicBlock* bb)
-{
-  if (bb->visited) return false;
-  if (!((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
-      || (bb->block_type == kExitBlock))) {
-    // Ignore special blocks
-    bb->visited = true;
-    return false;
-  }
-  // Must be head of extended basic block.
-  BasicBlock* start_bb = bb;
-  cu->extended_basic_blocks.push_back(bb);
-  bool terminated_by_return = false;
-  // Visit blocks strictly dominated by this head.
-  while (bb != NULL) {
-    bb->visited = true;
-    terminated_by_return |= bb->terminated_by_return;
-    bb = NextDominatedBlock(cu, bb);
-  }
-  if (terminated_by_return) {
-    // This extended basic block contains a return, so mark all members.
-    bb = start_bb;
-    while (bb != NULL) {
-      bb->dominates_return = true;
-      bb = NextDominatedBlock(cu, bb);
-    }
-  }
-  return false; // Not iterative - return value will be ignored
-}
-
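-// An extended basic block here is a head plus the chain of fall-through
-// successors that each have exactly one predecessor; e.g. (hypothetical)
-// bb1 -> bb2 -> bb3 with single-pred links forms one region rooted at bb1,
-// walked via NextDominatedBlock.
-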
-void BasicBlockOptimization(CompilationUnit *cu)
-{
-  if (!(cu->disable_opt & (1 << kBBOpt))) {
-    CompilerInitGrowableList(cu, &cu->compiler_temps, 6, kListMisc);
-    DCHECK_EQ(cu->num_compiler_temps, 0);
-    // Mark all blocks as not visited
-    DataFlowAnalysisDispatcher(cu, ClearVisitedFlag,
-                                  kAllNodes, false /* is_iterative */);
-    DataFlowAnalysisDispatcher(cu, BuildExtendedBBList,
-                                  kPreOrderDFSTraversal,
-                                  false /* is_iterative */);
-    // Perform extended basic block optimizations.
-    for (unsigned int i = 0; i < cu->extended_basic_blocks.size(); i++) {
-      BasicBlockOpt(cu, cu->extended_basic_blocks[i]);
-    }
-  }
-}
-
-static void AddLoopHeader(CompilationUnit* cu, BasicBlock* header,
-                          BasicBlock* back_edge)
-{
-  GrowableListIterator iter;
-  GrowableListIteratorInit(&cu->loop_headers, &iter);
-  for (LoopInfo* loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter));
-      (loop != NULL); loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter))) {
-    if (loop->header == header) {
-      InsertGrowableList(cu, &loop->incoming_back_edges,
-                            reinterpret_cast<uintptr_t>(back_edge));
-      return;
-    }
-  }
-  LoopInfo* info = static_cast<LoopInfo*>(NewMem(cu, sizeof(LoopInfo), true, kAllocDFInfo));
-  info->header = header;
-  CompilerInitGrowableList(cu, &info->incoming_back_edges, 2, kListMisc);
-  InsertGrowableList(cu, &info->incoming_back_edges, reinterpret_cast<uintptr_t>(back_edge));
-  InsertGrowableList(cu, &cu->loop_headers, reinterpret_cast<uintptr_t>(info));
-}
-
-static bool FindBackEdges(struct CompilationUnit* cu, struct BasicBlock* bb)
-{
-  if ((bb->data_flow_info == NULL) || (bb->last_mir_insn == NULL)) {
-    return false;
-  }
-  Instruction::Code opcode = bb->last_mir_insn->dalvikInsn.opcode;
-  if (Instruction::FlagsOf(opcode) & Instruction::kBranch) {
-    if (bb->taken && (bb->taken->start_offset <= bb->start_offset)) {
-      DCHECK(bb->dominators != NULL);
-      if (IsBitSet(bb->dominators, bb->taken->id)) {
-        if (cu->verbose) {
-          LOG(INFO) << "Loop backedge from 0x"
-                    << std::hex << bb->last_mir_insn->offset
-                    << " to 0x" << std::hex << bb->taken->start_offset;
-        }
-        AddLoopHeader(cu, bb->taken, bb);
-      }
-    }
-  }
-  return false;
-}
-
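-// In other words, a back edge is a taken branch whose target both precedes
-// the branch in dex offset order and dominates the branching block: the
-// defining property of a natural-loop header.
-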
-static void AddBlocksToLoop(CompilationUnit* cu, ArenaBitVector* blocks,
-                            BasicBlock* bb, int head_id)
-{
-  if (!IsBitSet(bb->dominators, head_id) ||
-      IsBitSet(blocks, bb->id)) {
-    return;
-  }
-  SetBit(cu, blocks, bb->id);
-  GrowableListIterator iter;
-  GrowableListIteratorInit(bb->predecessors, &iter);
-  BasicBlock* pred_bb;
-  for (pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter)); pred_bb != NULL;
-       pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter))) {
-    AddBlocksToLoop(cu, blocks, pred_bb, head_id);
-  }
-}
-
-static void DumpLoops(CompilationUnit *cu)
-{
-  GrowableListIterator iter;
-  GrowableListIteratorInit(&cu->loop_headers, &iter);
-  for (LoopInfo* loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter));
-      (loop != NULL); loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter))) {
-    LOG(INFO) << "Loop head block id " << loop->header->id
-              << ", offset 0x" << std::hex << loop->header->start_offset
-              << ", Depth: " << loop->header->nesting_depth;
-    GrowableListIterator iter;
-    GrowableListIteratorInit(&loop->incoming_back_edges, &iter);
-    BasicBlock* edge_bb;
-    for (edge_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter)); edge_bb != NULL;
-         edge_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter))) {
-      LOG(INFO) << "    Backedge block id " << edge_bb->id
-                << ", offset 0x" << std::hex << edge_bb->start_offset;
-      ArenaBitVectorIterator b_iter;
-      BitVectorIteratorInit(loop->blocks, &b_iter);
-      for (int bb_id = BitVectorIteratorNext(&b_iter); bb_id != -1;
-           bb_id = BitVectorIteratorNext(&b_iter)) {
-        BasicBlock *bb;
-        bb = reinterpret_cast<BasicBlock*>(GrowableListGetElement(&cu->block_list, bb_id));
-        LOG(INFO) << "        (" << bb->id << ", 0x" << std::hex
-                  << bb->start_offset << ")";
-      }
-    }
-  }
-}
-
-void LoopDetection(CompilationUnit *cu)
-{
-  if (cu->disable_opt & (1 << kPromoteRegs)) {
-    return;
-  }
-  CompilerInitGrowableList(cu, &cu->loop_headers, 6, kListMisc);
-  // Find the loop headers
-  DataFlowAnalysisDispatcher(cu, FindBackEdges, kAllNodes, false /* is_iterative */);
-  GrowableListIterator iter;
-  GrowableListIteratorInit(&cu->loop_headers, &iter);
-  // Add blocks to each header
-  for (LoopInfo* loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter));
-       loop != NULL; loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter))) {
-    loop->blocks = AllocBitVector(cu, cu->num_blocks, true, kBitMapMisc);
-    SetBit(cu, loop->blocks, loop->header->id);
-    GrowableListIterator edge_iter;
-    GrowableListIteratorInit(&loop->incoming_back_edges, &edge_iter);
-    BasicBlock* edge_bb;
-    for (edge_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&edge_iter)); edge_bb != NULL;
-         edge_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&edge_iter))) {
-      AddBlocksToLoop(cu, loop->blocks, edge_bb, loop->header->id);
-    }
-  }
-  // Compute the nesting depth of each header
-  GrowableListIteratorInit(&cu->loop_headers, &iter);
-  for (LoopInfo* loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter));
-       loop != NULL; loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter))) {
-    GrowableListIterator iter2;
-    GrowableListIteratorInit(&cu->loop_headers, &iter2);
-    LoopInfo* loop2;
-    for (loop2 = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter2));
-         loop2 != NULL; loop2 = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter2))) {
-      if (IsBitSet(loop2->blocks, loop->header->id)) {
-         loop->header->nesting_depth++;
-      }
-    }
-  }
-  // Assign nesting depth to each block in all loops
-  GrowableListIteratorInit(&cu->loop_headers, &iter);
-  for (LoopInfo* loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter));
-       (loop != NULL); loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter))) {
-    ArenaBitVectorIterator b_iter;
-    BitVectorIteratorInit(loop->blocks, &b_iter);
-    for (int bb_id = BitVectorIteratorNext(&b_iter); bb_id != -1;
-        bb_id = BitVectorIteratorNext(&b_iter)) {
-      BasicBlock *bb;
-      bb = reinterpret_cast<BasicBlock*>(GrowableListGetElement(&cu->block_list, bb_id));
-      bb->nesting_depth = std::max(bb->nesting_depth,
-                                  loop->header->nesting_depth);
-    }
-  }
-  if (cu->verbose) {
-    DumpLoops(cu);
-  }
-}
-
-/*
- * This function will make a best guess at whether the invoke will
- * end up using Method*.  It isn't critical to get it exactly right,
- * and attempting to do so would involve more complexity than it's
- * worth.
- */
-static bool InvokeUsesMethodStar(CompilationUnit* cu, MIR* mir)
-{
-  InvokeType type;
-  Instruction::Code opcode = mir->dalvikInsn.opcode;
-  switch (opcode) {
-    case Instruction::INVOKE_STATIC:
-    case Instruction::INVOKE_STATIC_RANGE:
-      type = kStatic;
-      break;
-    case Instruction::INVOKE_DIRECT:
-    case Instruction::INVOKE_DIRECT_RANGE:
-      type = kDirect;
-      break;
-    case Instruction::INVOKE_VIRTUAL:
-    case Instruction::INVOKE_VIRTUAL_RANGE:
-      type = kVirtual;
-      break;
-    case Instruction::INVOKE_INTERFACE:
-    case Instruction::INVOKE_INTERFACE_RANGE:
-      return false;
-    case Instruction::INVOKE_SUPER_RANGE:
-    case Instruction::INVOKE_SUPER:
-      type = kSuper;
-      break;
-    default:
-      LOG(WARNING) << "Unexpected invoke op: " << opcode;
-      return false;
-  }
-  DexCompilationUnit m_unit(cu);
-  // TODO: add a flag so we don't count the stats for this twice.
-  uint32_t dex_method_idx = mir->dalvikInsn.vB;
-  int vtable_idx;
-  uintptr_t direct_code;
-  uintptr_t direct_method;
-  bool fast_path =
-      cu->compiler_driver->ComputeInvokeInfo(dex_method_idx, &m_unit, type,
-                                             vtable_idx, direct_code,
-                                             direct_method) &&
-      !SLOW_INVOKE_PATH;
-  return (((type == kDirect) || (type == kStatic)) &&
-          fast_path && ((direct_code == 0) || (direct_method == 0)));
-}
-
-/*
- * Count uses, weighting by loop nesting depth.  This code only
- * counts explicitly used s_regs.  A later phase will add implicit
- * counts for things such as Method*, null-checked references, etc.
- */
-static bool CountUses(struct CompilationUnit* cu, struct BasicBlock* bb)
-{
-  if (bb->block_type != kDalvikByteCode) {
-    return false;
-  }
-  for (MIR* mir = bb->first_mir_insn; (mir != NULL); mir = mir->next) {
-    if (mir->ssa_rep == NULL) {
-      continue;
-    }
-    uint32_t weight = std::min(16U, static_cast<uint32_t>(bb->nesting_depth));
-    for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
-      int s_reg = mir->ssa_rep->uses[i];
-      DCHECK_LT(s_reg, static_cast<int>(cu->use_counts.num_used));
-      cu->raw_use_counts.elem_list[s_reg]++;
-      cu->use_counts.elem_list[s_reg] += (1 << weight);
-    }
-    if (!(cu->disable_opt & (1 << kPromoteCompilerTemps))) {
-      int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
-      // Implicit use of Method*?
-      if (df_attributes & DF_UMS) {
-        /*
-         * Some invokes will not use Method* - need to perform test similar
-         * to that found in GenInvoke() to decide whether to count refs
-         * for Method* on invoke-class opcodes.
-         * TODO: refactor for common test here, save results for GenInvoke
-         */
-        bool uses_method_star = true;
-        if ((df_attributes & (DF_FORMAT_35C | DF_FORMAT_3RC)) &&
-            !(df_attributes & DF_NON_NULL_RET)) {
-          uses_method_star &= InvokeUsesMethodStar(cu, mir);
-        }
-        if (uses_method_star) {
-          cu->raw_use_counts.elem_list[cu->method_sreg]++;
-          cu->use_counts.elem_list[cu->method_sreg] += (1 << weight);
-        }
-      }
-    }
-  }
-  return false;
-}
-
-void MethodUseCount(CompilationUnit *cu)
-{
-  CompilerInitGrowableList(cu, &cu->use_counts, cu->num_ssa_regs + 32, kListMisc);
-  CompilerInitGrowableList(cu, &cu->raw_use_counts, cu->num_ssa_regs + 32, kListMisc);
-  // Initialize list
-  for (int i = 0; i < cu->num_ssa_regs; i++) {
-    InsertGrowableList(cu, &cu->use_counts, 0);
-    InsertGrowableList(cu, &cu->raw_use_counts, 0);
-  }
-  if (cu->disable_opt & (1 << kPromoteRegs)) {
-    return;
-  }
-  DataFlowAnalysisDispatcher(cu, CountUses,
-                                kAllNodes, false /* is_iterative */);
-}
-
-}  // namespace art
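
The weighting in CountUses above is exponential in loop depth, capped at 16.
A minimal standalone sketch of just that arithmetic (hypothetical helper, not
ART API):

    #include <algorithm>
    #include <cstdint>

    // A use at loop nesting depth d contributes 2^min(d, 16) to the weighted
    // count: depth 0 adds 1, depth 1 adds 2, depth 3 adds 8.
    uint32_t WeightedUse(uint32_t nesting_depth) {
      uint32_t weight = std::min(16U, nesting_depth);
      return 1u << weight;
    }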
diff --git a/src/compiler/dex/dataflow.h b/src/compiler/dex/dataflow.h
deleted file mode 100644
index 5bf97ec..0000000
--- a/src/compiler/dex/dataflow.h
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_COMPILER_DEX_DATAFLOW_H_
-#define ART_SRC_COMPILER_DEX_DATAFLOW_H_
-
-#include "compiler_internals.h"
-
-namespace art {
-
-enum DataFlowAttributePos {
-  kUA = 0,
-  kUB,
-  kUC,
-  kAWide,
-  kBWide,
-  kCWide,
-  kDA,
-  kIsMove,
-  kSetsConst,
-  kFormat35c,
-  kFormat3rc,
-  kNullCheckSrc0,        // Null check of uses[0].
-  kNullCheckSrc1,        // Null check of uses[1].
-  kNullCheckSrc2,        // Null check of uses[2].
-  kNullCheckOut0,        // Null check out outgoing arg0.
-  kDstNonNull,           // May assume dst is non-null.
-  kRetNonNull,           // May assume retval is non-null.
-  kNullTransferSrc0,     // Object copy src[0] -> dst.
-  kNullTransferSrcN,     // Phi null check state transfer.
-  kRangeCheckSrc1,       // Range check of uses[1].
-  kRangeCheckSrc2,       // Range check of uses[2].
-  kRangeCheckSrc3,       // Range check of uses[3].
-  kFPA,
-  kFPB,
-  kFPC,
-  kCoreA,
-  kCoreB,
-  kCoreC,
-  kRefA,
-  kRefB,
-  kRefC,
-  kUsesMethodStar,       // Implicit use of Method*.
-};
-
-#define DF_NOP                  0
-#define DF_UA                   (1 << kUA)
-#define DF_UB                   (1 << kUB)
-#define DF_UC                   (1 << kUC)
-#define DF_A_WIDE               (1 << kAWide)
-#define DF_B_WIDE               (1 << kBWide)
-#define DF_C_WIDE               (1 << kCWide)
-#define DF_DA                   (1 << kDA)
-#define DF_IS_MOVE              (1 << kIsMove)
-#define DF_SETS_CONST           (1 << kSetsConst)
-#define DF_FORMAT_35C           (1 << kFormat35c)
-#define DF_FORMAT_3RC           (1 << kFormat3rc)
-#define DF_NULL_CHK_0           (1 << kNullCheckSrc0)
-#define DF_NULL_CHK_1           (1 << kNullCheckSrc1)
-#define DF_NULL_CHK_2           (1 << kNullCheckSrc2)
-#define DF_NULL_CHK_OUT0        (1 << kNullCheckOut0)
-#define DF_NON_NULL_DST         (1 << kDstNonNull)
-#define DF_NON_NULL_RET         (1 << kRetNonNull)
-#define DF_NULL_TRANSFER_0      (1 << kNullTransferSrc0)
-#define DF_NULL_TRANSFER_N      (1 << kNullTransferSrcN)
-#define DF_RANGE_CHK_1          (1 << kRangeCheckSrc1)
-#define DF_RANGE_CHK_2          (1 << kRangeCheckSrc2)
-#define DF_RANGE_CHK_3          (1 << kRangeCheckSrc3)
-#define DF_FP_A                 (1 << kFPA)
-#define DF_FP_B                 (1 << kFPB)
-#define DF_FP_C                 (1 << kFPC)
-#define DF_CORE_A               (1 << kCoreA)
-#define DF_CORE_B               (1 << kCoreB)
-#define DF_CORE_C               (1 << kCoreC)
-#define DF_REF_A                (1 << kRefA)
-#define DF_REF_B                (1 << kRefB)
-#define DF_REF_C                (1 << kRefC)
-#define DF_UMS                  (1 << kUsesMethodStar)
-
-#define DF_HAS_USES             (DF_UA | DF_UB | DF_UC)
-
-#define DF_HAS_DEFS             (DF_DA)
-
-#define DF_HAS_NULL_CHKS        (DF_NULL_CHK_0 | \
-                                 DF_NULL_CHK_1 | \
-                                 DF_NULL_CHK_2 | \
-                                 DF_NULL_CHK_OUT0)
-
-#define DF_HAS_RANGE_CHKS       (DF_RANGE_CHK_1 | \
-                                 DF_RANGE_CHK_2 | \
-                                 DF_RANGE_CHK_3)
-
-#define DF_HAS_NR_CHKS          (DF_HAS_NULL_CHKS | \
-                                 DF_HAS_RANGE_CHKS)
-
-#define DF_A_IS_REG             (DF_UA | DF_DA)
-#define DF_B_IS_REG             (DF_UB)
-#define DF_C_IS_REG             (DF_UC)
-#define DF_IS_GETTER_OR_SETTER  (DF_IS_GETTER | DF_IS_SETTER)
-#define DF_USES_FP              (DF_FP_A | DF_FP_B | DF_FP_C)
-
-extern const int oat_data_flow_attributes[kMirOpLast];
-
-struct BasicBlockDataFlow {
-  ArenaBitVector* use_v;
-  ArenaBitVector* def_v;
-  ArenaBitVector* live_in_v;
-  ArenaBitVector* phi_v;
-  int* vreg_to_ssa_map;
-  ArenaBitVector* ending_null_check_v;
-};
-
-struct SSARepresentation {
-  int num_uses;
-  int* uses;
-  bool* fp_use;
-  int num_defs;
-  int* defs;
-  bool* fp_def;
-};
-
-/*
- * An induction variable is represented by "m*i + c", where i is a basic
- * induction variable.
- */
-struct InductionVariableInfo {
-  int ssa_reg;
-  int basic_ssa_reg;
-  int m;      // multiplier
-  int c;      // constant
-  int inc;    // loop increment
-};
-
-struct ArrayAccessInfo {
-  int array_reg;
-  int iv_reg;
-  int max_c;                   // For DIV - will affect upper bound checking.
-  int min_c;                   // For DIV - will affect lower bound checking.
-};
-
-struct LoopInfo {
-  BasicBlock* header;
-  GrowableList incoming_back_edges;
-  ArenaBitVector* blocks;
-};
-
-static inline unsigned int Predecessors(BasicBlock* bb) { return bb->predecessors->num_used; }
-
-int SRegToVReg(const CompilationUnit* cu, int ssa_reg);
-char* GetDalvikDisassembly(CompilationUnit* cu, const MIR* mir);
-bool FindLocalLiveIn(CompilationUnit* cu, BasicBlock* bb);
-bool DoSSAConversion(CompilationUnit* cu, BasicBlock* bb);
-bool DoConstantPropogation(CompilationUnit* cu, BasicBlock* bb);
-void CompilerInitializeSSAConversion(CompilationUnit* cu);
-bool ClearVisitedFlag(struct CompilationUnit* cu, struct BasicBlock* bb);
-void DataFlowAnalysisDispatcher(CompilationUnit* cu, bool (*func)(CompilationUnit*, BasicBlock*),
-                                DataFlowAnalysisMode dfa_mode, bool is_iterative);
-MIR* FindMoveResult(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
-void NullCheckElimination(CompilationUnit *cu);
-void BasicBlockCombine(CompilationUnit* cu);
-void CodeLayout(CompilationUnit* cu);
-void DumpCheckStats(CompilationUnit *cu);
-void BasicBlockOptimization(CompilationUnit *cu);
-void LoopDetection(CompilationUnit *cu);
-void MethodUseCount(CompilationUnit *cu);
-
-}  // namespace art
-
-#endif  // ART_SRC_COMPILER_DEX_DATAFLOW_H_
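
The DF_* masks above are consumed as a single per-opcode attribute word.  A
standalone sketch of a typical query (bit positions mirror the enum above;
the helper name is illustrative):

    #include <cstdint>

    constexpr uint32_t kDfNullChk0  = 1u << 11;  // kNullCheckSrc0
    constexpr uint32_t kDfRangeChk1 = 1u << 19;  // kRangeCheckSrc1

    bool NeedsNullOrRangeCheck(uint32_t df_attributes) {
      return (df_attributes & (kDfNullChk0 | kDfRangeChk1)) != 0;
    }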
diff --git a/src/compiler/dex/dataflow_iterator.cc b/src/compiler/dex/dataflow_iterator.cc
new file mode 100644
index 0000000..6a3975e
--- /dev/null
+++ b/src/compiler/dex/dataflow_iterator.cc
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dataflow_iterator.h"
+
+namespace art {
+
+  DataflowIterator::DataflowIterator(MIRGraph* mir_graph, DataFlowAnalysisMode dfa_mode, bool is_iterative)
+      : mir_graph_(mir_graph),
+        mode_(dfa_mode),
+        is_iterative_(is_iterative),
+        changed_(false) {
+    switch (mode_) {
+      case kAllNodes:
+        GrowableListIteratorInit(mir_graph_->GetBlockList(), &all_nodes_iterator_);
+        break;
+
+      case kReachableNodes:
+      case kPreOrderDFSTraversal:
+        start_idx_ = 0;
+        end_idx_ = mir_graph_->GetNumReachableBlocks();
+        idx_ = start_idx_;
+        block_id_list_ = mir_graph_->GetDfsOrder();
+        reverse_ = false;
+        break;
+
+      case kPostOrderDFSTraversal:
+        start_idx_ = mir_graph_->GetNumReachableBlocks() - 1;
+        end_idx_ = 0;
+        idx_ = start_idx_;
+        block_id_list_ = mir_graph_->GetDfsOrder();
+        reverse_ = true;
+        break;
+
+      case kPostOrderDOMTraversal:
+        start_idx_ = 0;
+        end_idx_ = mir_graph_->GetNumReachableBlocks();
+        idx_ = start_idx_;
+        block_id_list_ = mir_graph_->GetDomPostOrder();
+        reverse_ = false;
+        break;
+
+      case kReversePostOrderTraversal:
+        start_idx_ = mir_graph_->GetNumReachableBlocks() - 1;
+        end_idx_ = 0;
+        idx_ = start_idx_;
+        block_id_list_ = mir_graph_->GetDfsPostOrder();
+        reverse_ = true;
+        break;
+      default:
+        LOG(FATAL) << "Unknown traversal mode: " << dfa_mode;
+    }
+  }
+
+  BasicBlock* DataflowIterator::NextBody(bool had_change)
+  {
+    changed_ |= had_change;
+    BasicBlock* res = NULL;
+    if (mode_ == kAllNodes) {
+      bool keep_looking = true;
+      while (keep_looking) {
+        res = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&all_nodes_iterator_));
+        if (is_iterative_ && changed_ && (res == NULL)) {
+          GrowableListIteratorInit(mir_graph_->GetBlockList(), &all_nodes_iterator_);
+          changed_ = false;
+        } else if ((res == NULL) || (!res->hidden)) {
+          keep_looking = false;
+        }
+      }
+    } else if (reverse_) {
+      if (is_iterative_ && changed_ && (idx_ < 0)) {
+        idx_ = start_idx_;
+        changed_ = false;
+      }
+      if (idx_ >= 0) {
+        int bb_id = block_id_list_->elem_list[idx_--];
+        res = mir_graph_->GetBasicBlock(bb_id);
+      }
+    } else {
+      if (is_iterative_ && changed_ && (idx_ >= end_idx_)) {
+        idx_ = start_idx_;
+        changed_ = false;
+      }
+      if (idx_ < end_idx_) {
+        int bb_id = block_id_list_->elem_list[idx_++];
+        res = mir_graph_->GetBasicBlock(bb_id);
+      }
+    }
+    return res;
+  }
+
+  BasicBlock* DataflowIterator::Next(bool had_change)
+  {
+    DCHECK(is_iterative_);
+    return NextBody(had_change);
+  }
+
+  BasicBlock* DataflowIterator::Next()
+  {
+    DCHECK(!is_iterative_);
+    return NextBody(false);
+  }
+
+}  // namespace art
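
For context, a hypothetical caller sketch showing how a pass drives the new
iterator in place of the old function-pointer dispatcher (DoSomePass is an
assumed placeholder, not ART API):

    void RunPass(MIRGraph* mir_graph) {
      // Iterative mode: the iterator restarts the traversal as long as some
      // block reported a change on the previous sweep.
      DataflowIterator iter(mir_graph, kPreOrderDFSTraversal, true /* is_iterative */);
      bool change = false;
      for (BasicBlock* bb = iter.Next(change); bb != NULL; bb = iter.Next(change)) {
        change = DoSomePass(bb);  // true when bb was modified
      }
    }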
diff --git a/src/compiler/dex/dataflow_iterator.h b/src/compiler/dex/dataflow_iterator.h
new file mode 100644
index 0000000..7acaf43
--- /dev/null
+++ b/src/compiler/dex/dataflow_iterator.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_DATAFLOW_ITERATOR_H_
+#define ART_SRC_COMPILER_DEX_DATAFLOW_ITERATOR_H_
+
+#include "compiler_ir.h"
+#include "mir_graph.h"
+
+namespace art {
+
+  class DataflowIterator {
+    public:
+      DataflowIterator(MIRGraph* mir_graph, DataFlowAnalysisMode dfa_mode, bool is_iterative);
+      ~DataflowIterator() {}
+
+      BasicBlock* Next(bool had_change);
+      BasicBlock* Next();
+
+    private:
+      // TODO: rework this class.
+      MIRGraph* mir_graph_;
+      DataFlowAnalysisMode mode_;
+      bool is_iterative_;
+      bool changed_;
+      int start_idx_;
+      int end_idx_;
+      int idx_;
+      bool reverse_;
+      GrowableList* block_id_list_;
+      GrowableListIterator all_nodes_iterator_;
+
+      BasicBlock* NextBody(bool had_change);
+
+  }; // DataflowIterator
+}  // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_DATAFLOW_ITERATOR_H_
diff --git a/src/compiler/dex/frontend.cc b/src/compiler/dex/frontend.cc
index 0d3cb2e..049d662 100644
--- a/src/compiler/dex/frontend.cc
+++ b/src/compiler/dex/frontend.cc
@@ -18,12 +18,10 @@
 
 #include "compiler/driver/compiler_driver.h"
 #include "compiler_internals.h"
+#include "dataflow_iterator.h"
 #if defined(ART_USE_PORTABLE_COMPILER)
 #include "compiler/llvm/llvm_compilation_unit.h"
 #endif
-#include "dataflow.h"
-#include "dex_file-inl.h"
-#include "ssa_transformation.h"
 #include "leb128.h"
 #include "mirror/object.h"
 #include "runtime.h"
@@ -80,7 +78,6 @@
   //(1 << kNullCheckElimination) |
   //(1 << kPromoteRegs) |
   //(1 << kTrackLiveTemps) |
-  (1 << kSkipLargeMethodOptimization) |
   //(1 << kSafeOptimizations) |
   //(1 << kBBOpt) |
   //(1 << kMatch) |
@@ -106,681 +103,6 @@
   //(1 << kDebugVerifyBitcode) |
   0;
 
-static bool ContentIsInsn(const uint16_t* code_ptr) {
-  uint16_t instr = *code_ptr;
-  Instruction::Code opcode = static_cast<Instruction::Code>(instr & 0xff);
-
-  /*
-   * Since the low 8 bits of a data payload may look like a NOP, we need to
-   * check both the low byte and the whole code unit to determine whether it
-   * is code or data.
-   */
-  return (opcode != Instruction::NOP || instr == 0);
-}
-
-/*
- * Parse an instruction and return its length in code units.
- */
-static int ParseInsn(CompilationUnit* cu, const uint16_t* code_ptr,
-                     DecodedInstruction* decoded_instruction)
-{
-  // Don't parse instruction data
-  if (!ContentIsInsn(code_ptr)) {
-    return 0;
-  }
-
-  const Instruction* instruction = Instruction::At(code_ptr);
-  *decoded_instruction = DecodedInstruction(instruction);
-
-  return instruction->SizeInCodeUnits();
-}
-
-#define UNKNOWN_TARGET 0xffffffff
-
-/* Split an existing block from the specified code offset into two */
-static BasicBlock *SplitBlock(CompilationUnit* cu, unsigned int code_offset,
-                              BasicBlock* orig_block, BasicBlock** immed_pred_block_p)
-{
-  MIR* insn = orig_block->first_mir_insn;
-  while (insn) {
-    if (insn->offset == code_offset) break;
-    insn = insn->next;
-  }
-  if (insn == NULL) {
-    LOG(FATAL) << "Break split failed";
-  }
-  BasicBlock *bottom_block = NewMemBB(cu, kDalvikByteCode,
-                                     cu->num_blocks++);
-  InsertGrowableList(cu, &cu->block_list, reinterpret_cast<uintptr_t>(bottom_block));
-
-  bottom_block->start_offset = code_offset;
-  bottom_block->first_mir_insn = insn;
-  bottom_block->last_mir_insn = orig_block->last_mir_insn;
-
-  /* If this block was terminated by a return, the flag needs to go with the bottom block */
-  bottom_block->terminated_by_return = orig_block->terminated_by_return;
-  orig_block->terminated_by_return = false;
-
-  /* Add it to the quick lookup cache */
-  cu->block_map.Put(bottom_block->start_offset, bottom_block);
-
-  /* Handle the taken path */
-  bottom_block->taken = orig_block->taken;
-  if (bottom_block->taken) {
-    orig_block->taken = NULL;
-    DeleteGrowableList(bottom_block->taken->predecessors, reinterpret_cast<uintptr_t>(orig_block));
-    InsertGrowableList(cu, bottom_block->taken->predecessors,
-                          reinterpret_cast<uintptr_t>(bottom_block));
-  }
-
-  /* Handle the fallthrough path */
-  bottom_block->fall_through = orig_block->fall_through;
-  orig_block->fall_through = bottom_block;
-  InsertGrowableList(cu, bottom_block->predecessors,
-                        reinterpret_cast<uintptr_t>(orig_block));
-  if (bottom_block->fall_through) {
-    DeleteGrowableList(bottom_block->fall_through->predecessors,
-                          reinterpret_cast<uintptr_t>(orig_block));
-    InsertGrowableList(cu, bottom_block->fall_through->predecessors,
-                          reinterpret_cast<uintptr_t>(bottom_block));
-  }
-
-  /* Handle the successor list */
-  if (orig_block->successor_block_list.block_list_type != kNotUsed) {
-    bottom_block->successor_block_list = orig_block->successor_block_list;
-    orig_block->successor_block_list.block_list_type = kNotUsed;
-    GrowableListIterator iterator;
-
-    GrowableListIteratorInit(&bottom_block->successor_block_list.blocks,
-                                &iterator);
-    while (true) {
-      SuccessorBlockInfo *successor_block_info =
-          reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
-      if (successor_block_info == NULL) break;
-      BasicBlock *bb = successor_block_info->block;
-      DeleteGrowableList(bb->predecessors, reinterpret_cast<uintptr_t>(orig_block));
-      InsertGrowableList(cu, bb->predecessors, reinterpret_cast<uintptr_t>(bottom_block));
-    }
-  }
-
-  orig_block->last_mir_insn = insn->prev;
-
-  insn->prev->next = NULL;
-  insn->prev = NULL;
-  /*
-   * Update the immediate predecessor block pointer so that outgoing edges
-   * can be applied to the proper block.
-   */
-  if (immed_pred_block_p) {
-    DCHECK_EQ(*immed_pred_block_p, orig_block);
-    *immed_pred_block_p = bottom_block;
-  }
-  return bottom_block;
-}
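
The core of the split is simple list surgery; a standalone sketch with a
simplified instruction type (not ART code):

    struct Insn { unsigned offset; Insn* prev; Insn* next; };

    // Cut the doubly linked chain just before 'split_point': the original
    // block keeps everything in front of it, and the new bottom block
    // starts at 'split_point'.
    Insn* SplitAt(Insn* split_point) {
      if (split_point->prev != nullptr) {
        split_point->prev->next = nullptr;
        split_point->prev = nullptr;
      }
      return split_point;
    }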
-
-/*
- * Given a code offset, find out the block that starts with it. If the offset
- * is in the middle of an existing block, split it into two.  If immed_pred_block_p
- * is non-null and points to the block being split, update *immed_pred_block_p to
- * point to the bottom block so that outgoing edges can be set up properly
- * (by the caller)
- * Utilizes a map for fast lookup of the typical cases.
- */
-BasicBlock *FindBlock(CompilationUnit* cu, unsigned int code_offset,
-                      bool split, bool create, BasicBlock** immed_pred_block_p)
-{
-  GrowableList* block_list = &cu->block_list;
-  BasicBlock* bb;
-  unsigned int i;
-  SafeMap<unsigned int, BasicBlock*>::iterator it;
-
-  it = cu->block_map.find(code_offset);
-  if (it != cu->block_map.end()) {
-    return it->second;
-  } else if (!create) {
-    return NULL;
-  }
-
-  if (split) {
-    for (i = 0; i < block_list->num_used; i++) {
-      bb = reinterpret_cast<BasicBlock*>(block_list->elem_list[i]);
-      if (bb->block_type != kDalvikByteCode) continue;
-      /* Check if a branch jumps into the middle of an existing block */
-      if ((code_offset > bb->start_offset) && (bb->last_mir_insn != NULL) &&
-          (code_offset <= bb->last_mir_insn->offset)) {
-        BasicBlock *new_bb = SplitBlock(cu, code_offset, bb,
-                                       bb == *immed_pred_block_p ?
-                                       immed_pred_block_p : NULL);
-        return new_bb;
-      }
-    }
-  }
-
-  /* Create a new one */
-  bb = NewMemBB(cu, kDalvikByteCode, cu->num_blocks++);
-  InsertGrowableList(cu, &cu->block_list, reinterpret_cast<uintptr_t>(bb));
-  bb->start_offset = code_offset;
-  cu->block_map.Put(bb->start_offset, bb);
-  return bb;
-}
-
-/* Find existing block */
-BasicBlock* FindBlock(CompilationUnit* cu, unsigned int code_offset)
-{
-  return FindBlock(cu, code_offset, false, false, NULL);
-}
-
-/* Turn method name into a legal Linux file name */
-void ReplaceSpecialChars(std::string& str)
-{
-  static const struct { const char before; const char after; } match[] =
-      {{'/','-'}, {';','#'}, {' ','#'}, {'$','+'},
-       {'(','@'}, {')','@'}, {'<','='}, {'>','='}};
-  for (unsigned int i = 0; i < sizeof(match)/sizeof(match[0]); i++) {
-    std::replace(str.begin(), str.end(), match[i].before, match[i].after);
-  }
-}
-
-/* Dump the CFG into a DOT graph */
-void DumpCFG(CompilationUnit* cu, const char* dir_prefix, bool all_blocks)
-{
-  FILE* file;
-  std::string fname(PrettyMethod(cu->method_idx, *cu->dex_file));
-  ReplaceSpecialChars(fname);
-  fname = StringPrintf("%s%s%x.dot", dir_prefix, fname.c_str(),
-                      cu->entry_block->fall_through->start_offset);
-  file = fopen(fname.c_str(), "w");
-  if (file == NULL) {
-    return;
-  }
-  fprintf(file, "digraph G {\n");
-
-  fprintf(file, "  rankdir=TB\n");
-
-  int num_blocks = all_blocks ? cu->num_blocks : cu->num_reachable_blocks;
-  int idx;
-  const GrowableList *block_list = &cu->block_list;
-
-  for (idx = 0; idx < num_blocks; idx++) {
-    int block_idx = all_blocks ? idx : cu->dfs_order.elem_list[idx];
-    BasicBlock *bb = reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, block_idx));
-    if (bb == NULL) break;
-    if (bb->block_type == kDead) continue;
-    if (bb->block_type == kEntryBlock) {
-      fprintf(file, "  entry_%d [shape=Mdiamond];\n", bb->id);
-    } else if (bb->block_type == kExitBlock) {
-      fprintf(file, "  exit_%d [shape=Mdiamond];\n", bb->id);
-    } else if (bb->block_type == kDalvikByteCode) {
-      fprintf(file, "  block%04x_%d [shape=record,label = \"{ \\\n",
-              bb->start_offset, bb->id);
-      const MIR *mir;
-        fprintf(file, "    {block id %d\\l}%s\\\n", bb->id,
-                bb->first_mir_insn ? " | " : " ");
-        for (mir = bb->first_mir_insn; mir; mir = mir->next) {
-            int opcode = mir->dalvikInsn.opcode;
-            fprintf(file, "    {%04x %s %s %s\\l}%s\\\n", mir->offset,
-                    mir->ssa_rep ? GetDalvikDisassembly(cu, mir) :
-                    (opcode < kMirOpFirst) ?  Instruction::Name(mir->dalvikInsn.opcode) :
-                    extended_mir_op_names[opcode - kMirOpFirst],
-                    (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0 ? " no_rangecheck" : " ",
-                    (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0 ? " no_nullcheck" : " ",
-                    mir->next ? " | " : " ");
-        }
-        fprintf(file, "  }\"];\n\n");
-    } else if (bb->block_type == kExceptionHandling) {
-      char block_name[BLOCK_NAME_LEN];
-
-      GetBlockName(bb, block_name);
-      fprintf(file, "  %s [shape=invhouse];\n", block_name);
-    }
-
-    char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];
-
-    if (bb->taken) {
-      GetBlockName(bb, block_name1);
-      GetBlockName(bb->taken, block_name2);
-      fprintf(file, "  %s:s -> %s:n [style=dotted]\n",
-              block_name1, block_name2);
-    }
-    if (bb->fall_through) {
-      GetBlockName(bb, block_name1);
-      GetBlockName(bb->fall_through, block_name2);
-      fprintf(file, "  %s:s -> %s:n\n", block_name1, block_name2);
-    }
-
-    if (bb->successor_block_list.block_list_type != kNotUsed) {
-      fprintf(file, "  succ%04x_%d [shape=%s,label = \"{ \\\n",
-              bb->start_offset, bb->id,
-              (bb->successor_block_list.block_list_type == kCatch) ?
-               "Mrecord" : "record");
-      GrowableListIterator iterator;
-      GrowableListIteratorInit(&bb->successor_block_list.blocks,
-                                  &iterator);
-      SuccessorBlockInfo *successor_block_info =
-          reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
-
-      int succ_id = 0;
-      while (true) {
-        if (successor_block_info == NULL) break;
-
-        BasicBlock *dest_block = successor_block_info->block;
-        SuccessorBlockInfo *next_successor_block_info =
-            reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
-
-        fprintf(file, "    {<f%d> %04x: %04x\\l}%s\\\n",
-                succ_id++,
-                successor_block_info->key,
-                dest_block->start_offset,
-                (next_successor_block_info != NULL) ? " | " : " ");
-
-        successor_block_info = next_successor_block_info;
-      }
-      fprintf(file, "  }\"];\n\n");
-
-      GetBlockName(bb, block_name1);
-      fprintf(file, "  %s:s -> succ%04x_%d:n [style=dashed]\n",
-              block_name1, bb->start_offset, bb->id);
-
-      if (bb->successor_block_list.block_list_type == kPackedSwitch ||
-          bb->successor_block_list.block_list_type == kSparseSwitch) {
-
-        GrowableListIteratorInit(&bb->successor_block_list.blocks,
-                                    &iterator);
-
-        succ_id = 0;
-        while (true) {
-          SuccessorBlockInfo *successor_block_info =
-              reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
-          if (successor_block_info == NULL) break;
-
-          BasicBlock *dest_block = successor_block_info->block;
-
-          GetBlockName(dest_block, block_name2);
-          fprintf(file, "  succ%04x_%d:f%d:e -> %s:n\n", bb->start_offset,
-                  bb->id, succ_id++, block_name2);
-        }
-      }
-    }
-    fprintf(file, "\n");
-
-    if (cu->verbose) {
-      /* Display the dominator tree */
-      GetBlockName(bb, block_name1);
-      fprintf(file, "  cfg%s [label=\"%s\", shape=none];\n",
-              block_name1, block_name1);
-      if (bb->i_dom) {
-        GetBlockName(bb->i_dom, block_name2);
-        fprintf(file, "  cfg%s:s -> cfg%s:n\n\n", block_name2, block_name1);
-      }
-    }
-  }
-  fprintf(file, "}\n");
-  fclose(file);
-}
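
The emitted files are plain Graphviz, so a dumped CFG can be rendered with
the standard dot tool, e.g. (output name will vary per method):

    dot -Tpng <method>.dot -o <method>.png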
-
-/* Verify that each recorded predecessor actually lists this block as a successor */
-static bool VerifyPredInfo(CompilationUnit* cu, BasicBlock* bb)
-{
-  GrowableListIterator iter;
-
-  GrowableListIteratorInit(bb->predecessors, &iter);
-  while (true) {
-    BasicBlock *pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
-    if (!pred_bb) break;
-    bool found = false;
-    if (pred_bb->taken == bb) {
-      found = true;
-    } else if (pred_bb->fall_through == bb) {
-      found = true;
-    } else if (pred_bb->successor_block_list.block_list_type != kNotUsed) {
-      GrowableListIterator iterator;
-      GrowableListIteratorInit(&pred_bb->successor_block_list.blocks,
-                                  &iterator);
-      while (true) {
-        SuccessorBlockInfo *successor_block_info =
-            reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
-        if (successor_block_info == NULL) break;
-        BasicBlock *succ_bb = successor_block_info->block;
-        if (succ_bb == bb) {
-          found = true;
-          break;
-        }
-      }
-    }
-    if (!found) {
-      char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];
-      GetBlockName(bb, block_name1);
-      GetBlockName(pred_bb, block_name2);
-      DumpCFG(cu, "/sdcard/cfg/", false);
-      LOG(FATAL) << "Successor " << block_name1 << " not found from "
-                 << block_name2;
-    }
-  }
-  return true;
-}
-
-/* Identify code range in try blocks and set up the empty catch blocks */
-static void ProcessTryCatchBlocks(CompilationUnit* cu)
-{
-  const DexFile::CodeItem* code_item = cu->code_item;
-  int tries_size = code_item->tries_size_;
-  int offset;
-
-  if (tries_size == 0) {
-    return;
-  }
-
-  ArenaBitVector* try_block_addr = cu->try_block_addr;
-
-  for (int i = 0; i < tries_size; i++) {
-    const DexFile::TryItem* pTry =
-        DexFile::GetTryItems(*code_item, i);
-    int start_offset = pTry->start_addr_;
-    int end_offset = start_offset + pTry->insn_count_;
-    for (offset = start_offset; offset < end_offset; offset++) {
-      SetBit(cu, try_block_addr, offset);
-    }
-  }
-
-  // Iterate over each of the handlers to enqueue the empty Catch blocks
-  const byte* handlers_ptr = DexFile::GetCatchHandlerData(*code_item, 0);
-  uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
-  for (uint32_t idx = 0; idx < handlers_size; idx++) {
-    CatchHandlerIterator iterator(handlers_ptr);
-    for (; iterator.HasNext(); iterator.Next()) {
-      uint32_t address = iterator.GetHandlerAddress();
-      FindBlock(cu, address, false /* split */, true /*create*/,
-                /* immed_pred_block_p */ NULL);
-    }
-    handlers_ptr = iterator.EndDataPointer();
-  }
-}
-
-/* Process instructions with the kBranch flag */
-static BasicBlock* ProcessCanBranch(CompilationUnit* cu, BasicBlock* cur_block,
-                                    MIR* insn, int cur_offset, int width, int flags,
-                                    const uint16_t* code_ptr, const uint16_t* code_end)
-{
-  int target = cur_offset;
-  switch (insn->dalvikInsn.opcode) {
-    case Instruction::GOTO:
-    case Instruction::GOTO_16:
-    case Instruction::GOTO_32:
-      target += insn->dalvikInsn.vA;
-      break;
-    case Instruction::IF_EQ:
-    case Instruction::IF_NE:
-    case Instruction::IF_LT:
-    case Instruction::IF_GE:
-    case Instruction::IF_GT:
-    case Instruction::IF_LE:
-      cur_block->conditional_branch = true;
-      target += insn->dalvikInsn.vC;
-      break;
-    case Instruction::IF_EQZ:
-    case Instruction::IF_NEZ:
-    case Instruction::IF_LTZ:
-    case Instruction::IF_GEZ:
-    case Instruction::IF_GTZ:
-    case Instruction::IF_LEZ:
-      cur_block->conditional_branch = true;
-      target += insn->dalvikInsn.vB;
-      break;
-    default:
-      LOG(FATAL) << "Unexpected opcode(" << insn->dalvikInsn.opcode << ") with kBranch set";
-  }
-  BasicBlock *taken_block = FindBlock(cu, target,
-                                     /* split */
-                                     true,
-                                     /* create */
-                                     true,
-                                     /* immed_pred_block_p */
-                                     &cur_block);
-  cur_block->taken = taken_block;
-  InsertGrowableList(cu, taken_block->predecessors, reinterpret_cast<uintptr_t>(cur_block));
-
-  /* Always terminate the current block for conditional branches */
-  if (flags & Instruction::kContinue) {
-    BasicBlock *fallthrough_block = FindBlock(cu,
-                                             cur_offset + width,
-                                             /*
-                                              * If the method is processed
-                                              * in sequential order from the
-                                              * beginning, we don't need to
-                                              * specify split for continue
-                                              * blocks. However, this
-                                              * routine can be called by
-                                              * compileLoop, which starts
-                                              * parsing the method from an
-                                              * arbitrary address in the
-                                              * method body.
-                                              */
-                                             true,
-                                             /* create */
-                                             true,
-                                             /* immed_pred_block_p */
-                                             &cur_block);
-    cur_block->fall_through = fallthrough_block;
-    InsertGrowableList(cu, fallthrough_block->predecessors,
-                          reinterpret_cast<uintptr_t>(cur_block));
-  } else if (code_ptr < code_end) {
-    /* Create a fallthrough block for real instructions (incl. NOP) */
-    if (ContentIsInsn(code_ptr)) {
-      FindBlock(cu, cur_offset + width,
-                /* split */
-                false,
-                /* create */
-                true,
-                /* immed_pred_block_p */
-                NULL);
-    }
-  }
-  return cur_block;
-}
-
-/* Process instructions with the kSwitch flag */
-static void ProcessCanSwitch(CompilationUnit* cu, BasicBlock* cur_block,
-                             MIR* insn, int cur_offset, int width, int flags)
-{
-  const uint16_t* switch_data =
-      reinterpret_cast<const uint16_t*>(cu->insns + cur_offset + insn->dalvikInsn.vB);
-  int size;
-  const int* keyTable;
-  const int* target_table;
-  int i;
-  int first_key;
-
-  /*
-   * Packed switch data format:
-   *  ushort ident = 0x0100   magic value
-   *  ushort size             number of entries in the table
-   *  int first_key           first (and lowest) switch case value
-   *  int targets[size]       branch targets, relative to switch opcode
-   *
-   * Total size is (4+size*2) 16-bit code units.
-   */
-  if (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) {
-    DCHECK_EQ(static_cast<int>(switch_data[0]),
-              static_cast<int>(Instruction::kPackedSwitchSignature));
-    size = switch_data[1];
-    first_key = switch_data[2] | (switch_data[3] << 16);
-    target_table = reinterpret_cast<const int*>(&switch_data[4]);
-    keyTable = NULL;        // Make the compiler happy
-  /*
-   * Sparse switch data format:
-   *  ushort ident = 0x0200   magic value
-   *  ushort size             number of entries in the table; > 0
-   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
-   *  int targets[size]       branch targets, relative to switch opcode
-   *
-   * Total size is (2+size*4) 16-bit code units.
-   */
-  } else {
-    DCHECK_EQ(static_cast<int>(switch_data[0]),
-              static_cast<int>(Instruction::kSparseSwitchSignature));
-    size = switch_data[1];
-    keyTable = reinterpret_cast<const int*>(&switch_data[2]);
-    target_table = reinterpret_cast<const int*>(&switch_data[2 + size*2]);
-    first_key = 0;   // To make the compiler happy
-  }
-
-  if (cur_block->successor_block_list.block_list_type != kNotUsed) {
-    LOG(FATAL) << "Successor block list already in use: "
-               << static_cast<int>(cur_block->successor_block_list.block_list_type);
-  }
-  cur_block->successor_block_list.block_list_type =
-      (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
-      kPackedSwitch : kSparseSwitch;
-  CompilerInitGrowableList(cu, &cur_block->successor_block_list.blocks, size,
-                      kListSuccessorBlocks);
-
-  for (i = 0; i < size; i++) {
-    BasicBlock *case_block = FindBlock(cu, cur_offset + target_table[i],
-                                      /* split */
-                                      true,
-                                      /* create */
-                                      true,
-                                      /* immed_pred_block_p */
-                                      &cur_block);
-    SuccessorBlockInfo *successor_block_info =
-        static_cast<SuccessorBlockInfo*>(NewMem(cu, sizeof(SuccessorBlockInfo),
-                                         false, kAllocSuccessor));
-    successor_block_info->block = case_block;
-    successor_block_info->key =
-        (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
-        first_key + i : keyTable[i];
-    InsertGrowableList(cu, &cur_block->successor_block_list.blocks,
-                          reinterpret_cast<uintptr_t>(successor_block_info));
-    InsertGrowableList(cu, case_block->predecessors,
-                          reinterpret_cast<uintptr_t>(cur_block));
-  }
-
-  /* Fall-through case */
-  BasicBlock* fallthrough_block = FindBlock(cu,
-                                           cur_offset + width,
-                                           /* split */
-                                           false,
-                                           /* create */
-                                           true,
-                                           /* immed_pred_block_p */
-                                           NULL);
-  cur_block->fall_through = fallthrough_block;
-  InsertGrowableList(cu, fallthrough_block->predecessors,
-                        reinterpret_cast<uintptr_t>(cur_block));
-}
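
A standalone sketch of the packed-switch payload decode performed above
(layout per the comment block; simplified, not ART code):

    #include <cstdint>

    struct PackedSwitch {
      int size;            // number of case targets
      int first_key;       // lowest case value
      const int* targets;  // branch offsets, relative to the switch opcode
    };

    PackedSwitch DecodePacked(const uint16_t* data) {
      // data[0] holds the 0x0100 signature, checked by the caller.
      PackedSwitch ps;
      ps.size = data[1];
      ps.first_key = data[2] | (data[3] << 16);
      ps.targets = reinterpret_cast<const int*>(&data[4]);
      return ps;
    }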
-
-/* Process instructions with the kThrow flag */
-static BasicBlock* ProcessCanThrow(CompilationUnit* cu, BasicBlock* cur_block,
-                                   MIR* insn, int cur_offset, int width, int flags,
-                                   ArenaBitVector* try_block_addr, const uint16_t* code_ptr,
-                                   const uint16_t* code_end)
-{
-  const DexFile::CodeItem* code_item = cu->code_item;
-  bool in_try_block = IsBitSet(try_block_addr, cur_offset);
-
-  /* In try block */
-  if (in_try_block) {
-    CatchHandlerIterator iterator(*code_item, cur_offset);
-
-    if (cur_block->successor_block_list.block_list_type != kNotUsed) {
-      LOG(INFO) << PrettyMethod(cu->method_idx, *cu->dex_file);
-      LOG(FATAL) << "Successor block list already in use: "
-                 << static_cast<int>(cur_block->successor_block_list.block_list_type);
-    }
-
-    cur_block->successor_block_list.block_list_type = kCatch;
-    CompilerInitGrowableList(cu, &cur_block->successor_block_list.blocks, 2,
-                        kListSuccessorBlocks);
-
-    for (; iterator.HasNext(); iterator.Next()) {
-      BasicBlock *catch_block = FindBlock(cu, iterator.GetHandlerAddress(),
-                                         false /* split*/,
-                                         false /* create */,
-                                         NULL  /* immed_pred_block_p */);
-      catch_block->catch_entry = true;
-      cu->catches.insert(catch_block->start_offset);
-      SuccessorBlockInfo *successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
-          (NewMem(cu, sizeof(SuccessorBlockInfo), false, kAllocSuccessor));
-      successor_block_info->block = catch_block;
-      successor_block_info->key = iterator.GetHandlerTypeIndex();
-      InsertGrowableList(cu, &cur_block->successor_block_list.blocks,
-                            reinterpret_cast<uintptr_t>(successor_block_info));
-      InsertGrowableList(cu, catch_block->predecessors,
-                            reinterpret_cast<uintptr_t>(cur_block));
-    }
-  } else {
-    BasicBlock *eh_block = NewMemBB(cu, kExceptionHandling,
-                                   cu->num_blocks++);
-    cur_block->taken = eh_block;
-    InsertGrowableList(cu, &cu->block_list, reinterpret_cast<uintptr_t>(eh_block));
-    eh_block->start_offset = cur_offset;
-    InsertGrowableList(cu, eh_block->predecessors, reinterpret_cast<uintptr_t>(cur_block));
-  }
-
-  if (insn->dalvikInsn.opcode == Instruction::THROW) {
-    cur_block->explicit_throw = true;
-    if ((code_ptr < code_end) && ContentIsInsn(code_ptr)) {
-      // Force creation of new block following THROW via side-effect
-      FindBlock(cu, cur_offset + width, /* split */ false,
-                /* create */ true, /* immed_pred_block_p */ NULL);
-    }
-    if (!in_try_block) {
-       // Don't split a THROW that can't rethrow - we're done.
-      return cur_block;
-    }
-  }
-
-  /*
-   * Split the potentially-throwing instruction into two parts.
-   * The first half will be a pseudo-op that captures the exception
-   * edges and terminates the basic block.  It always falls through.
-   * Then, create a new basic block that begins with the throwing instruction
-   * (minus exceptions).  Note: this new basic block must NOT be entered into
-   * the block_map.  If the potentially-throwing instruction is the target of a
- * future branch, we need to find the check pseudo half.  The new
-   * basic block containing the work portion of the instruction should
-   * only be entered via fallthrough from the block containing the
-   * pseudo exception edge MIR.  Note also that this new block is
-   * not automatically terminated after the work portion, and may
-   * contain following instructions.
-   */
-  BasicBlock *new_block = NewMemBB(cu, kDalvikByteCode, cu->num_blocks++);
-  InsertGrowableList(cu, &cu->block_list, reinterpret_cast<uintptr_t>(new_block));
-  new_block->start_offset = insn->offset;
-  cur_block->fall_through = new_block;
-  InsertGrowableList(cu, new_block->predecessors, reinterpret_cast<uintptr_t>(cur_block));
-  MIR* new_insn = static_cast<MIR*>(NewMem(cu, sizeof(MIR), true, kAllocMIR));
-  *new_insn = *insn;
-  insn->dalvikInsn.opcode =
-      static_cast<Instruction::Code>(kMirOpCheck);
-  // Associate the two halves
-  insn->meta.throw_insn = new_insn;
-  new_insn->meta.throw_insn = insn;
-  AppendMIR(new_block, new_insn);
-  return new_block;
-}
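
Informally, the resulting shape for a throwing instruction inside a try
block is:

    [ ... | kMirOpCheck ] --catch edges--> handler blocks
              |  (fall-through, only entry)
              v
    [ work half of insn | following insns ... ]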
-
-void CompilerInit(CompilationUnit* cu, const CompilerDriver& compiler) {
-  bool success = false;
-  switch (compiler.GetInstructionSet()) {
-    case kThumb2:
-      success = InitArmCodegen(cu);
-      break;
-    case kMips:
-      success = InitMipsCodegen(cu);
-      break;
-    case kX86:
-      success = InitX86Codegen(cu);
-      break;
-    default:;
-  }
-  if (!success) {
-    LOG(FATAL) << "Failed to initialize codegen for " << compiler.GetInstructionSet();
-  }
-  if (!HeapInit(cu)) {
-    LOG(FATAL) << "Failed to initialize oat heap";
-  }
-}
-
 static CompiledMethod* CompileMethod(CompilerDriver& compiler,
                                      const CompilerBackend compiler_backend,
                                      const DexFile::CodeItem* code_item,
@@ -794,37 +116,25 @@
 {
   VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
 
-  const uint16_t* code_ptr = code_item->insns_;
-  const uint16_t* code_end = code_item->insns_ + code_item->insns_size_in_code_units_;
-  int num_blocks = 0;
-  unsigned int cur_offset = 0;
+  // FIXME: now we detect this in MIRGraph.
+  SpecialCaseHandler special_case = kNoHandler;
 
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   UniquePtr<CompilationUnit> cu(new CompilationUnit);
 
-  CompilerInit(cu.get(), compiler);
+  if (!HeapInit(cu.get())) {
+    LOG(FATAL) << "Failed to initialize compiler heap";
+  }
 
   cu->compiler_driver = &compiler;
   cu->class_linker = class_linker;
-  cu->dex_file = &dex_file;
-  cu->class_def_idx = class_def_idx;
-  cu->method_idx = method_idx;
-  cu->code_item = code_item;
-  cu->access_flags = access_flags;
-  cu->invoke_type = invoke_type;
-  cu->shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
   cu->instruction_set = compiler.GetInstructionSet();
-  cu->insns = code_item->insns_;
-  cu->insns_size = code_item->insns_size_in_code_units_;
-  cu->num_ins = code_item->ins_size_;
-  cu->num_regs = code_item->registers_size_ - cu->num_ins;
-  cu->num_outs = code_item->outs_size_;
   DCHECK((cu->instruction_set == kThumb2) ||
          (cu->instruction_set == kX86) ||
          (cu->instruction_set == kMips));
-  if ((compiler_backend == kQuickGBC) || (compiler_backend == kPortable)) {
-    cu->gen_bitcode = true;
-  }
+
+  cu->gen_bitcode = (compiler_backend == kPortable);
+
 #if defined(ART_USE_PORTABLE_COMPILER)
   cu->llvm_compilation_unit = llvm_compilation_unit;
   cu->llvm_info = llvm_compilation_unit->GetQuickContext();
@@ -844,11 +154,11 @@
     cu->verbose = VLOG_IS_ON(compiler) ||
         (cu->enable_debug & (1 << kDebugVerbose));
   }
-#ifndef NDEBUG
-  if (cu->gen_bitcode) {
+
+  // If debug build, always verify bitcode.
+  if (kIsDebugBuild && cu->gen_bitcode) {
     cu->enable_debug |= (1 << kDebugVerifyBitcode);
   }
-#endif
 
   if (cu->instruction_set == kMips) {
     // Disable some optimizations for mips for now
@@ -859,294 +169,57 @@
         (1 << kNullCheckElimination) |
         (1 << kPromoteRegs) |
         (1 << kTrackLiveTemps) |
-        (1 << kSkipLargeMethodOptimization) |
         (1 << kSafeOptimizations) |
         (1 << kBBOpt) |
         (1 << kMatch) |
         (1 << kPromoteCompilerTemps));
   }
 
+  /* Assume leaf */
+  cu->attributes = METHOD_IS_LEAF;
+
+  cu->mir_graph.reset(new MIRGraph(cu.get()));
+
   /* Gathering opcode stats? */
   if (kCompilerDebugFlags & (1 << kDebugCountOpcodes)) {
-    cu->opcode_count =
-        static_cast<int*>(NewMem(cu.get(), kNumPackedOpcodes * sizeof(int), true, kAllocMisc));
+    cu->mir_graph->EnableOpcodeCounting();
   }
 
-  /* Assume non-throwing leaf */
-  cu->attrs = (METHOD_IS_LEAF | METHOD_IS_THROW_FREE);
-
-  /* Initialize the block list, estimate size based on insns_size */
-  CompilerInitGrowableList(cu.get(), &cu->block_list, cu->insns_size,
-                      kListBlockList);
-
-  /* Initialize the switch_tables list */
-  CompilerInitGrowableList(cu.get(), &cu->switch_tables, 4,
-                      kListSwitchTables);
-
-  /* Initialize the fill_array_data list */
-  CompilerInitGrowableList(cu.get(), &cu->fill_array_data, 4,
-                      kListFillArrayData);
-
-  /* Initialize the throw_launchpads list, estimate size based on insns_size */
-  CompilerInitGrowableList(cu.get(), &cu->throw_launchpads, cu->insns_size,
-                      kListThrowLaunchPads);
-
-  /* Initialize the intrinsic_launchpads list */
-  CompilerInitGrowableList(cu.get(), &cu->intrinsic_launchpads, 4,
-                      kListMisc);
-
-
-  /* Initialize the suspend_launchpads list */
-  CompilerInitGrowableList(cu.get(), &cu->suspend_launchpads, 2048,
-                      kListSuspendLaunchPads);
-
-  /* Allocate the bit-vector to track the beginning of basic blocks */
-  ArenaBitVector *try_block_addr = AllocBitVector(cu.get(),
-                                                   cu->insns_size,
-                                                   true /* expandable */);
-  cu->try_block_addr = try_block_addr;
-
-  /* Create the default entry and exit blocks and enter them to the list */
-  BasicBlock *entry_block = NewMemBB(cu.get(), kEntryBlock, num_blocks++);
-  BasicBlock *exit_block = NewMemBB(cu.get(), kExitBlock, num_blocks++);
-
-  cu->entry_block = entry_block;
-  cu->exit_block = exit_block;
-
-  InsertGrowableList(cu.get(), &cu->block_list, reinterpret_cast<uintptr_t>(entry_block));
-  InsertGrowableList(cu.get(), &cu->block_list, reinterpret_cast<uintptr_t>(exit_block));
-
-  /* Current block to record parsed instructions */
-  BasicBlock *cur_block = NewMemBB(cu.get(), kDalvikByteCode, num_blocks++);
-  cur_block->start_offset = 0;
-  InsertGrowableList(cu.get(), &cu->block_list, reinterpret_cast<uintptr_t>(cur_block));
-  /* Add first block to the fast lookup cache */
-  cu->block_map.Put(cur_block->start_offset, cur_block);
-  entry_block->fall_through = cur_block;
-  InsertGrowableList(cu.get(), cur_block->predecessors,
-                        reinterpret_cast<uintptr_t>(entry_block));
-
-  /*
-   * Store back the number of blocks since new blocks may be created while
-   * accessing cu.
-   */
-  cu->num_blocks = num_blocks;
-
-  /* Identify code range in try blocks and set up the empty catch blocks */
-  ProcessTryCatchBlocks(cu.get());
-
-  /* Set up for simple method detection */
-  int num_patterns = sizeof(special_patterns)/sizeof(special_patterns[0]);
-  bool live_pattern = (num_patterns > 0) && !(cu->disable_opt & (1 << kMatch));
-  bool* dead_pattern =
-      static_cast<bool*>(NewMem(cu.get(), sizeof(bool) * num_patterns, true, kAllocMisc));
-  SpecialCaseHandler special_case = kNoHandler;
-  int pattern_pos = 0;
-
-  /* Parse all instructions and put them into containing basic blocks */
-  while (code_ptr < code_end) {
-    MIR *insn = static_cast<MIR *>(NewMem(cu.get(), sizeof(MIR), true, kAllocMIR));
-    insn->offset = cur_offset;
-    int width = ParseInsn(cu.get(), code_ptr, &insn->dalvikInsn);
-    insn->width = width;
-    Instruction::Code opcode = insn->dalvikInsn.opcode;
-    if (cu->opcode_count != NULL) {
-      cu->opcode_count[static_cast<int>(opcode)]++;
-    }
-
-    /* Terminate when the data section is seen */
-    if (width == 0)
-      break;
-
-    /* Possible simple method? */
-    if (live_pattern) {
-      live_pattern = false;
-      special_case = kNoHandler;
-      for (int i = 0; i < num_patterns; i++) {
-        if (!dead_pattern[i]) {
-          if (special_patterns[i].opcodes[pattern_pos] == opcode) {
-            live_pattern = true;
-            special_case = special_patterns[i].handler_code;
-          } else {
-            dead_pattern[i] = true;
-          }
-        }
-      }
-      pattern_pos++;
-    }
-
-    AppendMIR(cur_block, insn);
-
-    code_ptr += width;
-    int flags = Instruction::FlagsOf(insn->dalvikInsn.opcode);
-
-    int df_flags = oat_data_flow_attributes[insn->dalvikInsn.opcode];
-
-    if (df_flags & DF_HAS_DEFS) {
-      cu->def_count += (df_flags & DF_A_WIDE) ? 2 : 1;
-    }
-
-    if (flags & Instruction::kBranch) {
-      cur_block = ProcessCanBranch(cu.get(), cur_block, insn, cur_offset,
-                                  width, flags, code_ptr, code_end);
-    } else if (flags & Instruction::kReturn) {
-      cur_block->terminated_by_return = true;
-      cur_block->fall_through = exit_block;
-      InsertGrowableList(cu.get(), exit_block->predecessors,
-                            reinterpret_cast<uintptr_t>(cur_block));
-      /*
-       * Terminate the current block if there are instructions
-       * afterwards.
-       */
-      if (code_ptr < code_end) {
-        /*
-         * Create a fallthrough block for real instructions
-         * (incl. NOP).
-         */
-        if (ContentIsInsn(code_ptr)) {
-            FindBlock(cu.get(), cur_offset + width,
-                      /* split */
-                      false,
-                      /* create */
-                      true,
-                      /* immed_pred_block_p */
-                      NULL);
-        }
-      }
-    } else if (flags & Instruction::kThrow) {
-      cur_block = ProcessCanThrow(cu.get(), cur_block, insn, cur_offset,
-                                 width, flags, try_block_addr, code_ptr, code_end);
-    } else if (flags & Instruction::kSwitch) {
-      ProcessCanSwitch(cu.get(), cur_block, insn, cur_offset, width, flags);
-    }
-    cur_offset += width;
-    BasicBlock *next_block = FindBlock(cu.get(), cur_offset,
-                                      /* split */
-                                      false,
-                                      /* create */
-                                      false,
-                                      /* immed_pred_block_p */
-                                      NULL);
-    if (next_block) {
-      /*
-       * The next instruction could be the target of a previously parsed
-       * forward branch so a block is already created. If the current
-       * instruction is not an unconditional branch, connect them through
-       * the fall-through link.
-       */
-      DCHECK(cur_block->fall_through == NULL ||
-             cur_block->fall_through == next_block ||
-             cur_block->fall_through == exit_block);
-
-      if ((cur_block->fall_through == NULL) && (flags & Instruction::kContinue)) {
-        cur_block->fall_through = next_block;
-        InsertGrowableList(cu.get(), next_block->predecessors,
-                              reinterpret_cast<uintptr_t>(cur_block));
-      }
-      cur_block = next_block;
-    }
-  }
-
-  if (cu->enable_debug & (1 << kDebugDumpCFG)) {
-    DumpCFG(cu.get(), "/sdcard/1_post_parse_cfg/", true);
-  }
-
-  if (!(cu->disable_opt & (1 << kSkipLargeMethodOptimization))) {
-    if ((cu->num_blocks > MANY_BLOCKS) ||
-        ((cu->num_blocks > MANY_BLOCKS_INITIALIZER) &&
-         PrettyMethod(method_idx, dex_file, false).find("init>") !=
-             std::string::npos)) {
-      cu->qd_mode = true;
-    }
-  }
-
-  if (cu->qd_mode) {
-    // Bitcode generation requires full dataflow analysis
-    cu->disable_dataflow = !cu->gen_bitcode;
-    // Disable optimization which require dataflow/ssa
-    cu->disable_opt |= (1 << kBBOpt) | (1 << kPromoteRegs) | (1 << kNullCheckElimination);
-    if (cu->verbose) {
-        LOG(INFO) << "QD mode enabled: "
-                  << PrettyMethod(method_idx, dex_file)
-                  << " num blocks: " << cu->num_blocks;
-    }
-  }
-
-  if (cu->verbose) {
-    DumpCompilationUnit(cu.get());
-  }
+  /* Build the raw MIR graph */
+  cu->mir_graph->InlineMethod(code_item, access_flags, invoke_type, class_def_idx, method_idx,
+                              class_loader, dex_file);
 
   /* Do a code layout pass */
-  CodeLayout(cu.get());
-
-  if (cu->enable_debug & (1 << kDebugDumpCFG)) {
-    DumpCFG(cu.get(), "/sdcard/2_post_layout_cfg/", true);
-  }
+  cu->mir_graph->CodeLayout();
 
   if (cu->enable_debug & (1 << kDebugVerifyDataflow)) {
-    /* Verify if all blocks are connected as claimed */
-    DataFlowAnalysisDispatcher(cu.get(), VerifyPredInfo, kAllNodes,
-                                  false /* is_iterative */);
+    cu->mir_graph->VerifyDataflow();
   }
 
   /* Perform SSA transformation for the whole method */
-  SSATransformation(cu.get());
-
-  if (cu->enable_debug & (1 << kDebugDumpCFG)) {
-    DumpCFG(cu.get(), "/sdcard/3_post_ssa_cfg/", false);
-  }
+  cu->mir_graph->SSATransformation();
 
   /* Do constant propagation */
-  cu->is_constant_v = AllocBitVector(cu.get(), cu->num_ssa_regs, false  /* not expandable */);
-  cu->must_flush_constant_v = AllocBitVector(cu.get(), cu->num_ssa_regs,
-                                             false  /* not expandable */);
-  cu->constant_values =
-      static_cast<int*>(NewMem(cu.get(), sizeof(int) * cu->num_ssa_regs, true, kAllocDFInfo));
-  DataFlowAnalysisDispatcher(cu.get(), DoConstantPropogation,
-                                kAllNodes,
-                                false /* is_iterative */);
-
-  /* Detect loops */
-  LoopDetection(cu.get());
+  cu->mir_graph->PropagateConstants();
 
   /* Count uses */
-  MethodUseCount(cu.get());
+  cu->mir_graph->MethodUseCount();
 
   /* Perform null check elimination */
-  NullCheckElimination(cu.get());
-
-  if (cu->enable_debug & (1 << kDebugDumpCFG)) {
-    DumpCFG(cu.get(), "/sdcard/4_post_nce_cfg/", false);
-  }
+  cu->mir_graph->NullCheckElimination();
 
   /* Combine basic blocks where possible */
-  BasicBlockCombine(cu.get());
-
-  if (cu->enable_debug & (1 << kDebugDumpCFG)) {
-    DumpCFG(cu.get(), "/sdcard/5_post_bbcombine_cfg/", false);
-  }
+  cu->mir_graph->BasicBlockCombine();
 
   /* Do some basic block optimizations */
-  BasicBlockOptimization(cu.get());
-
-  // Debugging only
-  if (cu->enable_debug & (1 << kDebugDumpCFG)) {
-    DumpCFG(cu.get(), "/sdcard/6_post_bbo_cfg/", false);
-  }
+  cu->mir_graph->BasicBlockOptimization();
 
   if (cu->enable_debug & (1 << kDebugDumpCheckStats)) {
-    DumpCheckStats(cu.get());
+    cu->mir_graph->DumpCheckStats();
   }
 
-  cu.get()->cg->CompilerInitializeRegAlloc(cu.get());  // Needs to happen after SSA naming
-
-  /* Allocate Registers using simple local allocation scheme */
-  SimpleRegAlloc(cu.get());
-
-  if (cu->enable_debug & (1 << kDebugDumpCFG)) {
-    DumpCFG(cu.get(), "/sdcard/7_post_ralloc_cfg/", true);
-  }
-
+  /* Set up regLocation[] array to describe values - one for each ssa_name. */
+  cu->mir_graph->BuildRegLocations();
 
 #if defined(ART_USE_PORTABLE_COMPILER)
   /* Go the LLVM path? */
@@ -1158,11 +231,49 @@
       ArenaReset(cu.get());
       return NULL;
     }
-    // Bitcode->LIR
-    MethodBitcode2LIR(cu.get());
   } else
 #endif
   {
+    switch (compiler.GetInstructionSet()) {
+      case kThumb2:
+        InitArmCodegen(cu.get()); break;
+      case kMips:
+        InitMipsCodegen(cu.get()); break;
+      case kX86:
+        InitX86Codegen(cu.get()); break;
+      default:
+        LOG(FATAL) << "Unexpected instruction set: " << compiler.GetInstructionSet();
+    }
+
+    // TODO: move all of this to Codegen.materialize()
+
+    /* Initialize the switch_tables list */                       // TO CODEGEN
+    CompilerInitGrowableList(cu.get(), &cu->switch_tables, 4,
+                             kListSwitchTables);
+
+    /* Initialize the fill_array_data list */                     // TO CODEGEN
+    CompilerInitGrowableList(cu.get(), &cu->fill_array_data, 4,
+                             kListFillArrayData);
+
+    /* Initialize the throw_launchpads list, estimate size based on insns_size */  // TO CODEGEN
+    CompilerInitGrowableList(cu.get(), &cu->throw_launchpads, code_item->insns_size_in_code_units_,
+                             kListThrowLaunchPads);
+
+    /* Initialize the intrinsic_launchpads list */                // TO CODEGEN
+    CompilerInitGrowableList(cu.get(), &cu->intrinsic_launchpads, 4,
+                             kListMisc);
+
+    /* Initialize the suspend_launchpads list */                  // TO CODEGEN
+    CompilerInitGrowableList(cu.get(), &cu->suspend_launchpads, 2048,
+                             kListSuspendLaunchPads);
+
+    // TODO: Push these to codegen
+    cu.get()->cg->CompilerInitializeRegAlloc(cu.get());  // Needs to happen after SSA naming
+
+    /* Allocate Registers using simple local allocation scheme */
+    cu.get()->cg->SimpleRegAlloc(cu.get());
+
     if (special_case != kNoHandler) {
       /*
        * Custom codegen for special cases.  If for any reason the
@@ -1191,16 +302,10 @@
       CodegenDump(cu.get());
     }
 
-    if (cu->opcode_count != NULL) {
-      LOG(INFO) << "Opcode Count";
-      for (int i = 0; i < kNumPackedOpcodes; i++) {
-        if (cu->opcode_count[i] != 0) {
-          LOG(INFO) << "-C- "
-                    << Instruction::Name(static_cast<Instruction::Code>(i))
-                    << " " << cu->opcode_count[i];
-        }
-      }
-    }
+  }
+
+  if (kCompilerDebugFlags & (1 << kDebugCountOpcodes)) {
+    cu->mir_graph->ShowOpcodeStats();
   }
 
   // Combine vmap tables - core regs, then fp regs - into vmap_table
diff --git a/src/compiler/dex/frontend.h b/src/compiler/dex/frontend.h
index 4c906be..874ee0b 100644
--- a/src/compiler/dex/frontend.h
+++ b/src/compiler/dex/frontend.h
@@ -31,17 +31,6 @@
   class IRBuilder;
 }
 
-#define COMPILER_TRACED(X)
-#define COMPILER_TRACEE(X)
-
-/*
- * Special offsets to denote method entry/exit for debugger update.
- * NOTE: bit pattern must be loadable using 1 instruction and must
- * not be a valid Dalvik offset.
- */
-#define DEBUGGER_METHOD_ENTRY -1
-#define DEBUGGER_METHOD_EXIT -2
-
 /*
  * Assembly is an iterative process, and usually terminates within
  * two or three passes.  This should be high enough to handle bizarre
@@ -57,7 +46,6 @@
   kNullCheckElimination,
   kPromoteRegs,
   kTrackLiveTemps,
-  kSkipLargeMethodOptimization,
   kSafeOptimizations,
   kBBOpt,
   kMatch,
@@ -86,24 +74,12 @@
 };
 
 enum OatMethodAttributes {
-  kIsCallee = 0,      // Code is part of a callee (invoked by a hot trace).
-  kIsHot,             // Code is part of a hot trace.
   kIsLeaf,            // Method is leaf.
-  kIsEmpty,           // Method is empty.
-  kIsThrowFree,       // Method doesn't throw.
-  kIsGetter,          // Method fits the getter pattern.
-  kIsSetter,          // Method fits the setter pattern.
-  kCannotCompile,     // Method cannot be compiled.
+  kHasLoop,           // Method contains simple loop.
 };
 
-#define METHOD_IS_CALLEE        (1 << kIsCallee)
-#define METHOD_IS_HOT           (1 << kIsHot)
 #define METHOD_IS_LEAF          (1 << kIsLeaf)
-#define METHOD_IS_EMPTY         (1 << kIsEmpty)
-#define METHOD_IS_THROW_FREE    (1 << kIsThrowFree)
-#define METHOD_IS_GETTER        (1 << kIsGetter)
-#define METHOD_IS_SETTER        (1 << kIsSetter)
-#define METHOD_CANNOT_COMPILE   (1 << kCannotCompile)
+#define METHOD_HAS_LOOP         (1 << kHasLoop)
 
 class LLVMInfo {
   public:
@@ -136,9 +112,6 @@
 struct CompilationUnit;
 struct BasicBlock;
 
-BasicBlock* FindBlock(CompilationUnit* cu, unsigned int code_offset);
-void ReplaceSpecialChars(std::string& str);
-
 }  // namespace art
 
 extern "C" art::CompiledMethod* ArtCompileMethod(art::CompilerDriver& driver,
diff --git a/src/compiler/dex/bb_opt.cc b/src/compiler/dex/local_value_numbering.cc
similarity index 99%
rename from src/compiler/dex/bb_opt.cc
rename to src/compiler/dex/local_value_numbering.cc
index 3ad5821..ec5ab5d 100644
--- a/src/compiler/dex/bb_opt.cc
+++ b/src/compiler/dex/local_value_numbering.cc
@@ -14,13 +14,12 @@
  * limitations under the License.
  */
 
-#include "bb_opt.h"
-#include "dataflow.h"
+#include "local_value_numbering.h"
 
 namespace art {
 
 
-uint16_t BBOpt::GetValueNumber(MIR* mir)
+uint16_t LocalValueNumbering::GetValueNumber(MIR* mir)
 {
   uint16_t res = NO_VALUE;
   uint16_t opcode = mir->dalvikInsn.opcode;
diff --git a/src/compiler/dex/bb_opt.h b/src/compiler/dex/local_value_numbering.h
similarity index 97%
rename from src/compiler/dex/bb_opt.h
rename to src/compiler/dex/local_value_numbering.h
index aedbc10..dd008f6 100644
--- a/src/compiler/dex/bb_opt.h
+++ b/src/compiler/dex/local_value_numbering.h
@@ -31,9 +31,9 @@
 // Key represents a memory address, value is generation.
 typedef SafeMap<uint32_t, uint16_t> MemoryVersionMap;
 
-class BBOpt {
+class LocalValueNumbering {
  public:
-  BBOpt(CompilationUnit* cu) : cu_(cu) {};
+  LocalValueNumbering(CompilationUnit* cu) : cu_(cu) {};
 
   uint64_t BuildKey(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier)
   {
diff --git a/src/compiler/dex/mir_dataflow.cc b/src/compiler/dex/mir_dataflow.cc
new file mode 100644
index 0000000..c5ebc1b
--- /dev/null
+++ b/src/compiler/dex/mir_dataflow.cc
@@ -0,0 +1,1380 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler_internals.h"
+#include "local_value_numbering.h"
+#include "dataflow_iterator.h"
+
+namespace art {
+
+/*
+ * Main table containing data flow attributes for each bytecode. The
+ * first kNumPackedOpcodes entries are for Dalvik bytecode instructions;
+ * extended MIR-level opcodes are appended afterwards.
+ *
+ * TODO - many optimization flags are incomplete - they will only limit the
+ * scope of optimizations but will not cause mis-optimizations.
+ */
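+// Example reading of a table entry: ADD_INT vAA, vBB, vCC carries
+// DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C, i.e. it defines
+// vA, uses vB and vC, and all three operands are core (non-FP, non-reference)
+// values.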
+const int oat_data_flow_attributes[kMirOpLast] = {
+  // 00 NOP
+  DF_NOP,
+
+  // 01 MOVE vA, vB
+  DF_DA | DF_UB | DF_IS_MOVE,
+
+  // 02 MOVE_FROM16 vAA, vBBBB
+  DF_DA | DF_UB | DF_IS_MOVE,
+
+  // 03 MOVE_16 vAAAA, vBBBB
+  DF_DA | DF_UB | DF_IS_MOVE,
+
+  // 04 MOVE_WIDE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
+
+  // 05 MOVE_WIDE_FROM16 vAA, vBBBB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
+
+  // 06 MOVE_WIDE_16 vAAAA, vBBBB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
+
+  // 07 MOVE_OBJECT vA, vB
+  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
+
+  // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
+  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
+
+  // 09 MOVE_OBJECT_16 vAAAA, vBBBB
+  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
+
+  // 0A MOVE_RESULT vAA
+  DF_DA,
+
+  // 0B MOVE_RESULT_WIDE vAA
+  DF_DA | DF_A_WIDE,
+
+  // 0C MOVE_RESULT_OBJECT vAA
+  DF_DA | DF_REF_A,
+
+  // 0D MOVE_EXCEPTION vAA
+  DF_DA | DF_REF_A,
+
+  // 0E RETURN_VOID
+  DF_NOP,
+
+  // 0F RETURN vAA
+  DF_UA,
+
+  // 10 RETURN_WIDE vAA
+  DF_UA | DF_A_WIDE,
+
+  // 11 RETURN_OBJECT vAA
+  DF_UA | DF_REF_A,
+
+  // 12 CONST_4 vA, #+B
+  DF_DA | DF_SETS_CONST,
+
+  // 13 CONST_16 vAA, #+BBBB
+  DF_DA | DF_SETS_CONST,
+
+  // 14 CONST vAA, #+BBBBBBBB
+  DF_DA | DF_SETS_CONST,
+
+  // 15 CONST_HIGH16 VAA, #+BBBB0000
+  DF_DA | DF_SETS_CONST,
+
+  // 16 CONST_WIDE_16 vAA, #+BBBB
+  DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+  // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
+  DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+  // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
+  DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+  // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
+  DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+  // 1A CONST_STRING vAA, string@BBBB
+  DF_DA | DF_REF_A,
+
+  // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
+  DF_DA | DF_REF_A,
+
+  // 1C CONST_CLASS vAA, type@BBBB
+  DF_DA | DF_REF_A,
+
+  // 1D MONITOR_ENTER vAA
+  DF_UA | DF_NULL_CHK_0 | DF_REF_A,
+
+  // 1E MONITOR_EXIT vAA
+  DF_UA | DF_NULL_CHK_0 | DF_REF_A,
+
+  // 1F CHK_CAST vAA, type@BBBB
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // 20 INSTANCE_OF vA, vB, type@CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_REF_B | DF_UMS,
+
+  // 21 ARRAY_LENGTH vA, vB
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_A | DF_REF_B,
+
+  // 22 NEW_INSTANCE vAA, type@BBBB
+  DF_DA | DF_NON_NULL_DST | DF_REF_A | DF_UMS,
+
+  // 23 NEW_ARRAY vA, vB, type@CCCC
+  DF_DA | DF_UB | DF_NON_NULL_DST | DF_REF_A | DF_CORE_B | DF_UMS,
+
+  // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_NON_NULL_RET | DF_UMS,
+
+  // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
+  DF_FORMAT_3RC | DF_NON_NULL_RET | DF_UMS,
+
+  // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // 27 THROW vAA
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // 28 GOTO
+  DF_NOP,
+
+  // 29 GOTO_16
+  DF_NOP,
+
+  // 2A GOTO_32
+  DF_NOP,
+
+  // 2B PACKED_SWITCH vAA, +BBBBBBBB
+  DF_UA,
+
+  // 2C SPARSE_SWITCH vAA, +BBBBBBBB
+  DF_UA,
+
+  // 2D CMPL_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+  // 2E CMPG_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+  // 2F CMPL_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+  // 30 CMPG_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+  // 31 CMP_LONG vAA, vBB, vCC
+  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 32 IF_EQ vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 33 IF_NE vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 34 IF_LT vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 35 IF_GE vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 36 IF_GT vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 37 IF_LE vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 38 IF_EQZ vAA, +BBBB
+  DF_UA,
+
+  // 39 IF_NEZ vAA, +BBBB
+  DF_UA,
+
+  // 3A IF_LTZ vAA, +BBBB
+  DF_UA,
+
+  // 3B IF_GEZ vAA, +BBBB
+  DF_UA,
+
+  // 3C IF_GTZ vAA, +BBBB
+  DF_UA,
+
+  // 3D IF_LEZ vAA, +BBBB
+  DF_UA,
+
+  // 3E UNUSED_3E
+  DF_NOP,
+
+  // 3F UNUSED_3F
+  DF_NOP,
+
+  // 40 UNUSED_40
+  DF_NOP,
+
+  // 41 UNUSED_41
+  DF_NOP,
+
+  // 42 UNUSED_42
+  DF_NOP,
+
+  // 43 UNUSED_43
+  DF_NOP,
+
+  // 44 AGET vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 45 AGET_WIDE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 46 AGET_OBJECT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_A | DF_REF_B | DF_CORE_C,
+
+  // 47 AGET_BOOLEAN vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 48 AGET_BYTE vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 49 AGET_CHAR vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 4A AGET_SHORT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 4B APUT vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 4C APUT_WIDE vAA, vBB, vCC
+  DF_UA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_2 | DF_RANGE_CHK_3 | DF_REF_B | DF_CORE_C,
+
+  // 4D APUT_OBJECT vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_A | DF_REF_B | DF_CORE_C,
+
+  // 4E APUT_BOOLEAN vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 4F APUT_BYTE vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 50 APUT_CHAR vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 51 APUT_SHORT vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 52 IGET vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 53 IGET_WIDE vA, vB, field@CCCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 54 IGET_OBJECT vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_A | DF_REF_B,
+
+  // 55 IGET_BOOLEAN vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 56 IGET_BYTE vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 57 IGET_CHAR vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 58 IGET_SHORT vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 59 IPUT vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 5A IPUT_WIDE vA, vB, field@CCCC
+  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2 | DF_REF_B,
+
+  // 5B IPUT_OBJECT vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_A | DF_REF_B,
+
+  // 5C IPUT_BOOLEAN vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 5D IPUT_BYTE vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 5E IPUT_CHAR vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 5F IPUT_SHORT vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 60 SGET vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 61 SGET_WIDE vAA, field@BBBB
+  DF_DA | DF_A_WIDE | DF_UMS,
+
+  // 62 SGET_OBJECT vAA, field@BBBB
+  DF_DA | DF_REF_A | DF_UMS,
+
+  // 63 SGET_BOOLEAN vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 64 SGET_BYTE vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 65 SGET_CHAR vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 66 SGET_SHORT vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 67 SPUT vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 68 SPUT_WIDE vAA, field@BBBB
+  DF_UA | DF_A_WIDE | DF_UMS,
+
+  // 69 SPUT_OBJECT vAA, field@BBBB
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // 6A SPUT_BOOLEAN vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 6B SPUT_BYTE vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 6C SPUT_CHAR vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 6D SPUT_SHORT vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_UMS,
+
+  // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_UMS,
+
+  // 73 UNUSED_73
+  DF_NOP,
+
+  // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_UMS,
+
+  // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_UMS,
+
+  // 79 UNUSED_79
+  DF_NOP,
+
+  // 7A UNUSED_7A
+  DF_NOP,
+
+  // 7B NEG_INT vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 7C NOT_INT vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 7D NEG_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // 7E NOT_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // 7F NEG_FLOAT vA, vB
+  DF_DA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // 80 NEG_DOUBLE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // 81 INT_TO_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 82 INT_TO_FLOAT vA, vB
+  DF_DA | DF_UB | DF_FP_A | DF_CORE_B,
+
+  // 83 INT_TO_DOUBLE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_CORE_B,
+
+  // 84 LONG_TO_INT vA, vB
+  DF_DA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // 85 LONG_TO_FLOAT vA, vB
+  DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
+
+  // 86 LONG_TO_DOUBLE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
+
+  // 87 FLOAT_TO_INT vA, vB
+  DF_DA | DF_UB | DF_FP_B | DF_CORE_A,
+
+  // 88 FLOAT_TO_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_FP_B | DF_CORE_A,
+
+  // 89 FLOAT_TO_DOUBLE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_FP_B,
+
+  // 8A DOUBLE_TO_INT vA, vB
+  DF_DA | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
+
+  // 8B DOUBLE_TO_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
+
+  // 8C DOUBLE_TO_FLOAT vA, vB
+  DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // 8D INT_TO_BYTE vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 8E INT_TO_CHAR vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 8F INT_TO_SHORT vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 90 ADD_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 91 SUB_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 92 MUL_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 93 DIV_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 94 REM_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 95 AND_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 96 OR_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 97 XOR_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 98 SHL_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 99 SHR_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9A USHR_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9B ADD_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9C SUB_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9D MUL_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9E DIV_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9F REM_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A0 AND_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A1 OR_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A2 XOR_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A3 SHL_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A4 SHR_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A5 USHR_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A6 ADD_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // A7 SUB_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // A8 MUL_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // A9 DIV_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AA REM_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AB ADD_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AC SUB_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AD MUL_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AE DIV_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AF REM_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // B0 ADD_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B1 SUB_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B2 MUL_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B3 DIV_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B4 REM_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B5 AND_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B6 OR_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B7 XOR_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B8 SHL_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B9 SHR_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // BA USHR_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // BB ADD_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // BC SUB_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // BD MUL_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // BE DIV_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // BF REM_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // C0 AND_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // C1 OR_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // C2 XOR_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // C3 SHL_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // C4 SHR_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // C5 USHR_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // C6 ADD_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // C7 SUB_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // C8 MUL_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // C9 DIV_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // CA REM_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // CB ADD_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // CC SUB_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // CD MUL_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // CE DIV_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // CF REM_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // D0 ADD_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D1 RSUB_INT vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D2 MUL_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D3 DIV_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D4 REM_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D5 AND_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D6 OR_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D7 XOR_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D8 ADD_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DA MUL_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DB DIV_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DC REM_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DD AND_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DE OR_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DF XOR_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // E0 SHL_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // E1 SHR_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // E2 USHR_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // E3 IGET_VOLATILE
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // E4 IPUT_VOLATILE
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // E5 SGET_VOLATILE
+  DF_DA | DF_UMS,
+
+  // E6 SPUT_VOLATILE
+  DF_UA | DF_UMS,
+
+  // E7 IGET_OBJECT_VOLATILE
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_A | DF_REF_B,
+
+  // E8 IGET_WIDE_VOLATILE
+  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // E9 IPUT_WIDE_VOLATILE
+  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2 | DF_REF_B,
+
+  // EA SGET_WIDE_VOLATILE
+  DF_DA | DF_A_WIDE | DF_UMS,
+
+  // EB SPUT_WIDE_VOLATILE
+  DF_UA | DF_A_WIDE | DF_UMS,
+
+  // EC BREAKPOINT
+  DF_NOP,
+
+  // ED THROW_VERIFICATION_ERROR
+  DF_NOP | DF_UMS,
+
+  // EE EXECUTE_INLINE
+  DF_FORMAT_35C,
+
+  // EF EXECUTE_INLINE_RANGE
+  DF_FORMAT_3RC,
+
+  // F0 INVOKE_OBJECT_INIT_RANGE
+  DF_NOP | DF_NULL_CHK_0,
+
+  // F1 RETURN_VOID_BARRIER
+  DF_NOP,
+
+  // F2 IGET_QUICK
+  DF_DA | DF_UB | DF_NULL_CHK_0,
+
+  // F3 IGET_WIDE_QUICK
+  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0,
+
+  // F4 IGET_OBJECT_QUICK
+  DF_DA | DF_UB | DF_NULL_CHK_0,
+
+  // F5 IPUT_QUICK
+  DF_UA | DF_UB | DF_NULL_CHK_1,
+
+  // F6 IPUT_WIDE_QUICK
+  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2,
+
+  // F7 IPUT_OBJECT_QUICK
+  DF_UA | DF_UB | DF_NULL_CHK_1,
+
+  // F8 INVOKE_VIRTUAL_QUICK
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // F9 INVOKE_VIRTUAL_QUICK_RANGE
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // FA INVOKE_SUPER_QUICK
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // FB INVOKE_SUPER_QUICK_RANGE
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // FC IPUT_OBJECT_VOLATILE
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_A | DF_REF_B,
+
+  // FD SGET_OBJECT_VOLATILE
+  DF_DA | DF_REF_A | DF_UMS,
+
+  // FE SPUT_OBJECT_VOLATILE
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // FF UNUSED_FF
+  DF_NOP,
+
+  // Beginning of extended MIR opcodes
+  // 100 MIR_PHI
+  DF_DA | DF_NULL_TRANSFER_N,
+
+  // 101 MIR_COPY
+  DF_DA | DF_UB | DF_IS_MOVE,
+
+  // 102 MIR_FUSED_CMPL_FLOAT
+  DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // 103 MIR_FUSED_CMPG_FLOAT
+  DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // 104 MIR_FUSED_CMPL_DOUBLE
+  DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // 105 MIR_FUSED_CMPG_DOUBLE
+  DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // 106 MIR_FUSED_CMP_LONG
+  DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // 107 MIR_NOP
+  DF_NOP,
+
+  // 108 MIR_NULL_CHECK
+  0,
+
+  // 109 MIR_RANGE_CHECK
+  0,
+
+  // 10A MIR_DIV_ZERO_CHECK
+  0,
+
+  // 10B MIR_CHECK
+  0,
+
+  // 10C MIR_CHECKPART2
+  0,
+
+  // 10D MIR_SELECT
+  DF_DA | DF_UB,
+};
+
+/* Return the base virtual register for an SSA name */
+int MIRGraph::SRegToVReg(int ssa_reg)
+{
+  DCHECK_LT(ssa_reg, static_cast<int>(ssa_base_vregs_->num_used));
+  return GET_ELEM_N(ssa_base_vregs_, int, ssa_reg);
+}
+
+/* Any register that is used before being defined is considered live-in */
+void MIRGraph::HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
+                            ArenaBitVector* live_in_v, int dalvik_reg_id)
+{
+  SetBit(cu_, use_v, dalvik_reg_id);
+  if (!IsBitSet(def_v, dalvik_reg_id)) {
+    SetBit(cu_, live_in_v, dalvik_reg_id);
+  }
+}
+
+/* Mark a reg as being defined */
+void MIRGraph::HandleDef(ArenaBitVector* def_v, int dalvik_reg_id)
+{
+  SetBit(cu_, def_v, dalvik_reg_id);
+}
+
+/*
+ * Find the live-in variables for natural loops. Variables that are live-in to
+ * the main loop body are considered to be defined in the entry block.
+ */
+bool MIRGraph::FindLocalLiveIn(BasicBlock* bb)
+{
+  MIR* mir;
+  ArenaBitVector *use_v, *def_v, *live_in_v;
+
+  if (bb->data_flow_info == NULL) return false;
+
+  use_v = bb->data_flow_info->use_v =
+      AllocBitVector(cu_, cu_->num_dalvik_registers, false, kBitMapUse);
+  def_v = bb->data_flow_info->def_v =
+      AllocBitVector(cu_, cu_->num_dalvik_registers, false, kBitMapDef);
+  live_in_v = bb->data_flow_info->live_in_v =
+      AllocBitVector(cu_, cu_->num_dalvik_registers, false, kBitMapLiveIn);
+
+  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+    DecodedInstruction *d_insn = &mir->dalvikInsn;
+
+    if (df_attributes & DF_HAS_USES) {
+      if (df_attributes & DF_UA) {
+        HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vA);
+        if (df_attributes & DF_A_WIDE) {
+          HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vA+1);
+        }
+      }
+      if (df_attributes & DF_UB) {
+        HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vB);
+        if (df_attributes & DF_B_WIDE) {
+          HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vB+1);
+        }
+      }
+      if (df_attributes & DF_UC) {
+        HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vC);
+        if (df_attributes & DF_C_WIDE) {
+          HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vC+1);
+        }
+      }
+    }
+    if (df_attributes & DF_FORMAT_35C) {
+      for (unsigned int i = 0; i < d_insn->vA; i++) {
+        HandleLiveInUse(use_v, def_v, live_in_v, d_insn->arg[i]);
+      }
+    }
+    if (df_attributes & DF_FORMAT_3RC) {
+      for (unsigned int i = 0; i < d_insn->vA; i++) {
+        HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vC+i);
+      }
+    }
+    if (df_attributes & DF_HAS_DEFS) {
+      HandleDef(def_v, d_insn->vA);
+      if (df_attributes & DF_A_WIDE) {
+        HandleDef(def_v, d_insn->vA+1);
+      }
+    }
+  }
+  return true;
+}
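+
+// Example for FindLocalLiveIn: in a block containing "add-int v0, v1, v2"
+// followed by "const/4 v1, #int 5", v1 and v2 are used before any local
+// definition and so land in live_in_v, while v0 and v1 end up in def_v.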
+
+int MIRGraph::AddNewSReg(int v_reg)
+{
+  // Compiler temps always have a subscript of 0
+  int subscript = (v_reg < 0) ? 0 : ++ssa_last_defs_[v_reg];
+  int ssa_reg = GetNumSSARegs();
+  SetNumSSARegs(ssa_reg + 1);
+  InsertGrowableList(cu_, ssa_base_vregs_, v_reg);
+  InsertGrowableList(cu_, ssa_subscripts_, subscript);
+  std::string ssa_name = GetSSAName(cu_, ssa_reg);
+  char* name = static_cast<char*>(NewMem(cu_, ssa_name.length() + 1, false, kAllocDFInfo));
+  strncpy(name, ssa_name.c_str(), ssa_name.length() + 1);
+  InsertGrowableList(cu_, ssa_strings_, reinterpret_cast<uintptr_t>(name));
+  DCHECK_EQ(ssa_base_vregs_->num_used, ssa_subscripts_->num_used);
+  return ssa_reg;
+}
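+
+// Example for AddNewSReg: if v2 has already been defined twice, a new def of
+// v2 gets subscript 3 (an SSA name of roughly "v2_3") and a fresh ssa_reg
+// index at the tail of the ssa_base_vregs_/ssa_subscripts_ lists.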
+
+/* Look up the latest SSA register for a given Dalvik register */
+void MIRGraph::HandleSSAUse(int* uses, int dalvik_reg, int reg_index)
+{
+  DCHECK((dalvik_reg >= 0) && (dalvik_reg < cu_->num_dalvik_registers));
+  uses[reg_index] = vreg_to_ssa_map_[dalvik_reg];
+}
+
+/* Set up a new SSA register for a given Dalvik register */
+void MIRGraph::HandleSSADef(int* defs, int dalvik_reg, int reg_index)
+{
+  DCHECK((dalvik_reg >= 0) && (dalvik_reg < cu_->num_dalvik_registers));
+  int ssa_reg = AddNewSReg(dalvik_reg);
+  vreg_to_ssa_map_[dalvik_reg] = ssa_reg;
+  defs[reg_index] = ssa_reg;
+}
+
+/* Look up new SSA names for format_35c instructions */
+void MIRGraph::DataFlowSSAFormat35C(MIR* mir)
+{
+  DecodedInstruction *d_insn = &mir->dalvikInsn;
+  int num_uses = d_insn->vA;
+  int i;
+
+  mir->ssa_rep->num_uses = num_uses;
+  mir->ssa_rep->uses = static_cast<int*>(NewMem(cu_, sizeof(int) * num_uses, true, kAllocDFInfo));
+  // NOTE: will be filled in during type & size inference pass
+  mir->ssa_rep->fp_use = static_cast<bool*>(NewMem(cu_, sizeof(bool) * num_uses, true,
+                                                 kAllocDFInfo));
+
+  for (i = 0; i < num_uses; i++) {
+    HandleSSAUse(mir->ssa_rep->uses, d_insn->arg[i], i);
+  }
+}
+
+/* Look up new SSA names for format_3rc instructions */
+void MIRGraph::DataFlowSSAFormat3RC(MIR* mir)
+{
+  DecodedInstruction *d_insn = &mir->dalvikInsn;
+  int num_uses = d_insn->vA;
+  int i;
+
+  mir->ssa_rep->num_uses = num_uses;
+  mir->ssa_rep->uses = static_cast<int*>(NewMem(cu_, sizeof(int) * num_uses, true, kAllocDFInfo));
+  // NOTE: will be filled in during type & size inference pass
+  mir->ssa_rep->fp_use = static_cast<bool*>(NewMem(cu_, sizeof(bool) * num_uses, true,
+                                                 kAllocDFInfo));
+
+  for (i = 0; i < num_uses; i++) {
+    HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+i, i);
+  }
+}
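+
+// Example for DataFlowSSAFormat3RC: "invoke-virtual/range {v4 .. v7}" has
+// vA = 4 and vC = 4, so the four uses map the current SSA names of v4..v7.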
+
+/* Entry function to convert a block into SSA representation */
+bool MIRGraph::DoSSAConversion(BasicBlock* bb)
+{
+  MIR* mir;
+
+  if (bb->data_flow_info == NULL) return false;
+
+  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    mir->ssa_rep = static_cast<struct SSARepresentation *>(NewMem(cu_, sizeof(SSARepresentation),
+                                                                 true, kAllocDFInfo));
+
+    int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+
+    // If not a pseudo-op, note whether an invoke makes this method non-leaf
+    if (static_cast<int>(mir->dalvikInsn.opcode) <
+        static_cast<int>(kNumPackedOpcodes)) {
+      int flags = Instruction::FlagsOf(mir->dalvikInsn.opcode);
+
+      if (flags & Instruction::kInvoke) {
+        cu_->attributes &= ~METHOD_IS_LEAF;
+      }
+    }
+
+    int num_uses = 0;
+
+    if (df_attributes & DF_FORMAT_35C) {
+      DataFlowSSAFormat35C(mir);
+      continue;
+    }
+
+    if (df_attributes & DF_FORMAT_3RC) {
+      DataFlowSSAFormat3RC(mir);
+      continue;
+    }
+
+    if (df_attributes & DF_HAS_USES) {
+      if (df_attributes & DF_UA) {
+        num_uses++;
+        if (df_attributes & DF_A_WIDE) {
+          num_uses++;
+        }
+      }
+      if (df_attributes & DF_UB) {
+        num_uses++;
+        if (df_attributes & DF_B_WIDE) {
+          num_uses++;
+        }
+      }
+      if (df_attributes & DF_UC) {
+        num_uses++;
+        if (df_attributes & DF_C_WIDE) {
+          num_uses++;
+        }
+      }
+    }
+
+    if (num_uses) {
+      mir->ssa_rep->num_uses = num_uses;
+      mir->ssa_rep->uses = static_cast<int*>(NewMem(cu_, sizeof(int) * num_uses, false,
+                                                   kAllocDFInfo));
+      mir->ssa_rep->fp_use = static_cast<bool*>(NewMem(cu_, sizeof(bool) * num_uses, false,
+                                                     kAllocDFInfo));
+    }
+
+    int num_defs = 0;
+
+    if (df_attributes & DF_HAS_DEFS) {
+      num_defs++;
+      if (df_attributes & DF_A_WIDE) {
+        num_defs++;
+      }
+    }
+
+    if (num_defs) {
+      mir->ssa_rep->num_defs = num_defs;
+      mir->ssa_rep->defs = static_cast<int*>(NewMem(cu_, sizeof(int) * num_defs, false,
+                                                   kAllocDFInfo));
+      mir->ssa_rep->fp_def = static_cast<bool*>(NewMem(cu_, sizeof(bool) * num_defs, false,
+                                                     kAllocDFInfo));
+    }
+
+    DecodedInstruction *d_insn = &mir->dalvikInsn;
+
+    if (df_attributes & DF_HAS_USES) {
+      num_uses = 0;
+      if (df_attributes & DF_UA) {
+        mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
+        HandleSSAUse(mir->ssa_rep->uses, d_insn->vA, num_uses++);
+        if (df_attributes & DF_A_WIDE) {
+          mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
+          HandleSSAUse(mir->ssa_rep->uses, d_insn->vA+1, num_uses++);
+        }
+      }
+      if (df_attributes & DF_UB) {
+        mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
+        HandleSSAUse(mir->ssa_rep->uses, d_insn->vB, num_uses++);
+        if (df_attributes & DF_B_WIDE) {
+          mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
+          HandleSSAUse(mir->ssa_rep->uses, d_insn->vB+1, num_uses++);
+        }
+      }
+      if (df_attributes & DF_UC) {
+        mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
+        HandleSSAUse(mir->ssa_rep->uses, d_insn->vC, num_uses++);
+        if (df_attributes & DF_C_WIDE) {
+          mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
+          HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+1, num_uses++);
+        }
+      }
+    }
+    if (df_attributes & DF_HAS_DEFS) {
+      mir->ssa_rep->fp_def[0] = df_attributes & DF_FP_A;
+      HandleSSADef(mir->ssa_rep->defs, d_insn->vA, 0);
+      if (df_attributes & DF_A_WIDE) {
+        mir->ssa_rep->fp_def[1] = df_attributes & DF_FP_A;
+        HandleSSADef(mir->ssa_rep->defs, d_insn->vA+1, 1);
+      }
+    }
+  }
+
+  if (!cu_->disable_dataflow) {
+    /*
+     * Take a snapshot of Dalvik->SSA mapping at the end of each block. The
+     * input to PHI nodes can be derived from the snapshot of all
+     * predecessor blocks.
+     */
+    bb->data_flow_info->vreg_to_ssa_map =
+        static_cast<int*>(NewMem(cu_, sizeof(int) * cu_->num_dalvik_registers, false,
+                                 kAllocDFInfo));
+
+    memcpy(bb->data_flow_info->vreg_to_ssa_map, vreg_to_ssa_map_,
+           sizeof(int) * cu_->num_dalvik_registers);
+  }
+  return true;
+}
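+
+// Example of the end-of-block snapshot taken in DoSSAConversion: if a block
+// exits with v0 mapped to v0_2, a successor's phi node for v0 picks up v0_2
+// from this block's saved vreg_to_ssa_map.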
+
+/* Set up the basic data structures for SSA conversion */
+void MIRGraph::CompilerInitializeSSAConversion()
+{
+  int i;
+  int num_dalvik_reg = cu_->num_dalvik_registers;
+
+  ssa_base_vregs_ =
+      static_cast<GrowableList*>(NewMem(cu_, sizeof(GrowableList), false, kAllocDFInfo));
+  ssa_subscripts_ =
+      static_cast<GrowableList*>(NewMem(cu_, sizeof(GrowableList), false, kAllocDFInfo));
+  ssa_strings_ =
+      static_cast<GrowableList*>(NewMem(cu_, sizeof(GrowableList), false, kAllocDFInfo));
+  // Create the ssa mappings, estimating the max size
+  CompilerInitGrowableList(cu_, ssa_base_vregs_, num_dalvik_reg + GetDefCount() + 128,
+                           kListSSAtoDalvikMap);
+  CompilerInitGrowableList(cu_, ssa_subscripts_, num_dalvik_reg + GetDefCount() + 128,
+                           kListSSAtoDalvikMap);
+  CompilerInitGrowableList(cu_, ssa_strings_, num_dalvik_reg + GetDefCount() + 128,
+                           kListSSAtoDalvikMap);
+  /*
+   * Initial number of SSA registers is equal to the number of Dalvik
+   * registers.
+   */
+  SetNumSSARegs(num_dalvik_reg);
+
+  /*
+   * Initialize the SSA2Dalvik map list. For the first num_dalvik_reg elements,
+   * the subscript is 0 so we use the ENCODE_REG_SUB macro to encode the value
+   * into "(0 << 16) | i"
+   */
+  for (i = 0; i < num_dalvik_reg; i++) {
+    InsertGrowableList(cu_, ssa_base_vregs_, i);
+    InsertGrowableList(cu_, ssa_subscripts_, 0);
+    std::string ssa_name = GetSSAName(cu_, i);
+    char* name = static_cast<char*>(NewMem(cu_, ssa_name.length() + 1, true, kAllocDFInfo));
+    strncpy(name, ssa_name.c_str(), ssa_name.length() + 1);
+    InsertGrowableList(cu_, ssa_strings_, reinterpret_cast<uintptr_t>(name));
+  }
+
+  /*
+   * Initialize the DalvikToSSAMap map. There is one entry for each
+   * Dalvik register, and the SSA names for those are the same.
+   */
+  vreg_to_ssa_map_ =
+      static_cast<int*>(NewMem(cu_, sizeof(int) * num_dalvik_reg, false, kAllocDFInfo));
+  /* Keep track of the highest def for each Dalvik reg */
+  ssa_last_defs_ =
+      static_cast<int*>(NewMem(cu_, sizeof(int) * num_dalvik_reg, false, kAllocDFInfo));
+
+  for (i = 0; i < num_dalvik_reg; i++) {
+    vreg_to_ssa_map_[i] = i;
+    ssa_last_defs_[i] = 0;
+  }
+
+  /* Add ssa reg for Method* */
+  cu_->method_sreg = AddNewSReg(SSA_METHOD_BASEREG);
+
+  /*
+   * Allocate the BasicBlockDataFlow structure for the entry and code blocks
+   */
+  GrowableListIterator iterator = GetBasicBlockIterator();
+
+  while (true) {
+    BasicBlock* bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iterator));
+    if (bb == NULL) break;
+    if (bb->hidden) continue;
+    if (bb->block_type == kDalvikByteCode ||
+        bb->block_type == kEntryBlock ||
+        bb->block_type == kExitBlock) {
+      bb->data_flow_info = static_cast<BasicBlockDataFlow*>(NewMem(cu_, sizeof(BasicBlockDataFlow),
+                                                                   true, kAllocDFInfo));
+    }
+  }
+}
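+
+// Example of the initial state set up above: with 5 Dalvik registers the
+// starting SSA names are v0_0..v4_0 (base vreg i, subscript 0), and Method*
+// then receives the next ssa_reg via AddNewSReg(SSA_METHOD_BASEREG).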
+
+/* Clear the visited flag for each BB */
+bool MIRGraph::ClearVisitedFlag(struct BasicBlock* bb)
+{
+  bb->visited = false;
+  return true;
+}
+
+/*
+ * This function will make a best guess at whether the invoke will
+ * end up using Method*.  It isn't critical to get it exactly right,
+ * and attempting to do so would involve more complexity than it's
+ * worth.
+ */
+bool MIRGraph::InvokeUsesMethodStar(MIR* mir)
+{
+  InvokeType type;
+  Instruction::Code opcode = mir->dalvikInsn.opcode;
+  switch (opcode) {
+    case Instruction::INVOKE_STATIC:
+    case Instruction::INVOKE_STATIC_RANGE:
+      type = kStatic;
+      break;
+    case Instruction::INVOKE_DIRECT:
+    case Instruction::INVOKE_DIRECT_RANGE:
+      type = kDirect;
+      break;
+    case Instruction::INVOKE_VIRTUAL:
+    case Instruction::INVOKE_VIRTUAL_RANGE:
+      type = kVirtual;
+      break;
+    case Instruction::INVOKE_INTERFACE:
+    case Instruction::INVOKE_INTERFACE_RANGE:
+      return false;
+    case Instruction::INVOKE_SUPER_RANGE:
+    case Instruction::INVOKE_SUPER:
+      type = kSuper;
+      break;
+    default:
+      LOG(WARNING) << "Unexpected invoke op: " << opcode;
+      return false;
+  }
+  DexCompilationUnit m_unit(cu_);
+  // TODO: add a flag so we don't count the stats for this twice
+  uint32_t dex_method_idx = mir->dalvikInsn.vB;
+  int vtable_idx;
+  uintptr_t direct_code;
+  uintptr_t direct_method;
+  bool fast_path =
+      cu_->compiler_driver->ComputeInvokeInfo(dex_method_idx, &m_unit, type,
+                                             vtable_idx, direct_code,
+                                             direct_method) &&
+                                             !(cu_->enable_debug & (1 << kDebugSlowInvokePath));
+  return (((type == kDirect) || (type == kStatic)) &&
+          fast_path && ((direct_code == 0) || (direct_method == 0)));
+}
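+
+// Example for InvokeUsesMethodStar: a fast-path invoke-static whose target has
+// both direct_code and direct_method resolved returns false here, since such a
+// call can be emitted without loading Method*.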
+
+/*
+ * Count uses, weighting by loop nesting depth.  This code only
+ * counts explicitly used s_regs.  A later phase will add implicit
+ * counts for things such as Method*, null-checked references, etc.
+ */
+bool MIRGraph::CountUses(struct BasicBlock* bb)
+{
+  if (bb->block_type != kDalvikByteCode) {
+    return false;
+  }
+  for (MIR* mir = bb->first_mir_insn; (mir != NULL); mir = mir->next) {
+    if (mir->ssa_rep == NULL) {
+      continue;
+    }
+    uint32_t weight = std::min(16U, static_cast<uint32_t>(bb->nesting_depth));
+    for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
+      int s_reg = mir->ssa_rep->uses[i];
+      DCHECK_LT(s_reg, static_cast<int>(use_counts_.num_used));
+      raw_use_counts_.elem_list[s_reg]++;
+      use_counts_.elem_list[s_reg] += (1 << weight);
+    }
+    if (!(cu_->disable_opt & (1 << kPromoteCompilerTemps))) {
+      int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+      // Implicit use of Method*?
+      if (df_attributes & DF_UMS) {
+        /*
+         * Some invokes will not use Method* - need to perform test similar
+         * to that found in GenInvoke() to decide whether to count refs
+         * for Method* on invoke-class opcodes.
+         * TODO: refactor for common test here, save results for GenInvoke
+         */
+        bool uses_method_star = true;
+        if ((df_attributes & (DF_FORMAT_35C | DF_FORMAT_3RC)) &&
+            !(df_attributes & DF_NON_NULL_RET)) {
+          uses_method_star &= InvokeUsesMethodStar(mir);
+        }
+        if (uses_method_star) {
+          raw_use_counts_.elem_list[cu_->method_sreg]++;
+          use_counts_.elem_list[cu_->method_sreg] += (1 << weight);
+        }
+      }
+    }
+  }
+  return false;
+}
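+
+// Example of the weighting in CountUses: a use at loop nesting depth 2 adds 1
+// to raw_use_counts_ but 1 << 2 = 4 to use_counts_, biasing later promotion
+// toward registers that are hot inside loops.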
+
+void MIRGraph::MethodUseCount()
+{
+  int num_ssa_regs = GetNumSSARegs();
+  CompilerInitGrowableList(cu_, &use_counts_, num_ssa_regs + 32, kListMisc);
+  CompilerInitGrowableList(cu_, &raw_use_counts_, num_ssa_regs + 32, kListMisc);
+  // Initialize list
+  for (int i = 0; i < num_ssa_regs; i++) {
+    InsertGrowableList(cu_, &use_counts_, 0);
+    InsertGrowableList(cu_, &raw_use_counts_, 0);
+  }
+  if (cu_->disable_opt & (1 << kPromoteRegs)) {
+    return;
+  }
+  DataflowIterator iter(this, kAllNodes, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    CountUses(bb);
+  }
+}
+
+/* Verify that this block is recorded as a successor of each of its claimed predecessors */
+bool MIRGraph::VerifyPredInfo(BasicBlock* bb)
+{
+  GrowableListIterator iter;
+
+  GrowableListIteratorInit(bb->predecessors, &iter);
+  while (true) {
+    BasicBlock *pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+    if (!pred_bb) break;
+    bool found = false;
+    if (pred_bb->taken == bb) {
+      found = true;
+    } else if (pred_bb->fall_through == bb) {
+      found = true;
+    } else if (pred_bb->successor_block_list.block_list_type != kNotUsed) {
+      GrowableListIterator iterator;
+      GrowableListIteratorInit(&pred_bb->successor_block_list.blocks,
+                                  &iterator);
+      while (true) {
+        SuccessorBlockInfo *successor_block_info =
+            reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
+        if (successor_block_info == NULL) break;
+        BasicBlock *succ_bb = successor_block_info->block;
+        if (succ_bb == bb) {
+          found = true;
+          break;
+        }
+      }
+    }
+    if (!found) {
+      char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];
+      GetBlockName(bb, block_name1);
+      GetBlockName(pred_bb, block_name2);
+      DumpCFG("/sdcard/cfg/", false);
+      LOG(FATAL) << "Successor " << block_name1 << "not found from "
+                 << block_name2;
+    }
+  }
+  return true;
+}
+
+void MIRGraph::VerifyDataflow()
+{
+  /* Verify that all blocks are connected as claimed */
+  DataflowIterator iter(this, kAllNodes, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    VerifyPredInfo(bb);
+  }
+}
+
+}  // namespace art
diff --git a/src/compiler/dex/mir_graph.cc b/src/compiler/dex/mir_graph.cc
new file mode 100644
index 0000000..71aaa38
--- /dev/null
+++ b/src/compiler/dex/mir_graph.cc
@@ -0,0 +1,840 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler_internals.h"
+#include "mir_graph.h"
+#include "leb128.h"
+#include "dex_file-inl.h"
+
+namespace art {
+
+#define MAX_PATTERN_LEN 5
+
+struct CodePattern {
+  const Instruction::Code opcodes[MAX_PATTERN_LEN];
+  const SpecialCaseHandler handler_code;
+};
+
+static const CodePattern special_patterns[] = {
+  {{Instruction::RETURN_VOID}, kNullMethod},
+  {{Instruction::CONST, Instruction::RETURN}, kConstFunction},
+  {{Instruction::CONST_4, Instruction::RETURN}, kConstFunction},
+  {{Instruction::CONST_4, Instruction::RETURN_OBJECT}, kConstFunction},
+  {{Instruction::CONST_16, Instruction::RETURN}, kConstFunction},
+  {{Instruction::IGET, Instruction::RETURN}, kIGet},
+  {{Instruction::IGET_BOOLEAN, Instruction::RETURN}, kIGetBoolean},
+  {{Instruction::IGET_OBJECT, Instruction::RETURN_OBJECT}, kIGetObject},
+  {{Instruction::IGET_BYTE, Instruction::RETURN}, kIGetByte},
+  {{Instruction::IGET_CHAR, Instruction::RETURN}, kIGetChar},
+  {{Instruction::IGET_SHORT, Instruction::RETURN}, kIGetShort},
+  {{Instruction::IGET_WIDE, Instruction::RETURN_WIDE}, kIGetWide},
+  {{Instruction::IPUT, Instruction::RETURN_VOID}, kIPut},
+  {{Instruction::IPUT_BOOLEAN, Instruction::RETURN_VOID}, kIPutBoolean},
+  {{Instruction::IPUT_OBJECT, Instruction::RETURN_VOID}, kIPutObject},
+  {{Instruction::IPUT_BYTE, Instruction::RETURN_VOID}, kIPutByte},
+  {{Instruction::IPUT_CHAR, Instruction::RETURN_VOID}, kIPutChar},
+  {{Instruction::IPUT_SHORT, Instruction::RETURN_VOID}, kIPutShort},
+  {{Instruction::IPUT_WIDE, Instruction::RETURN_VOID}, kIPutWide},
+  {{Instruction::RETURN}, kIdentity},
+  {{Instruction::RETURN_OBJECT}, kIdentity},
+  {{Instruction::RETURN_WIDE}, kIdentity},
+};
+
+MIRGraph::MIRGraph(CompilationUnit* cu)
+    : cu_(cu),
+      ssa_base_vregs_(NULL),
+      ssa_subscripts_(NULL),
+      ssa_strings_(NULL),
+      vreg_to_ssa_map_(NULL),
+      ssa_last_defs_(NULL),
+      is_constant_v_(NULL),
+      constant_values_(NULL),
+      num_reachable_blocks_(0),
+      i_dom_list_(NULL),
+      def_block_matrix_(NULL),
+      temp_block_v_(NULL),
+      temp_dalvik_register_v_(NULL),
+      temp_ssa_register_v_(NULL),
+      try_block_addr_(NULL),
+      entry_block_(NULL),
+      exit_block_(NULL),
+      cur_block_(NULL),
+      num_blocks_(0),
+      current_code_item_(NULL),
+      current_method_(kInvalidEntry),
+      current_offset_(kInvalidEntry),
+      def_count_(0),
+      opcode_count_(NULL),
+      num_ssa_regs_(0) {
+  CompilerInitGrowableList(cu, &block_list_, 0, kListBlockList);
+  try_block_addr_ = AllocBitVector(cu, 0, true /* expandable */);
+}
+
+bool MIRGraph::ContentIsInsn(const uint16_t* code_ptr) {
+  uint16_t instr = *code_ptr;
+  Instruction::Code opcode = static_cast<Instruction::Code>(instr & 0xff);
+  /*
+   * Since the low 8 bits of a data payload may look like a NOP, we need to
+   * check both the low byte and the whole code unit to decide whether this
+   * is code or data.
+   */
+  return (opcode != Instruction::NOP || instr == 0);
+}
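+
+// Example for ContentIsInsn: the first code unit of a packed-switch payload is
+// 0x0100; its low byte decodes as NOP but the full unit is non-zero, so it is
+// correctly treated as data.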
+
+/*
+ * Parse an instruction and return its length in code units
+ */
+int MIRGraph::ParseInsn(const uint16_t* code_ptr, DecodedInstruction* decoded_instruction)
+{
+  // Don't parse instruction data
+  if (!ContentIsInsn(code_ptr)) {
+    return 0;
+  }
+
+  const Instruction* instruction = Instruction::At(code_ptr);
+  *decoded_instruction = DecodedInstruction(instruction);
+
+  return instruction->SizeInCodeUnits();
+}
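+
+// Example for ParseInsn: "const v0, #+12345678" occupies 3 code units, so the
+// caller advances code_ptr by the returned width of 3.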
+
+
+/* Split an existing block from the specified code offset into two */
+BasicBlock* MIRGraph::SplitBlock(unsigned int code_offset,
+                                 BasicBlock* orig_block, BasicBlock** immed_pred_block_p)
+{
+  MIR* insn = orig_block->first_mir_insn;
+  while (insn) {
+    if (insn->offset == code_offset) break;
+    insn = insn->next;
+  }
+  if (insn == NULL) {
+    LOG(FATAL) << "Break split failed";
+  }
+  BasicBlock *bottom_block = NewMemBB(cu_, kDalvikByteCode, num_blocks_++);
+  InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(bottom_block));
+
+  bottom_block->start_offset = code_offset;
+  bottom_block->first_mir_insn = insn;
+  bottom_block->last_mir_insn = orig_block->last_mir_insn;
+
+  /* If this block was terminated by a return, the flag needs to go with the bottom block */
+  bottom_block->terminated_by_return = orig_block->terminated_by_return;
+  orig_block->terminated_by_return = false;
+
+  /* Add it to the quick lookup cache */
+  block_map_.Put(bottom_block->start_offset, bottom_block);
+
+  /* Handle the taken path */
+  bottom_block->taken = orig_block->taken;
+  if (bottom_block->taken) {
+    orig_block->taken = NULL;
+    DeleteGrowableList(bottom_block->taken->predecessors, reinterpret_cast<uintptr_t>(orig_block));
+    InsertGrowableList(cu_, bottom_block->taken->predecessors,
+                          reinterpret_cast<uintptr_t>(bottom_block));
+  }
+
+  /* Handle the fallthrough path */
+  bottom_block->fall_through = orig_block->fall_through;
+  orig_block->fall_through = bottom_block;
+  InsertGrowableList(cu_, bottom_block->predecessors,
+                        reinterpret_cast<uintptr_t>(orig_block));
+  if (bottom_block->fall_through) {
+    DeleteGrowableList(bottom_block->fall_through->predecessors,
+                          reinterpret_cast<uintptr_t>(orig_block));
+    InsertGrowableList(cu_, bottom_block->fall_through->predecessors,
+                          reinterpret_cast<uintptr_t>(bottom_block));
+  }
+
+  /* Handle the successor list */
+  if (orig_block->successor_block_list.block_list_type != kNotUsed) {
+    bottom_block->successor_block_list = orig_block->successor_block_list;
+    orig_block->successor_block_list.block_list_type = kNotUsed;
+    GrowableListIterator iterator;
+
+    GrowableListIteratorInit(&bottom_block->successor_block_list.blocks,
+                                &iterator);
+    while (true) {
+      SuccessorBlockInfo *successor_block_info =
+          reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
+      if (successor_block_info == NULL) break;
+      BasicBlock *bb = successor_block_info->block;
+      DeleteGrowableList(bb->predecessors, reinterpret_cast<uintptr_t>(orig_block));
+      InsertGrowableList(cu_, bb->predecessors, reinterpret_cast<uintptr_t>(bottom_block));
+    }
+  }
+
+  orig_block->last_mir_insn = insn->prev;
+
+  insn->prev->next = NULL;
+  insn->prev = NULL;
+  /*
+   * Update the immediate predecessor block pointer so that outgoing edges
+   * can be applied to the proper block.
+   */
+  if (immed_pred_block_p) {
+    DCHECK_EQ(*immed_pred_block_p, orig_block);
+    *immed_pred_block_p = bottom_block;
+  }
+  return bottom_block;
+}
+
+/*
+ * Given a code offset, find the block that starts with it. If the offset
+ * is in the middle of an existing block, split it into two.  If
+ * immed_pred_block_p is non-null and *immed_pred_block_p is the block being
+ * split, update *immed_pred_block_p to point to the bottom block so that
+ * outgoing edges can be set up properly (by the caller).
+ * Uses a map for fast lookup of the typical cases.
+ */
+BasicBlock* MIRGraph::FindBlock(unsigned int code_offset, bool split, bool create,
+                                BasicBlock** immed_pred_block_p)
+{
+  BasicBlock* bb;
+  unsigned int i;
+  SafeMap<unsigned int, BasicBlock*>::iterator it;
+
+  it = block_map_.find(code_offset);
+  if (it != block_map_.end()) {
+    return it->second;
+  } else if (!create) {
+    return NULL;
+  }
+
+  if (split) {
+    for (i = 0; i < block_list_.num_used; i++) {
+      bb = reinterpret_cast<BasicBlock*>(block_list_.elem_list[i]);
+      if (bb->block_type != kDalvikByteCode) continue;
+      /* Check if a branch jumps into the middle of an existing block */
+      if ((code_offset > bb->start_offset) && (bb->last_mir_insn != NULL) &&
+          (code_offset <= bb->last_mir_insn->offset)) {
+        BasicBlock *new_bb =
+            SplitBlock(code_offset, bb,
+                       ((immed_pred_block_p != NULL) && (bb == *immed_pred_block_p)) ?
+                           immed_pred_block_p : NULL);
+        return new_bb;
+      }
+    }
+  }
+
+  /* Create a new one */
+  bb = NewMemBB(cu_, kDalvikByteCode, num_blocks_++);
+  InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(bb));
+  bb->start_offset = code_offset;
+  block_map_.Put(bb->start_offset, bb);
+  return bb;
+}
+
+/* Identify code range in try blocks and set up the empty catch blocks */
+void MIRGraph::ProcessTryCatchBlocks()
+{
+  int tries_size = current_code_item_->tries_size_;
+  int offset;
+
+  if (tries_size == 0) {
+    return;
+  }
+
+  for (int i = 0; i < tries_size; i++) {
+    const DexFile::TryItem* pTry =
+        DexFile::GetTryItems(*current_code_item_, i);
+    int start_offset = pTry->start_addr_;
+    int end_offset = start_offset + pTry->insn_count_;
+    for (offset = start_offset; offset < end_offset; offset++) {
+      SetBit(cu_, try_block_addr_, offset);
+    }
+  }
+
+  // Iterate over each of the handlers to enqueue the empty Catch blocks
+  const byte* handlers_ptr = DexFile::GetCatchHandlerData(*current_code_item_, 0);
+  uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
+  for (uint32_t idx = 0; idx < handlers_size; idx++) {
+    CatchHandlerIterator iterator(handlers_ptr);
+    for (; iterator.HasNext(); iterator.Next()) {
+      uint32_t address = iterator.GetHandlerAddress();
+      FindBlock(address, false /* split */, true /*create*/,
+                /* immed_pred_block_p */ NULL);
+    }
+    handlers_ptr = iterator.EndDataPointer();
+  }
+}
+
+/* Process instructions with the kBranch flag */
+BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
+                                       int flags, const uint16_t* code_ptr,
+                                       const uint16_t* code_end)
+{
+  int target = cur_offset;
+  switch (insn->dalvikInsn.opcode) {
+    case Instruction::GOTO:
+    case Instruction::GOTO_16:
+    case Instruction::GOTO_32:
+      target += insn->dalvikInsn.vA;
+      break;
+    case Instruction::IF_EQ:
+    case Instruction::IF_NE:
+    case Instruction::IF_LT:
+    case Instruction::IF_GE:
+    case Instruction::IF_GT:
+    case Instruction::IF_LE:
+      cur_block->conditional_branch = true;
+      target += insn->dalvikInsn.vC;
+      break;
+    case Instruction::IF_EQZ:
+    case Instruction::IF_NEZ:
+    case Instruction::IF_LTZ:
+    case Instruction::IF_GEZ:
+    case Instruction::IF_GTZ:
+    case Instruction::IF_LEZ:
+      cur_block->conditional_branch = true;
+      target += insn->dalvikInsn.vB;
+      break;
+    default:
+      LOG(FATAL) << "Unexpected opcode(" << insn->dalvikInsn.opcode << ") with kBranch set";
+  }
+  BasicBlock *taken_block = FindBlock(target, /* split */ true, /* create */ true,
+                                      /* immed_pred_block_p */ &cur_block);
+  cur_block->taken = taken_block;
+  InsertGrowableList(cu_, taken_block->predecessors, reinterpret_cast<uintptr_t>(cur_block));
+
+  /* Always terminate the current block for conditional branches */
+  if (flags & Instruction::kContinue) {
+    BasicBlock *fallthrough_block = FindBlock(cur_offset + width,
+                                             /*
+                                              * If the method is processed
+                                              * in sequential order from the
+                                              * beginning, we don't need to
+                                              * specify split for continue
+                                              * blocks. However, this
+                                              * routine can be called by
+                                              * compileLoop, which starts
+                                              * parsing the method from an
+                                              * arbitrary address in the
+                                              * method body.
+                                              */
+                                             true,
+                                             /* create */
+                                             true,
+                                             /* immed_pred_block_p */
+                                             &cur_block);
+    cur_block->fall_through = fallthrough_block;
+    InsertGrowableList(cu_, fallthrough_block->predecessors,
+                          reinterpret_cast<uintptr_t>(cur_block));
+  } else if (code_ptr < code_end) {
+    /* Create a fallthrough block for real instructions (incl. NOP) */
+    if (ContentIsInsn(code_ptr)) {
+      FindBlock(cur_offset + width, /* split */ false, /* create */ true,
+                /* immed_pred_block_p */ NULL);
+    }
+  }
+  return cur_block;
+}
+
+/* Process instructions with the kSwitch flag */
+void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
+                                int flags)
+{
+  const uint16_t* switch_data =
+      reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
+  int size;
+  const int* key_table;
+  const int* target_table;
+  int i;
+  int first_key;
+
+  /*
+   * Packed switch data format:
+   *  ushort ident = 0x0100   magic value
+   *  ushort size             number of entries in the table
+   *  int first_key           first (and lowest) switch case value
+   *  int targets[size]       branch targets, relative to switch opcode
+   *
+   * Total size is (4+size*2) 16-bit code units.
+   */
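+  // For illustration, a hypothetical packed-switch payload with two cases
+  // starting at key 10: 0x0100, 0x0002, 0x000a, 0x0000, then targets[2]
+  // as ints (each int spanning two code units).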
+  if (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) {
+    DCHECK_EQ(static_cast<int>(switch_data[0]),
+              static_cast<int>(Instruction::kPackedSwitchSignature));
+    size = switch_data[1];
+    first_key = switch_data[2] | (switch_data[3] << 16);
+    target_table = reinterpret_cast<const int*>(&switch_data[4]);
+    key_table = NULL;        // Make the compiler happy
+  /*
+   * Sparse switch data format:
+   *  ushort ident = 0x0200   magic value
+   *  ushort size             number of entries in the table; > 0
+   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
+   *  int targets[size]       branch targets, relative to switch opcode
+   *
+   * Total size is (2+size*4) 16-bit code units.
+   */
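+  // For illustration, a hypothetical sparse-switch payload with two cases
+  // (keys 3 and 17): 0x0200, 0x0002, keys[2] = {3, 17} as ints, then
+  // targets[2] as ints (each int spanning two code units).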
+  } else {
+    DCHECK_EQ(static_cast<int>(switch_data[0]),
+              static_cast<int>(Instruction::kSparseSwitchSignature));
+    size = switch_data[1];
+    key_table = reinterpret_cast<const int*>(&switch_data[2]);
+    target_table = reinterpret_cast<const int*>(&switch_data[2 + size*2]);
+    first_key = 0;   // To make the compiler happy
+  }
+
+  if (cur_block->successor_block_list.block_list_type != kNotUsed) {
+    LOG(FATAL) << "Successor block list already in use: "
+               << static_cast<int>(cur_block->successor_block_list.block_list_type);
+  }
+  cur_block->successor_block_list.block_list_type =
+      (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
+      kPackedSwitch : kSparseSwitch;
+  CompilerInitGrowableList(cu_, &cur_block->successor_block_list.blocks, size,
+                      kListSuccessorBlocks);
+
+  for (i = 0; i < size; i++) {
+    BasicBlock *case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
+                                      /* create */ true, /* immed_pred_block_p */ &cur_block);
+    SuccessorBlockInfo *successor_block_info =
+        static_cast<SuccessorBlockInfo*>(NewMem(cu_, sizeof(SuccessorBlockInfo),
+                                         false, kAllocSuccessor));
+    successor_block_info->block = case_block;
+    successor_block_info->key =
+        (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
+        first_key + i : key_table[i];
+    InsertGrowableList(cu_, &cur_block->successor_block_list.blocks,
+                          reinterpret_cast<uintptr_t>(successor_block_info));
+    InsertGrowableList(cu_, case_block->predecessors,
+                          reinterpret_cast<uintptr_t>(cur_block));
+  }
+
+  /* Fall-through case */
+  BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* split */ false,
+                                            /* create */ true, /* immed_pred_block_p */ NULL);
+  cur_block->fall_through = fallthrough_block;
+  InsertGrowableList(cu_, fallthrough_block->predecessors,
+                        reinterpret_cast<uintptr_t>(cur_block));
+}
+
+/* Process instructions with the kThrow flag */
+BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
+                                      int flags, ArenaBitVector* try_block_addr,
+                                      const uint16_t* code_ptr, const uint16_t* code_end)
+{
+  bool in_try_block = IsBitSet(try_block_addr, cur_offset);
+
+  /* In try block */
+  if (in_try_block) {
+    CatchHandlerIterator iterator(*current_code_item_, cur_offset);
+
+    if (cur_block->successor_block_list.block_list_type != kNotUsed) {
+      LOG(INFO) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+      LOG(FATAL) << "Successor block list already in use: "
+                 << static_cast<int>(cur_block->successor_block_list.block_list_type);
+    }
+
+    cur_block->successor_block_list.block_list_type = kCatch;
+    CompilerInitGrowableList(cu_, &cur_block->successor_block_list.blocks, 2,
+                        kListSuccessorBlocks);
+
+    for (; iterator.HasNext(); iterator.Next()) {
+      BasicBlock *catch_block = FindBlock(iterator.GetHandlerAddress(), false /* split */,
+                                          false /* create */, NULL /* immed_pred_block_p */);
+      catch_block->catch_entry = true;
+      if (kIsDebugBuild) {
+        catches_.insert(catch_block->start_offset);
+      }
+      SuccessorBlockInfo *successor_block_info =
+          static_cast<SuccessorBlockInfo*>(NewMem(cu_, sizeof(SuccessorBlockInfo),
+                                           false, kAllocSuccessor));
+      successor_block_info->block = catch_block;
+      successor_block_info->key = iterator.GetHandlerTypeIndex();
+      InsertGrowableList(cu_, &cur_block->successor_block_list.blocks,
+                            reinterpret_cast<uintptr_t>(successor_block_info));
+      InsertGrowableList(cu_, catch_block->predecessors,
+                            reinterpret_cast<uintptr_t>(cur_block));
+    }
+  } else {
+    BasicBlock *eh_block = NewMemBB(cu_, kExceptionHandling, num_blocks_++);
+    cur_block->taken = eh_block;
+    InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(eh_block));
+    eh_block->start_offset = cur_offset;
+    InsertGrowableList(cu_, eh_block->predecessors, reinterpret_cast<uintptr_t>(cur_block));
+  }
+
+  if (insn->dalvikInsn.opcode == Instruction::THROW) {
+    cur_block->explicit_throw = true;
+    if ((code_ptr < code_end) && ContentIsInsn(code_ptr)) {
+      // Force creation of new block following THROW via side-effect
+      FindBlock(cur_offset + width, /* split */ false, /* create */ true,
+                /* immed_pred_block_p */ NULL);
+    }
+    if (!in_try_block) {
+      // A THROW outside a try block can't be caught locally; no need to split - we're done.
+      return cur_block;
+    }
+  }
+
+  /*
+   * Split the potentially-throwing instruction into two parts.
+   * The first half will be a pseudo-op that captures the exception
+   * edges and terminates the basic block.  It always falls through.
+   * Then, create a new basic block that begins with the throwing instruction
+   * (minus exceptions).  Note: this new basic block must NOT be entered into
+   * the block_map.  If the potentially-throwing instruction is the target of a
+   * future branch, we need to find the check pseudo half.  The new
+   * basic block containing the work portion of the instruction should
+   * only be entered via fallthrough from the block containing the
+   * pseudo exception edge MIR.  Note also that this new block is
+   * not automatically terminated after the work portion, and may
+   * contain following instructions.
+   */
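+  // Sketch of the result:
+  //
+  //   [ ... kMirOpCheck pseudo-op, holds the exception edges ]
+  //                  | fall_through (sole entry to the work half)
+  //                  v
+  //   [ work half of the throwing instruction, absent from block_map_ ... ]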
+  BasicBlock *new_block = NewMemBB(cu_, kDalvikByteCode, num_blocks_++);
+  InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(new_block));
+  new_block->start_offset = insn->offset;
+  cur_block->fall_through = new_block;
+  InsertGrowableList(cu_, new_block->predecessors, reinterpret_cast<uintptr_t>(cur_block));
+  MIR* new_insn = static_cast<MIR*>(NewMem(cu_, sizeof(MIR), true, kAllocMIR));
+  *new_insn = *insn;
+  insn->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpCheck);
+  // Associate the two halves
+  insn->meta.throw_insn = new_insn;
+  new_insn->meta.throw_insn = insn;
+  AppendMIR(new_block, new_insn);
+  return new_block;
+}
+
+/* Parse a Dex method and insert it into the MIRGraph at the current insert point. */
+void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
+                           InvokeType invoke_type, uint32_t class_def_idx,
+                           uint32_t method_idx, jobject class_loader, const DexFile& dex_file)
+{
+  current_code_item_ = code_item;
+  method_stack_.push_back(std::make_pair(current_method_, current_offset_));
+  current_method_ = m_units_.size();
+  current_offset_ = 0;
+  // TODO: will need to snapshot stack image and use that as the mir context identification.
+  m_units_.push_back(new DexCompilationUnit(cu_, class_loader, Runtime::Current()->GetClassLinker(),
+                     dex_file, current_code_item_, class_def_idx, method_idx, access_flags));
+  const uint16_t* code_ptr = current_code_item_->insns_;
+  const uint16_t* code_end =
+      current_code_item_->insns_ + current_code_item_->insns_size_in_code_units_;
+
+  // TODO: need to rework expansion of block list & try_block_addr when inlining activated.
+  ReallocGrowableList(cu_, &block_list_, block_list_.num_used +
+                      current_code_item_->insns_size_in_code_units_);
+  // TODO: replace with explicit resize routine.  Using automatic extension side effect for now.
+  SetBit(cu_, try_block_addr_, current_code_item_->insns_size_in_code_units_);
+  ClearBit(try_block_addr_, current_code_item_->insns_size_in_code_units_);
+
+  // If this is the first method, set up default entry and exit blocks.
+  if (current_method_ == 0) {
+    DCHECK(entry_block_ == NULL);
+    DCHECK(exit_block_ == NULL);
+    DCHECK_EQ(num_blocks_, 0);
+    entry_block_ = NewMemBB(cu_, kEntryBlock, num_blocks_++);
+    exit_block_ = NewMemBB(cu_, kExitBlock, num_blocks_++);
+    InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(entry_block_));
+    InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(exit_block_));
+    // TODO: deprecate all "cu->" fields; move what's left to wherever CompilationUnit is allocated.
+    cu_->dex_file = &dex_file;
+    cu_->class_def_idx = class_def_idx;
+    cu_->method_idx = method_idx;
+    cu_->access_flags = access_flags;
+    cu_->invoke_type = invoke_type;
+    cu_->shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
+    cu_->num_ins = current_code_item_->ins_size_;
+    cu_->num_regs = current_code_item_->registers_size_ - cu_->num_ins;
+    cu_->num_outs = current_code_item_->outs_size_;
+    cu_->num_dalvik_registers = current_code_item_->registers_size_;
+    cu_->insns = current_code_item_->insns_;
+    cu_->code_item = current_code_item_;
+  } else {
+    UNIMPLEMENTED(FATAL) << "Nested inlining not implemented.";
+    /*
+     * Will need to manage storage for ins & outs, push previous state and update
+     * insert point.
+     */
+  }
+
+  /* Current block to record parsed instructions */
+  BasicBlock *cur_block = NewMemBB(cu_, kDalvikByteCode, num_blocks_++);
+  DCHECK_EQ(current_offset_, 0);
+  cur_block->start_offset = current_offset_;
+  InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(cur_block));
+  /* Add first block to the fast lookup cache */
+  // FIXME: block map needs association with offset/method pair rather than just offset.
+  block_map_.Put(cur_block->start_offset, cur_block);
+  // FIXME: this needs to insert at the insert point rather than entry block.
+  entry_block_->fall_through = cur_block;
+  InsertGrowableList(cu_, cur_block->predecessors, reinterpret_cast<uintptr_t>(entry_block_));
+
+  /* Identify code range in try blocks and set up the empty catch blocks */
+  ProcessTryCatchBlocks();
+
+  /* Set up for simple method detection */
+  int num_patterns = sizeof(special_patterns) / sizeof(special_patterns[0]);
+  bool live_pattern = (num_patterns > 0) && !(cu_->disable_opt & (1 << kMatch));
+  bool* dead_pattern =
+      static_cast<bool*>(NewMem(cu_, sizeof(bool) * num_patterns, true, kAllocMisc));
+  SpecialCaseHandler special_case = kNoHandler;
+  // FIXME - wire this up
+  (void)special_case;
+  int pattern_pos = 0;
+
+  /* Parse all instructions and put them into containing basic blocks */
+  while (code_ptr < code_end) {
+    MIR *insn = static_cast<MIR *>(NewMem(cu_, sizeof(MIR), true, kAllocMIR));
+    insn->offset = current_offset_;
+    insn->m_unit_index = current_method_;
+    int width = ParseInsn(code_ptr, &insn->dalvikInsn);
+    insn->width = width;
+    Instruction::Code opcode = insn->dalvikInsn.opcode;
+    if (opcode_count_ != NULL) {
+      opcode_count_[static_cast<int>(opcode)]++;
+    }
+
+    /* Terminate when the data section is seen */
+    if (width == 0) {
+      break;
+    }
+
+    /* Possible simple method? */
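+    /*
+     * Match the opcode stream against each still-live pattern in lockstep:
+     * a pattern survives only while opcodes[pattern_pos] matches every decoded
+     * opcode in turn; one mismatch marks it dead for the rest of the method.
+     */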
+    if (live_pattern) {
+      live_pattern = false;
+      special_case = kNoHandler;
+      for (int i = 0; i < num_patterns; i++) {
+        if (!dead_pattern[i]) {
+          if (special_patterns[i].opcodes[pattern_pos] == opcode) {
+            live_pattern = true;
+            special_case = special_patterns[i].handler_code;
+          } else {
+            dead_pattern[i] = true;
+          }
+        }
+      }
+      pattern_pos++;
+    }
+
+    AppendMIR(cur_block, insn);
+
+    code_ptr += width;
+    int flags = Instruction::FlagsOf(insn->dalvikInsn.opcode);
+
+    int df_flags = oat_data_flow_attributes[insn->dalvikInsn.opcode];
+
+    if (df_flags & DF_HAS_DEFS) {
+      def_count_ += (df_flags & DF_A_WIDE) ? 2 : 1;
+    }
+
+    if (flags & Instruction::kBranch) {
+      cur_block = ProcessCanBranch(cur_block, insn, current_offset_,
+                                   width, flags, code_ptr, code_end);
+    } else if (flags & Instruction::kReturn) {
+      cur_block->terminated_by_return = true;
+      cur_block->fall_through = exit_block_;
+      InsertGrowableList(cu_, exit_block_->predecessors,
+                            reinterpret_cast<uintptr_t>(cur_block));
+      /*
+       * Terminate the current block if there are instructions
+       * afterwards.
+       */
+      if (code_ptr < code_end) {
+        /*
+         * Create a fallthrough block for real instructions
+         * (incl. NOP).
+         */
+        if (ContentIsInsn(code_ptr)) {
+          FindBlock(current_offset_ + width, /* split */ false, /* create */ true,
+                    /* immed_pred_block_p */ NULL);
+        }
+      }
+    } else if (flags & Instruction::kThrow) {
+      cur_block = ProcessCanThrow(cur_block, insn, current_offset_, width, flags, try_block_addr_,
+                                  code_ptr, code_end);
+    } else if (flags & Instruction::kSwitch) {
+      ProcessCanSwitch(cur_block, insn, current_offset_, width, flags);
+    }
+    current_offset_ += width;
+    BasicBlock *next_block = FindBlock(current_offset_, /* split */ false,
+                                       /* create */ false, /* immed_pred_block_p */ NULL);
+    if (next_block) {
+      /*
+       * The next instruction could be the target of a previously parsed
+       * forward branch so a block is already created. If the current
+       * instruction is not an unconditional branch, connect them through
+       * the fall-through link.
+       */
+      DCHECK(cur_block->fall_through == NULL ||
+             cur_block->fall_through == next_block ||
+             cur_block->fall_through == exit_block_);
+
+      if ((cur_block->fall_through == NULL) && (flags & Instruction::kContinue)) {
+        cur_block->fall_through = next_block;
+        InsertGrowableList(cu_, next_block->predecessors,
+                              reinterpret_cast<uintptr_t>(cur_block));
+      }
+      cur_block = next_block;
+    }
+  }
+  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
+    DumpCFG("/sdcard/1_post_parse_cfg/", true);
+  }
+
+  if (cu_->verbose) {
+    DumpCompilationUnit(cu_);
+  }
+}
+
+void MIRGraph::ShowOpcodeStats()
+{
+  DCHECK(opcode_count_ != NULL);
+  LOG(INFO) << "Opcode Count";
+  for (int i = 0; i < kNumPackedOpcodes; i++) {
+    if (opcode_count_[i] != 0) {
+      LOG(INFO) << "-C- " << Instruction::Name(static_cast<Instruction::Code>(i))
+                << " " << opcode_count_[i];
+    }
+  }
+}
+
+// TODO: use a configurable base prefix, and adjust callers to supply pass name.
+/* Dump the CFG into a DOT graph */
+void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks)
+{
+  FILE* file;
+  std::string fname(PrettyMethod(cu_->method_idx, *cu_->dex_file));
+  ReplaceSpecialChars(fname);
+  fname = StringPrintf("%s%s%x.dot", dir_prefix, fname.c_str(),
+                      GetEntryBlock()->fall_through->start_offset);
+  file = fopen(fname.c_str(), "w");
+  if (file == NULL) {
+    return;
+  }
+  fprintf(file, "digraph G {\n");
+
+  fprintf(file, "  rankdir=TB\n");
+
+  int num_blocks = all_blocks ? GetNumBlocks() : num_reachable_blocks_;
+  int idx;
+
+  for (idx = 0; idx < num_blocks; idx++) {
+    int block_idx = all_blocks ? idx : dfs_order_.elem_list[idx];
+    BasicBlock *bb = GetBasicBlock(block_idx);
+    if (bb == NULL) break;
+    if (bb->block_type == kDead) continue;
+    if (bb->block_type == kEntryBlock) {
+      fprintf(file, "  entry_%d [shape=Mdiamond];\n", bb->id);
+    } else if (bb->block_type == kExitBlock) {
+      fprintf(file, "  exit_%d [shape=Mdiamond];\n", bb->id);
+    } else if (bb->block_type == kDalvikByteCode) {
+      fprintf(file, "  block%04x_%d [shape=record,label = \"{ \\\n",
+              bb->start_offset, bb->id);
+      const MIR *mir;
+      fprintf(file, "    {block id %d\\l}%s\\\n", bb->id,
+              bb->first_mir_insn ? " | " : " ");
+      for (mir = bb->first_mir_insn; mir; mir = mir->next) {
+        int opcode = mir->dalvikInsn.opcode;
+        fprintf(file, "    {%04x %s %s %s\\l}%s\\\n", mir->offset,
+                mir->ssa_rep ? GetDalvikDisassembly(cu_, mir) :
+                (opcode < kMirOpFirst) ? Instruction::Name(mir->dalvikInsn.opcode) :
+                extended_mir_op_names[opcode - kMirOpFirst],
+                (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0 ? " no_rangecheck" : " ",
+                (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0 ? " no_nullcheck" : " ",
+                mir->next ? " | " : " ");
+      }
+      fprintf(file, "  }\"];\n\n");
+    } else if (bb->block_type == kExceptionHandling) {
+      char block_name[BLOCK_NAME_LEN];
+
+      GetBlockName(bb, block_name);
+      fprintf(file, "  %s [shape=invhouse];\n", block_name);
+    }
+
+    char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];
+
+    if (bb->taken) {
+      GetBlockName(bb, block_name1);
+      GetBlockName(bb->taken, block_name2);
+      fprintf(file, "  %s:s -> %s:n [style=dotted]\n",
+              block_name1, block_name2);
+    }
+    if (bb->fall_through) {
+      GetBlockName(bb, block_name1);
+      GetBlockName(bb->fall_through, block_name2);
+      fprintf(file, "  %s:s -> %s:n\n", block_name1, block_name2);
+    }
+
+    if (bb->successor_block_list.block_list_type != kNotUsed) {
+      fprintf(file, "  succ%04x_%d [shape=%s,label = \"{ \\\n",
+              bb->start_offset, bb->id,
+              (bb->successor_block_list.block_list_type == kCatch) ?
+               "Mrecord" : "record");
+      GrowableListIterator iterator;
+      GrowableListIteratorInit(&bb->successor_block_list.blocks,
+                                  &iterator);
+      SuccessorBlockInfo *successor_block_info =
+          reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
+
+      int succ_id = 0;
+      while (true) {
+        if (successor_block_info == NULL) break;
+
+        BasicBlock *dest_block = successor_block_info->block;
+        SuccessorBlockInfo *next_successor_block_info =
+            reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
+
+        fprintf(file, "    {<f%d> %04x: %04x\\l}%s\\\n",
+                succ_id++,
+                successor_block_info->key,
+                dest_block->start_offset,
+                (next_successor_block_info != NULL) ? " | " : " ");
+
+        successor_block_info = next_successor_block_info;
+      }
+      fprintf(file, "  }\"];\n\n");
+
+      GetBlockName(bb, block_name1);
+      fprintf(file, "  %s:s -> succ%04x_%d:n [style=dashed]\n",
+              block_name1, bb->start_offset, bb->id);
+
+      if (bb->successor_block_list.block_list_type == kPackedSwitch ||
+          bb->successor_block_list.block_list_type == kSparseSwitch) {
+
+        GrowableListIteratorInit(&bb->successor_block_list.blocks,
+                                    &iterator);
+
+        succ_id = 0;
+        while (true) {
+          SuccessorBlockInfo *successor_block_info =
+              reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
+          if (successor_block_info == NULL) break;
+
+          BasicBlock *dest_block = successor_block_info->block;
+
+          GetBlockName(dest_block, block_name2);
+          fprintf(file, "  succ%04x_%d:f%d:e -> %s:n\n", bb->start_offset,
+                  bb->id, succ_id++, block_name2);
+        }
+      }
+    }
+    fprintf(file, "\n");
+
+    if (cu_->verbose) {
+      /* Display the dominator tree */
+      GetBlockName(bb, block_name1);
+      fprintf(file, "  cfg%s [label=\"%s\", shape=none];\n",
+              block_name1, block_name1);
+      if (bb->i_dom) {
+        GetBlockName(bb->i_dom, block_name2);
+        fprintf(file, "  cfg%s:s -> cfg%s:n\n\n", block_name2, block_name1);
+      }
+    }
+  }
+  fprintf(file, "}\n");
+  fclose(file);
+}
+
+}  // namespace art
diff --git a/src/compiler/dex/mir_graph.h b/src/compiler/dex/mir_graph.h
new file mode 100644
index 0000000..2f91787
--- /dev/null
+++ b/src/compiler/dex/mir_graph.h
@@ -0,0 +1,431 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_MIRGRAPH_H_
+#define ART_SRC_COMPILER_DEX_MIRGRAPH_H_
+
+#include "dex_file.h"
+#include "dex_instruction.h"
+#include "compiler_ir.h"
+
+namespace art {
+
+enum DataFlowAttributePos {
+  kUA = 0,
+  kUB,
+  kUC,
+  kAWide,
+  kBWide,
+  kCWide,
+  kDA,
+  kIsMove,
+  kSetsConst,
+  kFormat35c,
+  kFormat3rc,
+  kNullCheckSrc0,        // Null check of uses[0].
+  kNullCheckSrc1,        // Null check of uses[1].
+  kNullCheckSrc2,        // Null check of uses[2].
+  kNullCheckOut0,        // Null check of outgoing arg0.
+  kDstNonNull,           // May assume dst is non-null.
+  kRetNonNull,           // May assume retval is non-null.
+  kNullTransferSrc0,     // Object copy src[0] -> dst.
+  kNullTransferSrcN,     // Phi null check state transfer.
+  kRangeCheckSrc1,       // Range check of uses[1].
+  kRangeCheckSrc2,       // Range check of uses[2].
+  kRangeCheckSrc3,       // Range check of uses[3].
+  kFPA,
+  kFPB,
+  kFPC,
+  kCoreA,
+  kCoreB,
+  kCoreC,
+  kRefA,
+  kRefB,
+  kRefC,
+  kUsesMethodStar,       // Implicit use of Method*.
+};
+
+#define DF_NOP                  0
+#define DF_UA                   (1 << kUA)
+#define DF_UB                   (1 << kUB)
+#define DF_UC                   (1 << kUC)
+#define DF_A_WIDE               (1 << kAWide)
+#define DF_B_WIDE               (1 << kBWide)
+#define DF_C_WIDE               (1 << kCWide)
+#define DF_DA                   (1 << kDA)
+#define DF_IS_MOVE              (1 << kIsMove)
+#define DF_SETS_CONST           (1 << kSetsConst)
+#define DF_FORMAT_35C           (1 << kFormat35c)
+#define DF_FORMAT_3RC           (1 << kFormat3rc)
+#define DF_NULL_CHK_0           (1 << kNullCheckSrc0)
+#define DF_NULL_CHK_1           (1 << kNullCheckSrc1)
+#define DF_NULL_CHK_2           (1 << kNullCheckSrc2)
+#define DF_NULL_CHK_OUT0        (1 << kNullCheckOut0)
+#define DF_NON_NULL_DST         (1 << kDstNonNull)
+#define DF_NON_NULL_RET         (1 << kRetNonNull)
+#define DF_NULL_TRANSFER_0      (1 << kNullTransferSrc0)
+#define DF_NULL_TRANSFER_N      (1 << kNullTransferSrcN)
+#define DF_RANGE_CHK_1          (1 << kRangeCheckSrc1)
+#define DF_RANGE_CHK_2          (1 << kRangeCheckSrc2)
+#define DF_RANGE_CHK_3          (1 << kRangeCheckSrc3)
+#define DF_FP_A                 (1 << kFPA)
+#define DF_FP_B                 (1 << kFPB)
+#define DF_FP_C                 (1 << kFPC)
+#define DF_CORE_A               (1 << kCoreA)
+#define DF_CORE_B               (1 << kCoreB)
+#define DF_CORE_C               (1 << kCoreC)
+#define DF_REF_A                (1 << kRefA)
+#define DF_REF_B                (1 << kRefB)
+#define DF_REF_C                (1 << kRefC)
+#define DF_UMS                  (1 << kUsesMethodStar)
+
+#define DF_HAS_USES             (DF_UA | DF_UB | DF_UC)
+
+#define DF_HAS_DEFS             (DF_DA)
+
+#define DF_HAS_NULL_CHKS        (DF_NULL_CHK_0 | \
+                                 DF_NULL_CHK_1 | \
+                                 DF_NULL_CHK_2 | \
+                                 DF_NULL_CHK_OUT0)
+
+#define DF_HAS_RANGE_CHKS       (DF_RANGE_CHK_1 | \
+                                 DF_RANGE_CHK_2 | \
+                                 DF_RANGE_CHK_3)
+
+#define DF_HAS_NR_CHKS          (DF_HAS_NULL_CHKS | \
+                                 DF_HAS_RANGE_CHKS)
+
+#define DF_A_IS_REG             (DF_UA | DF_DA)
+#define DF_B_IS_REG             (DF_UB)
+#define DF_C_IS_REG             (DF_UC)
+#define DF_IS_GETTER_OR_SETTER  (DF_IS_GETTER | DF_IS_SETTER)
+#define DF_USES_FP              (DF_FP_A | DF_FP_B | DF_FP_C)
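+
+/*
+ * Entries in oat_data_flow_attributes compose these bits per opcode.  For
+ * illustration (a hypothetical entry), a three-operand integer add of the
+ * form vA = vB + vC would carry
+ * DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C.
+ */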
+
+extern const int oat_data_flow_attributes[kMirOpLast];
+
+class MIRGraph {
+  public:
+    explicit MIRGraph(CompilationUnit* cu);
+    ~MIRGraph() {}
+
+    /*
+     * Parse dex method and add MIR at current insert point.  The parsed
+     * method's id is its index in the m_units_ array.
+     */
+    void InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
+                      InvokeType invoke_type, uint32_t class_def_idx,
+                      uint32_t method_idx, jobject class_loader, const DexFile& dex_file);
+
+    /* Find existing block */
+    BasicBlock* FindBlock(unsigned int code_offset)
+    {
+      return FindBlock(code_offset, false, false, NULL);
+    }
+
+    const uint16_t* GetCurrentInsns()
+    {
+      return current_code_item_->insns_;
+    }
+
+    const uint16_t* GetInsns(int m_unit_index)
+    {
+      return m_units_[m_unit_index]->GetCodeItem()->insns_;
+    }
+
+    int GetNumBlocks()
+    {
+      return num_blocks_;
+    }
+
+    ArenaBitVector* GetTryBlockAddr()
+    {
+      return try_block_addr_;
+    }
+
+    BasicBlock* GetEntryBlock()
+    {
+      return entry_block_;
+    }
+
+    BasicBlock* GetExitBlock()
+    {
+      return exit_block_;
+    }
+
+    GrowableListIterator GetBasicBlockIterator()
+    {
+      GrowableListIterator iterator;
+      GrowableListIteratorInit(&block_list_, &iterator);
+      return iterator;
+    }
+
+    BasicBlock* GetBasicBlock(int block_id)
+    {
+      return reinterpret_cast<BasicBlock*>(GrowableListGetElement(&block_list_, block_id));
+    }
+
+    size_t GetBasicBlockListCount()
+    {
+      return block_list_.num_used;
+    }
+
+    GrowableList* GetBlockList()
+    {
+      return &block_list_;
+    }
+
+    GrowableList* GetDfsOrder()
+    {
+      return &dfs_order_;
+    }
+
+    GrowableList* GetDfsPostOrder()
+    {
+      return &dfs_post_order_;
+    }
+
+    GrowableList* GetDomPostOrder()
+    {
+      return &dom_post_order_traversal_;
+    }
+
+    GrowableList* GetSSASubscripts()
+    {
+      return ssa_subscripts_;
+    }
+
+    int GetDefCount()
+    {
+      return def_count_;
+    }
+
+    void EnableOpcodeCounting()
+    {
+      opcode_count_ = static_cast<int*>(NewMem(cu_, kNumPackedOpcodes * sizeof(int), true,
+                                        kAllocMisc));
+    }
+
+    void ShowOpcodeStats();
+
+    DexCompilationUnit* GetCurrentDexCompilationUnit()
+    {
+      return m_units_[current_method_];
+    }
+
+    void DumpCFG(const char* dir_prefix, bool all_blocks);
+
+    void BuildRegLocations();
+
+    void DumpRegLocTable(RegLocation* table, int count);
+
+    int ComputeFrameSize();
+
+    void BasicBlockOptimization();
+
+    bool IsConst(int32_t s_reg)
+    {
+      return (IsBitSet(is_constant_v_, s_reg));
+    }
+
+    bool IsConst(RegLocation loc)
+    {
+      return (IsConst(loc.orig_sreg));
+    }
+
+    int32_t ConstantValue(RegLocation loc)
+    {
+      DCHECK(IsConst(loc));
+      return constant_values_[loc.orig_sreg];
+    }
+
+    int32_t ConstantValue(int32_t s_reg)
+    {
+      DCHECK(IsConst(s_reg));
+      return constant_values_[s_reg];
+    }
+
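+    /*
+     * Wide constants occupy two consecutive constant_values_ entries:
+     * low 32 bits at orig_sreg, high 32 bits at orig_sreg + 1
+     * (see SetConstantWide in mir_optimization.cc).
+     */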
+    int64_t ConstantValueWide(RegLocation loc)
+    {
+      DCHECK(IsConst(loc));
+      return (static_cast<int64_t>(constant_values_[loc.orig_sreg + 1]) << 32) |
+          Low32Bits(static_cast<int64_t>(constant_values_[loc.orig_sreg]));
+    }
+
+    bool IsConstantNullRef(RegLocation loc)
+    {
+      return loc.ref && loc.is_const && (ConstantValue(loc) == 0);
+    }
+
+    int GetNumSSARegs()
+    {
+      return num_ssa_regs_;
+    }
+
+    void SetNumSSARegs(int new_num)
+    {
+      num_ssa_regs_ = new_num;
+    }
+
+    int GetNumReachableBlocks()
+    {
+      return num_reachable_blocks_;
+    }
+
+    int GetUseCount(int vreg)
+    {
+      return GrowableListGetElement(&use_counts_, vreg);
+    }
+
+    int GetRawUseCount(int vreg)
+    {
+      return GrowableListGetElement(&raw_use_counts_, vreg);
+    }
+
+    int GetSSASubscript(int ssa_reg)
+    {
+      return GrowableListGetElement(ssa_subscripts_, ssa_reg);
+    }
+
+    const char* GetSSAString(int ssa_reg)
+    {
+      return GET_ELEM_N(ssa_strings_, char*, ssa_reg);
+    }
+
+    void BasicBlockCombine();
+    void CodeLayout();
+    void DumpCheckStats();
+    void PropagateConstants();
+    MIR* FindMoveResult(BasicBlock* bb, MIR* mir);
+    int SRegToVReg(int ssa_reg);
+    void VerifyDataflow();
+    void MethodUseCount();
+    void SSATransformation();
+    void CheckForDominanceFrontier(BasicBlock* dom_bb, const BasicBlock* succ_bb);
+    void NullCheckElimination();
+
+    /*
+     * IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
+     * we can verify that all catch entries have native PC entries.
+     */
+    std::set<uint32_t> catches_;
+
+  private:
+
+    int FindCommonParent(int block1, int block2);
+    void ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
+                           const ArenaBitVector* src2);
+    void HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
+                                ArenaBitVector* live_in_v, int dalvik_reg_id);
+    void HandleDef(ArenaBitVector* def_v, int dalvik_reg_id);
+    void CompilerInitializeSSAConversion();
+    bool DoSSAConversion(BasicBlock* bb);
+    bool InvokeUsesMethodStar(MIR* mir);
+    int ParseInsn(const uint16_t* code_ptr, DecodedInstruction* decoded_instruction);
+    bool ContentIsInsn(const uint16_t* code_ptr);
+    BasicBlock* SplitBlock(unsigned int code_offset, BasicBlock* orig_block,
+                           BasicBlock** immed_pred_block_p);
+    BasicBlock* FindBlock(unsigned int code_offset, bool split, bool create,
+                          BasicBlock** immed_pred_block_p);
+    void ProcessTryCatchBlocks();
+    BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
+                                 int flags, const uint16_t* code_ptr, const uint16_t* code_end);
+    void ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width, int flags);
+    BasicBlock* ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
+                                int flags, ArenaBitVector* try_block_addr, const uint16_t* code_ptr,
+                                const uint16_t* code_end);
+    int AddNewSReg(int v_reg);
+    void HandleSSAUse(int* uses, int dalvik_reg, int reg_index);
+    void HandleSSADef(int* defs, int dalvik_reg, int reg_index);
+    void DataFlowSSAFormat35C(MIR* mir);
+    void DataFlowSSAFormat3RC(MIR* mir);
+    bool FindLocalLiveIn(BasicBlock* bb);
+    bool ClearVisitedFlag(struct BasicBlock* bb);
+    bool CountUses(struct BasicBlock* bb);
+    bool InferTypeAndSize(BasicBlock* bb);
+    bool VerifyPredInfo(BasicBlock* bb);
+    BasicBlock* NeedsVisit(BasicBlock* bb);
+    BasicBlock* NextUnvisitedSuccessor(BasicBlock* bb);
+    void MarkPreOrder(BasicBlock* bb);
+    void RecordDFSOrders(BasicBlock* bb);
+    void ComputeDFSOrders();
+    void ComputeDefBlockMatrix();
+    void ComputeDomPostOrderTraversal(BasicBlock* bb);
+    void ComputeDominators();
+    void InsertPhiNodes();
+    void DoDFSPreOrderSSARename(BasicBlock* block);
+    void SetConstant(int32_t ssa_reg, int value);
+    void SetConstantWide(int ssa_reg, int64_t value);
+    int GetSSAUseCount(int s_reg);
+    bool BasicBlockOpt(BasicBlock* bb);
+    bool EliminateNullChecks(BasicBlock* bb);
+    bool NullCheckEliminationInit(BasicBlock* bb);
+    bool BuildExtendedBBList(struct BasicBlock* bb);
+    bool FillDefBlockMatrix(BasicBlock* bb);
+    bool InitializeDominationInfo(BasicBlock* bb);
+    bool ComputeblockIDom(BasicBlock* bb);
+    bool ComputeBlockDominators(BasicBlock* bb);
+    bool SetDominators(BasicBlock* bb);
+    bool ComputeBlockLiveIns(BasicBlock* bb);
+    bool InsertPhiNodeOperands(BasicBlock* bb);
+    bool ComputeDominanceFrontier(BasicBlock* bb);
+    bool DoConstantPropagation(BasicBlock* bb);
+    bool CountChecks(BasicBlock* bb);
+    bool CombineBlocks(BasicBlock* bb);
+
+    CompilationUnit* cu_;
+    GrowableList* ssa_base_vregs_;
+    GrowableList* ssa_subscripts_;
+    GrowableList* ssa_strings_;
+    // Map original Dalvik virtual reg i to the current SSA name.
+    int* vreg_to_ssa_map_;            // length == method->registers_size
+    int* ssa_last_defs_;              // length == method->registers_size
+    ArenaBitVector* is_constant_v_;   // length == num_ssa_reg
+    int* constant_values_;            // length == num_ssa_reg
+    // Use counts of ssa names.
+    GrowableList use_counts_;         // Weighted by nesting depth
+    GrowableList raw_use_counts_;     // Not weighted
+    int num_reachable_blocks_;
+    GrowableList dfs_order_;
+    GrowableList dfs_post_order_;
+    GrowableList dom_post_order_traversal_;
+    int* i_dom_list_;
+    ArenaBitVector** def_block_matrix_;    // num_dalvik_register x num_blocks.
+    ArenaBitVector* temp_block_v_;
+    ArenaBitVector* temp_dalvik_register_v_;
+    ArenaBitVector* temp_ssa_register_v_;  // num_ssa_regs.
+    static const int kInvalidEntry = -1;
+    GrowableList block_list_;
+    ArenaBitVector* try_block_addr_;
+    BasicBlock* entry_block_;
+    BasicBlock* exit_block_;
+    BasicBlock* cur_block_;
+    int num_blocks_;
+    const DexFile::CodeItem* current_code_item_;
+    SafeMap<unsigned int, BasicBlock*> block_map_; // FindBlock lookup cache.
+    std::vector<DexCompilationUnit*> m_units_;     // List of methods included in this graph
+    typedef std::pair<int, int> MIRLocation;       // Insert point, (m_unit_ index, offset)
+    std::vector<MIRLocation> method_stack_;        // Include stack
+    int current_method_;
+    int current_offset_;
+    int def_count_;                                // Used to estimate size of ssa name storage.
+    int* opcode_count_;                            // Dex opcode coverage stats.
+    int num_ssa_regs_;                             // Number of names following SSA transformation.
+    std::vector<BasicBlock*> extended_basic_blocks_; // Heads of block "traces".
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_DEX_MIRGRAPH_H_
diff --git a/src/compiler/dex/mir_optimization.cc b/src/compiler/dex/mir_optimization.cc
new file mode 100644
index 0000000..bb3938b
--- /dev/null
+++ b/src/compiler/dex/mir_optimization.cc
@@ -0,0 +1,874 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler_internals.h"
+#include "local_value_numbering.h"
+#include "dataflow_iterator.h"
+
+namespace art {
+
+static unsigned int Predecessors(BasicBlock* bb)
+{
+  return bb->predecessors->num_used;
+}
+
+/* Set up a constant value for opcodes that have the DF_SETS_CONST attribute */
+void MIRGraph::SetConstant(int32_t ssa_reg, int value)
+{
+  SetBit(cu_, is_constant_v_, ssa_reg);
+  constant_values_[ssa_reg] = value;
+}
+
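+// For illustration: SetConstantWide(s, 0x123456789abcdef0LL) stores 0x9abcdef0
+// at constant_values_[s] and 0x12345678 at constant_values_[s + 1].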
+void MIRGraph::SetConstantWide(int ssa_reg, int64_t value)
+{
+  SetBit(cu_, is_constant_v_, ssa_reg);
+  constant_values_[ssa_reg] = Low32Bits(value);
+  constant_values_[ssa_reg + 1] = High32Bits(value);
+}
+
+bool MIRGraph::DoConstantPropagation(BasicBlock* bb)
+{
+  MIR* mir;
+  ArenaBitVector *is_constant_v = is_constant_v_;
+
+  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+
+    DecodedInstruction *d_insn = &mir->dalvikInsn;
+
+    if (!(df_attributes & DF_HAS_DEFS)) continue;
+
+    /* Handle instructions that set up constants directly */
+    if (df_attributes & DF_SETS_CONST) {
+      if (df_attributes & DF_DA) {
+        int32_t vB = static_cast<int32_t>(d_insn->vB);
+        switch (d_insn->opcode) {
+          case Instruction::CONST_4:
+          case Instruction::CONST_16:
+          case Instruction::CONST:
+            SetConstant(mir->ssa_rep->defs[0], vB);
+            break;
+          case Instruction::CONST_HIGH16:
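+            // e.g., const/high16 v0, 0x1234 materializes 0x12340000.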
+            SetConstant(mir->ssa_rep->defs[0], vB << 16);
+            break;
+          case Instruction::CONST_WIDE_16:
+          case Instruction::CONST_WIDE_32:
+            SetConstantWide(mir->ssa_rep->defs[0], static_cast<int64_t>(vB));
+            break;
+          case Instruction::CONST_WIDE:
+            SetConstantWide(mir->ssa_rep->defs[0], d_insn->vB_wide);
+            break;
+          case Instruction::CONST_WIDE_HIGH16:
+            SetConstantWide(mir->ssa_rep->defs[0], static_cast<int64_t>(vB) << 48);
+            break;
+          default:
+            break;
+        }
+      }
+      /* Handle moves that may copy constants from one register to another */
+    } else if (df_attributes & DF_IS_MOVE) {
+      int i;
+
+      for (i = 0; i < mir->ssa_rep->num_uses; i++) {
+        if (!IsBitSet(is_constant_v, mir->ssa_rep->uses[i])) break;
+      }
+      /* Move a register holding a constant to another register */
+      if (i == mir->ssa_rep->num_uses) {
+        SetConstant(mir->ssa_rep->defs[0], constant_values_[mir->ssa_rep->uses[0]]);
+        if (df_attributes & DF_A_WIDE) {
+          SetConstant(mir->ssa_rep->defs[1], constant_values_[mir->ssa_rep->uses[1]]);
+        }
+      }
+    }
+  }
+  /* TODO: implement code to handle arithmetic operations */
+  return true;
+}
+
+void MIRGraph::PropagateConstants()
+{
+  is_constant_v_ = AllocBitVector(cu_, GetNumSSARegs(), false);
+  constant_values_ = static_cast<int*>(NewMem(cu_, sizeof(int) * GetNumSSARegs(), true,
+                                       kAllocDFInfo));
+  DataflowIterator iter(this, kAllNodes, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    DoConstantPropagation(bb);
+  }
+}
+
+/* Advance to next strictly dominated MIR node in an extended basic block */
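+// Note: only fall_through edges into blocks with a single predecessor are
+// followed, which is what makes the next block strictly dominated.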
+static MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir)
+{
+  BasicBlock* bb = *p_bb;
+  if (mir != NULL) {
+    mir = mir->next;
+    if (mir == NULL) {
+      bb = bb->fall_through;
+      if ((bb == NULL) || Predecessors(bb) != 1) {
+        mir = NULL;
+      } else {
+        *p_bb = bb;
+        mir = bb->first_mir_insn;
+      }
+    }
+  }
+  return mir;
+}
+
+/*
+ * To be used at an invoke mir.  If the logically next mir node represents
+ * a move-result, return it.  Else, return NULL.  If a move-result exists,
+ * it is required to immediately follow the invoke with no intervening
+ * opcodes or incoming arcs.  However, if the result of the invoke is not
+ * used, a move-result may not be present.
+ */
+MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir)
+{
+  BasicBlock* tbb = bb;
+  mir = AdvanceMIR(&tbb, mir);
+  while (mir != NULL) {
+    int opcode = mir->dalvikInsn.opcode;
+    if ((opcode == Instruction::MOVE_RESULT) ||
+        (opcode == Instruction::MOVE_RESULT_OBJECT) ||
+        (opcode == Instruction::MOVE_RESULT_WIDE)) {
+      break;
+    }
+    // Keep going if pseudo op, otherwise terminate
+    if (opcode < kNumPackedOpcodes) {
+      mir = NULL;
+    } else {
+      mir = AdvanceMIR(&tbb, mir);
+    }
+  }
+  return mir;
+}
+
+static BasicBlock* NextDominatedBlock(BasicBlock* bb)
+{
+  if (bb->block_type == kDead) {
+    return NULL;
+  }
+  DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
+      || (bb->block_type == kExitBlock));
+  bb = bb->fall_through;
+  if (bb == NULL || (Predecessors(bb) != 1)) {
+    return NULL;
+  }
+  DCHECK((bb->block_type == kDalvikByteCode) || (bb->block_type == kExitBlock));
+  return bb;
+}
+
+static MIR* FindPhi(BasicBlock* bb, int ssa_name)
+{
+  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    if (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
+      for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
+        if (mir->ssa_rep->uses[i] == ssa_name) {
+          return mir;
+        }
+      }
+    }
+  }
+  return NULL;
+}
+
+static SelectInstructionKind SelectKind(MIR* mir)
+{
+  switch (mir->dalvikInsn.opcode) {
+    case Instruction::MOVE:
+    case Instruction::MOVE_OBJECT:
+    case Instruction::MOVE_16:
+    case Instruction::MOVE_OBJECT_16:
+    case Instruction::MOVE_FROM16:
+    case Instruction::MOVE_OBJECT_FROM16:
+      return kSelectMove;
+    case Instruction::CONST:
+    case Instruction::CONST_4:
+    case Instruction::CONST_16:
+      return kSelectConst;
+    case Instruction::GOTO:
+    case Instruction::GOTO_16:
+    case Instruction::GOTO_32:
+      return kSelectGoto;
+    default:
+      break;
+  }
+  return kSelectNone;
+}
+
+int MIRGraph::GetSSAUseCount(int s_reg)
+{
+  DCHECK_LT(s_reg, static_cast<int>(raw_use_counts_.num_used));
+  return raw_use_counts_.elem_list[s_reg];
+}
+
+
+/* Do some MIR-level extended basic block optimizations */
+bool MIRGraph::BasicBlockOpt(BasicBlock* bb)
+{
+  if (bb->block_type == kDead) {
+    return true;
+  }
+  int num_temps = 0;
+  LocalValueNumbering local_valnum(cu_);
+  while (bb != NULL) {
+    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+      // TUNING: use the returned value number for CSE.
+      local_valnum.GetValueNumber(mir);
+      // Look for interesting opcodes, skip otherwise
+      Instruction::Code opcode = mir->dalvikInsn.opcode;
+      switch (opcode) {
+        case Instruction::CMPL_FLOAT:
+        case Instruction::CMPL_DOUBLE:
+        case Instruction::CMPG_FLOAT:
+        case Instruction::CMPG_DOUBLE:
+        case Instruction::CMP_LONG:
+          if (cu_->gen_bitcode) {
+            // Bitcode doesn't allow this optimization.
+            break;
+          }
+          if (mir->next != NULL) {
+            MIR* mir_next = mir->next;
+            Instruction::Code br_opcode = mir_next->dalvikInsn.opcode;
+            ConditionCode ccode = kCondNv;
+            switch (br_opcode) {
+              case Instruction::IF_EQZ:
+                ccode = kCondEq;
+                break;
+              case Instruction::IF_NEZ:
+                ccode = kCondNe;
+                break;
+              case Instruction::IF_LTZ:
+                ccode = kCondLt;
+                break;
+              case Instruction::IF_GEZ:
+                ccode = kCondGe;
+                break;
+              case Instruction::IF_GTZ:
+                ccode = kCondGt;
+                break;
+              case Instruction::IF_LEZ:
+                ccode = kCondLe;
+                break;
+              default:
+                break;
+            }
+            // Make sure result of cmp is used by next insn and nowhere else
+            if ((ccode != kCondNv) &&
+                (mir->ssa_rep->defs[0] == mir_next->ssa_rep->uses[0]) &&
+                (GetSSAUseCount(mir->ssa_rep->defs[0]) == 1)) {
+              mir_next->dalvikInsn.arg[0] = ccode;
+              switch (opcode) {
+                case Instruction::CMPL_FLOAT:
+                  mir_next->dalvikInsn.opcode =
+                      static_cast<Instruction::Code>(kMirOpFusedCmplFloat);
+                  break;
+                case Instruction::CMPL_DOUBLE:
+                  mir_next->dalvikInsn.opcode =
+                      static_cast<Instruction::Code>(kMirOpFusedCmplDouble);
+                  break;
+                case Instruction::CMPG_FLOAT:
+                  mir_next->dalvikInsn.opcode =
+                      static_cast<Instruction::Code>(kMirOpFusedCmpgFloat);
+                  break;
+                case Instruction::CMPG_DOUBLE:
+                  mir_next->dalvikInsn.opcode =
+                      static_cast<Instruction::Code>(kMirOpFusedCmpgDouble);
+                  break;
+                case Instruction::CMP_LONG:
+                  mir_next->dalvikInsn.opcode =
+                      static_cast<Instruction::Code>(kMirOpFusedCmpLong);
+                  break;
+                default: LOG(ERROR) << "Unexpected opcode: " << opcode;
+              }
+              mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+              mir_next->ssa_rep->num_uses = mir->ssa_rep->num_uses;
+              mir_next->ssa_rep->uses = mir->ssa_rep->uses;
+              mir_next->ssa_rep->fp_use = mir->ssa_rep->fp_use;
+              mir_next->ssa_rep->num_defs = 0;
+              mir->ssa_rep->num_uses = 0;
+              mir->ssa_rep->num_defs = 0;
+            }
+          }
+          break;
+        case Instruction::GOTO:
+        case Instruction::GOTO_16:
+        case Instruction::GOTO_32:
+        case Instruction::IF_EQ:
+        case Instruction::IF_NE:
+        case Instruction::IF_LT:
+        case Instruction::IF_GE:
+        case Instruction::IF_GT:
+        case Instruction::IF_LE:
+        case Instruction::IF_EQZ:
+        case Instruction::IF_NEZ:
+        case Instruction::IF_LTZ:
+        case Instruction::IF_GEZ:
+        case Instruction::IF_GTZ:
+        case Instruction::IF_LEZ:
+          if (bb->taken->dominates_return) {
+            mir->optimization_flags |= MIR_IGNORE_SUSPEND_CHECK;
+            if (cu_->verbose) {
+              LOG(INFO) << "Suppressed suspend check on branch to return at 0x"
+                        << std::hex << mir->offset;
+            }
+          }
+          break;
+        default:
+          break;
+      }
+      // Is this the select pattern?
+      // TODO: flesh out support for Mips and X86.  NOTE: llvm's select op doesn't quite work here.
+      // TUNING: expand to support IF_xx compare & branches
+      if (!cu_->gen_bitcode && (cu_->instruction_set == kThumb2) &&
+          ((mir->dalvikInsn.opcode == Instruction::IF_EQZ) ||
+          (mir->dalvikInsn.opcode == Instruction::IF_NEZ))) {
+        BasicBlock* ft = bb->fall_through;
+        DCHECK(ft != NULL);
+        BasicBlock* ft_ft = ft->fall_through;
+        BasicBlock* ft_tk = ft->taken;
+
+        BasicBlock* tk = bb->taken;
+        DCHECK(tk != NULL);
+        BasicBlock* tk_ft = tk->fall_through;
+        BasicBlock* tk_tk = tk->taken;
+
+        /*
+         * In the select pattern, the taken edge goes to a block that unconditionally
+         * transfers to the rejoin block and the fall_through edge goes to a block that
+         * unconditionally falls through to the rejoin block.
+         */
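+        /*
+         * Concretely, the shape being matched is a diamond:
+         *
+         *              [bb: if-eqz / if-nez]
+         *               /                 \
+         *     [tk: move/const, goto]   [ft: move/const]
+         *               \                 /
+         *              [tk_tk == ft_ft: rejoin]
+         */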
+        if ((tk_ft == NULL) && (ft_tk == NULL) && (tk_tk == ft_ft) &&
+            (Predecessors(tk) == 1) && (Predecessors(ft) == 1)) {
+          /*
+           * Okay - we have the basic diamond shape.  At the very least, we can eliminate the
+           * suspend check on the taken-taken branch back to the join point.
+           */
+          if (SelectKind(tk->last_mir_insn) == kSelectGoto) {
+            tk->last_mir_insn->optimization_flags |= MIR_IGNORE_SUSPEND_CHECK;
+          }
+          // Are the block bodies something we can handle?
+          if ((ft->first_mir_insn == ft->last_mir_insn) &&
+              (tk->first_mir_insn != tk->last_mir_insn) &&
+              (tk->first_mir_insn->next == tk->last_mir_insn) &&
+              ((SelectKind(ft->first_mir_insn) == kSelectMove) ||
+              (SelectKind(ft->first_mir_insn) == kSelectConst)) &&
+              (SelectKind(ft->first_mir_insn) == SelectKind(tk->first_mir_insn)) &&
+              (SelectKind(tk->last_mir_insn) == kSelectGoto)) {
+            // Almost there.  Are the instructions targeting the same vreg?
+            MIR* if_true = tk->first_mir_insn;
+            MIR* if_false = ft->first_mir_insn;
+            // It's possible that the target of the select isn't used - skip those (rare) cases.
+            MIR* phi = FindPhi(tk_tk, if_true->ssa_rep->defs[0]);
+            if ((phi != NULL) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
+              /*
+               * We'll convert the IF_EQZ/IF_NEZ to a SELECT.  We need to find the
+               * Phi node in the merge block and delete it (while using the SSA name
+               * of the merge as the target of the SELECT).  Delete both the taken and
+               * fallthrough blocks, and set fallthrough to the merge block.
+               * NOTE: not updating other dataflow info (no longer used at this point).
+               * If this changes, need to update i_dom, etc. here (and in CombineBlocks).
+               */
+              if (opcode == Instruction::IF_NEZ) {
+                // Normalize.
+                MIR* tmp_mir = if_true;
+                if_true = if_false;
+                if_false = tmp_mir;
+              }
+              mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpSelect);
+              bool const_form = (SelectKind(if_true) == kSelectConst);
+              if (SelectKind(if_true) == kSelectMove) {
+                if (IsConst(if_true->ssa_rep->uses[0]) &&
+                    IsConst(if_false->ssa_rep->uses[0])) {
+                  const_form = true;
+                  if_true->dalvikInsn.vB = ConstantValue(if_true->ssa_rep->uses[0]);
+                  if_false->dalvikInsn.vB = ConstantValue(if_false->ssa_rep->uses[0]);
+                }
+              }
+              if (const_form) {
+                // "true" set val in vB
+                mir->dalvikInsn.vB = if_true->dalvikInsn.vB;
+                // "false" set val in vC
+                mir->dalvikInsn.vC = if_false->dalvikInsn.vB;
+              } else {
+                DCHECK_EQ(SelectKind(if_true), kSelectMove);
+                DCHECK_EQ(SelectKind(if_false), kSelectMove);
+                int* src_ssa = static_cast<int*>(NewMem(cu_, sizeof(int) * 3, false,
+                                                 kAllocDFInfo));
+                src_ssa[0] = mir->ssa_rep->uses[0];
+                src_ssa[1] = if_true->ssa_rep->uses[0];
+                src_ssa[2] = if_false->ssa_rep->uses[0];
+                mir->ssa_rep->uses = src_ssa;
+                mir->ssa_rep->num_uses = 3;
+              }
+              mir->ssa_rep->num_defs = 1;
+              mir->ssa_rep->defs = static_cast<int*>(NewMem(cu_, sizeof(int) * 1, false,
+                                                     kAllocDFInfo));
+              mir->ssa_rep->fp_def = static_cast<bool*>(NewMem(cu_, sizeof(bool) * 1, false,
+                                                     kAllocDFInfo));
+              mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
+              /*
+               * There is usually a Phi node in the join block for our two cases.  If the
+               * Phi node only contains our two cases as input, we will use the result
+               * SSA name of the Phi node as our select result and delete the Phi.  If
+               * the Phi node has more than two operands, we will arbitrarily use the SSA
+               * name of the "true" path, delete the SSA name of the "false" path from the
+               * Phi node (and fix up the incoming arc list).
+               */
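+              /*
+               * E.g. for a three-input Phi whose first two operands arrive from
+               * the "true" and "false" blocks: keep the "true" SSA name (now
+               * defined by the select in this block), retarget its incoming arc
+               * to this block, and compact the "false" slot out of the Phi.
+               */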
+              if (phi->ssa_rep->num_uses == 2) {
+                mir->ssa_rep->defs[0] = phi->ssa_rep->defs[0];
+                phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+              } else {
+                int dead_def = if_false->ssa_rep->defs[0];
+                int live_def = if_true->ssa_rep->defs[0];
+                mir->ssa_rep->defs[0] = live_def;
+                int* incoming = reinterpret_cast<int*>(phi->dalvikInsn.vB);
+                for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
+                  if (phi->ssa_rep->uses[i] == live_def) {
+                    incoming[i] = bb->id;
+                  }
+                }
+                for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
+                  if (phi->ssa_rep->uses[i] == dead_def) {
+                    int last_slot = phi->ssa_rep->num_uses - 1;
+                    phi->ssa_rep->uses[i] = phi->ssa_rep->uses[last_slot];
+                    incoming[i] = incoming[last_slot];
+                  }
+                }
+              }
+              phi->ssa_rep->num_uses--;
+              bb->taken = NULL;
+              tk->block_type = kDead;
+              for (MIR* tmir = ft->first_mir_insn; tmir != NULL; tmir = tmir->next) {
+                tmir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+              }
+            }
+          }
+        }
+      }
+    }
+    bb = NextDominatedBlock(bb);
+  }
+
+  if (num_temps > cu_->num_compiler_temps) {
+    cu_->num_compiler_temps = num_temps;
+  }
+  return true;
+}
+
+bool MIRGraph::NullCheckEliminationInit(struct BasicBlock* bb)
+{
+  if (bb->data_flow_info == NULL) return false;
+  bb->data_flow_info->ending_null_check_v =
+      AllocBitVector(cu_, GetNumSSARegs(), false, kBitMapNullCheck);
+  ClearAllBits(bb->data_flow_info->ending_null_check_v);
+  return true;
+}
+
+/* Collect stats on number of checks removed */
+bool MIRGraph::CountChecks(struct BasicBlock* bb)
+{
+  if (bb->data_flow_info == NULL) return false;
+  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    if (mir->ssa_rep == NULL) {
+      continue;
+    }
+    int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+    if (df_attributes & DF_HAS_NULL_CHKS) {
+      //TODO: move checkstats to mir_graph
+      cu_->checkstats->null_checks++;
+      if (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) {
+        cu_->checkstats->null_checks_eliminated++;
+      }
+    }
+    if (df_attributes & DF_HAS_RANGE_CHKS) {
+      cu_->checkstats->range_checks++;
+      if (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) {
+        cu_->checkstats->range_checks_eliminated++;
+      }
+    }
+  }
+  return false;
+}
+
+/* Try to make the common case the fallthrough path */
+static bool LayoutBlocks(struct BasicBlock* bb)
+{
+  // TODO: For now, just looking for direct throws.  Consider generalizing for profile feedback
+  if (!bb->explicit_throw) {
+    return false;
+  }
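+  /*
+   * Walk up through single-predecessor blocks until a conditional branch is
+   * found.  If its taken edge leads into this throw chain, invert the test
+   * (e.g. IF_EQZ becomes IF_NEZ) and swap the taken/fall_through edges.
+   */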
+  BasicBlock* walker = bb;
+  while (true) {
+    // Check termination conditions
+    if ((walker->block_type == kEntryBlock) || (Predecessors(walker) != 1)) {
+      break;
+    }
+    BasicBlock* prev = GET_ELEM_N(walker->predecessors, BasicBlock*, 0);
+    if (prev->conditional_branch) {
+      if (prev->fall_through == walker) {
+        // Already done - return
+        break;
+      }
+      DCHECK_EQ(walker, prev->taken);
+      // Got one.  Flip it and exit
+      Instruction::Code opcode = prev->last_mir_insn->dalvikInsn.opcode;
+      switch (opcode) {
+        case Instruction::IF_EQ: opcode = Instruction::IF_NE; break;
+        case Instruction::IF_NE: opcode = Instruction::IF_EQ; break;
+        case Instruction::IF_LT: opcode = Instruction::IF_GE; break;
+        case Instruction::IF_GE: opcode = Instruction::IF_LT; break;
+        case Instruction::IF_GT: opcode = Instruction::IF_LE; break;
+        case Instruction::IF_LE: opcode = Instruction::IF_GT; break;
+        case Instruction::IF_EQZ: opcode = Instruction::IF_NEZ; break;
+        case Instruction::IF_NEZ: opcode = Instruction::IF_EQZ; break;
+        case Instruction::IF_LTZ: opcode = Instruction::IF_GEZ; break;
+        case Instruction::IF_GEZ: opcode = Instruction::IF_LTZ; break;
+        case Instruction::IF_GTZ: opcode = Instruction::IF_LEZ; break;
+        case Instruction::IF_LEZ: opcode = Instruction::IF_GTZ; break;
+        default: LOG(FATAL) << "Unexpected opcode " << opcode;
+      }
+      prev->last_mir_insn->dalvikInsn.opcode = opcode;
+      BasicBlock* t_bb = prev->taken;
+      prev->taken = prev->fall_through;
+      prev->fall_through = t_bb;
+      break;
+    }
+    walker = prev;
+  }
+  return false;
+}
+
+/* Combine any basic blocks terminated by instructions that we now know can't throw */
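+/*
+ * A potentially-throwing instruction is split at MIR-build time into a
+ * kMirOpCheck that ends one block (with meta.throw_insn pointing at the real
+ * instruction, which begins the fallthrough block).  Once null/range check
+ * elimination has proven the instruction can't throw, the split serves no
+ * purpose and the two halves can be merged again.
+ */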
+bool MIRGraph::CombineBlocks(struct BasicBlock* bb)
+{
+  // Loop here to allow combining a sequence of blocks
+  while (true) {
+    // Check termination conditions
+    if ((bb->first_mir_insn == NULL)
+        || (bb->data_flow_info == NULL)
+        || (bb->block_type == kExceptionHandling)
+        || (bb->block_type == kExitBlock)
+        || (bb->block_type == kDead)
+        || ((bb->taken == NULL) || (bb->taken->block_type != kExceptionHandling))
+        || (bb->successor_block_list.block_list_type != kNotUsed)
+        || (static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) != kMirOpCheck)) {
+      break;
+    }
+
+    // Test the kMirOpCheck instruction
+    MIR* mir = bb->last_mir_insn;
+    // Grab the attributes from the paired opcode
+    MIR* throw_insn = mir->meta.throw_insn;
+    int df_attributes = oat_data_flow_attributes[throw_insn->dalvikInsn.opcode];
+    bool can_combine = true;
+    if (df_attributes & DF_HAS_NULL_CHKS) {
+      can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0);
+    }
+    if (df_attributes & DF_HAS_RANGE_CHKS) {
+      can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0);
+    }
+    if (!can_combine) {
+      break;
+    }
+    // OK - got one.  Combine
+    BasicBlock* bb_next = bb->fall_through;
+    DCHECK(!bb_next->catch_entry);
+    DCHECK_EQ(Predecessors(bb_next), 1U);
+    MIR* t_mir = bb->last_mir_insn->prev;
+    // Overwrite the kMirOpCheck insn with the paired opcode
+    DCHECK_EQ(bb_next->first_mir_insn, throw_insn);
+    *bb->last_mir_insn = *throw_insn;
+    bb->last_mir_insn->prev = t_mir;
+    // Use the successor info from the next block
+    bb->successor_block_list = bb_next->successor_block_list;
+    // Use the ending block linkage from the next block
+    bb->fall_through = bb_next->fall_through;
+    bb->taken->block_type = kDead;  // Kill the unused exception block
+    bb->taken = bb_next->taken;
+    // Include the rest of the instructions
+    bb->last_mir_insn = bb_next->last_mir_insn;
+    /*
+     * If the lower half of the combined pair contained a return, move the
+     * flag to the newly combined block.
+     */
+    bb->terminated_by_return = bb_next->terminated_by_return;
+
+    /*
+     * NOTE: we aren't updating all dataflow info here.  Should either make sure this pass
+     * happens after all uses of i_dominated and dom_frontier, or update the dataflow info here.
+     */
+
+    // Kill bb_next and remap now-dead id to parent
+    bb_next->block_type = kDead;
+    cu_->block_id_map.Overwrite(bb_next->id, bb->id);
+
+    // Now, loop back and see if we can keep going
+  }
+  return false;
+}
+
+/* Eliminate unnecessary null checks for a basic block. */
+bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb)
+{
+  if (bb->data_flow_info == NULL) return false;
+
+  /*
+   * Set initial state.  Be conservative with catch
+   * blocks and start with no assumptions about null check
+   * status (except for "this").
+   */
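+  // This is a forward "must" dataflow problem: a register is known to be
+  // null-checked on entry only if it is null-checked along every incoming
+  // edge, hence the intersection below; the caller iterates to a fixed point.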
+  if ((bb->block_type == kEntryBlock) || bb->catch_entry) {
+    ClearAllBits(temp_ssa_register_v_);
+    if ((cu_->access_flags & kAccStatic) == 0) {
+      // If non-static method, mark "this" as non-null
+      int this_reg = cu_->num_dalvik_registers - cu_->num_ins;
+      SetBit(cu_, temp_ssa_register_v_, this_reg);
+    }
+  } else {
+    // Starting state is the intersection of all incoming arcs
+    GrowableListIterator iter;
+    GrowableListIteratorInit(bb->predecessors, &iter);
+    BasicBlock* pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+    DCHECK(pred_bb != NULL);
+    CopyBitVector(temp_ssa_register_v_, pred_bb->data_flow_info->ending_null_check_v);
+    while (true) {
+      pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+      if (!pred_bb) break;
+      if ((pred_bb->data_flow_info == NULL) ||
+          (pred_bb->data_flow_info->ending_null_check_v == NULL)) {
+        continue;
+      }
+      IntersectBitVectors(temp_ssa_register_v_, temp_ssa_register_v_,
+                          pred_bb->data_flow_info->ending_null_check_v);
+    }
+  }
+
+  // Walk through the instructions in the block, updating as necessary
+  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    if (mir->ssa_rep == NULL) {
+      continue;
+    }
+    int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+
+    // Mark target of NEW* as non-null
+    if (df_attributes & DF_NON_NULL_DST) {
+      SetBit(cu_, temp_ssa_register_v_, mir->ssa_rep->defs[0]);
+    }
+
+    // Mark non-null returns from invoke-style NEW*
+    if (df_attributes & DF_NON_NULL_RET) {
+      MIR* next_mir = mir->next;
+      // Next should be a MOVE_RESULT_OBJECT
+      if (next_mir &&
+          next_mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
+        // Mark as null checked
+        SetBit(cu_, temp_ssa_register_v_, next_mir->ssa_rep->defs[0]);
+      } else {
+        if (next_mir) {
+          LOG(WARNING) << "Unexpected opcode following new: " << next_mir->dalvikInsn.opcode;
+        } else if (bb->fall_through) {
+          // Look in next basic block
+          struct BasicBlock* next_bb = bb->fall_through;
+          for (MIR* tmir = next_bb->first_mir_insn; tmir != NULL;
+               tmir = tmir->next) {
+            if (static_cast<int>(tmir->dalvikInsn.opcode) >= static_cast<int>(kMirOpFirst)) {
+              continue;
+            }
+            // First non-pseudo should be MOVE_RESULT_OBJECT
+            if (tmir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
+              // Mark as null checked
+              SetBit(cu_, temp_ssa_register_v_, tmir->ssa_rep->defs[0]);
+            } else {
+              LOG(WARNING) << "Unexpected op after new: " << tmir->dalvikInsn.opcode;
+            }
+            break;
+          }
+        }
+      }
+    }
+
+    /*
+     * Propagate nullcheck state on register copies (including
+     * Phi pseudo copies).  For the latter, nullcheck state is
+     * the "and" of all the Phi's operands.
+     */
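+    // E.g. a move-object copy inherits its source's state, while a Phi is
+    // considered null-checked only if every one of its operands is.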
+    if (df_attributes & (DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)) {
+      int tgt_sreg = mir->ssa_rep->defs[0];
+      int operands = (df_attributes & DF_NULL_TRANSFER_0) ? 1 :
+          mir->ssa_rep->num_uses;
+      bool null_checked = true;
+      for (int i = 0; i < operands; i++) {
+        null_checked &= IsBitSet(temp_ssa_register_v_, mir->ssa_rep->uses[i]);
+      }
+      if (null_checked) {
+        SetBit(cu_, temp_ssa_register_v_, tgt_sreg);
+      }
+    }
+
+    // Already nullchecked?
+    if ((df_attributes & DF_HAS_NULL_CHKS) && !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
+      int src_idx;
+      if (df_attributes & DF_NULL_CHK_1) {
+        src_idx = 1;
+      } else if (df_attributes & DF_NULL_CHK_2) {
+        src_idx = 2;
+      } else {
+        src_idx = 0;
+      }
+      int src_sreg = mir->ssa_rep->uses[src_idx];
+      if (IsBitSet(temp_ssa_register_v_, src_sreg)) {
+        // Eliminate the null check
+        mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
+      } else {
+        // Mark s_reg as null-checked
+        SetBit(cu_, temp_ssa_register_v_, src_sreg);
+      }
+    }
+  }
+
+  // Did anything change?
+  bool res = CompareBitVectors(bb->data_flow_info->ending_null_check_v,
+                               temp_ssa_register_v_);
+  if (res) {
+    CopyBitVector(bb->data_flow_info->ending_null_check_v, temp_ssa_register_v_);
+  }
+  return res;
+}
+
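+/*
+ * Driver for null check elimination: start each block with an empty ending
+ * state, then reapply EliminateNullChecks in pre-order until no block's
+ * ending_null_check_v changes.
+ */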
+void MIRGraph::NullCheckElimination()
+{
+  if (!(cu_->disable_opt & (1 << kNullCheckElimination))) {
+    DCHECK(temp_ssa_register_v_ != NULL);
+    DataflowIterator iter(this, kAllNodes, false /* not iterative */);
+    for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+      NullCheckEliminationInit(bb);
+    }
+    DataflowIterator iter2(this, kPreOrderDFSTraversal, true /* iterative */);
+    bool change = false;
+    for (BasicBlock* bb = iter2.Next(change); bb != NULL; bb = iter2.Next(change)) {
+      change = EliminateNullChecks(bb);
+    }
+  }
+  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
+    DumpCFG("/sdcard/4_post_nce_cfg/", false);
+  }
+}
+
+void MIRGraph::BasicBlockCombine()
+{
+  DataflowIterator iter(this, kPreOrderDFSTraversal, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    CombineBlocks(bb);
+  }
+  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
+    DumpCFG("/sdcard/5_post_bbcombine_cfg/", false);
+  }
+}
+
+void MIRGraph::CodeLayout()
+{
+  DataflowIterator iter(this, kAllNodes, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    LayoutBlocks(bb);
+  }
+  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
+    DumpCFG("/sdcard/2_post_layout_cfg/", true);
+  }
+}
+
+void MIRGraph::DumpCheckStats()
+{
+  Checkstats* stats =
+      static_cast<Checkstats*>(NewMem(cu_, sizeof(Checkstats), true, kAllocDFInfo));
+  cu_->checkstats = stats;
+  DataflowIterator iter(this, kAllNodes, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    CountChecks(bb);
+  }
+  if (stats->null_checks > 0) {
+    float eliminated = static_cast<float>(stats->null_checks_eliminated);
+    float checks = static_cast<float>(stats->null_checks);
+    LOG(INFO) << "Null Checks: " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+              << stats->null_checks_eliminated << " of " << stats->null_checks << " -> "
+              << (eliminated/checks) * 100.0 << "%";
+  }
+  if (stats->range_checks > 0) {
+    float eliminated = static_cast<float>(stats->range_checks_eliminated);
+    float checks = static_cast<float>(stats->range_checks);
+    LOG(INFO) << "Range Checks: " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+              << stats->range_checks_eliminated << " of " << stats->range_checks << " -> "
+              << (eliminated/checks) * 100.0 << "%";
+  }
+}
+
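+/*
+ * Partition the CFG into extended basic blocks: each unvisited block found in
+ * pre-order becomes a head, and the chain it strictly dominates (followed via
+ * NextDominatedBlock) forms its body.  If any member returns, all members are
+ * marked dominates_return so that branches to them can skip suspend checks.
+ */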
+bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb)
+{
+  if (bb->visited) return false;
+  if (!((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
+      || (bb->block_type == kExitBlock))) {
+    // Ignore special blocks
+    bb->visited = true;
+    return false;
+  }
+  // Must be head of extended basic block.
+  BasicBlock* start_bb = bb;
+  extended_basic_blocks_.push_back(bb);
+  bool terminated_by_return = false;
+  // Visit blocks strictly dominated by this head.
+  while (bb != NULL) {
+    bb->visited = true;
+    terminated_by_return |= bb->terminated_by_return;
+    bb = NextDominatedBlock(bb);
+  }
+  if (terminated_by_return) {
+    // This extended basic block contains a return, so mark all members.
+    bb = start_bb;
+    while (bb != NULL) {
+      bb->dominates_return = true;
+      bb = NextDominatedBlock(bb);
+    }
+  }
+  return false;  // Not iterative - return value will be ignored
+}
+
+void MIRGraph::BasicBlockOptimization()
+{
+  if (!(cu_->disable_opt & (1 << kBBOpt))) {
+    CompilerInitGrowableList(cu_, &cu_->compiler_temps, 6, kListMisc);
+    DCHECK_EQ(cu_->num_compiler_temps, 0);
+    // Mark all blocks as not visited
+    DataflowIterator iter(this, kAllNodes, false /* not iterative */);
+    for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+      ClearVisitedFlag(bb);
+    }
+    DataflowIterator iter2(this, kPreOrderDFSTraversal, false /* not iterative */);
+    for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+      BuildExtendedBBList(bb);
+    }
+    // Perform extended basic block optimizations.
+    for (unsigned int i = 0; i < extended_basic_blocks_.size(); i++) {
+      BasicBlockOpt(extended_basic_blocks_[i]);
+    }
+  }
+  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
+    DumpCFG("/sdcard/6_post_bbo_cfg/", false);
+  }
+}
+
+}  // namespace art
diff --git a/src/compiler/dex/portable/mir_to_gbc.cc b/src/compiler/dex/portable/mir_to_gbc.cc
index e6900df..3de593c 100644
--- a/src/compiler/dex/portable/mir_to_gbc.cc
+++ b/src/compiler/dex/portable/mir_to_gbc.cc
@@ -28,6 +28,7 @@
 #include <llvm/Support/InstIterator.h>
 
 #include "compiler/dex/compiler_internals.h"
+#include "compiler/dex/dataflow_iterator.h"
 
 //TODO: move gbc_to_lir code into quick directory (if necessary).
 #include "compiler/dex/quick/codegen_util.h"
@@ -42,7 +43,6 @@
 static const char kCatchBlock = 'C';
 
 namespace art {
-static RegLocation GetLoc(CompilationUnit* cu, ::llvm::Value* val);
 
 static ::llvm::BasicBlock* GetLLVMBlock(CompilationUnit* cu, int id)
 {
@@ -59,7 +59,7 @@
   // Set vreg for debugging
   art::llvm::IntrinsicHelper::IntrinsicId id = art::llvm::IntrinsicHelper::SetVReg;
   ::llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(id);
-  int v_reg = SRegToVReg(cu, s_reg);
+  int v_reg = cu->mir_graph->SRegToVReg(s_reg);
   ::llvm::Value* table_slot = cu->irb->getInt32(v_reg);
   ::llvm::Value* args[] = { table_slot, val };
   cu->irb->CreateCall(func, args);
@@ -110,89 +110,6 @@
   return res;
 }
 
-/* Create an in-memory RegLocation from an llvm Value. */
-static void CreateLocFromValue(CompilationUnit* cu, ::llvm::Value* val)
-{
-  // NOTE: llvm takes shortcuts with c_str() - get to std::string firstt
-  std::string s(val->getName().str());
-  const char* val_name = s.c_str();
-  SafeMap< ::llvm::Value*, RegLocation>::iterator it = cu->loc_map.find(val);
-  DCHECK(it == cu->loc_map.end()) << " - already defined: " << val_name;
-  int base_sreg = INVALID_SREG;
-  int subscript = -1;
-  sscanf(val_name, "v%d_%d", &base_sreg, &subscript);
-  if ((base_sreg == INVALID_SREG) && (!strcmp(val_name, "method"))) {
-    base_sreg = SSA_METHOD_BASEREG;
-    subscript = 0;
-  }
-  DCHECK_NE(base_sreg, INVALID_SREG);
-  DCHECK_NE(subscript, -1);
-  // TODO: redo during C++'ification
-  RegLocation loc =  {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, INVALID_REG,
-                      INVALID_REG, INVALID_SREG, INVALID_SREG};
-  ::llvm::Type* ty = val->getType();
-  loc.wide = ((ty == cu->irb->getInt64Ty()) ||
-              (ty == cu->irb->getDoubleTy()));
-  loc.defined = true;
-  loc.home = false;  // May change during promotion
-  loc.s_reg_low = base_sreg;
-  loc.orig_sreg = cu->loc_map.size();
-  PromotionMap p_map = cu->promotion_map[base_sreg];
-  if (ty == cu->irb->getFloatTy()) {
-    loc.fp = true;
-    if (p_map.fp_location == kLocPhysReg) {
-      loc.low_reg = p_map.FpReg;
-      loc.location = kLocPhysReg;
-      loc.home = true;
-    }
-  } else if (ty == cu->irb->getDoubleTy()) {
-    loc.fp = true;
-    PromotionMap p_map_high = cu->promotion_map[base_sreg + 1];
-    if ((p_map.fp_location == kLocPhysReg) &&
-        (p_map_high.fp_location == kLocPhysReg) &&
-        ((p_map.FpReg & 0x1) == 0) &&
-        (p_map.FpReg + 1 == p_map_high.FpReg)) {
-      loc.low_reg = p_map.FpReg;
-      loc.high_reg = p_map_high.FpReg;
-      loc.location = kLocPhysReg;
-      loc.home = true;
-    }
-  } else if (ty == cu->irb->getJObjectTy()) {
-    loc.ref = true;
-    if (p_map.core_location == kLocPhysReg) {
-      loc.low_reg = p_map.core_reg;
-      loc.location = kLocPhysReg;
-      loc.home = true;
-    }
-  } else if (ty == cu->irb->getInt64Ty()) {
-    loc.core = true;
-    PromotionMap p_map_high = cu->promotion_map[base_sreg + 1];
-    if ((p_map.core_location == kLocPhysReg) &&
-        (p_map_high.core_location == kLocPhysReg)) {
-      loc.low_reg = p_map.core_reg;
-      loc.high_reg = p_map_high.core_reg;
-      loc.location = kLocPhysReg;
-      loc.home = true;
-    }
-  } else {
-    loc.core = true;
-    if (p_map.core_location == kLocPhysReg) {
-      loc.low_reg = p_map.core_reg;
-      loc.location = kLocPhysReg;
-      loc.home = true;
-    }
-  }
-
-  if (cu->verbose && loc.home) {
-    if (loc.wide) {
-      LOG(INFO) << "Promoted wide " << s << " to regs " << loc.low_reg << "/" << loc.high_reg;
-    } else {
-      LOG(INFO) << "Promoted " << s << " to reg " << loc.low_reg;
-    }
-  }
-  cu->loc_map.Put(val, loc);
-}
-
 static void InitIR(CompilationUnit* cu)
 {
   LLVMInfo* llvm_info = cu->llvm_info;
@@ -211,13 +128,9 @@
   cu->irb = llvm_info->GetIRBuilder();
 }
 
-static const char* LlvmSSAName(CompilationUnit* cu, int ssa_reg) {
-  return GET_ELEM_N(cu->ssa_strings, char*, ssa_reg);
-}
-
 ::llvm::BasicBlock* FindCaseTarget(CompilationUnit* cu, uint32_t vaddr)
 {
-  BasicBlock* bb = FindBlock(cu, vaddr);
+  BasicBlock* bb = cu->mir_graph->FindBlock(vaddr);
   DCHECK(bb != NULL);
   return GetLLVMBlock(cu, bb->id);
 }
@@ -829,7 +742,7 @@
  * when necessary.
  */
 static bool ConvertMIRNode(CompilationUnit* cu, MIR* mir, BasicBlock* bb,
-                           ::llvm::BasicBlock* llvm_bb, LIR* label_list)
+                           ::llvm::BasicBlock* llvm_bb)
 {
   bool res = false;   // Assume success
   RegLocation rl_src[3];
@@ -1022,7 +935,7 @@
     case Instruction::RETURN_WIDE:
     case Instruction::RETURN:
     case Instruction::RETURN_OBJECT: {
-        if (!(cu->attrs & METHOD_IS_LEAF)) {
+        if (!(cu->attributes & METHOD_IS_LEAF)) {
           EmitSuspendCheck(cu);
         }
         EmitPopShadowFrame(cu);
@@ -1038,7 +951,7 @@
                                                             cu->class_def_idx)) {
           EmitConstructorBarrier(cu);
         }
-        if (!(cu->attrs & METHOD_IS_LEAF)) {
+        if (!(cu->attributes & METHOD_IS_LEAF)) {
           EmitSuspendCheck(cu);
         }
         EmitPopShadowFrame(cu);
@@ -1671,7 +1584,7 @@
   reg_info.push_back(cu->irb->getInt32(cu->num_regs));
   reg_info.push_back(cu->irb->getInt32(cu->num_outs));
   reg_info.push_back(cu->irb->getInt32(cu->num_compiler_temps));
-  reg_info.push_back(cu->irb->getInt32(cu->num_ssa_regs));
+  reg_info.push_back(cu->irb->getInt32(cu->mir_graph->GetNumSSARegs()));
   ::llvm::MDNode* reg_info_node = ::llvm::MDNode::get(*cu->context, reg_info);
   inst->setMetadata("RegInfo", reg_info_node);
   int promo_size = cu->num_dalvik_registers + cu->num_compiler_temps + 1;
@@ -1918,8 +1831,7 @@
       continue;
     }
 
-    bool not_handled = ConvertMIRNode(cu, mir, bb, llvm_bb,
-                                     NULL /* label_list */);
+    bool not_handled = ConvertMIRNode(cu, mir, bb, llvm_bb);
     if (not_handled) {
       Instruction::Code dalvik_opcode = static_cast<Instruction::Code>(opcode);
       LOG(WARNING) << StringPrintf("%#06x: Op %#x (%s) / Fmt %d not handled",
@@ -2044,14 +1956,17 @@
 void MethodMIR2Bitcode(CompilationUnit* cu)
 {
   InitIR(cu);
-  CompilerInitGrowableList(cu, &cu->llvm_values, cu->num_ssa_regs);
+  CompilerInitGrowableList(cu, &cu->llvm_values, cu->mir_graph->GetNumSSARegs());
 
   // Create the function
   CreateFunction(cu);
 
   // Create an LLVM basic block for each MIR block in dfs preorder
-  DataFlowAnalysisDispatcher(cu, CreateLLVMBasicBlock,
-                                kPreOrderDFSTraversal, false /* is_iterative */);
+  DataflowIterator iter(cu->mir_graph.get(), kPreOrderDFSTraversal, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    CreateLLVMBasicBlock(cu, bb);
+  }
+
   /*
    * Create an llvm named value for each MIR SSA name.  Note: we'll use
    * placeholders for all non-argument values (because we haven't seen
@@ -2060,17 +1975,17 @@
   cu->irb->SetInsertPoint(cu->placeholder_bb);
   ::llvm::Function::arg_iterator arg_iter(cu->func->arg_begin());
   arg_iter++;  /* Skip path method */
-  for (int i = 0; i < cu->num_ssa_regs; i++) {
+  for (int i = 0; i < cu->mir_graph->GetNumSSARegs(); i++) {
     ::llvm::Value* val;
     RegLocation rl_temp = cu->reg_location[i];
-    if ((SRegToVReg(cu, i) < 0) || rl_temp.high_word) {
+    if ((cu->mir_graph->SRegToVReg(i) < 0) || rl_temp.high_word) {
       InsertGrowableList(cu, &cu->llvm_values, 0);
     } else if ((i < cu->num_regs) ||
                (i >= (cu->num_regs + cu->num_ins))) {
       ::llvm::Constant* imm_value = cu->reg_location[i].wide ?
          cu->irb->getJLong(0) : cu->irb->getJInt(0);
       val = EmitConst(cu, imm_value, cu->reg_location[i]);
-      val->setName(LlvmSSAName(cu, i));
+      val->setName(cu->mir_graph->GetSSAString(i));
       InsertGrowableList(cu, &cu->llvm_values, reinterpret_cast<uintptr_t>(val));
     } else {
       // Recover previously-created argument values
@@ -2079,8 +1994,10 @@
     }
   }
 
-  DataFlowAnalysisDispatcher(cu, BlockBitcodeConversion,
-                                kPreOrderDFSTraversal, false /* Iterative */);
+  DataflowIterator iter2(cu->mir_graph.get(), kPreOrderDFSTraversal, false /* not iterative */);
+  for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+    BlockBitcodeConversion(cu, bb);
+  }
 
   /*
    * In a few rare cases of verification failure, the verifier will
@@ -2120,7 +2037,7 @@
      if (::llvm::verifyFunction(*cu->func, ::llvm::PrintMessageAction)) {
        LOG(INFO) << "Bitcode verification FAILED for "
                  << PrettyMethod(cu->method_idx, *cu->dex_file)
-                 << " of size " << cu->insns_size;
+                 << " of size " << cu->code_item->insns_size_in_code_units_;
        cu->enable_debug |= (1 << kDebugDumpBitcodeFile);
      }
   }
@@ -2151,1417 +2068,4 @@
   }
 }
 
-static RegLocation GetLoc(CompilationUnit* cu, ::llvm::Value* val) {
-  RegLocation res;
-  DCHECK(val != NULL);
-  SafeMap< ::llvm::Value*, RegLocation>::iterator it = cu->loc_map.find(val);
-  if (it == cu->loc_map.end()) {
-    std::string val_name = val->getName().str();
-    if (val_name.empty()) {
-      // FIXME: need to be more robust, handle FP and be in a position to
-      // manage unnamed temps whose lifetimes span basic block boundaries
-      UNIMPLEMENTED(WARNING) << "Need to handle unnamed llvm temps";
-      memset(&res, 0, sizeof(res));
-      res.location = kLocPhysReg;
-      res.low_reg = AllocTemp(cu);
-      res.home = true;
-      res.s_reg_low = INVALID_SREG;
-      res.orig_sreg = INVALID_SREG;
-      ::llvm::Type* ty = val->getType();
-      res.wide = ((ty == cu->irb->getInt64Ty()) ||
-                  (ty == cu->irb->getDoubleTy()));
-      if (res.wide) {
-        res.high_reg = AllocTemp(cu);
-      }
-      cu->loc_map.Put(val, res);
-    } else {
-      DCHECK_EQ(val_name[0], 'v');
-      int base_sreg = INVALID_SREG;
-      sscanf(val_name.c_str(), "v%d_", &base_sreg);
-      res = cu->reg_location[base_sreg];
-      cu->loc_map.Put(val, res);
-    }
-  } else {
-    res = it->second;
-  }
-  return res;
-}
-
-static Instruction::Code GetDalvikOpcode(OpKind op, bool is_const, bool is_wide)
-{
-  Instruction::Code res = Instruction::NOP;
-  if (is_wide) {
-    switch(op) {
-      case kOpAdd: res = Instruction::ADD_LONG; break;
-      case kOpSub: res = Instruction::SUB_LONG; break;
-      case kOpMul: res = Instruction::MUL_LONG; break;
-      case kOpDiv: res = Instruction::DIV_LONG; break;
-      case kOpRem: res = Instruction::REM_LONG; break;
-      case kOpAnd: res = Instruction::AND_LONG; break;
-      case kOpOr: res = Instruction::OR_LONG; break;
-      case kOpXor: res = Instruction::XOR_LONG; break;
-      case kOpLsl: res = Instruction::SHL_LONG; break;
-      case kOpLsr: res = Instruction::USHR_LONG; break;
-      case kOpAsr: res = Instruction::SHR_LONG; break;
-      default: LOG(FATAL) << "Unexpected OpKind " << op;
-    }
-  } else if (is_const){
-    switch(op) {
-      case kOpAdd: res = Instruction::ADD_INT_LIT16; break;
-      case kOpSub: res = Instruction::RSUB_INT_LIT8; break;
-      case kOpMul: res = Instruction::MUL_INT_LIT16; break;
-      case kOpDiv: res = Instruction::DIV_INT_LIT16; break;
-      case kOpRem: res = Instruction::REM_INT_LIT16; break;
-      case kOpAnd: res = Instruction::AND_INT_LIT16; break;
-      case kOpOr: res = Instruction::OR_INT_LIT16; break;
-      case kOpXor: res = Instruction::XOR_INT_LIT16; break;
-      case kOpLsl: res = Instruction::SHL_INT_LIT8; break;
-      case kOpLsr: res = Instruction::USHR_INT_LIT8; break;
-      case kOpAsr: res = Instruction::SHR_INT_LIT8; break;
-      default: LOG(FATAL) << "Unexpected OpKind " << op;
-    }
-  } else {
-    switch(op) {
-      case kOpAdd: res = Instruction::ADD_INT; break;
-      case kOpSub: res = Instruction::SUB_INT; break;
-      case kOpMul: res = Instruction::MUL_INT; break;
-      case kOpDiv: res = Instruction::DIV_INT; break;
-      case kOpRem: res = Instruction::REM_INT; break;
-      case kOpAnd: res = Instruction::AND_INT; break;
-      case kOpOr: res = Instruction::OR_INT; break;
-      case kOpXor: res = Instruction::XOR_INT; break;
-      case kOpLsl: res = Instruction::SHL_INT; break;
-      case kOpLsr: res = Instruction::USHR_INT; break;
-      case kOpAsr: res = Instruction::SHR_INT; break;
-      default: LOG(FATAL) << "Unexpected OpKind " << op;
-    }
-  }
-  return res;
-}
-
-static Instruction::Code GetDalvikFPOpcode(OpKind op, bool is_const, bool is_wide)
-{
-  Instruction::Code res = Instruction::NOP;
-  if (is_wide) {
-    switch(op) {
-      case kOpAdd: res = Instruction::ADD_DOUBLE; break;
-      case kOpSub: res = Instruction::SUB_DOUBLE; break;
-      case kOpMul: res = Instruction::MUL_DOUBLE; break;
-      case kOpDiv: res = Instruction::DIV_DOUBLE; break;
-      case kOpRem: res = Instruction::REM_DOUBLE; break;
-      default: LOG(FATAL) << "Unexpected OpKind " << op;
-    }
-  } else {
-    switch(op) {
-      case kOpAdd: res = Instruction::ADD_FLOAT; break;
-      case kOpSub: res = Instruction::SUB_FLOAT; break;
-      case kOpMul: res = Instruction::MUL_FLOAT; break;
-      case kOpDiv: res = Instruction::DIV_FLOAT; break;
-      case kOpRem: res = Instruction::REM_FLOAT; break;
-      default: LOG(FATAL) << "Unexpected OpKind " << op;
-    }
-  }
-  return res;
-}
-
-static void CvtBinFPOp(CompilationUnit* cu, OpKind op, ::llvm::Instruction* inst)
-{
-  Codegen* cg = cu->cg.get();
-  RegLocation rl_dest = GetLoc(cu, inst);
-  /*
-   * Normally, we won't ever generate an FP operation with an immediate
-   * operand (not supported in Dex instruction set).  However, the IR builder
-   * may insert them - in particular for create_neg_fp.  Recognize this case
-   * and deal with it.
-   */
-  ::llvm::ConstantFP* op1C = ::llvm::dyn_cast< ::llvm::ConstantFP>(inst->getOperand(0));
-  ::llvm::ConstantFP* op2C = ::llvm::dyn_cast< ::llvm::ConstantFP>(inst->getOperand(1));
-  DCHECK(op2C == NULL);
-  if ((op1C != NULL) && (op == kOpSub)) {
-    RegLocation rl_src = GetLoc(cu, inst->getOperand(1));
-    if (rl_dest.wide) {
-      cg->GenArithOpDouble(cu, Instruction::NEG_DOUBLE, rl_dest, rl_src, rl_src);
-    } else {
-      cg->GenArithOpFloat(cu, Instruction::NEG_FLOAT, rl_dest, rl_src, rl_src);
-    }
-  } else {
-    DCHECK(op1C == NULL);
-    RegLocation rl_src1 = GetLoc(cu, inst->getOperand(0));
-    RegLocation rl_src2 = GetLoc(cu, inst->getOperand(1));
-    Instruction::Code dalvik_op = GetDalvikFPOpcode(op, false, rl_dest.wide);
-    if (rl_dest.wide) {
-      cg->GenArithOpDouble(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
-    } else {
-      cg->GenArithOpFloat(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
-    }
-  }
-}
-
-static void CvtIntNarrowing(CompilationUnit* cu, ::llvm::Instruction* inst,
-                     Instruction::Code opcode)
-{
-  Codegen* cg = cu->cg.get();
-  RegLocation rl_dest = GetLoc(cu, inst);
-  RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
-  cg->GenIntNarrowing(cu, opcode, rl_dest, rl_src);
-}
-
-static void CvtIntToFP(CompilationUnit* cu, ::llvm::Instruction* inst)
-{
-  Codegen* cg = cu->cg.get();
-  RegLocation rl_dest = GetLoc(cu, inst);
-  RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
-  Instruction::Code opcode;
-  if (rl_dest.wide) {
-    if (rl_src.wide) {
-      opcode = Instruction::LONG_TO_DOUBLE;
-    } else {
-      opcode = Instruction::INT_TO_DOUBLE;
-    }
-  } else {
-    if (rl_src.wide) {
-      opcode = Instruction::LONG_TO_FLOAT;
-    } else {
-      opcode = Instruction::INT_TO_FLOAT;
-    }
-  }
-  cg->GenConversion(cu, opcode, rl_dest, rl_src);
-}
-
-static void CvtFPToInt(CompilationUnit* cu, ::llvm::CallInst* call_inst)
-{
-  Codegen* cg = cu->cg.get();
-  RegLocation rl_dest = GetLoc(cu, call_inst);
-  RegLocation rl_src = GetLoc(cu, call_inst->getOperand(0));
-  Instruction::Code opcode;
-  if (rl_dest.wide) {
-    if (rl_src.wide) {
-      opcode = Instruction::DOUBLE_TO_LONG;
-    } else {
-      opcode = Instruction::FLOAT_TO_LONG;
-    }
-  } else {
-    if (rl_src.wide) {
-      opcode = Instruction::DOUBLE_TO_INT;
-    } else {
-      opcode = Instruction::FLOAT_TO_INT;
-    }
-  }
-  cg->GenConversion(cu, opcode, rl_dest, rl_src);
-}
-
-static void CvtFloatToDouble(CompilationUnit* cu, ::llvm::Instruction* inst)
-{
-  Codegen* cg = cu->cg.get();
-  RegLocation rl_dest = GetLoc(cu, inst);
-  RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
-  cg->GenConversion(cu, Instruction::FLOAT_TO_DOUBLE, rl_dest, rl_src);
-}
-
-static void CvtTrunc(CompilationUnit* cu, ::llvm::Instruction* inst)
-{
-  Codegen* cg = cu->cg.get();
-  RegLocation rl_dest = GetLoc(cu, inst);
-  RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
-  rl_src = UpdateLocWide(cu, rl_src);
-  rl_src = WideToNarrow(cu, rl_src);
-  cg->StoreValue(cu, rl_dest, rl_src);
-}
-
-static void CvtDoubleToFloat(CompilationUnit* cu, ::llvm::Instruction* inst)
-{
-  Codegen* cg = cu->cg.get();
-  RegLocation rl_dest = GetLoc(cu, inst);
-  RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
-  cg->GenConversion(cu, Instruction::DOUBLE_TO_FLOAT, rl_dest, rl_src);
-}
-
-
-static void CvtIntExt(CompilationUnit* cu, ::llvm::Instruction* inst, bool is_signed)
-{
-  Codegen* cg = cu->cg.get();
-  // TODO: evaluate src/tgt types and add general support for more than int to long
-  RegLocation rl_dest = GetLoc(cu, inst);
-  RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
-  DCHECK(rl_dest.wide);
-  DCHECK(!rl_src.wide);
-  DCHECK(!rl_dest.fp);
-  DCHECK(!rl_src.fp);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  if (rl_src.location == kLocPhysReg) {
-    cg->OpRegCopy(cu, rl_result.low_reg, rl_src.low_reg);
-  } else {
-    cg->LoadValueDirect(cu, rl_src, rl_result.low_reg);
-  }
-  if (is_signed) {
-    cg->OpRegRegImm(cu, kOpAsr, rl_result.high_reg, rl_result.low_reg, 31);
-  } else {
-    cg->LoadConstant(cu, rl_result.high_reg, 0);
-  }
-  cg->StoreValueWide(cu, rl_dest, rl_result);
-}
-
-static void CvtBinOp(CompilationUnit* cu, OpKind op, ::llvm::Instruction* inst)
-{
-  Codegen* cg = cu->cg.get();
-  RegLocation rl_dest = GetLoc(cu, inst);
-  ::llvm::Value* lhs = inst->getOperand(0);
-  // Special-case RSUB/NEG
-  ::llvm::ConstantInt* lhs_imm = ::llvm::dyn_cast< ::llvm::ConstantInt>(lhs);
-  if ((op == kOpSub) && (lhs_imm != NULL)) {
-    RegLocation rl_src1 = GetLoc(cu, inst->getOperand(1));
-    if (rl_src1.wide) {
-      DCHECK_EQ(lhs_imm->getSExtValue(), 0);
-      cg->GenArithOpLong(cu, Instruction::NEG_LONG, rl_dest, rl_src1, rl_src1);
-    } else {
-      cg->GenArithOpIntLit(cu, Instruction::RSUB_INT, rl_dest, rl_src1,
-                       lhs_imm->getSExtValue());
-    }
-    return;
-  }
-  DCHECK(lhs_imm == NULL);
-  RegLocation rl_src1 = GetLoc(cu, inst->getOperand(0));
-  ::llvm::Value* rhs = inst->getOperand(1);
-  ::llvm::ConstantInt* const_rhs = ::llvm::dyn_cast< ::llvm::ConstantInt>(rhs);
-  if (!rl_dest.wide && (const_rhs != NULL)) {
-    Instruction::Code dalvik_op = GetDalvikOpcode(op, true, false);
-    cg->GenArithOpIntLit(cu, dalvik_op, rl_dest, rl_src1, const_rhs->getSExtValue());
-  } else {
-    Instruction::Code dalvik_op = GetDalvikOpcode(op, false, rl_dest.wide);
-    RegLocation rl_src2;
-    if (const_rhs != NULL) {
-      // ir_builder converts NOT_LONG to xor src, -1.  Restore
-      DCHECK_EQ(dalvik_op, Instruction::XOR_LONG);
-      DCHECK_EQ(-1L, const_rhs->getSExtValue());
-      dalvik_op = Instruction::NOT_LONG;
-      rl_src2 = rl_src1;
-    } else {
-      rl_src2 = GetLoc(cu, rhs);
-    }
-    if (rl_dest.wide) {
-      cg->GenArithOpLong(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
-    } else {
-      cg->GenArithOpInt(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
-    }
-  }
-}
-
-static void CvtShiftOp(CompilationUnit* cu, Instruction::Code opcode, ::llvm::CallInst* call_inst)
-{
-  Codegen* cg = cu->cg.get();
-  DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
-  RegLocation rl_dest = GetLoc(cu, call_inst);
-  RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(0));
-  ::llvm::Value* rhs = call_inst->getArgOperand(1);
-  if (::llvm::ConstantInt* src2 = ::llvm::dyn_cast< ::llvm::ConstantInt>(rhs)) {
-    DCHECK(!rl_dest.wide);
-    cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src, src2->getSExtValue());
-  } else {
-    RegLocation rl_shift = GetLoc(cu, rhs);
-    if (call_inst->getType() == cu->irb->getInt64Ty()) {
-      cg->GenShiftOpLong(cu, opcode, rl_dest, rl_src, rl_shift);
-    } else {
-      cg->GenArithOpInt(cu, opcode, rl_dest, rl_src, rl_shift);
-    }
-  }
-}
-
-static void CvtBr(CompilationUnit* cu, ::llvm::Instruction* inst)
-{
-  Codegen* cg = cu->cg.get();
-  ::llvm::BranchInst* br_inst = ::llvm::dyn_cast< ::llvm::BranchInst>(inst);
-  DCHECK(br_inst != NULL);
-  DCHECK(br_inst->isUnconditional());  // May change - but this is all we use now
-  ::llvm::BasicBlock* target_bb = br_inst->getSuccessor(0);
-  cg->OpUnconditionalBranch(cu, cu->block_to_label_map.Get(target_bb));
-}
-
-static void CvtPhi(CompilationUnit* cu, ::llvm::Instruction* inst)
-{
-  // Nop - these have already been processed
-}
-
-static void CvtRet(CompilationUnit* cu, ::llvm::Instruction* inst)
-{
-  Codegen* cg = cu->cg.get();
-  ::llvm::ReturnInst* ret_inst = ::llvm::dyn_cast< ::llvm::ReturnInst>(inst);
-  ::llvm::Value* ret_val = ret_inst->getReturnValue();
-  if (ret_val != NULL) {
-    RegLocation rl_src = GetLoc(cu, ret_val);
-    if (rl_src.wide) {
-      cg->StoreValueWide(cu, GetReturnWide(cu, rl_src.fp), rl_src);
-    } else {
-      cg->StoreValue(cu, GetReturn(cu, rl_src.fp), rl_src);
-    }
-  }
-  cg->GenExitSequence(cu);
-}
-
-static ConditionCode GetCond(::llvm::ICmpInst::Predicate llvm_cond)
-{
-  ConditionCode res = kCondAl;
-  switch(llvm_cond) {
-    case ::llvm::ICmpInst::ICMP_EQ: res = kCondEq; break;
-    case ::llvm::ICmpInst::ICMP_NE: res = kCondNe; break;
-    case ::llvm::ICmpInst::ICMP_SLT: res = kCondLt; break;
-    case ::llvm::ICmpInst::ICMP_SGE: res = kCondGe; break;
-    case ::llvm::ICmpInst::ICMP_SGT: res = kCondGt; break;
-    case ::llvm::ICmpInst::ICMP_SLE: res = kCondLe; break;
-    default: LOG(FATAL) << "Unexpected llvm condition";
-  }
-  return res;
-}
-
-static void CvtICmp(CompilationUnit* cu, ::llvm::Instruction* inst)
-{
-  // cg->GenCmpLong(cu, rl_dest, rl_src1, rl_src2)
-  UNIMPLEMENTED(FATAL);
-}
-
-static void CvtICmpBr(CompilationUnit* cu, ::llvm::Instruction* inst,
-               ::llvm::BranchInst* br_inst)
-{
-  Codegen* cg = cu->cg.get();
-  // Get targets
-  ::llvm::BasicBlock* taken_bb = br_inst->getSuccessor(0);
-  LIR* taken = cu->block_to_label_map.Get(taken_bb);
-  ::llvm::BasicBlock* fallthrough_bb = br_inst->getSuccessor(1);
-  LIR* fall_through = cu->block_to_label_map.Get(fallthrough_bb);
-  // Get comparison operands
-  ::llvm::ICmpInst* i_cmp_inst = ::llvm::dyn_cast< ::llvm::ICmpInst>(inst);
-  ConditionCode cond = GetCond(i_cmp_inst->getPredicate());
-  ::llvm::Value* lhs = i_cmp_inst->getOperand(0);
-  // Not expecting a constant as 1st operand
-  DCHECK(::llvm::dyn_cast< ::llvm::ConstantInt>(lhs) == NULL);
-  RegLocation rl_src1 = GetLoc(cu, inst->getOperand(0));
-  rl_src1 = cg->LoadValue(cu, rl_src1, kCoreReg);
-  ::llvm::Value* rhs = inst->getOperand(1);
-  if (cu->instruction_set == kMips) {
-    // Compare and branch in one shot
-    UNIMPLEMENTED(FATAL);
-  }
-  //Compare, then branch
-  // TODO: handle fused CMP_LONG/IF_xxZ case
-  if (::llvm::ConstantInt* src2 = ::llvm::dyn_cast< ::llvm::ConstantInt>(rhs)) {
-    cg->OpRegImm(cu, kOpCmp, rl_src1.low_reg, src2->getSExtValue());
-  } else if (::llvm::dyn_cast< ::llvm::ConstantPointerNull>(rhs) != NULL) {
-    cg->OpRegImm(cu, kOpCmp, rl_src1.low_reg, 0);
-  } else {
-    RegLocation rl_src2 = GetLoc(cu, rhs);
-    rl_src2 = cg->LoadValue(cu, rl_src2, kCoreReg);
-    cg->OpRegReg(cu, kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
-  }
-  cg->OpCondBranch(cu, cond, taken);
-  // Fallthrough
-  cg->OpUnconditionalBranch(cu, fall_through);
-}
-
-static void CvtCopy(CompilationUnit* cu, ::llvm::CallInst* call_inst)
-{
-  Codegen* cg = cu->cg.get();
-  DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
-  RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(0));
-  RegLocation rl_dest = GetLoc(cu, call_inst);
-  DCHECK_EQ(rl_src.wide, rl_dest.wide);
-  DCHECK_EQ(rl_src.fp, rl_dest.fp);
-  if (rl_src.wide) {
-    cg->StoreValueWide(cu, rl_dest, rl_src);
-  } else {
-    cg->StoreValue(cu, rl_dest, rl_src);
-  }
-}
-
-// Note: Immediate arg is a ConstantInt regardless of result type
-static void CvtConst(CompilationUnit* cu, ::llvm::CallInst* call_inst)
-{
-  Codegen* cg = cu->cg.get();
-  DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
-  ::llvm::ConstantInt* src =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(0));
-  uint64_t immval = src->getZExtValue();
-  RegLocation rl_dest = GetLoc(cu, call_inst);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
-  if (rl_dest.wide) {
-    cg->LoadConstantWide(cu, rl_result.low_reg, rl_result.high_reg, immval);
-    cg->StoreValueWide(cu, rl_dest, rl_result);
-  } else {
-    int immediate = immval & 0xffffffff;
-    cg->LoadConstantNoClobber(cu, rl_result.low_reg, immediate);
-    cg->StoreValue(cu, rl_dest, rl_result);
-    if (immediate == 0) {
-      cg->Workaround7250540(cu, rl_dest, rl_result.low_reg);
-    }
-  }
-}
-
-static void CvtConstObject(CompilationUnit* cu, ::llvm::CallInst* call_inst, bool is_string)
-{
-  Codegen* cg = cu->cg.get();
-  DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
-  ::llvm::ConstantInt* idx_val =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(0));
-  uint32_t index = idx_val->getZExtValue();
-  RegLocation rl_dest = GetLoc(cu, call_inst);
-  if (is_string) {
-    cg->GenConstString(cu, index, rl_dest);
-  } else {
-    cg->GenConstClass(cu, index, rl_dest);
-  }
-}
-
-static void CvtFillArrayData(CompilationUnit* cu, ::llvm::CallInst* call_inst)
-{
-  Codegen* cg = cu->cg.get();
-  DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
-  ::llvm::ConstantInt* offset_val =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(0));
-  RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(1));
-  cg->GenFillArrayData(cu, offset_val->getSExtValue(), rl_src);
-}
-
-static void CvtNewInstance(CompilationUnit* cu, ::llvm::CallInst* call_inst)
-{
-  Codegen* cg = cu->cg.get();
-  DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
-  ::llvm::ConstantInt* type_idx_val =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(0));
-  uint32_t type_idx = type_idx_val->getZExtValue();
-  RegLocation rl_dest = GetLoc(cu, call_inst);
-  cg->GenNewInstance(cu, type_idx, rl_dest);
-}
-
-static void CvtNewArray(CompilationUnit* cu, ::llvm::CallInst* call_inst)
-{
-  Codegen* cg = cu->cg.get();
-  DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
-  ::llvm::ConstantInt* type_idx_val =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(0));
-  uint32_t type_idx = type_idx_val->getZExtValue();
-  ::llvm::Value* len = call_inst->getArgOperand(1);
-  RegLocation rl_len = GetLoc(cu, len);
-  RegLocation rl_dest = GetLoc(cu, call_inst);
-  cg->GenNewArray(cu, type_idx, rl_dest, rl_len);
-}
-
-static void CvtInstanceOf(CompilationUnit* cu, ::llvm::CallInst* call_inst)
-{
-  Codegen* cg = cu->cg.get();
-  DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
-  ::llvm::ConstantInt* type_idx_val =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(0));
-  uint32_t type_idx = type_idx_val->getZExtValue();
-  ::llvm::Value* src = call_inst->getArgOperand(1);
-  RegLocation rl_src = GetLoc(cu, src);
-  RegLocation rl_dest = GetLoc(cu, call_inst);
-  cg->GenInstanceof(cu, type_idx, rl_dest, rl_src);
-}
-
-static void CvtThrow(CompilationUnit* cu, ::llvm::CallInst* call_inst)
-{
-  Codegen* cg = cu->cg.get();
-  DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
-  ::llvm::Value* src = call_inst->getArgOperand(0);
-  RegLocation rl_src = GetLoc(cu, src);
-  cg->GenThrow(cu, rl_src);
-}
-
-static void CvtMonitorEnterExit(CompilationUnit* cu, bool is_enter,
-                         ::llvm::CallInst* call_inst)
-{
-  Codegen* cg = cu->cg.get();
-  DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
-  ::llvm::ConstantInt* opt_flags =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(0));
-  ::llvm::Value* src = call_inst->getArgOperand(1);
-  RegLocation rl_src = GetLoc(cu, src);
-  if (is_enter) {
-    cg->GenMonitorEnter(cu, opt_flags->getZExtValue(), rl_src);
-  } else {
-    cg->GenMonitorExit(cu, opt_flags->getZExtValue(), rl_src);
-  }
-}
-
-static void CvtArrayLength(CompilationUnit* cu, ::llvm::CallInst* call_inst)
-{
-  Codegen* cg = cu->cg.get();
-  DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
-  ::llvm::ConstantInt* opt_flags =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(0));
-  ::llvm::Value* src = call_inst->getArgOperand(1);
-  RegLocation rl_src = GetLoc(cu, src);
-  rl_src = cg->LoadValue(cu, rl_src, kCoreReg);
-  cg->GenNullCheck(cu, rl_src.s_reg_low, rl_src.low_reg, opt_flags->getZExtValue());
-  RegLocation rl_dest = GetLoc(cu, call_inst);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  int len_offset = mirror::Array::LengthOffset().Int32Value();
-  cg->LoadWordDisp(cu, rl_src.low_reg, len_offset, rl_result.low_reg);
-  cg->StoreValue(cu, rl_dest, rl_result);
-}
-
-static void CvtMoveException(CompilationUnit* cu, ::llvm::CallInst* call_inst)
-{
-  Codegen* cg = cu->cg.get();
-  RegLocation rl_dest = GetLoc(cu, call_inst);
-  cg->GenMoveException(cu, rl_dest);
-}
-
-static void CvtSget(CompilationUnit* cu, ::llvm::CallInst* call_inst, bool is_wide, bool is_object)
-{
-  Codegen* cg = cu->cg.get();
-  DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
-  ::llvm::ConstantInt* type_idx_val =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(0));
-  uint32_t type_idx = type_idx_val->getZExtValue();
-  RegLocation rl_dest = GetLoc(cu, call_inst);
-  cg->GenSget(cu, type_idx, rl_dest, is_wide, is_object);
-}
-
-static void CvtSput(CompilationUnit* cu, ::llvm::CallInst* call_inst, bool is_wide, bool is_object)
-{
-  Codegen* cg = cu->cg.get();
-  DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
-  ::llvm::ConstantInt* type_idx_val =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(0));
-  uint32_t type_idx = type_idx_val->getZExtValue();
-  ::llvm::Value* src = call_inst->getArgOperand(1);
-  RegLocation rl_src = GetLoc(cu, src);
-  cg->GenSput(cu, type_idx, rl_src, is_wide, is_object);
-}
-
-static void CvtAget(CompilationUnit* cu, ::llvm::CallInst* call_inst, OpSize size, int scale)
-{
-  Codegen* cg = cu->cg.get();
-  DCHECK_EQ(call_inst->getNumArgOperands(), 3U);
-  ::llvm::ConstantInt* opt_flags =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(0));
-  RegLocation rl_array = GetLoc(cu, call_inst->getArgOperand(1));
-  RegLocation rl_index = GetLoc(cu, call_inst->getArgOperand(2));
-  RegLocation rl_dest = GetLoc(cu, call_inst);
-  cg->GenArrayGet(cu, opt_flags->getZExtValue(), size, rl_array, rl_index,
-              rl_dest, scale);
-}
-
-static void CvtAput(CompilationUnit* cu, ::llvm::CallInst* call_inst, OpSize size,
-                    int scale, bool is_object)
-{
-  Codegen* cg = cu->cg.get();
-  DCHECK_EQ(call_inst->getNumArgOperands(), 4U);
-  ::llvm::ConstantInt* opt_flags =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(0));
-  RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(1));
-  RegLocation rl_array = GetLoc(cu, call_inst->getArgOperand(2));
-  RegLocation rl_index = GetLoc(cu, call_inst->getArgOperand(3));
-  if (is_object) {
-    cg->GenArrayObjPut(cu, opt_flags->getZExtValue(), rl_array, rl_index,
-                   rl_src, scale);
-  } else {
-    cg->GenArrayPut(cu, opt_flags->getZExtValue(), size, rl_array, rl_index,
-                rl_src, scale);
-  }
-}
-
-static void CvtAputObj(CompilationUnit* cu, ::llvm::CallInst* call_inst)
-{
-  CvtAput(cu, call_inst, kWord, 2, true /* is_object */);
-}
-
-static void CvtAputPrimitive(CompilationUnit* cu, ::llvm::CallInst* call_inst,
-                      OpSize size, int scale)
-{
-  CvtAput(cu, call_inst, size, scale, false /* is_object */);
-}
-
-static void CvtIget(CompilationUnit* cu, ::llvm::CallInst* call_inst, OpSize size,
-                    bool is_wide, bool is_obj)
-{
-  Codegen* cg = cu->cg.get();
-  DCHECK_EQ(call_inst->getNumArgOperands(), 3U);
-  ::llvm::ConstantInt* opt_flags =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(0));
-  RegLocation rl_obj = GetLoc(cu, call_inst->getArgOperand(1));
-  ::llvm::ConstantInt* field_idx =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(2));
-  RegLocation rl_dest = GetLoc(cu, call_inst);
-  cg->GenIGet(cu, field_idx->getZExtValue(), opt_flags->getZExtValue(),
-          size, rl_dest, rl_obj, is_wide, is_obj);
-}
-
-static void CvtIput(CompilationUnit* cu, ::llvm::CallInst* call_inst, OpSize size,
-                    bool is_wide, bool is_obj)
-{
-  Codegen* cg = cu->cg.get();
-  DCHECK_EQ(call_inst->getNumArgOperands(), 4U);
-  ::llvm::ConstantInt* opt_flags =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(0));
-  RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(1));
-  RegLocation rl_obj = GetLoc(cu, call_inst->getArgOperand(2));
-  ::llvm::ConstantInt* field_idx =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(3));
-  cg->GenIPut(cu, field_idx->getZExtValue(), opt_flags->getZExtValue(),
-          size, rl_src, rl_obj, is_wide, is_obj);
-}
-
-static void CvtCheckCast(CompilationUnit* cu, ::llvm::CallInst* call_inst)
-{
-  Codegen* cg = cu->cg.get();
-  DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
-  ::llvm::ConstantInt* type_idx =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(0));
-  RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(1));
-  cg->GenCheckCast(cu, type_idx->getZExtValue(), rl_src);
-}
-
-static void CvtFPCompare(CompilationUnit* cu, ::llvm::CallInst* call_inst,
-                         Instruction::Code opcode)
-{
-  Codegen* cg = cu->cg.get();
-  RegLocation rl_src1 = GetLoc(cu, call_inst->getArgOperand(0));
-  RegLocation rl_src2 = GetLoc(cu, call_inst->getArgOperand(1));
-  RegLocation rl_dest = GetLoc(cu, call_inst);
-  cg->GenCmpFP(cu, opcode, rl_dest, rl_src1, rl_src2);
-}
-
-static void CvtLongCompare(CompilationUnit* cu, ::llvm::CallInst* call_inst)
-{
-  Codegen* cg = cu->cg.get();
-  RegLocation rl_src1 = GetLoc(cu, call_inst->getArgOperand(0));
-  RegLocation rl_src2 = GetLoc(cu, call_inst->getArgOperand(1));
-  RegLocation rl_dest = GetLoc(cu, call_inst);
-  cg->GenCmpLong(cu, rl_dest, rl_src1, rl_src2);
-}
-
-static void CvtSwitch(CompilationUnit* cu, ::llvm::Instruction* inst)
-{
-  Codegen* cg = cu->cg.get();
-  ::llvm::SwitchInst* sw_inst = ::llvm::dyn_cast< ::llvm::SwitchInst>(inst);
-  DCHECK(sw_inst != NULL);
-  ::llvm::Value* test_val = sw_inst->getCondition();
-  ::llvm::MDNode* table_offset_node = sw_inst->getMetadata("SwitchTable");
-  DCHECK(table_offset_node != NULL);
-  ::llvm::ConstantInt* table_offset_value =
-          static_cast< ::llvm::ConstantInt*>(table_offset_node->getOperand(0));
-  int32_t table_offset = table_offset_value->getSExtValue();
-  RegLocation rl_src = GetLoc(cu, test_val);
-  const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
-  uint16_t table_magic = *table;
-  if (table_magic == 0x100) {
-    cg->GenPackedSwitch(cu, table_offset, rl_src);
-  } else {
-    DCHECK_EQ(table_magic, 0x200);
-    cg->GenSparseSwitch(cu, table_offset, rl_src);
-  }
-}
-
-static void CvtInvoke(CompilationUnit* cu, ::llvm::CallInst* call_inst, bool is_void,
-                      bool is_filled_new_array)
-{
-  Codegen* cg = cu->cg.get();
-  CallInfo* info = static_cast<CallInfo*>(NewMem(cu, sizeof(CallInfo), true, kAllocMisc));
-  if (is_void) {
-    info->result.location = kLocInvalid;
-  } else {
-    info->result = GetLoc(cu, call_inst);
-  }
-  ::llvm::ConstantInt* invoke_type_val =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(0));
-  ::llvm::ConstantInt* method_index_val =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(1));
-  ::llvm::ConstantInt* opt_flags_val =
-      ::llvm::dyn_cast< ::llvm::ConstantInt>(call_inst->getArgOperand(2));
-  info->type = static_cast<InvokeType>(invoke_type_val->getZExtValue());
-  info->index = method_index_val->getZExtValue();
-  info->opt_flags = opt_flags_val->getZExtValue();
-  info->offset = cu->current_dalvik_offset;
-
-  // Count the argument words, and then build argument array.
-  info->num_arg_words = 0;
-  for (unsigned int i = 3; i < call_inst->getNumArgOperands(); i++) {
-    RegLocation t_loc = GetLoc(cu, call_inst->getArgOperand(i));
-    info->num_arg_words += t_loc.wide ? 2 : 1;
-  }
-  info->args = (info->num_arg_words == 0) ? NULL : static_cast<RegLocation*>
-      (NewMem(cu, sizeof(RegLocation) * info->num_arg_words, false, kAllocMisc));
-  // Now, fill in the location records, synthesizing high loc of wide vals
-  for (int i = 3, next = 0; next < info->num_arg_words;) {
-    info->args[next] = GetLoc(cu, call_inst->getArgOperand(i++));
-    if (info->args[next].wide) {
-      next++;
-      // TODO: Might make sense to mark this as an invalid loc
-      info->args[next].orig_sreg = info->args[next-1].orig_sreg+1;
-      info->args[next].s_reg_low = info->args[next-1].s_reg_low+1;
-    }
-    next++;
-  }
-  // TODO - rework such that we no longer need is_range
-  info->is_range = (info->num_arg_words > 5);
-
-  if (is_filled_new_array) {
-    cg->GenFilledNewArray(cu, info);
-  } else {
-    cg->GenInvoke(cu, info);
-  }
-}
-
-static void CvtConstructorBarrier(CompilationUnit* cu) {
-  Codegen* cg = cu->cg.get();
-  cg->GenMemBarrier(cu, kStoreStore);
-}
-
-/* Look up the RegLocation associated with a Value.  Must already be defined */
-static RegLocation ValToLoc(CompilationUnit* cu, ::llvm::Value* val)
-{
-  SafeMap< ::llvm::Value*, RegLocation>::iterator it = cu->loc_map.find(val);
-  DCHECK(it != cu->loc_map.end()) << "Missing definition";
-  return it->second;
-}
-
-static bool BitcodeBlockCodeGen(CompilationUnit* cu, ::llvm::BasicBlock* bb)
-{
-  Codegen* cg = cu->cg.get();
-  while (cu->llvm_blocks.find(bb) == cu->llvm_blocks.end()) {
-    ::llvm::BasicBlock* next_bb = NULL;
-    cu->llvm_blocks.insert(bb);
-    bool is_entry = (bb == &cu->func->getEntryBlock());
-    // Define the starting label
-    LIR* block_label = cu->block_to_label_map.Get(bb);
-    // Extract the type and starting offset from the block's name
-    char block_type = kInvalidBlock;
-    if (is_entry) {
-      block_type = kNormalBlock;
-      block_label->operands[0] = 0;
-    } else if (!bb->hasName()) {
-      block_type = kNormalBlock;
-      block_label->operands[0] = DexFile::kDexNoIndex;
-    } else {
-      std::string block_name = bb->getName().str();
-      int dummy;
-      sscanf(block_name.c_str(), kLabelFormat, &block_type, &block_label->operands[0], &dummy);
-      cu->current_dalvik_offset = block_label->operands[0];
-    }
-    DCHECK((block_type == kNormalBlock) || (block_type == kCatchBlock));
-    cu->current_dalvik_offset = block_label->operands[0];
-    // Set the label kind
-    block_label->opcode = kPseudoNormalBlockLabel;
-    // Insert the label
-    AppendLIR(cu, block_label);
-
-    LIR* head_lir = NULL;
-
-    if (block_type == kCatchBlock) {
-      head_lir = NewLIR0(cu, kPseudoExportedPC);
-    }
-
-    // Free temp registers and reset redundant store tracking.
-    ResetRegPool(cu);
-    ResetDefTracking(cu);
-
-    //TODO: restore oat incoming liveness optimization
-    ClobberAllRegs(cu);
-
-    if (is_entry) {
-      RegLocation* ArgLocs = static_cast<RegLocation*>
-          (NewMem(cu, sizeof(RegLocation) * cu->num_ins, true, kAllocMisc));
-      ::llvm::Function::arg_iterator it(cu->func->arg_begin());
-      ::llvm::Function::arg_iterator it_end(cu->func->arg_end());
-      // Skip past Method*
-      it++;
-      for (unsigned i = 0; it != it_end; ++it) {
-        ::llvm::Value* val = it;
-        ArgLocs[i++] = ValToLoc(cu, val);
-        ::llvm::Type* ty = val->getType();
-        if ((ty == cu->irb->getInt64Ty()) || (ty == cu->irb->getDoubleTy())) {
-          ArgLocs[i] = ArgLocs[i-1];
-          ArgLocs[i].low_reg = ArgLocs[i].high_reg;
-          ArgLocs[i].orig_sreg++;
-          ArgLocs[i].s_reg_low = INVALID_SREG;
-          ArgLocs[i].high_word = true;
-          i++;
-        }
-      }
-      cg->GenEntrySequence(cu, ArgLocs, cu->method_loc);
-    }
-
-    // Visit all of the instructions in the block
-    for (::llvm::BasicBlock::iterator it = bb->begin(), e = bb->end(); it != e;) {
-      ::llvm::Instruction* inst = it;
-      ::llvm::BasicBlock::iterator next_it = ++it;
-      // Extract the Dalvik offset from the instruction
-      uint32_t opcode = inst->getOpcode();
-      ::llvm::MDNode* dex_offset_node = inst->getMetadata("DexOff");
-      if (dex_offset_node != NULL) {
-        ::llvm::ConstantInt* dex_offset_value =
-            static_cast< ::llvm::ConstantInt*>(dex_offset_node->getOperand(0));
-        cu->current_dalvik_offset = dex_offset_value->getZExtValue();
-      }
-
-      ResetRegPool(cu);
-      if (cu->disable_opt & (1 << kTrackLiveTemps)) {
-        ClobberAllRegs(cu);
-      }
-
-      if (cu->disable_opt & (1 << kSuppressLoads)) {
-        ResetDefTracking(cu);
-      }
-
-  #ifndef NDEBUG
-      /* Reset temp tracking sanity check */
-      cu->live_sreg = INVALID_SREG;
-  #endif
-
-      // TODO: use llvm opcode name here instead of "boundary" if verbose
-      LIR* boundary_lir = MarkBoundary(cu, cu->current_dalvik_offset, "boundary");
-
-      /* Remember the first LIR for this block */
-      if (head_lir == NULL) {
-        head_lir = boundary_lir;
-        head_lir->def_mask = ENCODE_ALL;
-      }
-
-      switch (opcode) {
-
-        case ::llvm::Instruction::ICmp: {
-            ::llvm::Instruction* next_inst = next_it;
-            ::llvm::BranchInst* br_inst = ::llvm::dyn_cast< ::llvm::BranchInst>(next_inst);
-            if (br_inst != NULL /* and... */) {
-              CvtICmpBr(cu, inst, br_inst);
-              ++it;
-            } else {
-              CvtICmp(cu, inst);
-            }
-          }
-          break;
-
-        case ::llvm::Instruction::Call: {
-            ::llvm::CallInst* call_inst = ::llvm::dyn_cast< ::llvm::CallInst>(inst);
-            ::llvm::Function* callee = call_inst->getCalledFunction();
-            art::llvm::IntrinsicHelper::IntrinsicId id =
-                cu->intrinsic_helper->GetIntrinsicId(callee);
-            switch (id) {
-              case art::llvm::IntrinsicHelper::AllocaShadowFrame:
-              case art::llvm::IntrinsicHelper::PopShadowFrame:
-              case art::llvm::IntrinsicHelper::SetVReg:
-                // Ignore shadow frame stuff for quick compiler
-                break;
-              case art::llvm::IntrinsicHelper::CopyInt:
-              case art::llvm::IntrinsicHelper::CopyObj:
-              case art::llvm::IntrinsicHelper::CopyFloat:
-              case art::llvm::IntrinsicHelper::CopyLong:
-              case art::llvm::IntrinsicHelper::CopyDouble:
-                CvtCopy(cu, call_inst);
-                break;
-              case art::llvm::IntrinsicHelper::ConstInt:
-              case art::llvm::IntrinsicHelper::ConstObj:
-              case art::llvm::IntrinsicHelper::ConstLong:
-              case art::llvm::IntrinsicHelper::ConstFloat:
-              case art::llvm::IntrinsicHelper::ConstDouble:
-                CvtConst(cu, call_inst);
-                break;
-              case art::llvm::IntrinsicHelper::DivInt:
-              case art::llvm::IntrinsicHelper::DivLong:
-                CvtBinOp(cu, kOpDiv, inst);
-                break;
-              case art::llvm::IntrinsicHelper::RemInt:
-              case art::llvm::IntrinsicHelper::RemLong:
-                CvtBinOp(cu, kOpRem, inst);
-                break;
-              case art::llvm::IntrinsicHelper::MethodInfo:
-                // Already dealt with - just ignore it here.
-                break;
-              case art::llvm::IntrinsicHelper::CheckSuspend:
-                cg->GenSuspendTest(cu, 0 /* opt_flags already applied */);
-                break;
-              case art::llvm::IntrinsicHelper::HLInvokeObj:
-              case art::llvm::IntrinsicHelper::HLInvokeFloat:
-              case art::llvm::IntrinsicHelper::HLInvokeDouble:
-              case art::llvm::IntrinsicHelper::HLInvokeLong:
-              case art::llvm::IntrinsicHelper::HLInvokeInt:
-                CvtInvoke(cu, call_inst, false /* is_void */, false /* new_array */);
-                break;
-              case art::llvm::IntrinsicHelper::HLInvokeVoid:
-                CvtInvoke(cu, call_inst, true /* is_void */, false /* new_array */);
-                break;
-              case art::llvm::IntrinsicHelper::HLFilledNewArray:
-                CvtInvoke(cu, call_inst, false /* is_void */, true /* new_array */);
-                break;
-              case art::llvm::IntrinsicHelper::HLFillArrayData:
-                CvtFillArrayData(cu, call_inst);
-                break;
-              case art::llvm::IntrinsicHelper::ConstString:
-                CvtConstObject(cu, call_inst, true /* is_string */);
-                break;
-              case art::llvm::IntrinsicHelper::ConstClass:
-                CvtConstObject(cu, call_inst, false /* is_string */);
-                break;
-              case art::llvm::IntrinsicHelper::HLCheckCast:
-                CvtCheckCast(cu, call_inst);
-                break;
-              case art::llvm::IntrinsicHelper::NewInstance:
-                CvtNewInstance(cu, call_inst);
-                break;
-              case art::llvm::IntrinsicHelper::HLSgetObject:
-                CvtSget(cu, call_inst, false /* wide */, true /* Object */);
-                break;
-              case art::llvm::IntrinsicHelper::HLSget:
-              case art::llvm::IntrinsicHelper::HLSgetFloat:
-              case art::llvm::IntrinsicHelper::HLSgetBoolean:
-              case art::llvm::IntrinsicHelper::HLSgetByte:
-              case art::llvm::IntrinsicHelper::HLSgetChar:
-              case art::llvm::IntrinsicHelper::HLSgetShort:
-                CvtSget(cu, call_inst, false /* wide */, false /* Object */);
-                break;
-              case art::llvm::IntrinsicHelper::HLSgetWide:
-              case art::llvm::IntrinsicHelper::HLSgetDouble:
-                CvtSget(cu, call_inst, true /* wide */, false /* Object */);
-                break;
-              case art::llvm::IntrinsicHelper::HLSput:
-              case art::llvm::IntrinsicHelper::HLSputFloat:
-              case art::llvm::IntrinsicHelper::HLSputBoolean:
-              case art::llvm::IntrinsicHelper::HLSputByte:
-              case art::llvm::IntrinsicHelper::HLSputChar:
-              case art::llvm::IntrinsicHelper::HLSputShort:
-                CvtSput(cu, call_inst, false /* wide */, false /* Object */);
-                break;
-              case art::llvm::IntrinsicHelper::HLSputWide:
-              case art::llvm::IntrinsicHelper::HLSputDouble:
-                CvtSput(cu, call_inst, true /* wide */, false /* Object */);
-                break;
-              case art::llvm::IntrinsicHelper::HLSputObject:
-                CvtSput(cu, call_inst, false /* wide */, true /* Object */);
-                break;
-              case art::llvm::IntrinsicHelper::GetException:
-                CvtMoveException(cu, call_inst);
-                break;
-              case art::llvm::IntrinsicHelper::HLThrowException:
-                CvtThrow(cu, call_inst);
-                break;
-              case art::llvm::IntrinsicHelper::MonitorEnter:
-                CvtMonitorEnterExit(cu, true /* is_enter */, call_inst);
-                break;
-              case art::llvm::IntrinsicHelper::MonitorExit:
-                CvtMonitorEnterExit(cu, false /* is_enter */, call_inst);
-                break;
-              case art::llvm::IntrinsicHelper::OptArrayLength:
-                CvtArrayLength(cu, call_inst);
-                break;
-              case art::llvm::IntrinsicHelper::NewArray:
-                CvtNewArray(cu, call_inst);
-                break;
-              case art::llvm::IntrinsicHelper::InstanceOf:
-                CvtInstanceOf(cu, call_inst);
-                break;
-
-              case art::llvm::IntrinsicHelper::HLArrayGet:
-              case art::llvm::IntrinsicHelper::HLArrayGetObject:
-              case art::llvm::IntrinsicHelper::HLArrayGetFloat:
-                CvtAget(cu, call_inst, kWord, 2);
-                break;
-              case art::llvm::IntrinsicHelper::HLArrayGetWide:
-              case art::llvm::IntrinsicHelper::HLArrayGetDouble:
-                CvtAget(cu, call_inst, kLong, 3);
-                break;
-              case art::llvm::IntrinsicHelper::HLArrayGetBoolean:
-                CvtAget(cu, call_inst, kUnsignedByte, 0);
-                break;
-              case art::llvm::IntrinsicHelper::HLArrayGetByte:
-                CvtAget(cu, call_inst, kSignedByte, 0);
-                break;
-              case art::llvm::IntrinsicHelper::HLArrayGetChar:
-                CvtAget(cu, call_inst, kUnsignedHalf, 1);
-                break;
-              case art::llvm::IntrinsicHelper::HLArrayGetShort:
-                CvtAget(cu, call_inst, kSignedHalf, 1);
-                break;
-
-              case art::llvm::IntrinsicHelper::HLArrayPut:
-              case art::llvm::IntrinsicHelper::HLArrayPutFloat:
-                CvtAputPrimitive(cu, call_inst, kWord, 2);
-                break;
-              case art::llvm::IntrinsicHelper::HLArrayPutObject:
-                CvtAputObj(cu, call_inst);
-                break;
-              case art::llvm::IntrinsicHelper::HLArrayPutWide:
-              case art::llvm::IntrinsicHelper::HLArrayPutDouble:
-                CvtAputPrimitive(cu, call_inst, kLong, 3);
-                break;
-              case art::llvm::IntrinsicHelper::HLArrayPutBoolean:
-                CvtAputPrimitive(cu, call_inst, kUnsignedByte, 0);
-                break;
-              case art::llvm::IntrinsicHelper::HLArrayPutByte:
-                CvtAputPrimitive(cu, call_inst, kSignedByte, 0);
-                break;
-              case art::llvm::IntrinsicHelper::HLArrayPutChar:
-                CvtAputPrimitive(cu, call_inst, kUnsignedHalf, 1);
-                break;
-              case art::llvm::IntrinsicHelper::HLArrayPutShort:
-                CvtAputPrimitive(cu, call_inst, kSignedHalf, 1);
-                break;
-
-              case art::llvm::IntrinsicHelper::HLIGet:
-              case art::llvm::IntrinsicHelper::HLIGetFloat:
-                CvtIget(cu, call_inst, kWord, false /* is_wide */, false /* obj */);
-                break;
-              case art::llvm::IntrinsicHelper::HLIGetObject:
-                CvtIget(cu, call_inst, kWord, false /* is_wide */, true /* obj */);
-                break;
-              case art::llvm::IntrinsicHelper::HLIGetWide:
-              case art::llvm::IntrinsicHelper::HLIGetDouble:
-                CvtIget(cu, call_inst, kLong, true /* is_wide */, false /* obj */);
-                break;
-              case art::llvm::IntrinsicHelper::HLIGetBoolean:
-                CvtIget(cu, call_inst, kUnsignedByte, false /* is_wide */,
-                        false /* obj */);
-                break;
-              case art::llvm::IntrinsicHelper::HLIGetByte:
-                CvtIget(cu, call_inst, kSignedByte, false /* is_wide */,
-                        false /* obj */);
-                break;
-              case art::llvm::IntrinsicHelper::HLIGetChar:
-                CvtIget(cu, call_inst, kUnsignedHalf, false /* is_wide */,
-                        false /* obj */);
-                break;
-              case art::llvm::IntrinsicHelper::HLIGetShort:
-                CvtIget(cu, call_inst, kSignedHalf, false /* is_wide */,
-                        false /* obj */);
-                break;
-
-              case art::llvm::IntrinsicHelper::HLIPut:
-              case art::llvm::IntrinsicHelper::HLIPutFloat:
-                CvtIput(cu, call_inst, kWord, false /* is_wide */, false /* obj */);
-                break;
-              case art::llvm::IntrinsicHelper::HLIPutObject:
-                CvtIput(cu, call_inst, kWord, false /* is_wide */, true /* obj */);
-                break;
-              case art::llvm::IntrinsicHelper::HLIPutWide:
-              case art::llvm::IntrinsicHelper::HLIPutDouble:
-                CvtIput(cu, call_inst, kLong, true /* is_wide */, false /* obj */);
-                break;
-              case art::llvm::IntrinsicHelper::HLIPutBoolean:
-                CvtIput(cu, call_inst, kUnsignedByte, false /* is_wide */,
-                        false /* obj */);
-                break;
-              case art::llvm::IntrinsicHelper::HLIPutByte:
-                CvtIput(cu, call_inst, kSignedByte, false /* is_wide */,
-                        false /* obj */);
-                break;
-              case art::llvm::IntrinsicHelper::HLIPutChar:
-                CvtIput(cu, call_inst, kUnsignedHalf, false /* is_wide */,
-                        false /* obj */);
-                break;
-              case art::llvm::IntrinsicHelper::HLIPutShort:
-                CvtIput(cu, call_inst, kSignedHalf, false /* is_wide */,
-                        false /* obj */);
-                break;
-
-              case art::llvm::IntrinsicHelper::IntToChar:
-                CvtIntNarrowing(cu, call_inst, Instruction::INT_TO_CHAR);
-                break;
-              case art::llvm::IntrinsicHelper::IntToShort:
-                CvtIntNarrowing(cu, call_inst, Instruction::INT_TO_SHORT);
-                break;
-              case art::llvm::IntrinsicHelper::IntToByte:
-                CvtIntNarrowing(cu, call_inst, Instruction::INT_TO_BYTE);
-                break;
-
-              case art::llvm::IntrinsicHelper::F2I:
-              case art::llvm::IntrinsicHelper::D2I:
-              case art::llvm::IntrinsicHelper::F2L:
-              case art::llvm::IntrinsicHelper::D2L:
-                CvtFPToInt(cu, call_inst);
-                break;
-
-              case art::llvm::IntrinsicHelper::CmplFloat:
-                CvtFPCompare(cu, call_inst, Instruction::CMPL_FLOAT);
-                break;
-              case art::llvm::IntrinsicHelper::CmpgFloat:
-                CvtFPCompare(cu, call_inst, Instruction::CMPG_FLOAT);
-                break;
-              case art::llvm::IntrinsicHelper::CmplDouble:
-                CvtFPCompare(cu, call_inst, Instruction::CMPL_DOUBLE);
-                break;
-              case art::llvm::IntrinsicHelper::CmpgDouble:
-                CvtFPCompare(cu, call_inst, Instruction::CMPG_DOUBLE);
-                break;
-
-              case art::llvm::IntrinsicHelper::CmpLong:
-                CvtLongCompare(cu, call_inst);
-                break;
-
-              case art::llvm::IntrinsicHelper::SHLLong:
-                CvtShiftOp(cu, Instruction::SHL_LONG, call_inst);
-                break;
-              case art::llvm::IntrinsicHelper::SHRLong:
-                CvtShiftOp(cu, Instruction::SHR_LONG, call_inst);
-                break;
-              case art::llvm::IntrinsicHelper::USHRLong:
-                CvtShiftOp(cu, Instruction::USHR_LONG, call_inst);
-                break;
-              case art::llvm::IntrinsicHelper::SHLInt:
-                CvtShiftOp(cu, Instruction::SHL_INT, call_inst);
-                break;
-              case art::llvm::IntrinsicHelper::SHRInt:
-                CvtShiftOp(cu, Instruction::SHR_INT, call_inst);
-                break;
-              case art::llvm::IntrinsicHelper::USHRInt:
-                CvtShiftOp(cu, Instruction::USHR_INT, call_inst);
-                break;
-
-              case art::llvm::IntrinsicHelper::CatchTargets: {
-                  ::llvm::SwitchInst* sw_inst =
-                      ::llvm::dyn_cast< ::llvm::SwitchInst>(next_it);
-                  DCHECK(sw_inst != NULL);
-                  /*
-                   * Discard the edges and the following conditional branch.
-                   * Do a direct branch to the default target (which is the
-                   * "work" portion of the pair).
-                   * TODO: awful code layout - rework
-                   */
-                   ::llvm::BasicBlock* target_bb = sw_inst->getDefaultDest();
-                   DCHECK(target_bb != NULL);
-                   cg->OpUnconditionalBranch(cu, cu->block_to_label_map.Get(target_bb));
-                   ++it;
-                   // Set next bb to default target - improves code layout
-                   next_bb = target_bb;
-                }
-                break;
-              case art::llvm::IntrinsicHelper::ConstructorBarrier: {
-                CvtConstructorBarrier(cu);
-                break;
-              }
-
-              default:
-                LOG(FATAL) << "Unexpected intrinsic " << cu->intrinsic_helper->GetName(id);
-            }
-          }
-          break;
-
-        case ::llvm::Instruction::Br: CvtBr(cu, inst); break;
-        case ::llvm::Instruction::Add: CvtBinOp(cu, kOpAdd, inst); break;
-        case ::llvm::Instruction::Sub: CvtBinOp(cu, kOpSub, inst); break;
-        case ::llvm::Instruction::Mul: CvtBinOp(cu, kOpMul, inst); break;
-        case ::llvm::Instruction::SDiv: CvtBinOp(cu, kOpDiv, inst); break;
-        case ::llvm::Instruction::SRem: CvtBinOp(cu, kOpRem, inst); break;
-        case ::llvm::Instruction::And: CvtBinOp(cu, kOpAnd, inst); break;
-        case ::llvm::Instruction::Or: CvtBinOp(cu, kOpOr, inst); break;
-        case ::llvm::Instruction::Xor: CvtBinOp(cu, kOpXor, inst); break;
-        case ::llvm::Instruction::PHI: CvtPhi(cu, inst); break;
-        case ::llvm::Instruction::Ret: CvtRet(cu, inst); break;
-        case ::llvm::Instruction::FAdd: CvtBinFPOp(cu, kOpAdd, inst); break;
-        case ::llvm::Instruction::FSub: CvtBinFPOp(cu, kOpSub, inst); break;
-        case ::llvm::Instruction::FMul: CvtBinFPOp(cu, kOpMul, inst); break;
-        case ::llvm::Instruction::FDiv: CvtBinFPOp(cu, kOpDiv, inst); break;
-        case ::llvm::Instruction::FRem: CvtBinFPOp(cu, kOpRem, inst); break;
-        case ::llvm::Instruction::SIToFP: CvtIntToFP(cu, inst); break;
-        case ::llvm::Instruction::FPTrunc: CvtDoubleToFloat(cu, inst); break;
-        case ::llvm::Instruction::FPExt: CvtFloatToDouble(cu, inst); break;
-        case ::llvm::Instruction::Trunc: CvtTrunc(cu, inst); break;
-
-        case ::llvm::Instruction::ZExt: CvtIntExt(cu, inst, false /* signed */);
-          break;
-        case ::llvm::Instruction::SExt: CvtIntExt(cu, inst, true /* signed */);
-          break;
-
-        case ::llvm::Instruction::Switch: CvtSwitch(cu, inst); break;
-
-        case ::llvm::Instruction::Unreachable:
-          break;  // FIXME: can we really ignore these?
-
-        case ::llvm::Instruction::Shl:
-        case ::llvm::Instruction::LShr:
-        case ::llvm::Instruction::AShr:
-        case ::llvm::Instruction::Invoke:
-        case ::llvm::Instruction::FPToUI:
-        case ::llvm::Instruction::FPToSI:
-        case ::llvm::Instruction::UIToFP:
-        case ::llvm::Instruction::PtrToInt:
-        case ::llvm::Instruction::IntToPtr:
-        case ::llvm::Instruction::FCmp:
-        case ::llvm::Instruction::URem:
-        case ::llvm::Instruction::UDiv:
-        case ::llvm::Instruction::Resume:
-        case ::llvm::Instruction::Alloca:
-        case ::llvm::Instruction::GetElementPtr:
-        case ::llvm::Instruction::Fence:
-        case ::llvm::Instruction::AtomicCmpXchg:
-        case ::llvm::Instruction::AtomicRMW:
-        case ::llvm::Instruction::BitCast:
-        case ::llvm::Instruction::VAArg:
-        case ::llvm::Instruction::Select:
-        case ::llvm::Instruction::UserOp1:
-        case ::llvm::Instruction::UserOp2:
-        case ::llvm::Instruction::ExtractElement:
-        case ::llvm::Instruction::InsertElement:
-        case ::llvm::Instruction::ShuffleVector:
-        case ::llvm::Instruction::ExtractValue:
-        case ::llvm::Instruction::InsertValue:
-        case ::llvm::Instruction::LandingPad:
-        case ::llvm::Instruction::IndirectBr:
-        case ::llvm::Instruction::Load:
-        case ::llvm::Instruction::Store:
-          LOG(FATAL) << "Unexpected llvm opcode: " << opcode; break;
-
-        default:
-          LOG(FATAL) << "Unknown llvm opcode: " << inst->getOpcodeName();
-          break;
-      }
-    }
-
-    if (head_lir != NULL) {
-      ApplyLocalOptimizations(cu, head_lir, cu->last_lir_insn);
-    }
-    if (next_bb != NULL) {
-      bb = next_bb;
-      next_bb = NULL;
-    }
-  }
-  return false;
-}
-
-/*
- * Convert LLVM_IR to MIR:
- *   o Iterate through the LLVM_IR and construct a graph using
- *     standard MIR building blocks.
- *   o Perform a basic-block optimization pass to remove unnecessary
- *     store/load sequences.
- *   o Convert the LLVM Value operands into RegLocations where applicable.
- *   o Create ssa_rep def/use operand arrays for each converted LLVM opcode
- *   o Perform register promotion
- *   o Iterate through the graph a basic block at a time, generating
- *     LIR.
- *   o Assemble LIR as usual.
- *   o Profit.
- */
-void MethodBitcode2LIR(CompilationUnit* cu)
-{
-  Codegen* cg = cu->cg.get();
-  ::llvm::Function* func = cu->func;
-  int num_basic_blocks = func->getBasicBlockList().size();
-  // Allocate a list for LIR basic block labels
-  cu->block_label_list =
-    static_cast<LIR*>(NewMem(cu, sizeof(LIR) * num_basic_blocks, true, kAllocLIR));
-  LIR* label_list = cu->block_label_list;
-  int next_label = 0;
-  for (::llvm::Function::iterator i = func->begin(), e = func->end(); i != e; ++i) {
-    cu->block_to_label_map.Put(static_cast< ::llvm::BasicBlock*>(i),
-                               &label_list[next_label++]);
-  }
-
-  /*
-   * Keep honest - clear reg_locations, Value => RegLocation,
-   * promotion map and VmapTables.
-   */
-  cu->loc_map.clear();  // Start fresh
-  cu->reg_location = NULL;
-  for (int i = 0; i < cu->num_dalvik_registers + cu->num_compiler_temps + 1; i++) {
-    cu->promotion_map[i].core_location = kLocDalvikFrame;
-    cu->promotion_map[i].fp_location = kLocDalvikFrame;
-  }
-  cu->core_spill_mask = 0;
-  cu->num_core_spills = 0;
-  cu->fp_spill_mask = 0;
-  cu->num_fp_spills = 0;
-  cu->core_vmap_table.clear();
-  cu->fp_vmap_table.clear();
-
-  /*
-   * At this point, we've lost all knowledge of register promotion.
-   * Rebuild that info from the MethodInfo intrinsic (if it
-   * exists - not required for correctness).  Normally, this will
-   * be the first instruction we encounter, so we won't have to iterate
-   * through everything.
-   */
-  for (::llvm::inst_iterator i = ::llvm::inst_begin(func), e = ::llvm::inst_end(func); i != e; ++i) {
-    ::llvm::CallInst* call_inst = ::llvm::dyn_cast< ::llvm::CallInst>(&*i);
-    if (call_inst != NULL) {
-      ::llvm::Function* callee = call_inst->getCalledFunction();
-      llvm::IntrinsicHelper::IntrinsicId id =
-          cu->intrinsic_helper->GetIntrinsicId(callee);
-      if (id == art::llvm::IntrinsicHelper::MethodInfo) {
-        if (cu->verbose) {
-          LOG(INFO) << "Found MethodInfo";
-        }
-        ::llvm::MDNode* reg_info_node = call_inst->getMetadata("RegInfo");
-        if (reg_info_node != NULL) {
-          ::llvm::ConstantInt* num_ins_value =
-            static_cast< ::llvm::ConstantInt*>(reg_info_node->getOperand(0));
-          ::llvm::ConstantInt* num_regs_value =
-            static_cast< ::llvm::ConstantInt*>(reg_info_node->getOperand(1));
-          ::llvm::ConstantInt* num_outs_value =
-            static_cast< ::llvm::ConstantInt*>(reg_info_node->getOperand(2));
-          ::llvm::ConstantInt* num_compiler_temps_value =
-            static_cast< ::llvm::ConstantInt*>(reg_info_node->getOperand(3));
-          ::llvm::ConstantInt* num_ssa_regs_value =
-            static_cast< ::llvm::ConstantInt*>(reg_info_node->getOperand(4));
-          if (cu->verbose) {
-             LOG(INFO) << "RegInfo - Ins:" << num_ins_value->getZExtValue()
-                       << ", Regs:" << num_regs_value->getZExtValue()
-                       << ", Outs:" << num_outs_value->getZExtValue()
-                       << ", CTemps:" << num_compiler_temps_value->getZExtValue()
-                       << ", SSARegs:" << num_ssa_regs_value->getZExtValue();
-            }
-          }
-        ::llvm::MDNode* pmap_info_node = call_inst->getMetadata("PromotionMap");
-        if (pmap_info_node != NULL) {
-          int elems = pmap_info_node->getNumOperands();
-          if (cu->verbose) {
-            LOG(INFO) << "PMap size: " << elems;
-          }
-          for (int i = 0; i < elems; i++) {
-            ::llvm::ConstantInt* raw_map_data =
-                static_cast< ::llvm::ConstantInt*>(pmap_info_node->getOperand(i));
-            uint32_t map_data = raw_map_data->getZExtValue();
-            PromotionMap* p = &cu->promotion_map[i];
-            p->first_in_pair = (map_data >> 24) & 0xff;
-            p->FpReg = (map_data >> 16) & 0xff;
-            p->core_reg = (map_data >> 8) & 0xff;
-            p->fp_location = static_cast<RegLocationType>((map_data >> 4) & 0xf);
-            if (p->fp_location == kLocPhysReg) {
-              RecordFpPromotion(cu, p->FpReg, i);
-            }
-            p->core_location = static_cast<RegLocationType>(map_data & 0xf);
-            if (p->core_location == kLocPhysReg) {
-              RecordCorePromotion(cu, p->core_reg, i);
-            }
-          }
-          if (cu->verbose) {
-            DumpPromotionMap(cu);
-          }
-        }
-        break;
-      }
-    }
-  }
-  cg->AdjustSpillMask(cu);
-  cu->frame_size = ComputeFrameSize(cu);
-
-  // Create RegLocations for arguments
-  ::llvm::Function::arg_iterator it(cu->func->arg_begin());
-  ::llvm::Function::arg_iterator it_end(cu->func->arg_end());
-  for (; it != it_end; ++it) {
-    ::llvm::Value* val = it;
-    CreateLocFromValue(cu, val);
-  }
-  // Create RegLocations for all non-argument definitions
-  for (::llvm::inst_iterator i = ::llvm::inst_begin(func), e = ::llvm::inst_end(func); i != e; ++i) {
-    ::llvm::Value* val = &*i;
-    if (val->hasName() && (val->getName().str().c_str()[0] == 'v')) {
-      CreateLocFromValue(cu, val);
-    }
-  }
-
-  // Walk the blocks, generating code.
-  for (::llvm::Function::iterator i = cu->func->begin(), e = cu->func->end(); i != e; ++i) {
-    BitcodeBlockCodeGen(cu, static_cast< ::llvm::BasicBlock*>(i));
-  }
-
-  cg->HandleSuspendLaunchPads(cu);
-
-  cg->HandleThrowLaunchPads(cu);
-
-  cg->HandleIntrinsicLaunchPads(cu);
-
-  cu->func->eraseFromParent();
-  cu->func = NULL;
-}
-
-
 }  // namespace art
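
Aside - a minimal sketch (not part of this CL) of the switch-payload
dispatch the deleted CvtSwitch performed: a dex switch payload leads with
a 16-bit signature word, 0x100 for packed tables and 0x200 for sparse
tables (the values CvtSwitch tested and DCHECKed above). The enum and
function names are illustrative stand-ins, not ART code.

  #include <cstdint>

  enum SwitchKind { kPackedSwitch, kSparseSwitch };

  // Classify a dex switch payload by its leading signature code unit.
  inline SwitchKind ClassifySwitchPayload(const uint16_t* table) {
    return (table[0] == 0x100) ? kPackedSwitch : kSparseSwitch;  // else 0x200
  }
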
diff --git a/src/compiler/dex/portable/mir_to_gbc.h b/src/compiler/dex/portable/mir_to_gbc.h
index a3b5b31..48faf75 100644
--- a/src/compiler/dex/portable/mir_to_gbc.h
+++ b/src/compiler/dex/portable/mir_to_gbc.h
@@ -20,7 +20,6 @@
 namespace art {
 
 void MethodMIR2Bitcode(CompilationUnit* cu);
-void MethodBitcode2LIR(CompilationUnit* cu);
 
 }  // namespace art
 
diff --git a/src/compiler/dex/quick/arm/call_arm.cc b/src/compiler/dex/quick/arm/call_arm.cc
index d3a3a7c..a201fd8 100644
--- a/src/compiler/dex/quick/arm/call_arm.cc
+++ b/src/compiler/dex/quick/arm/call_arm.cc
@@ -28,7 +28,7 @@
 /* Return the position of an ssa name within the argument list */
 static int InPosition(CompilationUnit* cu, int s_reg)
 {
-  int v_reg = SRegToVReg(cu, s_reg);
+  int v_reg = cu->mir_graph->SRegToVReg(s_reg);
   return v_reg - cu->num_regs;
 }
 
@@ -89,7 +89,7 @@
   int first_in = cu->num_regs;
   const int num_arg_regs = 3;  // TODO: generalize & move to RegUtil.cc
   for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
-    int v_reg = SRegToVReg(cu, mir->ssa_rep->uses[i]);
+    int v_reg = cu->mir_graph->SRegToVReg(mir->ssa_rep->uses[i]);
     int InPosition = v_reg - first_in;
     if (InPosition < num_arg_regs) {
       LockTemp(cu, rARM_ARG1 + InPosition);
@@ -324,7 +324,8 @@
  *   add   rARM_PC, r_disp   ; This is the branch from which we compute displacement
  *   cbnz  r_idx, lp
  */
-void ArmCodegen::GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+void ArmCodegen::GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
+                                 RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   if (cu->verbose) {
@@ -371,7 +372,8 @@
 }
 
 
-void ArmCodegen::GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+void ArmCodegen::GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
+                                 RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   if (cu->verbose) {
@@ -588,7 +590,7 @@
    * We can safely skip the stack overflow check if we're
    * a leaf *and* our frame size < fudge factor.
    */
-  bool skip_overflow_check = ((cu->attrs & METHOD_IS_LEAF) &&
+  bool skip_overflow_check = ((cu->attributes & METHOD_IS_LEAF) &&
                             (static_cast<size_t>(cu->frame_size) <
                             Thread::kStackOverflowReservedBytes));
   NewLIR0(cu, kPseudoMethodEntry);
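
Aside - a hedged sketch of the stack-overflow-check elision above. The
real code tests cu->attributes & METHOD_IS_LEAF against
Thread::kStackOverflowReservedBytes; the constants below are assumptions
for illustration only.

  #include <cstddef>
  #include <cstdint>

  constexpr uint32_t kMethodIsLeaf = 1u << 0;        // assumed bit position
  constexpr size_t kStackReservedBytes = 16 * 1024;  // assumed guard size

  // A leaf method cannot recurse through callees, so a frame smaller than
  // the reserved guard region is guaranteed to fit without a check.
  inline bool CanSkipOverflowCheck(uint32_t attributes, size_t frame_size) {
    return (attributes & kMethodIsLeaf) != 0 &&
           frame_size < kStackReservedBytes;
  }
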
diff --git a/src/compiler/dex/quick/arm/codegen_arm.h b/src/compiler/dex/quick/arm/codegen_arm.h
index e77394c..29aef0e 100644
--- a/src/compiler/dex/quick/arm/codegen_arm.h
+++ b/src/compiler/dex/quick/arm/codegen_arm.h
@@ -150,9 +150,9 @@
                                                int second_bit);
     virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
     virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-    virtual void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+    virtual void GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src);
-    virtual void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+    virtual void GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src);
     virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
                                 SpecialCaseHandler special_case);
diff --git a/src/compiler/dex/quick/arm/int_arm.cc b/src/compiler/dex/quick/arm/int_arm.cc
index ed65636..0ebc943 100644
--- a/src/compiler/dex/quick/arm/int_arm.cc
+++ b/src/compiler/dex/quick/arm/int_arm.cc
@@ -187,11 +187,11 @@
   RegLocation rl_src = GetSrc(cu, mir, 0);
   // Temporary debugging code
   int dest_sreg = mir->ssa_rep->defs[0];
-  if ((dest_sreg < 0) || (dest_sreg >= cu->num_ssa_regs)) {
+  if ((dest_sreg < 0) || (dest_sreg >= cu->mir_graph->GetNumSSARegs())) {
     LOG(INFO) << "Bad target sreg: " << dest_sreg << ", in "
               << PrettyMethod(cu->method_idx,*cu->dex_file);
     LOG(INFO) << "at dex offset 0x" << std::hex << mir->offset;
-    LOG(INFO) << "vreg = " << SRegToVReg(cu, dest_sreg);
+    LOG(INFO) << "vreg = " << cu->mir_graph->SRegToVReg(dest_sreg);
     LOG(INFO) << "num uses = " << mir->ssa_rep->num_uses;
     if (mir->ssa_rep->num_uses == 1) {
       LOG(INFO) << "CONST case, vals = " << mir->dalvikInsn.vB << ", " << mir->dalvikInsn.vC;
@@ -265,7 +265,7 @@
   if (rl_src2.is_const) {
     RegLocation rl_temp = UpdateLocWide(cu, rl_src2);
     // Do special compare/branch against simple const operand if not already in registers.
-    int64_t val = ConstantValueWide(cu, rl_src2);
+    int64_t val = cu->mir_graph->ConstantValueWide(rl_src2);
     if ((rl_temp.location != kLocPhysReg) &&
         ((ModifiedImmediate(Low32Bits(val)) >= 0) && (ModifiedImmediate(High32Bits(val)) >= 0))) {
       GenFusedLongCmpImmBranch(cu, bb, rl_src1, val, ccode);
@@ -538,7 +538,7 @@
   RegLocation rl_object = LoadValue(cu, rl_src_obj, kCoreReg);
   RegLocation rl_new_value = LoadValue(cu, rl_src_new_value, kCoreReg);
 
-  if (need_write_barrier && !IsConstantNullRef(cu, rl_new_value)) {
+  if (need_write_barrier && !cu->mir_graph->IsConstantNullRef(rl_new_value)) {
     // Mark card for object assuming new value is stored.
     MarkGCCard(cu, rl_new_value.low_reg, rl_object.low_reg);
   }
@@ -678,7 +678,7 @@
 {
   DCHECK(rl_src.wide);
   DCHECK(rl_dest.wide);
-  return (abs(SRegToVReg(cu, rl_src.s_reg_low) - SRegToVReg(cu, rl_dest.s_reg_low)) == 1);
+  return (abs(cu->mir_graph->SRegToVReg(rl_src.s_reg_low) - cu->mir_graph->SRegToVReg(rl_dest.s_reg_low)) == 1);
 }
 
 void ArmCodegen::GenMulLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
@@ -809,7 +809,7 @@
 
   // If index is constant, just fold it into the data offset
   if (constant_index) {
-    data_offset += ConstantValue(cu, rl_index) << scale;
+    data_offset += cu->mir_graph->ConstantValue(rl_index) << scale;
   }
 
   /* null object? */
@@ -837,7 +837,7 @@
 
     if (needs_range_check) {
       if (constant_index) {
-        GenImmedCheck(cu, kCondLs, reg_len, ConstantValue(cu, rl_index), kThrowConstantArrayBounds);
+        GenImmedCheck(cu, kCondLs, reg_len, cu->mir_graph->ConstantValue(rl_index), kThrowConstantArrayBounds);
       } else {
         GenRegRegCheck(cu, kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
       }
@@ -895,7 +895,7 @@
 
   // If index is constant, just fold it into the data offset.
   if (constant_index) {
-    data_offset += ConstantValue(cu, rl_index) << scale;
+    data_offset += cu->mir_graph->ConstantValue(rl_index) << scale;
   }
 
   rl_array = LoadValue(cu, rl_array, kCoreReg);
@@ -937,7 +937,7 @@
     }
     if (needs_range_check) {
       if (constant_index) {
-        GenImmedCheck(cu, kCondLs, reg_len, ConstantValue(cu, rl_index), kThrowConstantArrayBounds);
+        GenImmedCheck(cu, kCondLs, reg_len, cu->mir_graph->ConstantValue(rl_index), kThrowConstantArrayBounds);
       } else {
         GenRegRegCheck(cu, kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
       }
@@ -1021,7 +1021,7 @@
   StoreBaseIndexed(cu, r_ptr, r_index, r_value, scale, kWord);
   FreeTemp(cu, r_ptr);
   FreeTemp(cu, r_index);
-  if (!IsConstantNullRef(cu, rl_src)) {
+  if (!cu->mir_graph->IsConstantNullRef(rl_src)) {
     MarkGCCard(cu, r_value, r_array);
   }
 }
@@ -1031,7 +1031,7 @@
 {
   rl_src = LoadValueWide(cu, rl_src, kCoreReg);
   // Per spec, we only care about low 6 bits of shift amount.
-  int shift_amount = ConstantValue(cu, rl_shift) & 0x3f;
+  int shift_amount = cu->mir_graph->ConstantValue(rl_shift) & 0x3f;
   if (shift_amount == 0) {
     StoreValueWide(cu, rl_dest, rl_src);
     return;
@@ -1123,7 +1123,7 @@
     return;
   }
   DCHECK(rl_src2.is_const);
-  int64_t val = ConstantValueWide(cu, rl_src2);
+  int64_t val = cu->mir_graph->ConstantValueWide(rl_src2);
   uint32_t val_lo = Low32Bits(val);
   uint32_t val_hi = High32Bits(val);
   int32_t mod_imm_lo = ModifiedImmediate(val_lo);
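
Aside - a small sketch (names illustrative) of the constant-index folding
the array get/put paths above now request from MIRGraph: when the index is
a known constant, it is folded into the element displacement instead of
occupying an index register at runtime.

  #include <cstdint>

  // scale is log2(element size): 0 = byte, 1 = half, 2 = word, 3 = wide.
  inline int32_t FoldConstantIndex(int32_t data_offset, int32_t index,
                                   int scale) {
    return data_offset + (index << scale);
  }
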
diff --git a/src/compiler/dex/quick/arm/target_arm.cc b/src/compiler/dex/quick/arm/target_arm.cc
index f03e07c..6d8102f 100644
--- a/src/compiler/dex/quick/arm/target_arm.cc
+++ b/src/compiler/dex/quick/arm/target_arm.cc
@@ -597,7 +597,7 @@
   }
 }
 /*
- * TUNING: is leaf?  Can't just use "has_invoke" to determine as some
+ * TUNING: is true leaf?  Can't just use METHOD_IS_LEAF to determine this, as some
  * instructions might call out to C/assembly helper functions.  Until
  * machinery is in place, always spill lr.
  */
@@ -645,10 +645,10 @@
 
     info1->dirty = false;
     info2->dirty = false;
-    if (SRegToVReg(cu, info2->s_reg) <
-      SRegToVReg(cu, info1->s_reg))
+    if (cu->mir_graph->SRegToVReg(info2->s_reg) <
+      cu->mir_graph->SRegToVReg(info1->s_reg))
       info1 = info2;
-    int v_reg = SRegToVReg(cu, info1->s_reg);
+    int v_reg = cu->mir_graph->SRegToVReg(info1->s_reg);
     StoreBaseDispWide(cu, rARM_SP, VRegOffset(cu, v_reg), info1->reg, info1->partner);
   }
 }
@@ -658,7 +658,7 @@
   RegisterInfo* info = GetRegInfo(cu, reg);
   if (info->live && info->dirty) {
     info->dirty = false;
-    int v_reg = SRegToVReg(cu, info->s_reg);
+    int v_reg = cu->mir_graph->SRegToVReg(info->s_reg);
     StoreBaseDisp(cu, rARM_SP, VRegOffset(cu, v_reg), reg, kWord);
   }
 }
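
Aside - the flush rule in FlushReg/FlushRegWide above, reduced to a hedged
sketch (the struct is a stand-in for ART's RegisterInfo): a promoted value
is written back to its Dalvik frame slot only when it is both live and
dirty.

  struct RegInfoSketch {
    bool live;   // currently holds a Dalvik value
    bool dirty;  // modified since last written back to the frame
  };

  inline bool NeedsFlush(const RegInfoSketch& info) {
    return info.live && info.dirty;  // clean or dead copies can be dropped
  }
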
diff --git a/src/compiler/dex/quick/codegen.h b/src/compiler/dex/quick/codegen.h
index 21290ca..272ccad 100644
--- a/src/compiler/dex/quick/codegen.h
+++ b/src/compiler/dex/quick/codegen.h
@@ -105,8 +105,26 @@
 
   public:
 
+    struct SwitchTable {
+      int offset;
+      const uint16_t* table;      // Original dex table.
+      int vaddr;                  // Dalvik offset of switch opcode.
+      LIR* anchor;                // Reference instruction for relative offsets.
+      LIR** targets;              // Array of case targets.
+    };
+
+    struct FillArrayData {
+      int offset;
+      const uint16_t* table;      // Original dex table.
+      int size;
+      int vaddr;                  // Dalvik offset of FILL_ARRAY_DATA opcode.
+    };
+
     virtual ~Codegen(){};
 
+    // Shared by all targets - implemented in ralloc_util.cc
+    void SimpleRegAlloc(CompilationUnit* cu);
+
     // Shared by all targets - implemented in gen_common.cc.
     void HandleSuspendLaunchPads(CompilationUnit *cu);
     void HandleIntrinsicLaunchPads(CompilationUnit *cu);
@@ -355,9 +373,9 @@
                                                int second_bit) = 0;
     virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src) = 0;
     virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src) = 0;
-    virtual void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+    virtual void GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src) = 0;
-    virtual void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+    virtual void GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src) = 0;
     virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
                                 SpecialCaseHandler special_case) = 0;
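
Aside - an illustrative reading (not ART code) of the dex packed-switch
payload behind the new Codegen::SwitchTable record: after the 0x100 ident
come a 16-bit case count, a 32-bit first key, and 'size' 32-bit branch
offsets relative to the switch opcode - the entries MarkPackedCaseLabels
in codegen_util.cc walks below. Field layout follows the dex spec; the
types and names here are a sketch.

  #include <cstdint>

  struct PackedSwitchView {
    uint16_t size;           // number of case targets
    int32_t first_key;       // case value matching targets[0]
    const int32_t* targets;  // branch offsets relative to the switch opcode
  };

  inline PackedSwitchView ViewPackedSwitch(const uint16_t* table) {
    PackedSwitchView v;
    v.size = table[1];
    v.first_key = static_cast<int32_t>(
        table[2] | (static_cast<uint32_t>(table[3]) << 16));
    v.targets = reinterpret_cast<const int32_t*>(&table[4]);  // 4-byte aligned payload
    return v;
  }
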
diff --git a/src/compiler/dex/quick/codegen_util.cc b/src/compiler/dex/quick/codegen_util.cc
index 24955f6..b5152df 100644
--- a/src/compiler/dex/quick/codegen_util.cc
+++ b/src/compiler/dex/quick/codegen_util.cc
@@ -30,15 +30,15 @@
   if (rl_src.is_const) {
     if (rl_src.wide) {
       if (rl_src.fp) {
-         res = cu->cg->InexpensiveConstantDouble(ConstantValueWide(cu, rl_src));
+         res = cu->cg->InexpensiveConstantDouble(cu->mir_graph->ConstantValueWide(rl_src));
       } else {
-         res = cu->cg->InexpensiveConstantLong(ConstantValueWide(cu, rl_src));
+         res = cu->cg->InexpensiveConstantLong(cu->mir_graph->ConstantValueWide(rl_src));
       }
     } else {
       if (rl_src.fp) {
-         res = cu->cg->InexpensiveConstantFloat(ConstantValue(cu, rl_src));
+         res = cu->cg->InexpensiveConstantFloat(cu->mir_graph->ConstantValue(rl_src));
       } else {
-         res = cu->cg->InexpensiveConstantInt(ConstantValue(cu, rl_src));
+         res = cu->cg->InexpensiveConstantInt(cu->mir_graph->ConstantValue(rl_src));
       }
     }
   }
@@ -55,9 +55,8 @@
 bool FastInstance(CompilationUnit* cu,  uint32_t field_idx,
                   int& field_offset, bool& is_volatile, bool is_put)
 {
-  DexCompilationUnit m_unit(cu);
-  return cu->compiler_driver->ComputeInstanceFieldInfo(field_idx, &m_unit,
-           field_offset, is_volatile, is_put);
+  return cu->compiler_driver->ComputeInstanceFieldInfo(
+      field_idx, cu->mir_graph->GetCurrentDexCompilationUnit(), field_offset, is_volatile, is_put);
 }
 
 /* Convert an instruction to a NOP */
@@ -336,7 +335,7 @@
   LOG(INFO) << "Dumping LIR insns for "
             << PrettyMethod(cu->method_idx, *cu->dex_file);
   LIR* lir_insn;
-  int insns_size = cu->insns_size;
+  int insns_size = cu->code_item->insns_size_in_code_units_;
 
   LOG(INFO) << "Regs (excluding ins) : " << cu->num_regs;
   LOG(INFO) << "Ins          : " << cu->num_ins;
@@ -595,7 +594,8 @@
   GrowableListIterator iterator;
   GrowableListIteratorInit(&cu->switch_tables, &iterator);
   while (true) {
-    SwitchTable* tab_rec = reinterpret_cast<SwitchTable*>(GrowableListIteratorNext( &iterator));
+    Codegen::SwitchTable* tab_rec =
+      reinterpret_cast<Codegen::SwitchTable*>(GrowableListIteratorNext( &iterator));
     if (tab_rec == NULL) break;
     AlignBuffer(cu->code_buffer, tab_rec->offset);
     /*
@@ -654,8 +654,8 @@
   GrowableListIterator iterator;
   GrowableListIteratorInit(&cu->fill_array_data, &iterator);
   while (true) {
-    FillArrayData *tab_rec =
-        reinterpret_cast<FillArrayData*>(GrowableListIteratorNext( &iterator));
+    Codegen::FillArrayData *tab_rec =
+        reinterpret_cast<Codegen::FillArrayData*>(GrowableListIteratorNext( &iterator));
     if (tab_rec == NULL) break;
     AlignBuffer(cu->code_buffer, tab_rec->offset);
     for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
@@ -678,7 +678,8 @@
 static bool VerifyCatchEntries(CompilationUnit* cu)
 {
   bool success = true;
-  for (std::set<uint32_t>::const_iterator it = cu->catches.begin(); it != cu->catches.end(); ++it) {
+  for (std::set<uint32_t>::const_iterator it = cu->mir_graph->catches_.begin();
+       it != cu->mir_graph->catches_.end(); ++it) {
     uint32_t dex_pc = *it;
     bool found = false;
     for (size_t i = 0; i < cu->dex2pcMappingTable.size(); i += 2) {
@@ -695,19 +696,20 @@
   // Now, try in the other direction
   for (size_t i = 0; i < cu->dex2pcMappingTable.size(); i += 2) {
     uint32_t dex_pc = cu->dex2pcMappingTable[i+1];
-    if (cu->catches.find(dex_pc) == cu->catches.end()) {
+    if (cu->mir_graph->catches_.find(dex_pc) == cu->mir_graph->catches_.end()) {
       LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << dex_pc;
       success = false;
     }
   }
   if (!success) {
     LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu->method_idx, *cu->dex_file);
-    LOG(INFO) << "Entries @ decode: " << cu->catches.size() << ", Entries in table: "
+    LOG(INFO) << "Entries @ decode: " << cu->mir_graph->catches_.size() << ", Entries in table: "
               << cu->dex2pcMappingTable.size()/2;
   }
   return success;
 }
 
+
 static void CreateMappingTables(CompilationUnit* cu)
 {
   for (LIR* tgt_lir = cu->first_lir_insn; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
@@ -720,7 +722,9 @@
       cu->dex2pcMappingTable.push_back(tgt_lir->dalvik_offset);
     }
   }
-  DCHECK(VerifyCatchEntries(cu));
+  if (kIsDebugBuild) {
+    DCHECK(VerifyCatchEntries(cu));
+  }
   cu->combined_mapping_table.push_back(cu->pc2dexMappingTable.size() +
                                         cu->dex2pcMappingTable.size());
   cu->combined_mapping_table.push_back(cu->pc2dexMappingTable.size());
@@ -850,7 +854,8 @@
   GrowableListIterator iterator;
   GrowableListIteratorInit(&cu->switch_tables, &iterator);
   while (true) {
-    SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(GrowableListIteratorNext(&iterator));
+    Codegen::SwitchTable *tab_rec =
+        reinterpret_cast<Codegen::SwitchTable*>(GrowableListIteratorNext(&iterator));
     if (tab_rec == NULL) break;
     tab_rec->offset = offset;
     if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
@@ -869,8 +874,8 @@
   GrowableListIterator iterator;
   GrowableListIteratorInit(&cu->fill_array_data, &iterator);
   while (true) {
-    FillArrayData *tab_rec =
-        reinterpret_cast<FillArrayData*>(GrowableListIteratorNext(&iterator));
+    Codegen::FillArrayData *tab_rec =
+        reinterpret_cast<Codegen::FillArrayData*>(GrowableListIteratorNext(&iterator));
     if (tab_rec == NULL) break;
     tab_rec->offset = offset;
     offset += tab_rec->size;
@@ -938,6 +943,7 @@
 {
   Codegen* cg = cu->cg.get();
   AssignOffsets(cu);
+  int assembler_retries = 0;
   /*
    * Assemble here.  Note that we generate code with optimistic assumptions
    * and if found now to work, we'll have to redo the sequence and retry.
@@ -948,8 +954,8 @@
     if (res == kSuccess) {
       break;
     } else {
-      cu->assembler_retries++;
-      if (cu->assembler_retries > MAX_ASSEMBLER_RETRIES) {
+      assembler_retries++;
+      if (assembler_retries > MAX_ASSEMBLER_RETRIES) {
         CodegenDump(cu);
         LOG(FATAL) << "Assembler error - too many retries";
       }
@@ -996,7 +1002,7 @@
   return new_label;
 }
 
-static void MarkPackedCaseLabels(CompilationUnit* cu, SwitchTable *tab_rec)
+static void MarkPackedCaseLabels(CompilationUnit* cu, Codegen::SwitchTable *tab_rec)
 {
   const uint16_t* table = tab_rec->table;
   int base_vaddr = tab_rec->vaddr;
@@ -1008,7 +1014,7 @@
   }
 }
 
-static void MarkSparseCaseLabels(CompilationUnit* cu, SwitchTable *tab_rec)
+static void MarkSparseCaseLabels(CompilationUnit* cu, Codegen::SwitchTable *tab_rec)
 {
   const uint16_t* table = tab_rec->table;
   int base_vaddr = tab_rec->vaddr;
@@ -1025,8 +1031,8 @@
   GrowableListIterator iterator;
   GrowableListIteratorInit(&cu->switch_tables, &iterator);
   while (true) {
-    SwitchTable *tab_rec =
-        reinterpret_cast<SwitchTable*>(GrowableListIteratorNext(&iterator));
+    Codegen::SwitchTable *tab_rec =
+        reinterpret_cast<Codegen::SwitchTable*>(GrowableListIteratorNext(&iterator));
     if (tab_rec == NULL) break;
     if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
       MarkPackedCaseLabels(cu, tab_rec);
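
Aside - the bounded assemble/retry loop above as a hedged sketch (the
enum, bound, and callbacks are stand-ins): assemble with optimistic short
encodings, and on a fixup reassign offsets and retry the whole pass up to
a fixed limit before giving up.

  enum AssemblerStatus { kSuccess, kRetryAll };
  constexpr int kMaxAssemblerRetries = 50;  // assumed bound

  template <typename AssembleFn, typename ReassignFn>
  bool AssembleWithRetries(AssembleFn assemble, ReassignFn reassign) {
    for (int retries = 0; ; ++retries) {
      if (assemble() == kSuccess) {
        return true;
      }
      if (retries >= kMaxAssemblerRetries) {
        return false;  // caller should dump state and abort
      }
      reassign();  // widen instructions and recompute label offsets
    }
  }
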
diff --git a/src/compiler/dex/quick/gen_common.cc b/src/compiler/dex/quick/gen_common.cc
index 3c4b111..652a448 100644
--- a/src/compiler/dex/quick/gen_common.cc
+++ b/src/compiler/dex/quick/gen_common.cc
@@ -16,6 +16,7 @@
 
 #include "compiler/dex/quick/codegen_util.h"
 #include "compiler/dex/compiler_ir.h"
+#include "compiler/dex/compiler_internals.h"
 #include "oat/runtime/oat_support_entrypoints.h"
 #include "ralloc_util.h"
 
@@ -132,9 +133,9 @@
     // If it's already live in a register or not easily materialized, just keep going
     RegLocation rl_temp = UpdateLoc(cu, rl_src2);
     if ((rl_temp.location == kLocDalvikFrame) &&
-        InexpensiveConstantInt(ConstantValue(cu, rl_src2))) {
+        InexpensiveConstantInt(cu->mir_graph->ConstantValue(rl_src2))) {
       // OK - convert this to a compare immediate and branch
-      OpCmpImmBranch(cu, cond, rl_src1.low_reg, ConstantValue(cu, rl_src2), taken);
+      OpCmpImmBranch(cu, cond, rl_src1.low_reg, cu->mir_graph->ConstantValue(rl_src2), taken);
       OpUnconditionalBranch(cu, fall_through);
       return;
     }
@@ -353,14 +354,9 @@
   int ssb_index;
   bool is_volatile;
   bool is_referrers_class;
-
-  DexCompilationUnit m_unit(cu);
-
-  bool fast_path =
-      cu->compiler_driver->ComputeStaticFieldInfo(field_idx, &m_unit,
-                                                  field_offset, ssb_index,
-                                                  is_referrers_class, is_volatile,
-                                                  true);
+  bool fast_path = cu->compiler_driver->ComputeStaticFieldInfo(
+      field_idx, cu->mir_graph->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
+      is_referrers_class, is_volatile, true);
   if (fast_path && !SLOW_FIELD_PATH) {
     DCHECK_GE(field_offset, 0);
     int rBase;
@@ -424,7 +420,7 @@
     if (is_volatile) {
       GenMemBarrier(cu, kStoreLoad);
     }
-    if (is_object && !IsConstantNullRef(cu, rl_src)) {
+    if (is_object && !cu->mir_graph->IsConstantNullRef(rl_src)) {
       MarkGCCard(cu, rl_src.low_reg, rBase);
     }
     FreeTemp(cu, rBase);
@@ -444,14 +440,9 @@
   int ssb_index;
   bool is_volatile;
   bool is_referrers_class;
-
-  DexCompilationUnit m_unit(cu);
-
-  bool fast_path =
-      cu->compiler_driver->ComputeStaticFieldInfo(field_idx, &m_unit,
-                                                  field_offset, ssb_index,
-                                                  is_referrers_class, is_volatile,
-                                                  false);
+  bool fast_path = cu->compiler_driver->ComputeStaticFieldInfo(
+      field_idx, cu->mir_graph->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
+      is_referrers_class, is_volatile, false);
   if (fast_path && !SLOW_FIELD_PATH) {
     DCHECK_GE(field_offset, 0);
     int rBase;
@@ -762,7 +753,7 @@
       if (is_volatile) {
         GenMemBarrier(cu, kLoadLoad);
       }
-      if (is_object && !IsConstantNullRef(cu, rl_src)) {
+      if (is_object && !cu->mir_graph->IsConstantNullRef(rl_src)) {
         MarkGCCard(cu, rl_src.low_reg, rl_obj.low_reg);
       }
     }
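
Aside - the write-barrier elision above as a one-line predicate (a sketch;
the real check calls cu->mir_graph->IsConstantNullRef on the RegLocation):

  // Storing a known-null reference cannot create a pointer the GC needs
  // to trace through the card table, so the card mark can be skipped.
  inline bool NeedsCardMark(bool is_object, bool value_is_constant_null) {
    return is_object && !value_is_constant_null;
  }
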
diff --git a/src/compiler/dex/quick/gen_invoke.cc b/src/compiler/dex/quick/gen_invoke.cc
index 1ae29be..c654143 100644
--- a/src/compiler/dex/quick/gen_invoke.cc
+++ b/src/compiler/dex/quick/gen_invoke.cc
@@ -1336,18 +1336,14 @@
   // Explicit register usage
   LockCallTemps(cu);
 
-  DexCompilationUnit m_unit(cu);
-
   uint32_t dex_method_idx = info->index;
   int vtable_idx;
   uintptr_t direct_code;
   uintptr_t direct_method;
   bool skip_this;
-  bool fast_path =
-    cu->compiler_driver->ComputeInvokeInfo(dex_method_idx, &m_unit, info->type,
-                                           vtable_idx, direct_code,
-                                           direct_method)
-    && !SLOW_INVOKE_PATH;
+  bool fast_path = cu->compiler_driver->ComputeInvokeInfo(
+      dex_method_idx, cu->mir_graph->GetCurrentDexCompilationUnit(), info->type, vtable_idx,
+      direct_code, direct_method) && !SLOW_INVOKE_PATH;
   if (info->type == kInterface) {
     if (fast_path) {
       p_null_ck = &null_ck;
@@ -1450,7 +1446,7 @@
                                   bool is_range)
 {
   CallInfo* info = static_cast<CallInfo*>(NewMem(cu, sizeof(CallInfo), true, kAllocMisc));
-  MIR* move_result_mir = FindMoveResult(cu, bb, mir);
+  MIR* move_result_mir = cu->mir_graph->FindMoveResult(bb, mir);
   if (move_result_mir == NULL) {
     info->result.location = kLocInvalid;
   } else {
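
Aside - a hedged sketch of the fast-path gating in the invoke code above.
The bit constant is a stand-in for the kDebugSlowInvokePath flag tested by
the SLOW_INVOKE_PATH macro.

  #include <cstdint>

  inline bool UseFastInvokePath(bool callee_resolved, uint32_t enable_debug,
                                uint32_t slow_invoke_bit) {
    // Fast path only when the driver resolved the callee and the
    // force-slow-path debug bit is clear.
    return callee_resolved && (enable_debug & slow_invoke_bit) == 0;
  }
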
diff --git a/src/compiler/dex/quick/gen_loadstore.cc b/src/compiler/dex/quick/gen_loadstore.cc
index a7baea4..7e116fc 100644
--- a/src/compiler/dex/quick/gen_loadstore.cc
+++ b/src/compiler/dex/quick/gen_loadstore.cc
@@ -16,6 +16,7 @@
 
 #include "compiler/dex/quick/codegen_util.h"
 #include "compiler/dex/compiler_ir.h"
+#include "compiler/dex/compiler_internals.h"
 #include "invoke_type.h"
 #include "ralloc_util.h"
 
@@ -50,9 +51,9 @@
       if (!cu->gen_bitcode) {
         // TUNING: We no longer have this info for QuickGBC - assume the worst
         bool used_as_reference = false;
-        int base_vreg = SRegToVReg(cu, rl_dest.s_reg_low);
-        for (int i = 0; !used_as_reference && (i < cu->num_ssa_regs); i++) {
-          if (SRegToVReg(cu, cu->reg_location[i].s_reg_low) == base_vreg) {
+        int base_vreg = cu->mir_graph->SRegToVReg(rl_dest.s_reg_low);
+        for (int i = 0; !used_as_reference && (i < cu->mir_graph->GetNumSSARegs()); i++) {
+          if (cu->mir_graph->SRegToVReg(cu->reg_location[i].s_reg_low) == base_vreg) {
             used_as_reference |= cu->reg_location[i].ref;
           }
         }
@@ -102,7 +103,7 @@
   if (rl_src.location == kLocPhysReg) {
     OpRegCopy(cu, r_dest, rl_src.low_reg);
   } else if (IsInexpensiveConstant(cu, rl_src)) {
-    LoadConstantNoClobber(cu, r_dest, ConstantValue(cu, rl_src));
+    LoadConstantNoClobber(cu, r_dest, cu->mir_graph->ConstantValue(rl_src));
   } else {
     DCHECK((rl_src.location == kLocDalvikFrame) ||
            (rl_src.location == kLocCompilerTemp));
@@ -134,7 +135,7 @@
   if (rl_src.location == kLocPhysReg) {
     OpRegCopyWide(cu, reg_lo, reg_hi, rl_src.low_reg, rl_src.high_reg);
   } else if (IsInexpensiveConstant(cu, rl_src)) {
-    LoadConstantWide(cu, reg_lo, reg_hi, ConstantValueWide(cu, rl_src));
+    LoadConstantWide(cu, reg_lo, reg_hi, cu->mir_graph->ConstantValueWide(rl_src));
   } else {
     DCHECK((rl_src.location == kLocDalvikFrame) ||
            (rl_src.location == kLocCompilerTemp));
@@ -171,16 +172,16 @@
 
 void Codegen::StoreValue(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
 {
-#ifndef NDEBUG
   /*
    * Sanity checking - should never try to store to the same
    * ssa name during the compilation of a single instruction
    * without an intervening ClobberSReg().
    */
-  DCHECK((cu->live_sreg == INVALID_SREG) ||
-         (rl_dest.s_reg_low != cu->live_sreg));
-  cu->live_sreg = rl_dest.s_reg_low;
-#endif
+  if (kIsDebugBuild) {
+    DCHECK((cu->live_sreg == INVALID_SREG) ||
+           (rl_dest.s_reg_low != cu->live_sreg));
+    cu->live_sreg = rl_dest.s_reg_low;
+  }
   LIR* def_start;
   LIR* def_end;
   DCHECK(!rl_dest.wide);
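
The #ifndef NDEBUG blocks here and below become if (kIsDebugBuild) guards. The guarded code is now parsed and type-checked in every build flavor, so it cannot silently bit-rot, while the compiler folds the branch away in release builds where the constant is false. A standalone sketch of the idiom (ART defines kIsDebugBuild centrally; deriving it from NDEBUG below is just for illustration):

    #include <cassert>

    #ifdef NDEBUG
    static constexpr bool kIsDebugBuild = false;
    #else
    static constexpr bool kIsDebugBuild = true;
    #endif

    static constexpr int INVALID_SREG = -1;

    struct LiveTracker { int live_sreg = INVALID_SREG; };

    // Debug-only sanity tracking: compiled everywhere, emitted nowhere in
    // release builds.
    void CheckAndRecordStore(LiveTracker* t, int s_reg_low) {
      if (kIsDebugBuild) {
        assert(t->live_sreg == INVALID_SREG || t->live_sreg != s_reg_low);
        t->live_sreg = s_reg_low;
      }
    }
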
@@ -240,16 +241,16 @@
 
 void Codegen::StoreValueWide(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
 {
-#ifndef NDEBUG
   /*
    * Sanity checking - should never try to store to the same
    * ssa name during the compilation of a single instruction
    * without an intervening ClobberSReg().
    */
-  DCHECK((cu->live_sreg == INVALID_SREG) ||
-      (rl_dest.s_reg_low != cu->live_sreg));
-  cu->live_sreg = rl_dest.s_reg_low;
-#endif
+  if (kIsDebugBuild) {
+    DCHECK((cu->live_sreg == INVALID_SREG) ||
+           (rl_dest.s_reg_low != cu->live_sreg));
+    cu->live_sreg = rl_dest.s_reg_low;
+  }
   LIR* def_start;
   LIR* def_end;
   DCHECK_EQ(IsFpReg(rl_src.low_reg), IsFpReg(rl_src.high_reg));
@@ -291,8 +292,8 @@
       (oat_live_out(cu, rl_dest.s_reg_low) ||
       oat_live_out(cu, GetSRegHi(rl_dest.s_reg_low)))) {
     def_start = cu->last_lir_insn;
-    DCHECK_EQ((SRegToVReg(cu, rl_dest.s_reg_low)+1),
-              SRegToVReg(cu, GetSRegHi(rl_dest.s_reg_low)));
+    DCHECK_EQ((cu->mir_graph->SRegToVReg(rl_dest.s_reg_low)+1),
+              cu->mir_graph->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
     StoreBaseDispWide(cu, TargetReg(kSp), SRegOffset(cu, rl_dest.s_reg_low),
                       rl_dest.low_reg, rl_dest.high_reg);
     MarkClean(cu, rl_dest);
diff --git a/src/compiler/dex/quick/mips/call_mips.cc b/src/compiler/dex/quick/mips/call_mips.cc
index 4fbb16b..d7f9dce 100644
--- a/src/compiler/dex/quick/mips/call_mips.cc
+++ b/src/compiler/dex/quick/mips/call_mips.cc
@@ -61,7 +61,8 @@
  * done:
  *
  */
-void MipsCodegen::GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+void MipsCodegen::GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
+                                  RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   if (cu->verbose) {
@@ -140,7 +141,8 @@
  *   jr    r_RA
  * done:
  */
-void MipsCodegen::GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+void MipsCodegen::GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
+                                  RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   if (cu->verbose) {
@@ -341,7 +343,7 @@
    * We can safely skip the stack overflow check if we're
    * a leaf *and* our frame size < fudge factor.
    */
-  bool skip_overflow_check = ((cu->attrs & METHOD_IS_LEAF) &&
+  bool skip_overflow_check = ((cu->attributes & METHOD_IS_LEAF) &&
       (static_cast<size_t>(cu->frame_size) < Thread::kStackOverflowReservedBytes));
   NewLIR0(cu, kPseudoMethodEntry);
   int check_reg = AllocTemp(cu);
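
The skip_overflow_check predicate (note the attrs field is renamed attributes in this CL) appears in each target's entry sequence: a leaf method whose frame fits inside the thread's reserved guard region cannot outrun the reservation, so the explicit stack-limit compare in the prologue is safely omitted. The decision boiled down to code (constant values here are illustrative, not ART's):

    #include <cstddef>
    #include <cstdint>

    static constexpr size_t kStackOverflowReservedBytes = 16 * 1024;  // assumed size
    static constexpr uint32_t METHOD_IS_LEAF = 1u << 0;               // assumed bit

    // Safe to skip the prologue SP-vs-limit check only for leaves with
    // frames smaller than the reserved guard region.
    bool CanSkipOverflowCheck(uint32_t attributes, size_t frame_size) {
      return (attributes & METHOD_IS_LEAF) != 0 &&
             frame_size < kStackOverflowReservedBytes;
    }
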
diff --git a/src/compiler/dex/quick/mips/codegen_mips.h b/src/compiler/dex/quick/mips/codegen_mips.h
index f889ece..c9d0e21 100644
--- a/src/compiler/dex/quick/mips/codegen_mips.h
+++ b/src/compiler/dex/quick/mips/codegen_mips.h
@@ -151,9 +151,9 @@
                                                int second_bit);
     virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
     virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-    virtual void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+    virtual void GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src);
-    virtual void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+    virtual void GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src);
     virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
                                 SpecialCaseHandler special_case);
diff --git a/src/compiler/dex/quick/mips/int_mips.cc b/src/compiler/dex/quick/mips/int_mips.cc
index d648c44..b1fa623 100644
--- a/src/compiler/dex/quick/mips/int_mips.cc
+++ b/src/compiler/dex/quick/mips/int_mips.cc
@@ -637,7 +637,7 @@
   StoreBaseIndexed(cu, r_ptr, r_index, r_value, scale, kWord);
   FreeTemp(cu, r_ptr);
   FreeTemp(cu, r_index);
-  if (!IsConstantNullRef(cu, rl_src)) {
+  if (!cu->mir_graph->IsConstantNullRef(rl_src)) {
     MarkGCCard(cu, r_value, r_array);
   }
 }
diff --git a/src/compiler/dex/quick/mips/target_mips.cc b/src/compiler/dex/quick/mips/target_mips.cc
index ab6517c..85e8a9b 100644
--- a/src/compiler/dex/quick/mips/target_mips.cc
+++ b/src/compiler/dex/quick/mips/target_mips.cc
@@ -302,7 +302,7 @@
 }
 
 /*
- * TUNING: is leaf?  Can't just use "has_invoke" to determine as some
+ * TUNING: is true leaf?  Can't just use METHOD_IS_LEAF to determine this, as some
  * instructions might call out to C/assembly helper functions.  Until
  * machinery is in place, always spill lr.
  */
@@ -339,9 +339,9 @@
 
     info1->dirty = false;
     info2->dirty = false;
-    if (SRegToVReg(cu, info2->s_reg) < SRegToVReg(cu, info1->s_reg))
+    if (cu->mir_graph->SRegToVReg(info2->s_reg) < cu->mir_graph->SRegToVReg(info1->s_reg))
       info1 = info2;
-    int v_reg = SRegToVReg(cu, info1->s_reg);
+    int v_reg = cu->mir_graph->SRegToVReg(info1->s_reg);
     StoreBaseDispWide(cu, rMIPS_SP, VRegOffset(cu, v_reg), info1->reg, info1->partner);
   }
 }
@@ -351,7 +351,7 @@
   RegisterInfo* info = GetRegInfo(cu, reg);
   if (info->live && info->dirty) {
     info->dirty = false;
-    int v_reg = SRegToVReg(cu, info->s_reg);
+    int v_reg = cu->mir_graph->SRegToVReg(info->s_reg);
     StoreBaseDisp(cu, rMIPS_SP, VRegOffset(cu, v_reg), reg, kWord);
   }
 }
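
SRegToVReg, used throughout these flush helpers, also now lives on MIRGraph: every SSA name must map back to the Dalvik virtual register it versions so spills land in the right stack slot. One plausible representation is a per-name table filled in during SSA renaming; a sketch under that assumption:

    #include <vector>

    class MirGraphSketch {
     public:
      // Called when SSA renaming creates a new name for v_reg; returns
      // the new dense SSA register number.
      int AddSSAReg(int v_reg) {
        ssa_base_vregs_.push_back(v_reg);
        return static_cast<int>(ssa_base_vregs_.size()) - 1;
      }
      int SRegToVReg(int s_reg) const { return ssa_base_vregs_[s_reg]; }
      int GetNumSSARegs() const { return static_cast<int>(ssa_base_vregs_.size()); }
     private:
      std::vector<int> ssa_base_vregs_;  // SSA name -> base Dalvik vreg
    };
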
diff --git a/src/compiler/dex/quick/mir_to_lir.cc b/src/compiler/dex/quick/mir_to_lir.cc
index 3cce26e..0b85e92 100644
--- a/src/compiler/dex/quick/mir_to_lir.cc
+++ b/src/compiler/dex/quick/mir_to_lir.cc
@@ -17,6 +17,7 @@
 #include "object_utils.h"
 
 #include "compiler/dex/compiler_internals.h"
+#include "compiler/dex/dataflow_iterator.h"
 #include "local_optimizations.h"
 #include "codegen_util.h"
 #include "ralloc_util.h"
@@ -91,21 +92,21 @@
                                                           cu->class_def_idx)) {
         cg->GenMemBarrier(cu, kStoreStore);
       }
-      if (!(cu->attrs & METHOD_IS_LEAF)) {
+      if (!(cu->attributes & METHOD_IS_LEAF)) {
         cg->GenSuspendTest(cu, opt_flags);
       }
       break;
 
     case Instruction::RETURN:
     case Instruction::RETURN_OBJECT:
-      if (!(cu->attrs & METHOD_IS_LEAF)) {
+      if (!(cu->attributes & METHOD_IS_LEAF)) {
         cg->GenSuspendTest(cu, opt_flags);
       }
       cg->StoreValue(cu, GetReturn(cu, cu->shorty[0] == 'F'), rl_src[0]);
       break;
 
     case Instruction::RETURN_WIDE:
-      if (!(cu->attrs & METHOD_IS_LEAF)) {
+      if (!(cu->attributes & METHOD_IS_LEAF)) {
         cg->GenSuspendTest(cu, opt_flags);
       }
       cg->StoreValueWide(cu, GetReturnWide(cu,
@@ -253,11 +254,11 @@
       break;
 
     case Instruction::PACKED_SWITCH:
-      cg->GenPackedSwitch(cu, vB, rl_src[0]);
+      cg->GenPackedSwitch(cu, mir, vB, rl_src[0]);
       break;
 
     case Instruction::SPARSE_SWITCH:
-      cg->GenSparseSwitch(cu, vB, rl_src[0]);
+      cg->GenSparseSwitch(cu, mir, vB, rl_src[0]);
       break;
 
     case Instruction::CMPL_FLOAT:
@@ -283,8 +284,8 @@
       backward_branch = (bb->taken->start_offset <= mir->offset);
       // Result known at compile time?
       if (rl_src[0].is_const && rl_src[1].is_const) {
-        bool is_taken = EvaluateBranch(opcode, cu->constant_values[rl_src[0].orig_sreg],
-                                       cu->constant_values[rl_src[1].orig_sreg]);
+        bool is_taken = EvaluateBranch(opcode, cu->mir_graph->ConstantValue(rl_src[0].orig_sreg),
+                                       cu->mir_graph->ConstantValue(rl_src[1].orig_sreg));
         if (is_taken && backward_branch) {
           cg->GenSuspendTest(cu, opt_flags);
         }
@@ -312,7 +313,7 @@
       backward_branch = (bb->taken->start_offset <= mir->offset);
       // Result known at compile time?
       if (rl_src[0].is_const) {
-        bool is_taken = EvaluateBranch(opcode, cu->constant_values[rl_src[0].orig_sreg], 0);
+        bool is_taken = EvaluateBranch(opcode, cu->mir_graph->ConstantValue(rl_src[0].orig_sreg), 0);
         if (is_taken && backward_branch) {
           cg->GenSuspendTest(cu, opt_flags);
         }
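
Both compare-and-branch hunks fold the branch when its operands are known constants: EvaluateBranch decides the direction at compile time, a backward branch still gets its suspend test, and codegen emits an unconditional goto instead of a compare. The evaluator itself is a plain switch over the IF_* conditions; a sketch with simplified opcode names:

    #include <cstdint>

    enum class Cond { kEq, kNe, kLt, kGe, kGt, kLe };

    // Compile-time evaluation of an IF_* branch; the IF_*Z forms pass 0
    // for src2, as the second hunk above does.
    bool EvaluateBranch(Cond c, int32_t src1, int32_t src2) {
      switch (c) {
        case Cond::kEq: return src1 == src2;
        case Cond::kNe: return src1 != src2;
        case Cond::kLt: return src1 <  src2;
        case Cond::kGe: return src1 >= src2;
        case Cond::kGt: return src1 >  src2;
        case Cond::kLe: return src1 <= src2;
      }
      return false;  // unreachable
    }
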
@@ -540,13 +541,13 @@
     case Instruction::XOR_INT:
     case Instruction::XOR_INT_2ADDR:
       if (rl_src[0].is_const &&
-          cu->cg->InexpensiveConstantInt(ConstantValue(cu, rl_src[0]))) {
+          cu->cg->InexpensiveConstantInt(cu->mir_graph->ConstantValue(rl_src[0]))) {
         cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[1],
-                             cu->constant_values[rl_src[0].orig_sreg]);
+                             cu->mir_graph->ConstantValue(rl_src[0].orig_sreg));
       } else if (rl_src[1].is_const &&
-          cu->cg->InexpensiveConstantInt(ConstantValue(cu, rl_src[1]))) {
+          cu->cg->InexpensiveConstantInt(cu->mir_graph->ConstantValue(rl_src[1]))) {
         cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[0],
-                             cu->constant_values[rl_src[1].orig_sreg]);
+                             cu->mir_graph->ConstantValue(rl_src[1].orig_sreg));
       } else {
         cg->GenArithOpInt(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
       }
@@ -565,8 +566,8 @@
     case Instruction::USHR_INT:
     case Instruction::USHR_INT_2ADDR:
       if (rl_src[1].is_const &&
-          cu->cg->InexpensiveConstantInt(ConstantValue(cu, rl_src[1]))) {
-        cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[0], ConstantValue(cu, rl_src[1]));
+          cu->cg->InexpensiveConstantInt(cu->mir_graph->ConstantValue(rl_src[1]))) {
+        cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[0], cu->mir_graph->ConstantValue(rl_src[1]));
       } else {
         cg->GenArithOpInt(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
       }
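
InexpensiveConstantInt is the per-target hook behind these literal-form selections: if a constant operand can be materialized cheaply, the reg/immediate form of the op is emitted; otherwise the constant is loaded into a temp and the reg/reg form is used. On ARM, for example, "cheap" roughly means encodable as a modified immediate (an 8-bit value rotated right by an even amount); a sketch of that test, illustrative of the hook rather than ART's exact predicate:

    #include <cstdint>

    // True if value equals some 8-bit constant rotated right by an even
    // amount, i.e. rotating it left by some even amount fits in 8 bits.
    bool IsArmModifiedImmediate(uint32_t value) {
      if (value <= 0xFFu) return true;
      for (int rot = 2; rot < 32; rot += 2) {
        uint32_t rotated = (value << rot) | (value >> (32 - rot));
        if (rotated <= 0xFFu) return true;
      }
      return false;
    }
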
@@ -707,7 +708,6 @@
   LIR* label_list = cu->block_label_list;
   int block_id = bb->id;
 
-  cu->cur_block = bb;
   label_list[block_id].operands[0] = bb->start_offset;
 
   // Insert the block label.
@@ -745,10 +745,10 @@
       ResetDefTracking(cu);
     }
 
-#ifndef NDEBUG
     // Reset temp tracking sanity check.
-    cu->live_sreg = INVALID_SREG;
-#endif
+    if (kIsDebugBuild) {
+      cu->live_sreg = INVALID_SREG;
+    }
 
     cu->current_dalvik_offset = mir->offset;
     int opcode = mir->dalvikInsn.opcode;
@@ -800,12 +800,12 @@
 {
   Codegen* cg = cu->cg.get();
   // Find the first DalvikByteCode block.
-  int num_reachable_blocks = cu->num_reachable_blocks;
-  const GrowableList *block_list = &cu->block_list;
+  int num_reachable_blocks = cu->mir_graph->GetNumReachableBlocks();
   BasicBlock* bb = NULL;
   for (int idx = 0; idx < num_reachable_blocks; idx++) {
-    int dfs_index = cu->dfs_order.elem_list[idx];
-    bb = reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, dfs_index));
+    // TODO: avoid direct access to growable lists.
+    int dfs_index = cu->mir_graph->GetDfsOrder()->elem_list[idx];
+    bb = cu->mir_graph->GetBasicBlock(dfs_index);
     if (bb->block_type == kDalvikByteCode) {
       break;
     }
@@ -832,10 +832,12 @@
   Codegen* cg = cu->cg.get();
   // Hold the labels of each block.
   cu->block_label_list =
-      static_cast<LIR*>(NewMem(cu, sizeof(LIR) * cu->num_blocks, true, kAllocLIR));
+      static_cast<LIR*>(NewMem(cu, sizeof(LIR) * cu->mir_graph->GetNumBlocks(), true, kAllocLIR));
 
-  DataFlowAnalysisDispatcher(cu, MethodBlockCodeGen,
-                                kPreOrderDFSTraversal, false /* Iterative */);
+  DataflowIterator iter(cu->mir_graph.get(), kPreOrderDFSTraversal, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    MethodBlockCodeGen(cu, bb);
+  }
 
   cg->HandleSuspendLaunchPads(cu);
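
The loop just above is the shape of the new iteration model: the old DataFlowAnalysisDispatcher took a C-style function pointer and looped internally, while DataflowIterator hands blocks back one at a time in the requested traversal order, leaving the loop body as ordinary code. A skeletal analogue over a single precomputed order (the real class selects among the DFS, post-order, and other lists owned by MIRGraph):

    #include <cstddef>
    #include <vector>

    struct BB { int id; };

    class DataflowIteratorSketch {
     public:
      DataflowIteratorSketch(const std::vector<BB*>& order, bool is_iterative)
          : order_(order), is_iterative_(is_iterative) {}

      // Returns the next block, or nullptr at the end; in iterative mode a
      // pass that reported any change triggers another full pass until a
      // fixed point is reached.
      BB* Next(bool had_change = false) {
        changed_ |= had_change;
        if (idx_ == order_.size()) {
          if (!is_iterative_ || !changed_) return nullptr;
          idx_ = 0;
          changed_ = false;
        }
        return order_[idx_++];
      }

     private:
      const std::vector<BB*>& order_;
      const bool is_iterative_;
      size_t idx_ = 0;
      bool changed_ = false;
    };
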
 
diff --git a/src/compiler/dex/quick/ralloc_util.cc b/src/compiler/dex/quick/ralloc_util.cc
index 5b7de2c..18c7714 100644
--- a/src/compiler/dex/quick/ralloc_util.cc
+++ b/src/compiler/dex/quick/ralloc_util.cc
@@ -17,8 +17,9 @@
 /* This file contains register allocation support. */
 
 #include "compiler/dex/compiler_ir.h"
+#include "compiler/dex/compiler_internals.h"
 #include "compiler/dex/compiler_utility.h"
-#include "compiler/dex/dataflow.h"
+//#include "compiler/dex/dataflow.h"
 #include "compiler/dex/quick/codegen_util.h"
 #include "ralloc_util.h"
 
@@ -137,12 +138,12 @@
  */
 void ClobberSReg(CompilationUnit* cu, int s_reg)
 {
-#ifndef NDEBUG
   /* Reset live temp tracking sanity checker */
-  if (s_reg == cu->live_sreg) {
-    cu->live_sreg = INVALID_SREG;
+  if (kIsDebugBuild) {
+    if (s_reg == cu->live_sreg) {
+      cu->live_sreg = INVALID_SREG;
+    }
   }
-#endif
   ClobberSRegBody(cu->reg_pool->core_regs, cu->reg_pool->num_core_regs, s_reg);
   ClobberSRegBody(cu->reg_pool->FPRegs, cu->reg_pool->num_fp_regs, s_reg);
 }
@@ -158,9 +159,9 @@
  */
 int SRegToPMap(CompilationUnit* cu, int s_reg)
 {
-  DCHECK_LT(s_reg, cu->num_ssa_regs);
+  DCHECK_LT(s_reg, cu->mir_graph->GetNumSSARegs());
   DCHECK_GE(s_reg, 0);
-  int v_reg = SRegToVReg(cu, s_reg);
+  int v_reg = cu->mir_graph->SRegToVReg(s_reg);
   if (v_reg >= 0) {
     DCHECK_LT(v_reg, cu->num_dalvik_registers);
     return v_reg;
@@ -175,7 +176,7 @@
 {
   Codegen* cg = cu->cg.get();
   int p_map_idx = SRegToPMap(cu, s_reg);
-  int v_reg = SRegToVReg(cu, s_reg);
+  int v_reg = cu->mir_graph->SRegToVReg(s_reg);
   cg->GetRegInfo(cu, reg)->in_use = true;
   cu->core_spill_mask |= (1 << reg);
   // Include reg for later sort
@@ -205,7 +206,7 @@
 {
   Codegen* cg = cu->cg.get();
   int p_map_idx = SRegToPMap(cu, s_reg);
-  int v_reg = SRegToVReg(cu, s_reg);
+  int v_reg = cu->mir_graph->SRegToVReg(s_reg);
   cg->GetRegInfo(cu, reg)->in_use = true;
   cg->MarkPreservedSingle(cu, v_reg, reg);
   cu->promotion_map[p_map_idx].fp_location = kLocPhysReg;
@@ -244,7 +245,7 @@
 {
   Codegen* cg = cu->cg.get();
   int res = -1; // Assume failure
-  int v_reg = SRegToVReg(cu, s_reg);
+  int v_reg = cu->mir_graph->SRegToVReg(s_reg);
   int p_map_idx = SRegToPMap(cu, s_reg);
   if (cu->promotion_map[p_map_idx+1].fp_location == kLocPhysReg) {
     // Upper reg is already allocated.  Can we fit?
@@ -1088,13 +1089,13 @@
       (bb->block_type == kDalvikByteCode))) {
     return;
   }
-  for (int i = 0; i < cu->num_ssa_regs; i++) {
+  for (int i = 0; i < cu->mir_graph->GetNumSSARegs(); i++) {
     RegLocation loc = cu->reg_location[i];
     RefCounts* counts = loc.fp ? fp_counts : core_counts;
     int p_map_idx = SRegToPMap(cu, loc.s_reg_low);
     // Don't count easily regenerated immediates
     if (loc.fp || !IsInexpensiveConstant(cu, loc)) {
-      counts[p_map_idx].count += cu->raw_use_counts.elem_list[i];
+      counts[p_map_idx].count += cu->mir_graph->GetUseCount(i);
     }
     if (loc.wide && loc.fp && !loc.high_word) {
       counts[p_map_idx].double_start = true;
@@ -1162,8 +1163,7 @@
     FpRegs[dalvik_regs + i].s_reg = ct->s_reg;
   }
 
-  GrowableListIterator iterator;
-  GrowableListIteratorInit(&cu->block_list, &iterator);
+  GrowableListIterator iterator = cu->mir_graph->GetBasicBlockIterator();
   while (true) {
     BasicBlock* bb;
     bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iterator));
@@ -1217,19 +1217,10 @@
         }
       }
     }
-  } else if (cu->qd_mode) {
-    AllocPreservedCoreReg(cu, cu->method_sreg);
-    for (int i = 0; i < num_regs; i++) {
-      int reg = AllocPreservedCoreReg(cu, i);
-      if (reg < 0) {
-         break;  // No more left
-      }
-    }
   }
 
-
   // Now, update SSA names to new home locations
-  for (int i = 0; i < cu->num_ssa_regs; i++) {
+  for (int i = 0; i < cu->mir_graph->GetNumSSARegs(); i++) {
     RegLocation *curr = &cu->reg_location[i];
     int p_map_idx = SRegToPMap(cu, curr->s_reg_low);
     if (!curr->wide) {
@@ -1292,7 +1283,7 @@
 /* Returns sp-relative offset in bytes for a SReg */
 int SRegOffset(CompilationUnit* cu, int s_reg)
 {
-  return VRegOffset(cu, SRegToVReg(cu, s_reg));
+  return VRegOffset(cu, cu->mir_graph->SRegToVReg(s_reg));
 }
 
 RegLocation GetBadLoc()
@@ -1331,4 +1322,20 @@
   return res;
 }
 
+void Codegen::SimpleRegAlloc(CompilationUnit* cu)
+{
+  DoPromotion(cu);
+
+  /* Get easily accessible post-promotion copy of RegLocation for Method* */
+  cu->method_loc = cu->reg_location[cu->method_sreg];
+
+  if (cu->verbose && !(cu->disable_opt & (1 << kPromoteRegs))) {
+    LOG(INFO) << "After Promotion";
+    cu->mir_graph->DumpRegLocTable(cu->reg_location, cu->mir_graph->GetNumSSARegs());
+  }
+
+  /* Set the frame size */
+  cu->frame_size = cu->mir_graph->ComputeFrameSize();
+}
+
 }  // namespace art
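
SimpleRegAlloc now lands in the Quick backend as a Codegen method, with the old ralloc.cc's type-inference pass presumably moving to the new vreg_analysis.cc noted in the makefile. DoPromotion's policy, visible in the GetUseCount hunk above, is use-count driven: accumulate weighted use counts per promotable Dalvik register, skip cheaply rematerialized constants, and hand out callee-save registers hottest-first until they run out. A sketch of that ordering step, as a policy illustration rather than ART's exact heuristics:

    #include <algorithm>
    #include <vector>

    struct RefCount { int count; int v_reg; };

    // Promotion candidates sorted hottest-first by accumulated use count.
    std::vector<int> PickPromotionOrder(std::vector<RefCount> counts) {
      std::sort(counts.begin(), counts.end(),
                [](const RefCount& a, const RefCount& b) { return a.count > b.count; });
      std::vector<int> order;
      for (const RefCount& rc : counts) {
        if (rc.count > 0) order.push_back(rc.v_reg);
      }
      return order;
    }
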
diff --git a/src/compiler/dex/quick/ralloc_util.h b/src/compiler/dex/quick/ralloc_util.h
index 3287047..1f99600 100644
--- a/src/compiler/dex/quick/ralloc_util.h
+++ b/src/compiler/dex/quick/ralloc_util.h
@@ -23,7 +23,6 @@
 
 #include "compiler/dex/compiler_ir.h"
 #include "compiler/dex/compiler_utility.h"
-#include "compiler/dex/dataflow.h"
 
 namespace art {
 
@@ -155,7 +154,6 @@
 int SRegOffset(CompilationUnit* cu, int reg);
 void RecordCorePromotion(CompilationUnit* cu, int reg, int s_reg);
 void RecordFpPromotion(CompilationUnit* cu, int reg, int s_reg);
-int ComputeFrameSize(CompilationUnit* cu);
 int SRegToPMap(CompilationUnit* cu, int s_reg);
 void DumpRegPool(RegisterInfo* p, int num_regs);
 
diff --git a/src/compiler/dex/quick/x86/assemble_x86.cc b/src/compiler/dex/quick/x86/assemble_x86.cc
index 9ee0cb5..2369e49 100644
--- a/src/compiler/dex/quick/x86/assemble_x86.cc
+++ b/src/compiler/dex/quick/x86/assemble_x86.cc
@@ -1099,11 +1099,11 @@
                       int base_or_table, uint8_t index, int scale, int table_or_disp) {
   int disp;
   if (entry->opcode == kX86PcRelLoadRA) {
-    SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(table_or_disp);
+    Codegen::SwitchTable *tab_rec = reinterpret_cast<Codegen::SwitchTable*>(table_or_disp);
     disp = tab_rec->offset;
   } else {
     DCHECK(entry->opcode == kX86PcRelAdr);
-    FillArrayData *tab_rec = reinterpret_cast<FillArrayData*>(base_or_table);
+    Codegen::FillArrayData *tab_rec = reinterpret_cast<Codegen::FillArrayData*>(base_or_table);
     disp = tab_rec->offset;
   }
   if (entry->skeleton.prefix1 != 0) {
diff --git a/src/compiler/dex/quick/x86/call_x86.cc b/src/compiler/dex/quick/x86/call_x86.cc
index 7b1a7fb..f30e966 100644
--- a/src/compiler/dex/quick/x86/call_x86.cc
+++ b/src/compiler/dex/quick/x86/call_x86.cc
@@ -33,7 +33,8 @@
  * The sparse table in the literal pool is an array of <key,displacement>
  * pairs.
  */
-void X86Codegen::GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+void X86Codegen::GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
+                                 RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   if (cu->verbose) {
@@ -45,7 +46,8 @@
   rl_src = LoadValue(cu, rl_src, kCoreReg);
   for (int i = 0; i < entries; i++) {
     int key = keys[i];
-    BasicBlock* case_block = FindBlock(cu, cu->current_dalvik_offset + targets[i]);
+    BasicBlock* case_block =
+        cu->mir_graph->FindBlock(cu->current_dalvik_offset + targets[i]);
     LIR* label_list = cu->block_label_list;
     OpCmpImmBranch(cu, kCondEq, rl_src.low_reg, key,
                    &label_list[case_block->id]);
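
The loop above lowers a sparse switch to a compare-and-branch chain: one OpCmpImmBranch per <key, displacement> pair, with execution falling through to the default successor when nothing matches. Its behavior, modeled in plain C++:

    // 'keys' and 'targets' come from the Dalvik sparse-switch payload.
    int SparseSwitchTarget(const int* keys, const int* targets, int entries,
                           int value, int fall_through) {
      for (int i = 0; i < entries; i++) {
        if (value == keys[i]) {    // one OpCmpImmBranch(kCondEq, ...) per entry
          return targets[i];
        }
      }
      return fall_through;         // no key matched: default case
    }
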
@@ -68,7 +70,8 @@
  * jmp  r_start_of_method
  * done:
  */
-void X86Codegen::GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+void X86Codegen::GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
+                                 RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   if (cu->verbose) {
@@ -237,7 +240,7 @@
    * We can safely skip the stack overflow check if we're
    * a leaf *and* our frame size < fudge factor.
    */
-  bool skip_overflow_check = ((cu->attrs & METHOD_IS_LEAF) &&
+  bool skip_overflow_check = ((cu->attributes & METHOD_IS_LEAF) &&
                 (static_cast<size_t>(cu->frame_size) <
                 Thread::kStackOverflowReservedBytes));
   NewLIR0(cu, kPseudoMethodEntry);
diff --git a/src/compiler/dex/quick/x86/codegen_x86.h b/src/compiler/dex/quick/x86/codegen_x86.h
index c1e8fb3..35c976a 100644
--- a/src/compiler/dex/quick/x86/codegen_x86.h
+++ b/src/compiler/dex/quick/x86/codegen_x86.h
@@ -151,9 +151,9 @@
                                                int second_bit);
     virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
     virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-    virtual void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+    virtual void GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src);
-    virtual void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+    virtual void GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src);
     virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
                                 SpecialCaseHandler special_case);
diff --git a/src/compiler/dex/quick/x86/int_x86.cc b/src/compiler/dex/quick/x86/int_x86.cc
index 81b1d72..984eaef 100644
--- a/src/compiler/dex/quick/x86/int_x86.cc
+++ b/src/compiler/dex/quick/x86/int_x86.cc
@@ -585,7 +585,7 @@
   StoreBaseIndexedDisp(cu, r_array, r_index, scale,
                        data_offset, r_value, INVALID_REG, kWord, INVALID_SREG);
   FreeTemp(cu, r_index);
-  if (!IsConstantNullRef(cu, rl_src)) {
+  if (!cu->mir_graph->IsConstantNullRef(rl_src)) {
     MarkGCCard(cu, r_value, r_array);
   }
 }
diff --git a/src/compiler/dex/quick/x86/target_x86.cc b/src/compiler/dex/quick/x86/target_x86.cc
index cb41fde..ed07220 100644
--- a/src/compiler/dex/quick/x86/target_x86.cc
+++ b/src/compiler/dex/quick/x86/target_x86.cc
@@ -341,9 +341,9 @@
 
     info1->dirty = false;
     info2->dirty = false;
-    if (SRegToVReg(cu, info2->s_reg) < SRegToVReg(cu, info1->s_reg))
+    if (cu->mir_graph->SRegToVReg(info2->s_reg) < cu->mir_graph->SRegToVReg(info1->s_reg))
       info1 = info2;
-    int v_reg = SRegToVReg(cu, info1->s_reg);
+    int v_reg = cu->mir_graph->SRegToVReg(info1->s_reg);
     StoreBaseDispWide(cu, rX86_SP, VRegOffset(cu, v_reg), info1->reg, info1->partner);
   }
 }
@@ -353,7 +353,7 @@
   RegisterInfo* info = GetRegInfo(cu, reg);
   if (info->live && info->dirty) {
     info->dirty = false;
-    int v_reg = SRegToVReg(cu, info->s_reg);
+    int v_reg = cu->mir_graph->SRegToVReg(info->s_reg);
     StoreBaseDisp(cu, rX86_SP, VRegOffset(cu, v_reg), reg, kWord);
   }
 }
diff --git a/src/compiler/dex/ralloc.cc b/src/compiler/dex/ralloc.cc
deleted file mode 100644
index 9163cd9..0000000
--- a/src/compiler/dex/ralloc.cc
+++ /dev/null
@@ -1,533 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "compiler_internals.h"
-#include "dataflow.h"
-#include "quick/ralloc_util.h"
-
-namespace art {
-
-static bool SetFp(CompilationUnit* cu, int index, bool is_fp) {
-  bool change = false;
-  if (is_fp && !cu->reg_location[index].fp) {
-    cu->reg_location[index].fp = true;
-    cu->reg_location[index].defined = true;
-    change = true;
-  }
-  return change;
-}
-
-static bool SetCore(CompilationUnit* cu, int index, bool is_core) {
-  bool change = false;
-  if (is_core && !cu->reg_location[index].defined) {
-    cu->reg_location[index].core = true;
-    cu->reg_location[index].defined = true;
-    change = true;
-  }
-  return change;
-}
-
-static bool SetRef(CompilationUnit* cu, int index, bool is_ref) {
-  bool change = false;
-  if (is_ref && !cu->reg_location[index].defined) {
-    cu->reg_location[index].ref = true;
-    cu->reg_location[index].defined = true;
-    change = true;
-  }
-  return change;
-}
-
-static bool SetWide(CompilationUnit* cu, int index, bool is_wide) {
-  bool change = false;
-  if (is_wide && !cu->reg_location[index].wide) {
-    cu->reg_location[index].wide = true;
-    change = true;
-  }
-  return change;
-}
-
-static bool SetHigh(CompilationUnit* cu, int index, bool is_high) {
-  bool change = false;
-  if (is_high && !cu->reg_location[index].high_word) {
-    cu->reg_location[index].high_word = true;
-    change = true;
-  }
-  return change;
-}
-
-/*
- * Infer types and sizes.  We don't need to track change on sizes,
- * as it doesn't propagate.  We're guaranteed at least one pass through
- * the cfg.
- */
-static bool InferTypeAndSize(CompilationUnit* cu, BasicBlock* bb)
-{
-  MIR *mir;
-  bool changed = false;   // Did anything change?
-
-  if (bb->data_flow_info == NULL) return false;
-  if (bb->block_type != kDalvikByteCode && bb->block_type != kEntryBlock)
-    return false;
-
-  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-    SSARepresentation *ssa_rep = mir->ssa_rep;
-    if (ssa_rep) {
-      int attrs = oat_data_flow_attributes[mir->dalvikInsn.opcode];
-
-      // Handle defs
-      if (attrs & DF_DA) {
-        if (attrs & DF_CORE_A) {
-          changed |= SetCore(cu, ssa_rep->defs[0], true);
-        }
-        if (attrs & DF_REF_A) {
-          changed |= SetRef(cu, ssa_rep->defs[0], true);
-        }
-        if (attrs & DF_A_WIDE) {
-          cu->reg_location[ssa_rep->defs[0]].wide = true;
-          cu->reg_location[ssa_rep->defs[1]].wide = true;
-          cu->reg_location[ssa_rep->defs[1]].high_word = true;
-          DCHECK_EQ(SRegToVReg(cu, ssa_rep->defs[0])+1,
-          SRegToVReg(cu, ssa_rep->defs[1]));
-        }
-      }
-
-      // Handles uses
-      int next = 0;
-      if (attrs & DF_UA) {
-        if (attrs & DF_CORE_A) {
-          changed |= SetCore(cu, ssa_rep->uses[next], true);
-        }
-        if (attrs & DF_REF_A) {
-          changed |= SetRef(cu, ssa_rep->uses[next], true);
-        }
-        if (attrs & DF_A_WIDE) {
-          cu->reg_location[ssa_rep->uses[next]].wide = true;
-          cu->reg_location[ssa_rep->uses[next + 1]].wide = true;
-          cu->reg_location[ssa_rep->uses[next + 1]].high_word = true;
-          DCHECK_EQ(SRegToVReg(cu, ssa_rep->uses[next])+1,
-          SRegToVReg(cu, ssa_rep->uses[next + 1]));
-          next += 2;
-        } else {
-          next++;
-        }
-      }
-      if (attrs & DF_UB) {
-        if (attrs & DF_CORE_B) {
-          changed |= SetCore(cu, ssa_rep->uses[next], true);
-        }
-        if (attrs & DF_REF_B) {
-          changed |= SetRef(cu, ssa_rep->uses[next], true);
-        }
-        if (attrs & DF_B_WIDE) {
-          cu->reg_location[ssa_rep->uses[next]].wide = true;
-          cu->reg_location[ssa_rep->uses[next + 1]].wide = true;
-          cu->reg_location[ssa_rep->uses[next + 1]].high_word = true;
-          DCHECK_EQ(SRegToVReg(cu, ssa_rep->uses[next])+1,
-                               SRegToVReg(cu, ssa_rep->uses[next + 1]));
-          next += 2;
-        } else {
-          next++;
-        }
-      }
-      if (attrs & DF_UC) {
-        if (attrs & DF_CORE_C) {
-          changed |= SetCore(cu, ssa_rep->uses[next], true);
-        }
-        if (attrs & DF_REF_C) {
-          changed |= SetRef(cu, ssa_rep->uses[next], true);
-        }
-        if (attrs & DF_C_WIDE) {
-          cu->reg_location[ssa_rep->uses[next]].wide = true;
-          cu->reg_location[ssa_rep->uses[next + 1]].wide = true;
-          cu->reg_location[ssa_rep->uses[next + 1]].high_word = true;
-          DCHECK_EQ(SRegToVReg(cu, ssa_rep->uses[next])+1,
-          SRegToVReg(cu, ssa_rep->uses[next + 1]));
-        }
-      }
-
-      // Special-case return handling
-      if ((mir->dalvikInsn.opcode == Instruction::RETURN) ||
-          (mir->dalvikInsn.opcode == Instruction::RETURN_WIDE) ||
-          (mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT)) {
-        switch(cu->shorty[0]) {
-            case 'I':
-              changed |= SetCore(cu, ssa_rep->uses[0], true);
-              break;
-            case 'J':
-              changed |= SetCore(cu, ssa_rep->uses[0], true);
-              changed |= SetCore(cu, ssa_rep->uses[1], true);
-              cu->reg_location[ssa_rep->uses[0]].wide = true;
-              cu->reg_location[ssa_rep->uses[1]].wide = true;
-              cu->reg_location[ssa_rep->uses[1]].high_word = true;
-              break;
-            case 'F':
-              changed |= SetFp(cu, ssa_rep->uses[0], true);
-              break;
-            case 'D':
-              changed |= SetFp(cu, ssa_rep->uses[0], true);
-              changed |= SetFp(cu, ssa_rep->uses[1], true);
-              cu->reg_location[ssa_rep->uses[0]].wide = true;
-              cu->reg_location[ssa_rep->uses[1]].wide = true;
-              cu->reg_location[ssa_rep->uses[1]].high_word = true;
-              break;
-            case 'L':
-              changed |= SetRef(cu, ssa_rep->uses[0], true);
-              break;
-            default: break;
-        }
-      }
-
-      // Special-case handling for format 35c/3rc invokes
-      Instruction::Code opcode = mir->dalvikInsn.opcode;
-      int flags = (static_cast<int>(opcode) >= kNumPackedOpcodes)
-          ? 0 : Instruction::FlagsOf(mir->dalvikInsn.opcode);
-      if ((flags & Instruction::kInvoke) &&
-          (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
-        DCHECK_EQ(next, 0);
-        int target_idx = mir->dalvikInsn.vB;
-        const char* shorty = GetShortyFromTargetIdx(cu, target_idx);
-        // Handle result type if floating point
-        if ((shorty[0] == 'F') || (shorty[0] == 'D')) {
-          MIR* move_result_mir = FindMoveResult(cu, bb, mir);
-          // Result might not be used at all, so no move-result
-          if (move_result_mir && (move_result_mir->dalvikInsn.opcode !=
-              Instruction::MOVE_RESULT_OBJECT)) {
-            SSARepresentation* tgt_rep = move_result_mir->ssa_rep;
-            DCHECK(tgt_rep != NULL);
-            tgt_rep->fp_def[0] = true;
-            changed |= SetFp(cu, tgt_rep->defs[0], true);
-            if (shorty[0] == 'D') {
-              tgt_rep->fp_def[1] = true;
-              changed |= SetFp(cu, tgt_rep->defs[1], true);
-            }
-          }
-        }
-        int num_uses = mir->dalvikInsn.vA;
-        // If this is a non-static invoke, mark implicit "this"
-        if (((mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC) &&
-            (mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC_RANGE))) {
-          cu->reg_location[ssa_rep->uses[next]].defined = true;
-          cu->reg_location[ssa_rep->uses[next]].ref = true;
-          next++;
-        }
-        uint32_t cpos = 1;
-        if (strlen(shorty) > 1) {
-          for (int i = next; i < num_uses;) {
-            DCHECK_LT(cpos, strlen(shorty));
-            switch (shorty[cpos++]) {
-              case 'D':
-                ssa_rep->fp_use[i] = true;
-                ssa_rep->fp_use[i+1] = true;
-                cu->reg_location[ssa_rep->uses[i]].wide = true;
-                cu->reg_location[ssa_rep->uses[i+1]].wide = true;
-                cu->reg_location[ssa_rep->uses[i+1]].high_word = true;
-                DCHECK_EQ(SRegToVReg(cu, ssa_rep->uses[i])+1,
-                                     SRegToVReg(cu, ssa_rep->uses[i+1]));
-                i++;
-                break;
-              case 'J':
-                cu->reg_location[ssa_rep->uses[i]].wide = true;
-                cu->reg_location[ssa_rep->uses[i+1]].wide = true;
-                cu->reg_location[ssa_rep->uses[i+1]].high_word = true;
-                DCHECK_EQ(SRegToVReg(cu, ssa_rep->uses[i])+1,
-                                     SRegToVReg(cu, ssa_rep->uses[i+1]));
-                changed |= SetCore(cu, ssa_rep->uses[i],true);
-                i++;
-                break;
-              case 'F':
-                ssa_rep->fp_use[i] = true;
-                break;
-              case 'L':
-                changed |= SetRef(cu,ssa_rep->uses[i], true);
-                break;
-              default:
-                changed |= SetCore(cu,ssa_rep->uses[i], true);
-                break;
-            }
-            i++;
-          }
-        }
-      }
-
-      for (int i=0; ssa_rep->fp_use && i< ssa_rep->num_uses; i++) {
-        if (ssa_rep->fp_use[i])
-          changed |= SetFp(cu, ssa_rep->uses[i], true);
-        }
-      for (int i=0; ssa_rep->fp_def && i< ssa_rep->num_defs; i++) {
-        if (ssa_rep->fp_def[i])
-          changed |= SetFp(cu, ssa_rep->defs[i], true);
-        }
-      // Special-case handling for moves & Phi
-      if (attrs & (DF_IS_MOVE | DF_NULL_TRANSFER_N)) {
-        /*
-         * If any of our inputs or outputs is defined, set all.
-         * Some ugliness related to Phi nodes and wide values.
-         * The Phi set will include all low words or all high
-         * words, so we have to treat them specially.
-         */
-        bool is_phi = (static_cast<int>(mir->dalvikInsn.opcode) ==
-                      kMirOpPhi);
-        RegLocation rl_temp = cu->reg_location[ssa_rep->defs[0]];
-        bool defined_fp = rl_temp.defined && rl_temp.fp;
-        bool defined_core = rl_temp.defined && rl_temp.core;
-        bool defined_ref = rl_temp.defined && rl_temp.ref;
-        bool is_wide = rl_temp.wide || ((attrs & DF_A_WIDE) != 0);
-        bool is_high = is_phi && rl_temp.wide && rl_temp.high_word;
-        for (int i = 0; i < ssa_rep->num_uses;i++) {
-          rl_temp = cu->reg_location[ssa_rep->uses[i]];
-          defined_fp |= rl_temp.defined && rl_temp.fp;
-          defined_core |= rl_temp.defined && rl_temp.core;
-          defined_ref |= rl_temp.defined && rl_temp.ref;
-          is_wide |= rl_temp.wide;
-          is_high |= is_phi && rl_temp.wide && rl_temp.high_word;
-        }
-        /*
-         * TODO: cleaner fix
-         * We don't normally expect to see a Dalvik register
-         * definition used both as a floating point and core
-         * value.  However, the instruction rewriting that occurs
-         * during verification can eliminate some type information,
-         * leaving us confused.  The real fix here is either to
-         * add explicit type information to Dalvik byte codes,
-         * or to recognize THROW_VERIFICATION_ERROR as
-         * an unconditional branch and support dead code elimination.
-         * As a workaround we can detect this situation and
-         * disable register promotion (which is the only thing that
-         * relies on distinctions between core and fp usages.
-         */
-        if ((defined_fp && (defined_core | defined_ref)) &&
-            ((cu->disable_opt & (1 << kPromoteRegs)) == 0)) {
-          LOG(WARNING) << PrettyMethod(cu->method_idx, *cu->dex_file)
-                       << " op at block " << bb->id
-                       << " has both fp and core/ref uses for same def.";
-          cu->disable_opt |= (1 << kPromoteRegs);
-        }
-        changed |= SetFp(cu, ssa_rep->defs[0], defined_fp);
-        changed |= SetCore(cu, ssa_rep->defs[0], defined_core);
-        changed |= SetRef(cu, ssa_rep->defs[0], defined_ref);
-        changed |= SetWide(cu, ssa_rep->defs[0], is_wide);
-        changed |= SetHigh(cu, ssa_rep->defs[0], is_high);
-        if (attrs & DF_A_WIDE) {
-          changed |= SetWide(cu, ssa_rep->defs[1], true);
-          changed |= SetHigh(cu, ssa_rep->defs[1], true);
-        }
-        for (int i = 0; i < ssa_rep->num_uses; i++) {
-          changed |= SetFp(cu, ssa_rep->uses[i], defined_fp);
-          changed |= SetCore(cu, ssa_rep->uses[i], defined_core);
-          changed |= SetRef(cu, ssa_rep->uses[i], defined_ref);
-          changed |= SetWide(cu, ssa_rep->uses[i], is_wide);
-          changed |= SetHigh(cu, ssa_rep->uses[i], is_high);
-        }
-        if (attrs & DF_A_WIDE) {
-          DCHECK_EQ(ssa_rep->num_uses, 2);
-          changed |= SetWide(cu, ssa_rep->uses[1], true);
-          changed |= SetHigh(cu, ssa_rep->uses[1], true);
-        }
-      }
-    }
-  }
-  return changed;
-}
-
-static const char* storage_name[] = {" Frame ", "PhysReg", " Spill "};
-
-static void DumpRegLocTable(CompilationUnit* cu, RegLocation* table, int count)
-{
-  Codegen* cg = cu->cg.get();
-  for (int i = 0; i < count; i++) {
-    LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c %c%d %c%d S%d",
-        table[i].orig_sreg, storage_name[table[i].location],
-        table[i].wide ? 'W' : 'N', table[i].defined ? 'D' : 'U',
-        table[i].fp ? 'F' : table[i].ref ? 'R' :'C',
-        table[i].is_const ? 'c' : 'n',
-        table[i].high_word ? 'H' : 'L', table[i].home ? 'h' : 't',
-        cg->IsFpReg(table[i].low_reg) ? 's' : 'r',
-        table[i].low_reg & cg->FpRegMask(),
-        cg->IsFpReg(table[i].high_reg) ? 's' : 'r',
-        table[i].high_reg & cg->FpRegMask(), table[i].s_reg_low);
-  }
-}
-
-static const RegLocation fresh_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
-                                     INVALID_REG, INVALID_REG, INVALID_SREG,
-                                     INVALID_SREG};
-
-int ComputeFrameSize(CompilationUnit* cu) {
-  /* Figure out the frame size */
-  static const uint32_t kAlignMask = kStackAlignment - 1;
-  uint32_t size = (cu->num_core_spills + cu->num_fp_spills +
-                   1 /* filler word */ + cu->num_regs + cu->num_outs +
-                   cu->num_compiler_temps + 1 /* cur_method* */)
-                   * sizeof(uint32_t);
-  /* Align and set */
-  return (size + kAlignMask) & ~(kAlignMask);
-}
-
-/*
- * Simple register allocation.  Some Dalvik virtual registers may
- * be promoted to physical registers.  Most of the work for temp
- * allocation is done on the fly.  We also do some initialization and
- * type inference here.
- */
-void SimpleRegAlloc(CompilationUnit* cu)
-{
-  int i;
-  RegLocation* loc;
-
-  /* Allocate the location map */
-  loc = static_cast<RegLocation*>(NewMem(cu, cu->num_ssa_regs * sizeof(*loc),
-                                  true, kAllocRegAlloc));
-  for (i=0; i< cu->num_ssa_regs; i++) {
-    loc[i] = fresh_loc;
-    loc[i].s_reg_low = i;
-    loc[i].is_const = IsBitSet(cu->is_constant_v, i);
-  }
-
-  /* Patch up the locations for Method* and the compiler temps */
-  loc[cu->method_sreg].location = kLocCompilerTemp;
-  loc[cu->method_sreg].defined = true;
-  for (i = 0; i < cu->num_compiler_temps; i++) {
-    CompilerTemp* ct = reinterpret_cast<CompilerTemp*>(cu->compiler_temps.elem_list[i]);
-    loc[ct->s_reg].location = kLocCompilerTemp;
-    loc[ct->s_reg].defined = true;
-  }
-
-  cu->reg_location = loc;
-
-  /* Allocation the promotion map */
-  int num_regs = cu->num_dalvik_registers;
-  cu->promotion_map = static_cast<PromotionMap*>
-      (NewMem(cu, (num_regs + cu->num_compiler_temps + 1) * sizeof(cu->promotion_map[0]),
-              true, kAllocRegAlloc));
-
-  /* Add types of incoming arguments based on signature */
-  int num_ins = cu->num_ins;
-  if (num_ins > 0) {
-    int s_reg = num_regs - num_ins;
-    if ((cu->access_flags & kAccStatic) == 0) {
-      // For non-static, skip past "this"
-      cu->reg_location[s_reg].defined = true;
-      cu->reg_location[s_reg].ref = true;
-      s_reg++;
-    }
-    const char* shorty = cu->shorty;
-    int shorty_len = strlen(shorty);
-    for (int i = 1; i < shorty_len; i++) {
-      switch (shorty[i]) {
-        case 'D':
-          cu->reg_location[s_reg].wide = true;
-          cu->reg_location[s_reg+1].high_word = true;
-          cu->reg_location[s_reg+1].fp = true;
-          DCHECK_EQ(SRegToVReg(cu, s_reg)+1, SRegToVReg(cu, s_reg+1));
-          cu->reg_location[s_reg].fp = true;
-          cu->reg_location[s_reg].defined = true;
-          s_reg++;
-          break;
-        case 'J':
-          cu->reg_location[s_reg].wide = true;
-          cu->reg_location[s_reg+1].high_word = true;
-          DCHECK_EQ(SRegToVReg(cu, s_reg)+1, SRegToVReg(cu, s_reg+1));
-          cu->reg_location[s_reg].core = true;
-          cu->reg_location[s_reg].defined = true;
-          s_reg++;
-          break;
-        case 'F':
-          cu->reg_location[s_reg].fp = true;
-          cu->reg_location[s_reg].defined = true;
-          break;
-        case 'L':
-          cu->reg_location[s_reg].ref = true;
-          cu->reg_location[s_reg].defined = true;
-          break;
-        default:
-          cu->reg_location[s_reg].core = true;
-          cu->reg_location[s_reg].defined = true;
-          break;
-        }
-        s_reg++;
-      }
-  }
-
-  /* Do type & size inference pass */
-  DataFlowAnalysisDispatcher(cu, InferTypeAndSize,
-                                kPreOrderDFSTraversal,
-                                true /* is_iterative */);
-
-  /*
-   * Set the s_reg_low field to refer to the pre-SSA name of the
-   * base Dalvik virtual register.  Once we add a better register
-   * allocator, remove this remapping.
-   */
-  for (i=0; i < cu->num_ssa_regs; i++) {
-    if (cu->reg_location[i].location != kLocCompilerTemp) {
-      int orig_sreg = cu->reg_location[i].s_reg_low;
-      cu->reg_location[i].orig_sreg = orig_sreg;
-      cu->reg_location[i].s_reg_low = SRegToVReg(cu, orig_sreg);
-    }
-  }
-
-  /*
-   * Now that everything is typed and constants propagated, identify those constants
-   * that can be cheaply materialized and don't need to be flushed to a home location.
-   * The default is to not flush, and some have already been marked as must flush.
-   */
-  for (i=0; i < cu->num_ssa_regs; i++) {
-    if (IsBitSet(cu->is_constant_v, i)) {
-      bool flush = false;
-      RegLocation loc = cu->reg_location[i];
-      if (loc.wide) {
-        int64_t value = ConstantValueWide(cu, loc);
-        if (loc.fp) {
-          flush = !cu->cg->InexpensiveConstantDouble(value);
-        } else {
-          flush = !cu->cg->InexpensiveConstantLong(value);
-        }
-      } else {
-        int32_t value = ConstantValue(cu, loc);
-        if (loc.fp) {
-          flush = !cu->cg->InexpensiveConstantFloat(value);
-        } else {
-          flush = !cu->cg->InexpensiveConstantInt(value);
-        }
-      }
-      if (flush) {
-        SetBit(cu, cu->must_flush_constant_v, i);
-      }
-      if (loc.wide) {
-        i++;  // Skip the high word
-      }
-    }
-  }
-
-  cu->core_spill_mask = 0;
-  cu->fp_spill_mask = 0;
-  cu->num_core_spills = 0;
-
-  DoPromotion(cu);
-
-  /* Get easily-accessable post-promotion copy of RegLocation for Method* */
-  cu->method_loc = cu->reg_location[cu->method_sreg];
-
-  if (cu->verbose && !(cu->disable_opt & (1 << kPromoteRegs))) {
-    LOG(INFO) << "After Promotion";
-    DumpRegLocTable(cu, cu->reg_location, cu->num_ssa_regs);
-  }
-
-  /* Set the frame size */
-  cu->frame_size = ComputeFrameSize(cu);
-}
-
-}  // namespace art
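
The deleted ComputeFrameSize survives as MIRGraph::ComputeFrameSize, called from the new Codegen::SimpleRegAlloc in ralloc_util.cc. The computation is worth spelling out: spill slots, the filler word, locals, outs, compiler temps, and the cur_method slot are summed in 32-bit words, then the byte total is rounded up to the stack alignment with the usual power-of-two mask trick. A standalone sketch (kStackAlignment is target-defined; 16 here is illustrative):

    #include <cstdint>

    static constexpr uint32_t kStackAlignment = 16;  // assumed for illustration

    // Round a raw frame byte count up to the next alignment boundary.
    uint32_t AlignFrame(uint32_t raw_bytes) {
      const uint32_t mask = kStackAlignment - 1;     // valid for powers of two only
      return (raw_bytes + mask) & ~mask;
    }

    // Example: 3 core spills + 1 filler + 5 regs + 2 outs + 1 cur_method
    // = 12 words = 48 bytes; AlignFrame(48) == 48, AlignFrame(52) == 64.
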
diff --git a/src/compiler/dex/ralloc.h b/src/compiler/dex/ralloc.h
deleted file mode 100644
index cd25b1c..0000000
--- a/src/compiler/dex/ralloc.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_COMPILER_DEX_RALLOC_H_
-#define ART_SRC_COMPILER_DEX_RALLOC_H_
-
-#include "compiler_internals.h"
-
-namespace art {
-
-void SimpleRegAlloc(CompilationUnit* cu);
-
-}  // namespace art
-
-#endif  // ART_SRC_COMPILER_DEX_RALLOC_H_
diff --git a/src/compiler/dex/ssa_transformation.cc b/src/compiler/dex/ssa_transformation.cc
index 5d787c4..52674aa 100644
--- a/src/compiler/dex/ssa_transformation.cc
+++ b/src/compiler/dex/ssa_transformation.cc
@@ -15,14 +15,11 @@
  */
 
 #include "compiler_internals.h"
-#include "dataflow.h"
+#include "dataflow_iterator.h"
 
 namespace art {
 
-// Make sure iterative dfs recording matches old recursive version
-//#define TEST_DFS
-
-static BasicBlock* NeedsVisit(BasicBlock* bb) {
+BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) {
   if (bb != NULL) {
     if (bb->visited || bb->hidden) {
       bb = NULL;
@@ -31,7 +28,7 @@
   return bb;
 }
 
-static BasicBlock* NextUnvisitedSuccessor(BasicBlock* bb)
+BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb)
 {
   BasicBlock* res = NeedsVisit(bb->fall_through);
   if (res == NULL) {
@@ -54,179 +51,67 @@
   return res;
 }
 
-static void MarkPreOrder(CompilationUnit* cu, BasicBlock* block)
+void MIRGraph::MarkPreOrder(BasicBlock* block)
 {
   block->visited = true;
   /* Enqueue the pre_order block id */
-  InsertGrowableList(cu, &cu->dfs_order, block->id);
+  InsertGrowableList(cu_, &dfs_order_, block->id);
 }
 
-static void RecordDFSOrders(CompilationUnit* cu, BasicBlock* block)
+void MIRGraph::RecordDFSOrders(BasicBlock* block)
 {
   std::vector<BasicBlock*> succ;
-  MarkPreOrder(cu, block);
+  MarkPreOrder(block);
   succ.push_back(block);
   while (!succ.empty()) {
     BasicBlock* curr = succ.back();
     BasicBlock* next_successor = NextUnvisitedSuccessor(curr);
     if (next_successor != NULL) {
-      MarkPreOrder(cu, next_successor);
+      MarkPreOrder(next_successor);
       succ.push_back(next_successor);
       continue;
     }
-    curr->dfs_id = cu->dfs_post_order.num_used;
-    InsertGrowableList(cu, &cu->dfs_post_order, curr->id);
+    curr->dfs_id = dfs_post_order_.num_used;
+    InsertGrowableList(cu_, &dfs_post_order_, curr->id);
     succ.pop_back();
   }
 }
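
RecordDFSOrders keeps the iterative formulation (the recursive TEST_DFS cross-check deleted below had served to validate it): a block's id enters dfs_order when first pushed (preorder) and dfs_post_order once NextUnvisitedSuccessor comes up empty (postorder), so deep CFGs cannot overflow the native stack. The same walk on minimal types:

    #include <vector>

    struct Block {
      int id;
      bool visited = false;
      std::vector<Block*> succs;
    };

    // Explicit-stack DFS: preorder recorded on push, postorder on pop.
    void RecordOrders(Block* entry, std::vector<int>* pre, std::vector<int>* post) {
      std::vector<Block*> stack;
      entry->visited = true;
      pre->push_back(entry->id);
      stack.push_back(entry);
      while (!stack.empty()) {
        Block* curr = stack.back();
        Block* next = nullptr;
        for (Block* s : curr->succs) {      // NextUnvisitedSuccessor()
          if (!s->visited) { next = s; break; }
        }
        if (next != nullptr) {
          next->visited = true;
          pre->push_back(next->id);
          stack.push_back(next);
        } else {
          post->push_back(curr->id);        // all successors exhausted
          stack.pop_back();
        }
      }
    }
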
 
-#if defined(TEST_DFS)
-/* Enter the node to the dfs_order list then visit its successors */
-static void RecursiveRecordDFSOrders(CompilationUnit* cu, BasicBlock* block)
-{
-
-  if (block->visited || block->hidden) return;
-  block->visited = true;
-
-  // Can this block be reached only via previous block fallthrough?
-  if ((block->block_type == kDalvikByteCode) &&
-      (block->predecessors->num_used == 1)) {
-    DCHECK_GE(cu->dfs_order.num_used, 1U);
-    int prev_idx = cu->dfs_order.num_used - 1;
-    int prev_id = cu->dfs_order.elem_list[prev_idx];
-    BasicBlock* pred_bb = (BasicBlock*)block->predecessors->elem_list[0];
-  }
-
-  /* Enqueue the pre_order block id */
-  InsertGrowableList(cu, &cu->dfs_order, block->id);
-
-  if (block->fall_through) {
-    RecursiveRecordDFSOrders(cu, block->fall_through);
-  }
-  if (block->taken) RecursiveRecordDFSOrders(cu, block->taken);
-  if (block->successor_block_list.block_list_type != kNotUsed) {
-    GrowableListIterator iterator;
-    GrowableListIteratorInit(&block->successor_block_list.blocks,
-                                &iterator);
-    while (true) {
-      SuccessorBlockInfo *successor_block_info =
-          (SuccessorBlockInfo *) GrowableListIteratorNext(&iterator);
-      if (successor_block_info == NULL) break;
-      BasicBlock* succ_bb = successor_block_info->block;
-      RecursiveRecordDFSOrders(cu, succ_bb);
-    }
-  }
-
-  /* Record postorder in basic block and enqueue normal id in dfs_post_order */
-  block->dfs_id = cu->dfs_post_order.num_used;
-  InsertGrowableList(cu, &cu->dfs_post_order, block->id);
-  return;
-}
-#endif
-
 /* Sort the blocks by the Depth-First-Search */
-static void ComputeDFSOrders(CompilationUnit* cu)
+void MIRGraph::ComputeDFSOrders()
 {
   /* Initialize or reset the DFS pre_order list */
-  if (cu->dfs_order.elem_list == NULL) {
-    CompilerInitGrowableList(cu, &cu->dfs_order, cu->num_blocks,
-                        kListDfsOrder);
+  if (dfs_order_.elem_list == NULL) {
+    CompilerInitGrowableList(cu_, &dfs_order_, GetNumBlocks(), kListDfsOrder);
   } else {
     /* Just reset the used length on the counter */
-    cu->dfs_order.num_used = 0;
+    dfs_order_.num_used = 0;
   }
 
   /* Initialize or reset the DFS post_order list */
-  if (cu->dfs_post_order.elem_list == NULL) {
-    CompilerInitGrowableList(cu, &cu->dfs_post_order, cu->num_blocks,
-                        kListDfsPostOrder);
+  if (dfs_post_order_.elem_list == NULL) {
+    CompilerInitGrowableList(cu_, &dfs_post_order_, GetNumBlocks(), kListDfsPostOrder);
   } else {
     /* Just reset the used length on the counter */
-    cu->dfs_post_order.num_used = 0;
+    dfs_post_order_.num_used = 0;
   }
 
-#if defined(TEST_DFS)
-  // Reset visited flags
-  DataFlowAnalysisDispatcher(cu, ClearVisitedFlag,
-                                kAllNodes, false /* is_iterative */);
-  // Record pre and post order dfs
-  RecursiveRecordDFSOrders(cu, cu->entry_block);
-  // Copy the results for later comparison and reset the lists
-  GrowableList recursive_dfs_order;
-  GrowableList recursive_dfs_post_order;
-  CompilerInitGrowableList(cu, &recursive_dfs_order, cu->dfs_order.num_used,
-                      kListDfsOrder);
-  for (unsigned int i = 0; i < cu->dfs_order.num_used; i++) {
-    InsertGrowableList(cu, &recursive_dfs_order,
-                          cu->dfs_order.elem_list[i]);
-  }
-  cu->dfs_order.num_used = 0;
-  CompilerInitGrowableList(cu, &recursive_dfs_post_order,
-                      cu->dfs_post_order.num_used, kListDfsOrder);
-  for (unsigned int i = 0; i < cu->dfs_post_order.num_used; i++) {
-    InsertGrowableList(cu, &recursive_dfs_post_order,
-                          cu->dfs_post_order.elem_list[i]);
-  }
-  cu->dfs_post_order.num_used = 0;
-#endif
-
   // Reset visited flags from all nodes
-  DataFlowAnalysisDispatcher(cu, ClearVisitedFlag,
-                                kAllNodes, false /* is_iterative */);
+  DataflowIterator iter(this, kAllNodes, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    ClearVisitedFlag(bb);
+  }
   // Record dfs orders
-  RecordDFSOrders(cu, cu->entry_block);
+  RecordDFSOrders(GetEntryBlock());
 
-#if defined(TEST_DFS)
-  bool mismatch = false;
-  mismatch |= (cu->dfs_order.num_used != recursive_dfs_order.num_used);
-  for (unsigned int i = 0; i < cu->dfs_order.num_used; i++) {
-    mismatch |= (cu->dfs_order.elem_list[i] !=
-                 recursive_dfs_order.elem_list[i]);
-  }
-  mismatch |= (cu->dfs_post_order.num_used != recursive_dfs_post_order.num_used);
-  for (unsigned int i = 0; i < cu->dfs_post_order.num_used; i++) {
-    mismatch |= (cu->dfs_post_order.elem_list[i] !=
-                 recursive_dfs_post_order.elem_list[i]);
-  }
-  if (mismatch) {
-    LOG(INFO) << "Mismatch for "
-              << PrettyMethod(cu->method_idx, *cu->dex_file);
-    LOG(INFO) << "New dfs";
-    for (unsigned int i = 0; i < cu->dfs_order.num_used; i++) {
-      LOG(INFO) << i << " - " << cu->dfs_order.elem_list[i];
-    }
-    LOG(INFO) << "Recursive dfs";
-    for (unsigned int i = 0; i < recursive_dfs_order.num_used; i++) {
-      LOG(INFO) << i << " - " << recursive_dfs_order.elem_list[i];
-    }
-    LOG(INFO) << "New post dfs";
-    for (unsigned int i = 0; i < cu->dfs_post_order.num_used; i++) {
-      LOG(INFO) << i << " - " << cu->dfs_post_order.elem_list[i];
-    }
-    LOG(INFO) << "Recursive post dfs";
-    for (unsigned int i = 0; i < recursive_dfs_post_order.num_used; i++) {
-      LOG(INFO) << i << " - " << recursive_dfs_post_order.elem_list[i];
-    }
-  }
-  CHECK_EQ(cu->dfs_order.num_used, recursive_dfs_order.num_used);
-  for (unsigned int i = 0; i < cu->dfs_order.num_used; i++) {
-    CHECK_EQ(cu->dfs_order.elem_list[i], recursive_dfs_order.elem_list[i]);
-  }
-  CHECK_EQ(cu->dfs_post_order.num_used, recursive_dfs_post_order.num_used);
-  for (unsigned int i = 0; i < cu->dfs_post_order.num_used; i++) {
-    CHECK_EQ(cu->dfs_post_order.elem_list[i],
-             recursive_dfs_post_order.elem_list[i]);
-  }
-#endif
-
-  cu->num_reachable_blocks = cu->dfs_order.num_used;
+  num_reachable_blocks_ = dfs_order_.num_used;
 }
 
 /*
  * Mark block bit on the per-Dalvik register vector to denote that Dalvik
  * register idx is defined in BasicBlock bb.
  */
-static bool FillDefBlockMatrix(CompilationUnit* cu, BasicBlock* bb)
+bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb)
 {
   if (bb->data_flow_info == NULL) return false;
 
@@ -237,68 +122,69 @@
     int idx = BitVectorIteratorNext(&iterator);
     if (idx == -1) break;
     /* Block bb defines register idx */
-    SetBit(cu, cu->def_block_matrix[idx], bb->id);
+    SetBit(cu_, def_block_matrix_[idx], bb->id);
   }
   return true;
 }
 
-static void ComputeDefBlockMatrix(CompilationUnit* cu)
+void MIRGraph::ComputeDefBlockMatrix()
 {
-  int num_registers = cu->num_dalvik_registers;
+  int num_registers = cu_->num_dalvik_registers;
   /* Allocate num_dalvik_registers bit vector pointers */
-  cu->def_block_matrix = static_cast<ArenaBitVector**>
-      (NewMem(cu, sizeof(ArenaBitVector *) * num_registers, true, kAllocDFInfo));
+  def_block_matrix_ = static_cast<ArenaBitVector**>
+      (NewMem(cu_, sizeof(ArenaBitVector *) * num_registers, true, kAllocDFInfo));
   int i;
 
   /* Initialize num_register vectors with num_blocks bits each */
   for (i = 0; i < num_registers; i++) {
-    cu->def_block_matrix[i] = AllocBitVector(cu, cu->num_blocks,
-                                                 false, kBitMapBMatrix);
+    def_block_matrix_[i] = AllocBitVector(cu_, GetNumBlocks(), false, kBitMapBMatrix);
   }
-  DataFlowAnalysisDispatcher(cu, FindLocalLiveIn,
-                                kAllNodes, false /* is_iterative */);
-  DataFlowAnalysisDispatcher(cu, FillDefBlockMatrix,
-                                kAllNodes, false /* is_iterative */);
+  DataflowIterator iter(this, kAllNodes, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    FindLocalLiveIn(bb);
+  }
+  DataflowIterator iter2(this, kAllNodes, false /* not iterative */);
+  for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+    FillDefBlockMatrix(bb);
+  }
 
   /*
    * Also set the incoming parameters as defs in the entry block.
    * Only need to handle the parameters for the outer method.
    */
-  int num_regs = cu->num_dalvik_registers;
-  int in_reg = num_regs - cu->num_ins;
+  int num_regs = cu_->num_dalvik_registers;
+  int in_reg = num_regs - cu_->num_ins;
   for (; in_reg < num_regs; in_reg++) {
-    SetBit(cu, cu->def_block_matrix[in_reg], cu->entry_block->id);
+    SetBit(cu_, def_block_matrix_[in_reg], GetEntryBlock()->id);
   }
 }
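
For orientation, the def-block matrix built above is simply one bit vector per
Dalvik register, with bit b set when block b contains a definition of that
register; phi insertion later reads it one register at a time.  A minimal
self-contained sketch of the structure (toy sizes, plain std::vector standing
in for ArenaBitVector):

    #include <cstdio>
    #include <vector>

    int main() {
      const int kNumRegs = 3, kNumBlocks = 5;
      // def_matrix[reg] plays the role of def_block_matrix_[reg]: the set
      // of blocks containing a definition of 'reg'.
      std::vector<std::vector<bool>> def_matrix(
          kNumRegs, std::vector<bool>(kNumBlocks, false));
      def_matrix[0][0] = true;  // bb0 defines v0
      def_matrix[1][0] = true;  // bb0 defines v1
      def_matrix[1][2] = true;  // bb2 redefines v1
      for (int reg = 0; reg < kNumRegs; reg++) {
        std::printf("v%d defined in:", reg);
        for (int b = 0; b < kNumBlocks; b++) {
          if (def_matrix[reg][b]) std::printf(" bb%d", b);
        }
        std::printf("\n");
      }
      return 0;
    }
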
 
 /* Compute the post-order traversal of the CFG */
-static void ComputeDomPostOrderTraversal(CompilationUnit* cu, BasicBlock* bb)
+void MIRGraph::ComputeDomPostOrderTraversal(BasicBlock* bb)
 {
   ArenaBitVectorIterator bv_iterator;
   BitVectorIteratorInit(bb->i_dominated, &bv_iterator);
-  GrowableList* block_list = &cu->block_list;
 
   /* Iterate through the dominated blocks first */
   while (true) {
     //TUNING: hot call to BitVectorIteratorNext
     int bb_idx = BitVectorIteratorNext(&bv_iterator);
     if (bb_idx == -1) break;
-    BasicBlock* dominated_bb =
-        reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, bb_idx));
-    ComputeDomPostOrderTraversal(cu, dominated_bb);
+    BasicBlock* dominated_bb = GetBasicBlock(bb_idx);
+    ComputeDomPostOrderTraversal(dominated_bb);
   }
 
   /* Enter the current block id */
-  InsertGrowableList(cu, &cu->dom_post_order_traversal, bb->id);
+  InsertGrowableList(cu_, &dom_post_order_traversal_, bb->id);
 
   /* hacky loop detection */
   if (bb->taken && IsBitSet(bb->dominators, bb->taken->id)) {
-    cu->has_loop = true;
+    cu_->attributes |= METHOD_HAS_LOOP;
   }
 }
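
The "hacky loop detection" above uses a standard fact: an edge bb -> taken
closes a natural loop exactly when the branch target already dominates bb.
A tiny illustration with an assumed dominator set (toy block ids, not ART
code):

    #include <bitset>
    #include <cstdio>

    int main() {
      // Dominators of bb2 in some toy CFG: bb0, bb1 and bb2 itself.
      std::bitset<8> dominators_of_bb2("00000111");
      int taken_id = 1;  // bb2 branches back to bb1
      if (dominators_of_bb2.test(taken_id)) {
        std::printf("bb2 -> bb%d is a loop back edge\n", taken_id);
      }
      return 0;
    }
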
 
-static void CheckForDominanceFrontier(CompilationUnit* cu, BasicBlock* dom_bb,
-                                      const BasicBlock* succ_bb)
+void MIRGraph::CheckForDominanceFrontier(BasicBlock* dom_bb,
+                                         const BasicBlock* succ_bb)
 {
   /*
    * TODO - evaluate whether phi will ever need to be inserted into exit
@@ -307,21 +193,19 @@
   if (succ_bb->i_dom != dom_bb &&
     succ_bb->block_type == kDalvikByteCode &&
     succ_bb->hidden == false) {
-    SetBit(cu, dom_bb->dom_frontier, succ_bb->id);
+    SetBit(cu_, dom_bb->dom_frontier, succ_bb->id);
   }
 }
 
 /* Worker function to compute the dominance frontier */
-static bool ComputeDominanceFrontier(CompilationUnit* cu, BasicBlock* bb)
+bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb)
 {
-  GrowableList* block_list = &cu->block_list;
-
   /* Calculate DF_local */
   if (bb->taken) {
-    CheckForDominanceFrontier(cu, bb, bb->taken);
+    CheckForDominanceFrontier(bb, bb->taken);
   }
   if (bb->fall_through) {
-    CheckForDominanceFrontier(cu, bb, bb->fall_through);
+    CheckForDominanceFrontier(bb, bb->fall_through);
   }
   if (bb->successor_block_list.block_list_type != kNotUsed) {
     GrowableListIterator iterator;
@@ -332,7 +216,7 @@
             reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
         if (successor_block_info == NULL) break;
         BasicBlock* succ_bb = successor_block_info->block;
-        CheckForDominanceFrontier(cu, bb, succ_bb);
+        CheckForDominanceFrontier(bb, succ_bb);
       }
   }
 
@@ -343,17 +227,15 @@
     //TUNING: hot call to BitVectorIteratorNext
     int dominated_idx = BitVectorIteratorNext(&bv_iterator);
     if (dominated_idx == -1) break;
-    BasicBlock* dominated_bb =
-        reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, dominated_idx));
+    BasicBlock* dominated_bb = GetBasicBlock(dominated_idx);
     ArenaBitVectorIterator df_iterator;
     BitVectorIteratorInit(dominated_bb->dom_frontier, &df_iterator);
     while (true) {
       //TUNING: hot call to BitVectorIteratorNext
       int df_up_idx = BitVectorIteratorNext(&df_iterator);
       if (df_up_idx == -1) break;
-      BasicBlock* df_up_block =
-          reinterpret_cast<BasicBlock*>( GrowableListGetElement(block_list, df_up_idx));
-      CheckForDominanceFrontier(cu, bb, df_up_block);
+      BasicBlock* df_up_block = GetBasicBlock(df_up_idx);
+      CheckForDominanceFrontier(bb, df_up_block);
     }
   }
 
@@ -361,18 +243,18 @@
 }
 
 /* Worker function for initializing domination-related data structures */
-static bool InitializeDominationInfo(CompilationUnit* cu, BasicBlock* bb)
+bool MIRGraph::InitializeDominationInfo(BasicBlock* bb)
 {
-  int num_total_blocks = cu->block_list.num_used;
+  int num_total_blocks = GetBasicBlockListCount();
 
   if (bb->dominators == NULL) {
-    bb->dominators = AllocBitVector(cu, num_total_blocks,
+    bb->dominators = AllocBitVector(cu_, num_total_blocks,
                                        false /* expandable */,
                                        kBitMapDominators);
-    bb->i_dominated = AllocBitVector(cu, num_total_blocks,
+    bb->i_dominated = AllocBitVector(cu_, num_total_blocks,
                                        false /* expandable */,
                                        kBitMapIDominated);
-    bb->dom_frontier = AllocBitVector(cu, num_total_blocks,
+    bb->dom_frontier = AllocBitVector(cu_, num_total_blocks,
                                         false /* expandable */,
                                         kBitMapDomFrontier);
   } else {
@@ -387,104 +269,19 @@
 }
 
 /*
- * Worker function to compute each block's dominators.  This implementation
- * is only used when kDebugVerifyDataflow is active and should compute
- * the same dominator sets as ComputeBlockDominiators.
- */
-static bool SlowComputeBlockDominators(CompilationUnit* cu, BasicBlock* bb)
-{
-  GrowableList* block_list = &cu->block_list;
-  int num_total_blocks = block_list->num_used;
-  ArenaBitVector* temp_block_v = cu->temp_block_v;
-  GrowableListIterator iter;
-
-  /*
-   * The dominator of the entry block has been preset to itself and we need
-   * to skip the calculation here.
-   */
-  if (bb == cu->entry_block) return false;
-
-  SetInitialBits(temp_block_v, num_total_blocks);
-
-  /* Iterate through the predecessors */
-  GrowableListIteratorInit(bb->predecessors, &iter);
-  while (true) {
-    BasicBlock* pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
-    if (!pred_bb) break;
-    /* temp_block_v = temp_block_v ^ dominators */
-    if (pred_bb->dominators != NULL) {
-      IntersectBitVectors(temp_block_v, temp_block_v, pred_bb->dominators);
-    }
-  }
-  SetBit(cu, temp_block_v, bb->id);
-  if (CompareBitVectors(temp_block_v, bb->dominators)) {
-    CopyBitVector(bb->dominators, temp_block_v);
-    return true;
-  }
-  return false;
-}
-
-/*
- * Worker function to compute the idom.  This implementation is only
- * used when kDebugVerifyDataflow is active and should compute the
- * same i_dom as ComputeblockIDom.
- */
-static bool SlowComputeBlockIDom(CompilationUnit* cu, BasicBlock* bb)
-{
-  GrowableList* block_list = &cu->block_list;
-  ArenaBitVector* temp_block_v = cu->temp_block_v;
-  ArenaBitVectorIterator bv_iterator;
-  BasicBlock* i_dom;
-
-  if (bb == cu->entry_block) return false;
-
-  CopyBitVector(temp_block_v, bb->dominators);
-  ClearBit(temp_block_v, bb->id);
-  BitVectorIteratorInit(temp_block_v, &bv_iterator);
-
-  /* Should not see any dead block */
-  DCHECK_NE(CountSetBits(temp_block_v),  0);
-  if (CountSetBits(temp_block_v) == 1) {
-    i_dom = reinterpret_cast<BasicBlock*>
-        (GrowableListGetElement(block_list, BitVectorIteratorNext(&bv_iterator)));
-    bb->i_dom = i_dom;
-  } else {
-    int i_dom_idx = BitVectorIteratorNext(&bv_iterator);
-    DCHECK_NE(i_dom_idx, -1);
-    while (true) {
-      int next_dom = BitVectorIteratorNext(&bv_iterator);
-      if (next_dom == -1) break;
-      BasicBlock* next_dom_bb =
-          reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, next_dom));
-      /* i_dom dominates next_dom - set new i_dom */
-      if (IsBitSet(next_dom_bb->dominators, i_dom_idx)) {
-          i_dom_idx = next_dom;
-      }
-
-    }
-    i_dom = reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, i_dom_idx));
-    /* Set the immediate dominator block for bb */
-    bb->i_dom = i_dom;
-  }
-  /* Add bb to the i_dominated set of the immediate dominator block */
-  SetBit(cu, i_dom->i_dominated, bb->id);
-  return true;
-}
-
-/*
  * Walk through the ordered i_dom list until we reach a common parent.
  * Given the ordering of i_dom_list, this common parent represents the
  * last element of the intersection of block1 and block2 dominators.
  */
-static int FindCommonParent(CompilationUnit *cu, int block1, int block2)
+int MIRGraph::FindCommonParent(int block1, int block2)
 {
   while (block1 != block2) {
     while (block1 < block2) {
-      block1 = cu->i_dom_list[block1];
+      block1 = i_dom_list_[block1];
       DCHECK_NE(block1, NOTVISITED);
     }
     while (block2 < block1) {
-      block2 = cu->i_dom_list[block2];
+      block2 = i_dom_list_[block2];
       DCHECK_NE(block2, NOTVISITED);
     }
   }
@@ -492,13 +289,13 @@
 }
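
FindCommonParent is the "intersect" step of the Cooper/Harvey/Kennedy
dominance algorithm: blocks are numbered in post-order (the entry block gets
the highest number, as ComputeDominators asserts below), so repeatedly
following i_dom_list from the smaller index walks that block up the dominator
tree until the two walks meet.  A self-contained sketch over an assumed toy
tree:

    #include <cassert>
    #include <cstdio>

    // idom[] maps a post-order block id to its immediate-dominator
    // candidate, the same role i_dom_list_ plays above.
    static int Intersect(const int* idom, int b1, int b2) {
      while (b1 != b2) {
        while (b1 < b2) b1 = idom[b1];  // walk b1 up toward the entry
        while (b2 < b1) b2 = idom[b2];  // walk b2 up toward the entry
      }
      return b1;
    }

    int main() {
      // Toy dominator tree over post-order ids 0..4 (4 is the entry):
      //        4
      //       / \
      //      3   2
      //     / \
      //    1   0
      const int idom[] = {3, 3, 4, 4, 4};
      assert(Intersect(idom, 0, 1) == 3);  // siblings meet at their parent
      assert(Intersect(idom, 0, 2) == 4);  // otherwise walks meet at entry
      std::printf("intersect ok\n");
      return 0;
    }
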
 
 /* Worker function to compute each block's immediate dominator */
-static bool ComputeblockIDom(CompilationUnit* cu, BasicBlock* bb)
+bool MIRGraph::ComputeblockIDom(BasicBlock* bb)
 {
   GrowableListIterator iter;
   int idom = -1;
 
   /* Special-case entry block */
-  if (bb == cu->entry_block) {
+  if (bb == GetEntryBlock()) {
     return false;
   }
 
@@ -509,7 +306,7 @@
   while (true) {
     BasicBlock* pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
     CHECK(pred_bb != NULL);
-    if (cu->i_dom_list[pred_bb->dfs_id] != NOTVISITED) {
+    if (i_dom_list_[pred_bb->dfs_id] != NOTVISITED) {
       idom = pred_bb->dfs_id;
       break;
     }
@@ -519,140 +316,132 @@
   while (true) {
       BasicBlock* pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
       if (!pred_bb) break;
-      if (cu->i_dom_list[pred_bb->dfs_id] == NOTVISITED) {
+      if (i_dom_list_[pred_bb->dfs_id] == NOTVISITED) {
         continue;
       } else {
-        idom = FindCommonParent(cu, pred_bb->dfs_id, idom);
+        idom = FindCommonParent(pred_bb->dfs_id, idom);
       }
   }
 
   DCHECK_NE(idom, NOTVISITED);
 
   /* Did something change? */
-  if (cu->i_dom_list[bb->dfs_id] != idom) {
-    cu->i_dom_list[bb->dfs_id] = idom;
+  if (i_dom_list_[bb->dfs_id] != idom) {
+    i_dom_list_[bb->dfs_id] = idom;
     return true;
   }
   return false;
 }
 
 /* Worker function to compute each block's dominators */
-static bool ComputeBlockDominiators(CompilationUnit* cu, BasicBlock* bb)
+bool MIRGraph::ComputeBlockDominators(BasicBlock* bb)
 {
-  if (bb == cu->entry_block) {
+  if (bb == GetEntryBlock()) {
     ClearAllBits(bb->dominators);
   } else {
     CopyBitVector(bb->dominators, bb->i_dom->dominators);
   }
-  SetBit(cu, bb->dominators, bb->id);
+  SetBit(cu_, bb->dominators, bb->id);
   return false;
 }
 
-static bool SetDominators(CompilationUnit* cu, BasicBlock* bb)
+bool MIRGraph::SetDominators(BasicBlock* bb)
 {
-  if (bb != cu->entry_block) {
-    int idom_dfs_idx = cu->i_dom_list[bb->dfs_id];
+  if (bb != GetEntryBlock()) {
+    int idom_dfs_idx = i_dom_list_[bb->dfs_id];
     DCHECK_NE(idom_dfs_idx, NOTVISITED);
-    int i_dom_idx = cu->dfs_post_order.elem_list[idom_dfs_idx];
-    BasicBlock* i_dom =
-        reinterpret_cast<BasicBlock*>(GrowableListGetElement(&cu->block_list, i_dom_idx));
-    if (cu->enable_debug & (1 << kDebugVerifyDataflow)) {
+    int i_dom_idx = dfs_post_order_.elem_list[idom_dfs_idx];
+    BasicBlock* i_dom = GetBasicBlock(i_dom_idx);
+    if (cu_->enable_debug & (1 << kDebugVerifyDataflow)) {
       DCHECK_EQ(bb->i_dom->id, i_dom->id);
     }
     bb->i_dom = i_dom;
     /* Add bb to the i_dominated set of the immediate dominator block */
-    SetBit(cu, i_dom->i_dominated, bb->id);
+    SetBit(cu_, i_dom->i_dominated, bb->id);
   }
   return false;
 }
 
 /* Compute dominators, immediate dominator, and dominance frontier */
-static void ComputeDominators(CompilationUnit* cu)
+void MIRGraph::ComputeDominators()
 {
-  int num_reachable_blocks = cu->num_reachable_blocks;
-  int num_total_blocks = cu->block_list.num_used;
+  int num_reachable_blocks = num_reachable_blocks_;
+  int num_total_blocks = GetBasicBlockListCount();
 
   /* Initialize domination-related data structures */
-  DataFlowAnalysisDispatcher(cu, InitializeDominationInfo,
-                                kReachableNodes, false /* is_iterative */);
+  DataflowIterator iter(this, kReachableNodes, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    InitializeDominationInfo(bb);
+  }
 
   /* Initialize and clear i_dom_list */
-  if (cu->i_dom_list == NULL) {
-    cu->i_dom_list = static_cast<int*>(NewMem(cu, sizeof(int) * num_reachable_blocks,
+  if (i_dom_list_ == NULL) {
+    i_dom_list_ = static_cast<int*>(NewMem(cu_, sizeof(int) * num_reachable_blocks,
                                                false, kAllocDFInfo));
   }
   for (int i = 0; i < num_reachable_blocks; i++) {
-    cu->i_dom_list[i] = NOTVISITED;
+    i_dom_list_[i] = NOTVISITED;
   }
 
   /* For post-order, last block is entry block.  Set its i_dom to itself */
-  DCHECK_EQ(cu->entry_block->dfs_id, num_reachable_blocks-1);
-  cu->i_dom_list[cu->entry_block->dfs_id] = cu->entry_block->dfs_id;
+  DCHECK_EQ(GetEntryBlock()->dfs_id, num_reachable_blocks-1);
+  i_dom_list_[GetEntryBlock()->dfs_id] = GetEntryBlock()->dfs_id;
 
   /* Compute the immediate dominators */
-  DataFlowAnalysisDispatcher(cu, ComputeblockIDom,
-                                kReversePostOrderTraversal,
-                                true /* is_iterative */);
+  DataflowIterator iter2(this, kReversePostOrderTraversal, true /* iterative */);
+  bool change = false;
+  for (BasicBlock* bb = iter2.Next(false); bb != NULL; bb = iter2.Next(change)) {
+    change = ComputeblockIDom(bb);
+  }
 
   /* Set the dominator for the root node */
-  ClearAllBits(cu->entry_block->dominators);
-  SetBit(cu, cu->entry_block->dominators, cu->entry_block->id);
+  ClearAllBits(GetEntryBlock()->dominators);
+  SetBit(cu_, GetEntryBlock()->dominators, GetEntryBlock()->id);
 
-  if (cu->temp_block_v == NULL) {
-    cu->temp_block_v = AllocBitVector(cu, num_total_blocks,
-                                          false /* expandable */,
-                                          kBitMapTmpBlockV);
+  if (temp_block_v_ == NULL) {
+    temp_block_v_ = AllocBitVector(cu_, num_total_blocks, false /* expandable */, kBitMapTmpBlockV);
   } else {
-    ClearAllBits(cu->temp_block_v);
+    ClearAllBits(temp_block_v_);
   }
-  cu->entry_block->i_dom = NULL;
+  GetEntryBlock()->i_dom = NULL;
 
-  /* For testing, compute sets using alternate mechanism */
-  if (cu->enable_debug & (1 << kDebugVerifyDataflow)) {
-    // Use alternate mechanism to compute dominators for comparison
-    DataFlowAnalysisDispatcher(cu, SlowComputeBlockDominators,
-                                  kPreOrderDFSTraversal,
-                                  true /* is_iterative */);
-
-   DataFlowAnalysisDispatcher(cu, SlowComputeBlockIDom,
-                                 kReachableNodes,
-                                 false /* is_iterative */);
+  DataflowIterator iter3(this, kReachableNodes, false /* not iterative */);
+  for (BasicBlock* bb = iter3.Next(); bb != NULL; bb = iter3.Next()) {
+    SetDominators(bb);
   }
 
-  DataFlowAnalysisDispatcher(cu, SetDominators,
-                                kReachableNodes,
-                                false /* is_iterative */);
-
-  DataFlowAnalysisDispatcher(cu, ComputeBlockDominiators,
-                                kReversePostOrderTraversal,
-                                false /* is_iterative */);
+  DataflowIterator iter4(this, kReversePostOrderTraversal, false /* not iterative */);
+  for (BasicBlock* bb = iter4.Next(); bb != NULL; bb = iter4.Next()) {
+    ComputeBlockDominators(bb);
+  }
 
   /*
    * Now go ahead and compute the post order traversal based on the
    * i_dominated sets.
    */
-  if (cu->dom_post_order_traversal.elem_list == NULL) {
-    CompilerInitGrowableList(cu, &cu->dom_post_order_traversal,
+  if (dom_post_order_traversal_.elem_list == NULL) {
+    CompilerInitGrowableList(cu_, &dom_post_order_traversal_,
                         num_reachable_blocks, kListDomPostOrderTraversal);
   } else {
-    cu->dom_post_order_traversal.num_used = 0;
+    dom_post_order_traversal_.num_used = 0;
   }
 
-  ComputeDomPostOrderTraversal(cu, cu->entry_block);
-  DCHECK_EQ(cu->dom_post_order_traversal.num_used, static_cast<unsigned>(cu->num_reachable_blocks));
+  ComputeDomPostOrderTraversal(GetEntryBlock());
+  DCHECK_EQ(dom_post_order_traversal_.num_used, static_cast<unsigned>(num_reachable_blocks_));
 
   /* Now compute the dominance frontier for each block */
-  DataFlowAnalysisDispatcher(cu, ComputeDominanceFrontier,
-                                        kPostOrderDOMTraversal,
-                                        false /* is_iterative */);
+  DataflowIterator iter5(this, kPostOrderDOMTraversal, false /* not iterative */);
+  for (BasicBlock* bb = iter5.Next(); bb != NULL; bb = iter5.Next()) {
+    ComputeDominanceFrontier(bb);
+  }
 }
 
 /*
  * Perform dest U= src1 & ~src2
  * This is probably not general enough to be placed in BitVector.[ch].
  */
-static void ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
-                              const ArenaBitVector* src2)
+void MIRGraph::ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
+                                 const ArenaBitVector* src2)
 {
   if (dest->storage_size != src1->storage_size ||
     dest->storage_size != src2->storage_size ||
@@ -672,9 +461,9 @@
  * The calculated result is used for phi-node pruning - where we only need to
  * insert a phi node if the variable is live-in to the block.
  */
-static bool ComputeBlockLiveIns(CompilationUnit* cu, BasicBlock* bb)
+bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb)
 {
-  ArenaBitVector* temp_dalvik_register_v = cu->temp_dalvik_register_v;
+  ArenaBitVector* temp_dalvik_register_v = temp_dalvik_register_v_;
 
   if (bb->data_flow_info == NULL) return false;
   CopyBitVector(temp_dalvik_register_v, bb->data_flow_info->live_in_v);
@@ -682,8 +471,7 @@
     ComputeSuccLineIn(temp_dalvik_register_v, bb->taken->data_flow_info->live_in_v,
                       bb->data_flow_info->def_v);
   if (bb->fall_through && bb->fall_through->data_flow_info)
-    ComputeSuccLineIn(temp_dalvik_register_v,
-                      bb->fall_through->data_flow_info->live_in_v,
+    ComputeSuccLineIn(temp_dalvik_register_v, bb->fall_through->data_flow_info->live_in_v,
                       bb->data_flow_info->def_v);
   if (bb->successor_block_list.block_list_type != kNotUsed) {
     GrowableListIterator iterator;
@@ -695,8 +483,7 @@
       if (successor_block_info == NULL) break;
       BasicBlock* succ_bb = successor_block_info->block;
       if (succ_bb->data_flow_info) {
-        ComputeSuccLineIn(temp_dalvik_register_v,
-                          succ_bb->data_flow_info->live_in_v,
+        ComputeSuccLineIn(temp_dalvik_register_v, succ_bb->data_flow_info->live_in_v,
                           bb->data_flow_info->def_v);
       }
     }
@@ -709,30 +496,28 @@
 }
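
ComputeBlockLiveIns applies the usual backward-liveness transfer once per
successor: a register is live into bb if it is live into some successor and
bb does not define it first, i.e. live_in(bb) U= live_in(succ) & ~def(bb).
A minimal sketch with std::bitset standing in for ArenaBitVector (assumed
8-register frame, not ART code):

    #include <bitset>
    #include <cstdio>

    static void SuccLiveIn(std::bitset<8>* dest,
                           const std::bitset<8>& succ_live_in,
                           const std::bitset<8>& def) {
      // dest U= succ_live_in & ~def, mirroring ComputeSuccLineIn above.
      *dest |= succ_live_in & ~def;
    }

    int main() {
      std::bitset<8> live_in("00000001");    // v0 already known live-in
      std::bitset<8> succ_live("00000110");  // successor needs v1 and v2
      std::bitset<8> def("00000010");        // this block defines v1 itself
      SuccLiveIn(&live_in, succ_live, def);
      // v1 is killed by the local def; v0 and v2 remain live-in.
      std::printf("%s\n", live_in.to_string().c_str());  // 00000101
      return 0;
    }
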
 
 /* Insert phi nodes for each variable at the dominance frontiers */
-static void InsertPhiNodes(CompilationUnit* cu)
+void MIRGraph::InsertPhiNodes()
 {
   int dalvik_reg;
-  const GrowableList* block_list = &cu->block_list;
-  ArenaBitVector* phi_blocks =
-      AllocBitVector(cu, cu->num_blocks, false, kBitMapPhi);
-  ArenaBitVector* tmp_blocks =
-      AllocBitVector(cu, cu->num_blocks, false, kBitMapTmpBlocks);
-  ArenaBitVector* input_blocks =
-      AllocBitVector(cu, cu->num_blocks, false, kBitMapInputBlocks);
+  ArenaBitVector* phi_blocks = AllocBitVector(cu_, GetNumBlocks(), false, kBitMapPhi);
+  ArenaBitVector* tmp_blocks = AllocBitVector(cu_, GetNumBlocks(), false, kBitMapTmpBlocks);
+  ArenaBitVector* input_blocks = AllocBitVector(cu_, GetNumBlocks(), false, kBitMapInputBlocks);
 
-  cu->temp_dalvik_register_v =
-      AllocBitVector(cu, cu->num_dalvik_registers, false,
-                        kBitMapRegisterV);
+  temp_dalvik_register_v_ =
+      AllocBitVector(cu_, cu_->num_dalvik_registers, false, kBitMapRegisterV);
 
-  DataFlowAnalysisDispatcher(cu, ComputeBlockLiveIns,
-                                kPostOrderDFSTraversal, true /* is_iterative */);
+  DataflowIterator iter(this, kPostOrderDFSTraversal, true /* iterative */);
+  bool change = false;
+  for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
+    change = ComputeBlockLiveIns(bb);
+  }
 
   /* Iterate through each Dalvik register */
-  for (dalvik_reg = cu->num_dalvik_registers - 1; dalvik_reg >= 0; dalvik_reg--) {
+  for (dalvik_reg = cu_->num_dalvik_registers - 1; dalvik_reg >= 0; dalvik_reg--) {
     bool change;
     ArenaBitVectorIterator iterator;
 
-    CopyBitVector(input_blocks, cu->def_block_matrix[dalvik_reg]);
+    CopyBitVector(input_blocks, def_block_matrix_[dalvik_reg]);
     ClearAllBits(phi_blocks);
 
     /* Calculate the phi blocks for each Dalvik register */
@@ -744,8 +529,7 @@
       while (true) {
         int idx = BitVectorIteratorNext(&iterator);
         if (idx == -1) break;
-          BasicBlock* def_bb =
-              reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, idx));
+          BasicBlock* def_bb = GetBasicBlock(idx);
 
           /* Merge the dominance frontier to tmp_blocks */
           //TUNING: hot call to UnifyBitVetors
@@ -762,8 +546,7 @@
            * the dominance frontier.
            */
           CopyBitVector(input_blocks, phi_blocks);
-          UnifyBitVetors(input_blocks, input_blocks,
-                             cu->def_block_matrix[dalvik_reg]);
+          UnifyBitVetors(input_blocks, input_blocks, def_block_matrix_[dalvik_reg]);
       }
     } while (change);
 
@@ -775,14 +558,14 @@
     while (true) {
       int idx = BitVectorIteratorNext(&iterator);
       if (idx == -1) break;
-      BasicBlock* phi_bb =
-          reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, idx));
+      BasicBlock* phi_bb = GetBasicBlock(idx);
       /* Variable will be clobbered before being used - no need for phi */
       if (!IsBitSet(phi_bb->data_flow_info->live_in_v, dalvik_reg)) continue;
-      MIR *phi = static_cast<MIR*>(NewMem(cu, sizeof(MIR), true, kAllocDFInfo));
+      MIR *phi = static_cast<MIR*>(NewMem(cu_, sizeof(MIR), true, kAllocDFInfo));
       phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
       phi->dalvikInsn.vA = dalvik_reg;
       phi->offset = phi_bb->start_offset;
+      phi->m_unit_index = 0; // Arbitrarily assign all Phi nodes to the outermost method.
       PrependMIR(phi_bb, phi);
     }
   }
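
The do/while above computes the iterated dominance frontier per register:
seed input_blocks with the def sites from def_block_matrix_, union in the
dominance frontiers, and re-seed until the phi set stops growing.  A
self-contained sketch on an assumed four-block diamond:

    #include <bitset>
    #include <cstdio>

    int main() {
      // dom_frontier[b] is the dominance frontier of block b in a toy CFG
      // where bb0 and bb1 both flow into the join block bb3.
      std::bitset<4> dom_frontier[4] = {
          std::bitset<4>("1000"),  // DF(bb0) = {bb3}
          std::bitset<4>("1000"),  // DF(bb1) = {bb3}
          std::bitset<4>("0000"),  // DF(bb2) = {}
          std::bitset<4>("0000"),  // DF(bb3) = {}
      };
      std::bitset<4> defs("0011");  // the register is defined in bb0, bb1
      std::bitset<4> input = defs;
      std::bitset<4> phi;
      bool change;
      do {
        change = false;
        std::bitset<4> tmp;
        for (int b = 0; b < 4; b++) {
          if (input.test(b)) tmp |= dom_frontier[b];  // merge frontiers
        }
        if (tmp != phi) {
          change = true;
          phi = tmp;
          input = defs | phi;  // phi nodes are defs too: re-seed, repeat
        }
      } while (change);
      std::printf("phi needed in: %s\n", phi.to_string().c_str());  // 1000
      return 0;
    }
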
@@ -792,7 +575,7 @@
  * Worker function to insert phi-operands with latest SSA names from
  * predecessor blocks
  */
-static bool InsertPhiNodeOperands(CompilationUnit* cu, BasicBlock* bb)
+bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb)
 {
   GrowableListIterator iter;
   MIR *mir;
@@ -805,7 +588,7 @@
       return true;
     int ssa_reg = mir->ssa_rep->defs[0];
     DCHECK_GE(ssa_reg, 0);   // Shouldn't see compiler temps here
-    int v_reg = SRegToVReg(cu, ssa_reg);
+    int v_reg = SRegToVReg(ssa_reg);
 
     uses.clear();
     incoming_arc.clear();
@@ -825,11 +608,11 @@
     int num_uses = uses.size();
     mir->ssa_rep->num_uses = num_uses;
     mir->ssa_rep->uses =
-        static_cast<int*>(NewMem(cu, sizeof(int) * num_uses, false, kAllocDFInfo));
+        static_cast<int*>(NewMem(cu_, sizeof(int) * num_uses, false, kAllocDFInfo));
     mir->ssa_rep->fp_use =
-        static_cast<bool*>(NewMem(cu, sizeof(bool) * num_uses, true, kAllocDFInfo));
+        static_cast<bool*>(NewMem(cu_, sizeof(bool) * num_uses, true, kAllocDFInfo));
     int* incoming =
-        static_cast<int*>(NewMem(cu, sizeof(int) * num_uses, false, kAllocDFInfo));
+        static_cast<int*>(NewMem(cu_, sizeof(int) * num_uses, false, kAllocDFInfo));
     // TODO: Ugly, rework (but don't burden each MIR/LIR for Phi-only needs)
     mir->dalvikInsn.vB = reinterpret_cast<uintptr_t>(incoming);
 
@@ -844,29 +627,29 @@
   return true;
 }
 
-static void DoDFSPreOrderSSARename(CompilationUnit* cu, BasicBlock* block)
+void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block)
 {
 
   if (block->visited || block->hidden) return;
   block->visited = true;
 
   /* Process this block */
-  DoSSAConversion(cu, block);
-  int map_size = sizeof(int) * cu->num_dalvik_registers;
+  DoSSAConversion(block);
+  int map_size = sizeof(int) * cu_->num_dalvik_registers;
 
   /* Save SSA map snapshot */
-  int* saved_ssa_map = static_cast<int*>(NewMem(cu, map_size, false, kAllocDalvikToSSAMap));
-  memcpy(saved_ssa_map, cu->vreg_to_ssa_map, map_size);
+  int* saved_ssa_map = static_cast<int*>(NewMem(cu_, map_size, false, kAllocDalvikToSSAMap));
+  memcpy(saved_ssa_map, vreg_to_ssa_map_, map_size);
 
   if (block->fall_through) {
-    DoDFSPreOrderSSARename(cu, block->fall_through);
+    DoDFSPreOrderSSARename(block->fall_through);
     /* Restore SSA map snapshot */
-    memcpy(cu->vreg_to_ssa_map, saved_ssa_map, map_size);
+    memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
   }
   if (block->taken) {
-    DoDFSPreOrderSSARename(cu, block->taken);
+    DoDFSPreOrderSSARename(block->taken);
     /* Restore SSA map snapshot */
-    memcpy(cu->vreg_to_ssa_map, saved_ssa_map, map_size);
+    memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
   }
   if (block->successor_block_list.block_list_type != kNotUsed) {
     GrowableListIterator iterator;
@@ -876,56 +659,59 @@
           reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
       if (successor_block_info == NULL) break;
       BasicBlock* succ_bb = successor_block_info->block;
-      DoDFSPreOrderSSARename(cu, succ_bb);
+      DoDFSPreOrderSSARename(succ_bb);
       /* Restore SSA map snapshot */
-      memcpy(cu->vreg_to_ssa_map, saved_ssa_map, map_size);
+      memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
     }
   }
-  cu->vreg_to_ssa_map = saved_ssa_map;
+  vreg_to_ssa_map_ = saved_ssa_map;
   return;
 }
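
The snapshot/restore dance above is what keeps the renaming correct: an SSA
name assigned along one successor path must not leak into a sibling path, so
the vreg-to-ssa map is saved before each recursion and restored afterwards.
A toy sketch of the same idea (assumed diamond CFG; pass-by-value plays the
role of the memcpy snapshots):

    #include <cstdio>
    #include <vector>

    struct Block {
      int def_vreg;            // vreg defined in this block, or -1
      std::vector<int> succs;  // successor block ids
    };

    static int next_ssa = 0;

    static void Rename(const std::vector<Block>& cfg, int b,
                       std::vector<int> map, std::vector<bool>& visited) {
      if (visited[b]) return;
      visited[b] = true;
      if (cfg[b].def_vreg >= 0) {
        map[cfg[b].def_vreg] = next_ssa++;  // fresh name for the new def
      }
      std::printf("bb%d sees v0 as ssa%d\n", b, map[0]);
      for (int s : cfg[b].succs) {
        Rename(cfg, s, map, visited);  // 'map' is copied: a snapshot
      }
    }

    int main() {
      // Diamond: bb0 -> {bb1, bb2}, both -> bb3; bb0 and bb1 define v0.
      std::vector<Block> cfg = {{0, {1, 2}}, {0, {3}}, {-1, {3}}, {-1, {}}};
      std::vector<bool> visited(4, false);
      Rename(cfg, 0, std::vector<int>(1, -1), visited);
      // bb2 prints ssa0: it is unaffected by bb1's rename to ssa1.
      return 0;
    }
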
 
 /* Perform SSA transformation for the whole method */
-void SSATransformation(CompilationUnit* cu)
+void MIRGraph::SSATransformation()
 {
   /* Compute the DFS order */
-  ComputeDFSOrders(cu);
+  ComputeDFSOrders();
 
-  if (!cu->disable_dataflow) {
+  if (!cu_->disable_dataflow) {
     /* Compute the dominator info */
-    ComputeDominators(cu);
+    ComputeDominators();
   }
 
   /* Allocate data structures in preparation for SSA conversion */
-  CompilerInitializeSSAConversion(cu);
+  CompilerInitializeSSAConversion();
 
-  if (!cu->disable_dataflow) {
+  if (!cu_->disable_dataflow) {
     /* Find out the "Dalvik reg def x block" relation */
-    ComputeDefBlockMatrix(cu);
+    ComputeDefBlockMatrix();
 
     /* Insert phi nodes to dominance frontiers for all variables */
-    InsertPhiNodes(cu);
+    InsertPhiNodes();
   }
 
   /* Rename register names by local defs and phi nodes */
-  DataFlowAnalysisDispatcher(cu, ClearVisitedFlag,
-                                kAllNodes, false /* is_iterative */);
-  DoDFSPreOrderSSARename(cu, cu->entry_block);
+  DataflowIterator iter(this, kAllNodes, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    ClearVisitedFlag(bb);
+  }
+  DoDFSPreOrderSSARename(GetEntryBlock());
 
-  if (!cu->disable_dataflow) {
+  if (!cu_->disable_dataflow) {
     /*
      * Shared temp bit vector used by each block to count the number of defs
      * from all the predecessor blocks.
      */
-    cu->temp_ssa_register_v = AllocBitVector(cu, cu->num_ssa_regs,
-         false, kBitMapTempSSARegisterV);
-
-    cu->temp_ssa_block_id_v =
-        static_cast<int*>(NewMem(cu, sizeof(int) * cu->num_ssa_regs, false, kAllocDFInfo));
+    temp_ssa_register_v_ = AllocBitVector(cu_, GetNumSSARegs(), false, kBitMapTempSSARegisterV);
 
     /* Insert phi-operands with latest SSA names from predecessor blocks */
-    DataFlowAnalysisDispatcher(cu, InsertPhiNodeOperands,
-                                  kReachableNodes, false /* is_iterative */);
+    DataflowIterator iter(this, kReachableNodes, false /* not iterative */);
+    for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+      InsertPhiNodeOperands(bb);
+    }
+  }
+  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
+    DumpCFG("/sdcard/3_post_ssa_cfg/", false);
   }
 }
 
diff --git a/src/compiler/dex/ssa_transformation.h b/src/compiler/dex/ssa_transformation.h
deleted file mode 100644
index 92f7c0e..0000000
--- a/src/compiler/dex/ssa_transformation.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_COMPILER_DEX_SSATRANSFORMATION_H_
-#define ART_SRC_COMPILER_DEX_SSATRANSFORMATION_H_
-
-#include "compiler_internals.h"
-
-namespace art {
-
-void SSATransformation(CompilationUnit* cu);
-
-}  // namespace art
-
-#endif  // ART_SRC_COMPILER_DEX_DATAFLOW_H_
diff --git a/src/compiler/dex/vreg_analysis.cc b/src/compiler/dex/vreg_analysis.cc
new file mode 100644
index 0000000..4f516fe
--- /dev/null
+++ b/src/compiler/dex/vreg_analysis.cc
@@ -0,0 +1,496 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler_internals.h"
+#include "dataflow_iterator.h"
+#include "quick/ralloc_util.h"
+
+namespace art {
+
+static bool SetFp(CompilationUnit* cu, int index, bool is_fp) {
+  bool change = false;
+  if (is_fp && !cu->reg_location[index].fp) {
+    cu->reg_location[index].fp = true;
+    cu->reg_location[index].defined = true;
+    change = true;
+  }
+  return change;
+}
+
+static bool SetCore(CompilationUnit* cu, int index, bool is_core) {
+  bool change = false;
+  if (is_core && !cu->reg_location[index].defined) {
+    cu->reg_location[index].core = true;
+    cu->reg_location[index].defined = true;
+    change = true;
+  }
+  return change;
+}
+
+static bool SetRef(CompilationUnit* cu, int index, bool is_ref) {
+  bool change = false;
+  if (is_ref && !cu->reg_location[index].defined) {
+    cu->reg_location[index].ref = true;
+    cu->reg_location[index].defined = true;
+    change = true;
+  }
+  return change;
+}
+
+static bool SetWide(CompilationUnit* cu, int index, bool is_wide) {
+  bool change = false;
+  if (is_wide && !cu->reg_location[index].wide) {
+    cu->reg_location[index].wide = true;
+    change = true;
+  }
+  return change;
+}
+
+static bool SetHigh(CompilationUnit* cu, int index, bool is_high) {
+  bool change = false;
+  if (is_high && !cu->reg_location[index].high_word) {
+    cu->reg_location[index].high_word = true;
+    change = true;
+  }
+  return change;
+}
+
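
A note on termination: each Set* helper above only ever flips a flag from
false to true, so the per-register state forms a finite ascending chain and
the iterative inference pass below must reach a fixed point.  Sketch (not
ART code):

    #include <cstdio>

    struct Loc { bool fp = false; bool defined = false; };

    // Mirrors the Set* pattern: report a change only on false -> true.
    static bool SetFpFlag(Loc* loc) {
      if (!loc->fp) {
        loc->fp = true;
        loc->defined = true;
        return true;
      }
      return false;  // already set: nothing changed
    }

    int main() {
      Loc v0;
      int passes = 0;
      bool changed;
      do {
        changed = SetFpFlag(&v0);  // stand-in for one sweep over the CFG
        passes++;
      } while (changed);
      std::printf("converged after %d passes\n", passes);  // prints 2
      return 0;
    }
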
+/*
+ * Infer types and sizes.  We don't need to track changes on sizes,
+ * as they don't propagate.  We're guaranteed at least one pass through
+ * the cfg.
+ */
+bool MIRGraph::InferTypeAndSize(BasicBlock* bb)
+{
+  MIR *mir;
+  bool changed = false;   // Did anything change?
+
+  if (bb->data_flow_info == NULL) return false;
+  if (bb->block_type != kDalvikByteCode && bb->block_type != kEntryBlock)
+    return false;
+
+  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    SSARepresentation *ssa_rep = mir->ssa_rep;
+    if (ssa_rep) {
+      int attrs = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+
+      // Handle defs
+      if (attrs & DF_DA) {
+        if (attrs & DF_CORE_A) {
+          changed |= SetCore(cu_, ssa_rep->defs[0], true);
+        }
+        if (attrs & DF_REF_A) {
+          changed |= SetRef(cu_, ssa_rep->defs[0], true);
+        }
+        if (attrs & DF_A_WIDE) {
+          cu_->reg_location[ssa_rep->defs[0]].wide = true;
+          cu_->reg_location[ssa_rep->defs[1]].wide = true;
+          cu_->reg_location[ssa_rep->defs[1]].high_word = true;
+          DCHECK_EQ(SRegToVReg(ssa_rep->defs[0])+1,
+                    SRegToVReg(ssa_rep->defs[1]));
+        }
+      }
+
+      // Handle uses
+      int next = 0;
+      if (attrs & DF_UA) {
+        if (attrs & DF_CORE_A) {
+          changed |= SetCore(cu_, ssa_rep->uses[next], true);
+        }
+        if (attrs & DF_REF_A) {
+          changed |= SetRef(cu_, ssa_rep->uses[next], true);
+        }
+        if (attrs & DF_A_WIDE) {
+          cu_->reg_location[ssa_rep->uses[next]].wide = true;
+          cu_->reg_location[ssa_rep->uses[next + 1]].wide = true;
+          cu_->reg_location[ssa_rep->uses[next + 1]].high_word = true;
+          DCHECK_EQ(SRegToVReg(ssa_rep->uses[next])+1,
+                    SRegToVReg(ssa_rep->uses[next + 1]));
+          next += 2;
+        } else {
+          next++;
+        }
+      }
+      if (attrs & DF_UB) {
+        if (attrs & DF_CORE_B) {
+          changed |= SetCore(cu_, ssa_rep->uses[next], true);
+        }
+        if (attrs & DF_REF_B) {
+          changed |= SetRef(cu_, ssa_rep->uses[next], true);
+        }
+        if (attrs & DF_B_WIDE) {
+          cu_->reg_location[ssa_rep->uses[next]].wide = true;
+          cu_->reg_location[ssa_rep->uses[next + 1]].wide = true;
+          cu_->reg_location[ssa_rep->uses[next + 1]].high_word = true;
+          DCHECK_EQ(SRegToVReg(ssa_rep->uses[next])+1,
+                    SRegToVReg(ssa_rep->uses[next + 1]));
+          next += 2;
+        } else {
+          next++;
+        }
+      }
+      if (attrs & DF_UC) {
+        if (attrs & DF_CORE_C) {
+          changed |= SetCore(cu_, ssa_rep->uses[next], true);
+        }
+        if (attrs & DF_REF_C) {
+          changed |= SetRef(cu_, ssa_rep->uses[next], true);
+        }
+        if (attrs & DF_C_WIDE) {
+          cu_->reg_location[ssa_rep->uses[next]].wide = true;
+          cu_->reg_location[ssa_rep->uses[next + 1]].wide = true;
+          cu_->reg_location[ssa_rep->uses[next + 1]].high_word = true;
+          DCHECK_EQ(SRegToVReg(ssa_rep->uses[next])+1,
+                    SRegToVReg(ssa_rep->uses[next + 1]));
+        }
+      }
+
+      // Special-case return handling
+      if ((mir->dalvikInsn.opcode == Instruction::RETURN) ||
+          (mir->dalvikInsn.opcode == Instruction::RETURN_WIDE) ||
+          (mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT)) {
+        switch (cu_->shorty[0]) {
+            case 'I':
+              changed |= SetCore(cu_, ssa_rep->uses[0], true);
+              break;
+            case 'J':
+              changed |= SetCore(cu_, ssa_rep->uses[0], true);
+              changed |= SetCore(cu_, ssa_rep->uses[1], true);
+              cu_->reg_location[ssa_rep->uses[0]].wide = true;
+              cu_->reg_location[ssa_rep->uses[1]].wide = true;
+              cu_->reg_location[ssa_rep->uses[1]].high_word = true;
+              break;
+            case 'F':
+              changed |= SetFp(cu_, ssa_rep->uses[0], true);
+              break;
+            case 'D':
+              changed |= SetFp(cu_, ssa_rep->uses[0], true);
+              changed |= SetFp(cu_, ssa_rep->uses[1], true);
+              cu_->reg_location[ssa_rep->uses[0]].wide = true;
+              cu_->reg_location[ssa_rep->uses[1]].wide = true;
+              cu_->reg_location[ssa_rep->uses[1]].high_word = true;
+              break;
+            case 'L':
+              changed |= SetRef(cu_, ssa_rep->uses[0], true);
+              break;
+            default: break;
+        }
+      }
+
+      // Special-case handling for format 35c/3rc invokes
+      Instruction::Code opcode = mir->dalvikInsn.opcode;
+      int flags = (static_cast<int>(opcode) >= kNumPackedOpcodes)
+          ? 0 : Instruction::FlagsOf(mir->dalvikInsn.opcode);
+      if ((flags & Instruction::kInvoke) &&
+          (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
+        DCHECK_EQ(next, 0);
+        int target_idx = mir->dalvikInsn.vB;
+        const char* shorty = GetShortyFromTargetIdx(cu_, target_idx);
+        // Handle result type if floating point
+        if ((shorty[0] == 'F') || (shorty[0] == 'D')) {
+          MIR* move_result_mir = FindMoveResult(bb, mir);
+          // Result might not be used at all, so no move-result
+          if (move_result_mir && (move_result_mir->dalvikInsn.opcode !=
+              Instruction::MOVE_RESULT_OBJECT)) {
+            SSARepresentation* tgt_rep = move_result_mir->ssa_rep;
+            DCHECK(tgt_rep != NULL);
+            tgt_rep->fp_def[0] = true;
+            changed |= SetFp(cu_, tgt_rep->defs[0], true);
+            if (shorty[0] == 'D') {
+              tgt_rep->fp_def[1] = true;
+              changed |= SetFp(cu_, tgt_rep->defs[1], true);
+            }
+          }
+        }
+        int num_uses = mir->dalvikInsn.vA;
+        // If this is a non-static invoke, mark implicit "this"
+        if (((mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC) &&
+            (mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC_RANGE))) {
+          cu_->reg_location[ssa_rep->uses[next]].defined = true;
+          cu_->reg_location[ssa_rep->uses[next]].ref = true;
+          next++;
+        }
+        uint32_t cpos = 1;
+        if (strlen(shorty) > 1) {
+          for (int i = next; i < num_uses;) {
+            DCHECK_LT(cpos, strlen(shorty));
+            switch (shorty[cpos++]) {
+              case 'D':
+                ssa_rep->fp_use[i] = true;
+                ssa_rep->fp_use[i+1] = true;
+                cu_->reg_location[ssa_rep->uses[i]].wide = true;
+                cu_->reg_location[ssa_rep->uses[i+1]].wide = true;
+                cu_->reg_location[ssa_rep->uses[i+1]].high_word = true;
+                DCHECK_EQ(SRegToVReg(ssa_rep->uses[i])+1, SRegToVReg(ssa_rep->uses[i+1]));
+                i++;
+                break;
+              case 'J':
+                cu_->reg_location[ssa_rep->uses[i]].wide = true;
+                cu_->reg_location[ssa_rep->uses[i+1]].wide = true;
+                cu_->reg_location[ssa_rep->uses[i+1]].high_word = true;
+                DCHECK_EQ(SRegToVReg(ssa_rep->uses[i])+1, SRegToVReg(ssa_rep->uses[i+1]));
+                changed |= SetCore(cu_, ssa_rep->uses[i], true);
+                i++;
+                break;
+              case 'F':
+                ssa_rep->fp_use[i] = true;
+                break;
+              case 'L':
+                changed |= SetRef(cu_, ssa_rep->uses[i], true);
+                break;
+              default:
+                changed |= SetCore(cu_, ssa_rep->uses[i], true);
+                break;
+            }
+            i++;
+          }
+        }
+      }
+
+      for (int i = 0; ssa_rep->fp_use && i < ssa_rep->num_uses; i++) {
+        if (ssa_rep->fp_use[i])
+          changed |= SetFp(cu_, ssa_rep->uses[i], true);
+      }
+      for (int i = 0; ssa_rep->fp_def && i < ssa_rep->num_defs; i++) {
+        if (ssa_rep->fp_def[i])
+          changed |= SetFp(cu_, ssa_rep->defs[i], true);
+      }
+      // Special-case handling for moves & Phi
+      if (attrs & (DF_IS_MOVE | DF_NULL_TRANSFER_N)) {
+        /*
+         * If any of our inputs or outputs is defined, set all.
+         * Some ugliness related to Phi nodes and wide values.
+         * The Phi set will include all low words or all high
+         * words, so we have to treat them specially.
+         */
+        bool is_phi = (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi);
+        RegLocation rl_temp = cu_->reg_location[ssa_rep->defs[0]];
+        bool defined_fp = rl_temp.defined && rl_temp.fp;
+        bool defined_core = rl_temp.defined && rl_temp.core;
+        bool defined_ref = rl_temp.defined && rl_temp.ref;
+        bool is_wide = rl_temp.wide || ((attrs & DF_A_WIDE) != 0);
+        bool is_high = is_phi && rl_temp.wide && rl_temp.high_word;
+        for (int i = 0; i < ssa_rep->num_uses; i++) {
+          rl_temp = cu_->reg_location[ssa_rep->uses[i]];
+          defined_fp |= rl_temp.defined && rl_temp.fp;
+          defined_core |= rl_temp.defined && rl_temp.core;
+          defined_ref |= rl_temp.defined && rl_temp.ref;
+          is_wide |= rl_temp.wide;
+          is_high |= is_phi && rl_temp.wide && rl_temp.high_word;
+        }
+        /*
+         * TODO: cleaner fix
+         * We don't normally expect to see a Dalvik register
+         * definition used both as a floating point and core
+         * value.  However, the instruction rewriting that occurs
+         * during verification can eliminate some type information,
+         * leaving us confused.  The real fix here is either to
+         * add explicit type information to Dalvik byte codes,
+         * or to recognize THROW_VERIFICATION_ERROR as
+         * an unconditional branch and support dead code elimination.
+         * As a workaround we can detect this situation and
+         * disable register promotion (which is the only thing that
+         * relies on distinctions between core and fp usages).
+         */
+        if ((defined_fp && (defined_core | defined_ref)) &&
+            ((cu_->disable_opt & (1 << kPromoteRegs)) == 0)) {
+          LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
+                       << " op at block " << bb->id
+                       << " has both fp and core/ref uses for same def.";
+          cu_->disable_opt |= (1 << kPromoteRegs);
+        }
+        changed |= SetFp(cu_, ssa_rep->defs[0], defined_fp);
+        changed |= SetCore(cu_, ssa_rep->defs[0], defined_core);
+        changed |= SetRef(cu_, ssa_rep->defs[0], defined_ref);
+        changed |= SetWide(cu_, ssa_rep->defs[0], is_wide);
+        changed |= SetHigh(cu_, ssa_rep->defs[0], is_high);
+        if (attrs & DF_A_WIDE) {
+          changed |= SetWide(cu_, ssa_rep->defs[1], true);
+          changed |= SetHigh(cu_, ssa_rep->defs[1], true);
+        }
+        for (int i = 0; i < ssa_rep->num_uses; i++) {
+          changed |= SetFp(cu_, ssa_rep->uses[i], defined_fp);
+          changed |= SetCore(cu_, ssa_rep->uses[i], defined_core);
+          changed |= SetRef(cu_, ssa_rep->uses[i], defined_ref);
+          changed |= SetWide(cu_, ssa_rep->uses[i], is_wide);
+          changed |= SetHigh(cu_, ssa_rep->uses[i], is_high);
+        }
+        if (attrs & DF_A_WIDE) {
+          DCHECK_EQ(ssa_rep->num_uses, 2);
+          changed |= SetWide(cu_, ssa_rep->uses[1], true);
+          changed |= SetHigh(cu_, ssa_rep->uses[1], true);
+        }
+      }
+    }
+  }
+  return changed;
+}
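
The iter.Next(false) / iter.Next(change) idiom used to drive InferTypeAndSize
(see BuildRegLocations below) is the iterative mode of the new
DataflowIterator: the caller reports whether the previous block changed
anything, and the iterator replays the whole block order until a sweep
completes with no changes.  A toy model of that contract (assumed semantics,
not the actual DataflowIterator):

    #include <cstdio>

    class ToyIterator {
     public:
      explicit ToyIterator(int num_blocks) : num_blocks_(num_blocks) {}
      // Returns the next block id, or -1 once a full sweep saw no change.
      int Next(bool change) {
        changed_ |= change;
        if (idx_ == num_blocks_) {
          if (!changed_) return -1;  // fixed point reached
          idx_ = 0;                  // otherwise replay the whole order
          changed_ = false;
        }
        return idx_++;
      }

     private:
      const int num_blocks_;
      int idx_ = 0;
      bool changed_ = false;
    };

    int main() {
      ToyIterator iter(3);
      int visits = 0;
      bool change = false;
      for (int bb = iter.Next(false); bb != -1; bb = iter.Next(change)) {
        change = (visits < 4);  // pretend early visits changed something
        visits++;
      }
      std::printf("%d block visits before convergence\n", visits);
      return 0;
    }
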
+
+static const char* storage_name[] = {" Frame ", "PhysReg", " Spill "};
+
+void MIRGraph::DumpRegLocTable(RegLocation* table, int count)
+{
+  Codegen* cg = cu_->cg.get();
+  if (cg != NULL) {
+    for (int i = 0; i < count; i++) {
+      LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c %c%d %c%d S%d",
+          table[i].orig_sreg, storage_name[table[i].location],
+          table[i].wide ? 'W' : 'N', table[i].defined ? 'D' : 'U',
+          table[i].fp ? 'F' : table[i].ref ? 'R' :'C',
+          table[i].is_const ? 'c' : 'n',
+          table[i].high_word ? 'H' : 'L', table[i].home ? 'h' : 't',
+          cg->IsFpReg(table[i].low_reg) ? 's' : 'r',
+          table[i].low_reg & cg->FpRegMask(),
+          cg->IsFpReg(table[i].high_reg) ? 's' : 'r',
+          table[i].high_reg & cg->FpRegMask(), table[i].s_reg_low);
+    }
+  } else {
+    // Either pre-regalloc or Portable.
+    for (int i = 0; i < count; i++) {
+      LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c S%d",
+          table[i].orig_sreg, storage_name[table[i].location],
+          table[i].wide ? 'W' : 'N', table[i].defined ? 'D' : 'U',
+          table[i].fp ? 'F' : table[i].ref ? 'R' :'C',
+          table[i].is_const ? 'c' : 'n',
+          table[i].high_word ? 'H' : 'L', table[i].home ? 'h' : 't',
+          table[i].s_reg_low);
+    }
+  }
+}
+
+static const RegLocation fresh_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
+                                     INVALID_REG, INVALID_REG, INVALID_SREG,
+                                     INVALID_SREG};
+
+int MIRGraph::ComputeFrameSize() {
+  /* Figure out the frame size */
+  static const uint32_t kAlignMask = kStackAlignment - 1;
+  uint32_t size = (cu_->num_core_spills + cu_->num_fp_spills +
+                   1 /* filler word */ + cu_->num_regs + cu_->num_outs +
+                   cu_->num_compiler_temps + 1 /* cur_method* */)
+                   * sizeof(uint32_t);
+  /* Align and set */
+  return (size + kAlignMask) & ~(kAlignMask);
+}
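
ComputeFrameSize is straight arithmetic: count the stack words, scale to
bytes, and round up to the stack alignment with the usual mask trick.  A
worked example (assumed 16-byte alignment and made-up counts):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kStackAlignment = 16;  // assumed target alignment
      const uint32_t kAlignMask = kStackAlignment - 1;
      uint32_t core_spills = 5, fp_spills = 0, regs = 10, outs = 4,
               temps = 0;
      uint32_t words = core_spills + fp_spills + 1 /* filler */ + regs +
                       outs + temps + 1 /* cur_method* */;  // 21 words
      uint32_t size = words * sizeof(uint32_t);             // 84 bytes
      uint32_t frame = (size + kAlignMask) & ~kAlignMask;
      std::printf("raw %u -> aligned %u\n", size, frame);   // 84 -> 96
      return 0;
    }
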
+
+/*
+ * Simple register allocation.  Some Dalvik virtual registers may
+ * be promoted to physical registers.  Most of the work for temp
+ * allocation is done on the fly.  We also do some initialization and
+ * type inference here.
+ */
+void MIRGraph::BuildRegLocations()
+{
+  int i;
+  RegLocation* loc;
+
+  /* Allocate the location map */
+  loc = static_cast<RegLocation*>(NewMem(cu_, GetNumSSARegs() * sizeof(*loc),
+                                         true, kAllocRegAlloc));
+  for (i = 0; i < GetNumSSARegs(); i++) {
+    loc[i] = fresh_loc;
+    loc[i].s_reg_low = i;
+    loc[i].is_const = IsBitSet(is_constant_v_, i);
+  }
+
+  /* Patch up the locations for Method* and the compiler temps */
+  loc[cu_->method_sreg].location = kLocCompilerTemp;
+  loc[cu_->method_sreg].defined = true;
+  for (i = 0; i < cu_->num_compiler_temps; i++) {
+    CompilerTemp* ct = reinterpret_cast<CompilerTemp*>(cu_->compiler_temps.elem_list[i]);
+    loc[ct->s_reg].location = kLocCompilerTemp;
+    loc[ct->s_reg].defined = true;
+  }
+
+  cu_->reg_location = loc;
+
+  /* Allocate the promotion map */
+  int num_regs = cu_->num_dalvik_registers;
+  cu_->promotion_map = static_cast<PromotionMap*>
+      (NewMem(cu_, (num_regs + cu_->num_compiler_temps + 1) * sizeof(cu_->promotion_map[0]),
+              true, kAllocRegAlloc));
+
+  /* Add types of incoming arguments based on signature */
+  int num_ins = cu_->num_ins;
+  if (num_ins > 0) {
+    int s_reg = num_regs - num_ins;
+    if ((cu_->access_flags & kAccStatic) == 0) {
+      // For non-static, skip past "this"
+      cu_->reg_location[s_reg].defined = true;
+      cu_->reg_location[s_reg].ref = true;
+      s_reg++;
+    }
+    const char* shorty = cu_->shorty;
+    int shorty_len = strlen(shorty);
+    for (int i = 1; i < shorty_len; i++) {
+      switch (shorty[i]) {
+        case 'D':
+          cu_->reg_location[s_reg].wide = true;
+          cu_->reg_location[s_reg+1].high_word = true;
+          cu_->reg_location[s_reg+1].fp = true;
+          DCHECK_EQ(SRegToVReg(s_reg)+1, SRegToVReg(s_reg+1));
+          cu_->reg_location[s_reg].fp = true;
+          cu_->reg_location[s_reg].defined = true;
+          s_reg++;
+          break;
+        case 'J':
+          cu_->reg_location[s_reg].wide = true;
+          cu_->reg_location[s_reg+1].high_word = true;
+          DCHECK_EQ(SRegToVReg(s_reg)+1, SRegToVReg(s_reg+1));
+          cu_->reg_location[s_reg].core = true;
+          cu_->reg_location[s_reg].defined = true;
+          s_reg++;
+          break;
+        case 'F':
+          cu_->reg_location[s_reg].fp = true;
+          cu_->reg_location[s_reg].defined = true;
+          break;
+        case 'L':
+          cu_->reg_location[s_reg].ref = true;
+          cu_->reg_location[s_reg].defined = true;
+          break;
+        default:
+          cu_->reg_location[s_reg].core = true;
+          cu_->reg_location[s_reg].defined = true;
+          break;
+      }
+      s_reg++;
+    }
+  }
+
+  /* Do type & size inference pass */
+  DataflowIterator iter(this, kPreOrderDFSTraversal, true /* iterative */);
+  bool change = false;
+  for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
+    change = InferTypeAndSize(bb);
+  }
+
+  /*
+   * Set the s_reg_low field to refer to the pre-SSA name of the
+   * base Dalvik virtual register.  Once we add a better register
+   * allocator, remove this remapping.
+   */
+  for (i = 0; i < GetNumSSARegs(); i++) {
+    if (cu_->reg_location[i].location != kLocCompilerTemp) {
+      int orig_sreg = cu_->reg_location[i].s_reg_low;
+      cu_->reg_location[i].orig_sreg = orig_sreg;
+      cu_->reg_location[i].s_reg_low = SRegToVReg(orig_sreg);
+    }
+  }
+}
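
BuildRegLocations types the incoming arguments straight off the method's
shorty descriptor: index 0 is the return type, each following character is
one argument, and wide types (J/D) occupy two consecutive virtual registers.
A minimal sketch of that walk (assumed signature, not ART code):

    #include <cstdio>
    #include <cstring>

    int main() {
      const char* shorty = "VJFL";  // assumed: void f(long, float, Object)
      int vregs = 0;
      for (size_t i = 1; i < strlen(shorty); i++) {
        switch (shorty[i]) {
          case 'J':
          case 'D':
            vregs += 2;  // wide value: low word plus high word
            break;
          default:
            vregs += 1;  // F, L, I, Z, ... take a single vreg
            break;
        }
      }
      std::printf("arguments occupy %d vregs\n", vregs);  // 2 + 1 + 1 = 4
      return 0;
    }
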
+
+}  // namespace art
diff --git a/src/compiler/driver/compiler_driver.h b/src/compiler/driver/compiler_driver.h
index 139bcd1..c87d6f7 100644
--- a/src/compiler/driver/compiler_driver.h
+++ b/src/compiler/driver/compiler_driver.h
@@ -41,7 +41,6 @@
 
 enum CompilerBackend {
   kQuick,
-  kQuickGBC,
   kPortable
 };
 
diff --git a/src/compiler/driver/dex_compilation_unit.cc b/src/compiler/driver/dex_compilation_unit.cc
index 67987fa..962df42 100644
--- a/src/compiler/driver/dex_compilation_unit.cc
+++ b/src/compiler/driver/dex_compilation_unit.cc
@@ -18,6 +18,7 @@
 
 #include "base/stringprintf.h"
 #include "compiler/dex/compiler_ir.h"
+#include "compiler/dex/mir_graph.h"
 #include "utils.h"
 
 namespace art {
diff --git a/src/compiler/llvm/gbc_expander.cc b/src/compiler/llvm/gbc_expander.cc
index 9b71694..c616673 100644
--- a/src/compiler/llvm/gbc_expander.cc
+++ b/src/compiler/llvm/gbc_expander.cc
@@ -26,6 +26,7 @@
 #include "utils_llvm.h"
 #include "verifier/method_verifier.h"
 
+#include "compiler/dex/mir_graph.h"
 #include "compiler/dex/compiler_ir.h"
 #include "compiler/dex/quick/codegen.h"
 using art::kMIRIgnoreNullCheck;
diff --git a/src/dex2oat.cc b/src/dex2oat.cc
index 2f9d579..ecba628 100644
--- a/src/dex2oat.cc
+++ b/src/dex2oat.cc
@@ -761,8 +761,6 @@
       StringPiece backend_str = option.substr(strlen("--compiler-backend=")).data();
       if (backend_str == "Quick") {
         compiler_backend = kQuick;
-      } else if (backend_str == "QuickGBC") {
-        compiler_backend = kQuickGBC;
       } else if (backend_str == "Portable") {
         compiler_backend = kPortable;
       }