Quick compiler: Single .so for all targets

With this CL, all targets can be built into a single .so (but
we're not yet doing so - the compiler driver needs to be reworked).

A new Codegen class is introduced (see compiler/codegen/codegen.h),
along with target-specific sub-classes ArmCodegen, MipsCodegen and
X86Codegen (see compiler/codegen/*/codegen_[Arm|Mips|X86].h).

Additional minor code, comment and format refactoring.  Some source
files combined, temporary header files deleted and a few file
renames to better identify their function.

Next up is combining the Quick and Portable .so files.

Note: building all targets into libdvm-compiler.so increases its
size by 140K bytes.  I'm inclined to not bother introducing conditional
compilation to limit code to the specific target - the added build and
testing complexity doesn't seem worth such a modest size savings.

Change-Id: Id9c5b4502ad6b77cdb31f71d3126f51a4f2e9dfe
diff --git a/build/Android.libart-compiler-llvm.mk b/build/Android.libart-compiler-llvm.mk
index b278310..c072792 100644
--- a/build/Android.libart-compiler-llvm.mk
+++ b/build/Android.libart-compiler-llvm.mk
@@ -39,7 +39,6 @@
   LIBART_COMPILER_LLVM_SRC_FILES += \
 	src/compiler/dataflow.cc \
 	src/compiler/frontend.cc \
-	src/compiler/intermediate_rep.cc \
 	src/compiler/ralloc.cc \
 	src/compiler/ssa_transformation.cc \
 	src/compiler/compiler_utility.cc \
@@ -48,8 +47,8 @@
 	src/compiler/codegen/gen_loadstore.cc \
 	src/compiler/codegen/gen_common.cc \
 	src/compiler/codegen/gen_invoke.cc \
-	src/compiler/codegen/method_bitcode.cc \
-	src/compiler/codegen/method_codegen_driver.cc \
+	src/compiler/codegen/mir_to_gbc.cc \
+	src/compiler/codegen/mir_to_lir.cc \
 	src/compiler/codegen/local_optimizations.cc \
 	src/compiler/codegen/arm/target_arm.cc \
 	src/compiler/codegen/arm/assemble_arm.cc \
@@ -57,6 +56,18 @@
 	src/compiler/codegen/arm/call_arm.cc \
 	src/compiler/codegen/arm/fp_arm.cc \
 	src/compiler/codegen/arm/int_arm.cc \
+	src/compiler/codegen/mips/target_mips.cc \
+	src/compiler/codegen/mips/assemble_mips.cc \
+	src/compiler/codegen/mips/utility_mips.cc \
+	src/compiler/codegen/mips/call_mips.cc \
+	src/compiler/codegen/mips/fp_mips.cc \
+	src/compiler/codegen/mips/int_mips.cc \
+	src/compiler/codegen/x86/target_x86.cc \
+	src/compiler/codegen/x86/assemble_x86.cc \
+	src/compiler/codegen/x86/utility_x86.cc \
+	src/compiler/codegen/x86/call_x86.cc \
+	src/compiler/codegen/x86/fp_x86.cc \
+	src/compiler/codegen/x86/int_x86.cc \
 	src/compiler_llvm/dalvik_reg.cc \
 	src/compiler_llvm/gbc_expander.cc \
 	src/compiler_llvm/method_compiler.cc \
diff --git a/build/Android.libart-compiler.mk b/build/Android.libart-compiler.mk
index c940a42..3d12527 100644
--- a/build/Android.libart-compiler.mk
+++ b/build/Android.libart-compiler.mk
@@ -17,7 +17,6 @@
 LIBART_COMPILER_COMMON_SRC_FILES += \
 	src/compiler/dataflow.cc \
 	src/compiler/frontend.cc \
-	src/compiler/intermediate_rep.cc \
 	src/compiler/ralloc.cc \
 	src/compiler/ssa_transformation.cc \
 	src/compiler/compiler_utility.cc \
@@ -26,8 +25,8 @@
 	src/compiler/codegen/gen_loadstore.cc \
 	src/compiler/codegen/gen_common.cc \
 	src/compiler/codegen/gen_invoke.cc \
-	src/compiler/codegen/method_bitcode.cc \
-	src/compiler/codegen/method_codegen_driver.cc \
+	src/compiler/codegen/mir_to_gbc.cc \
+	src/compiler/codegen/mir_to_lir.cc \
 	src/compiler/codegen/local_optimizations.cc \
 	src/oat/jni/calling_convention.cc \
 	src/oat/jni/jni_compiler.cc \
@@ -35,37 +34,37 @@
 	src/oat/jni/mips/calling_convention_mips.cc \
 	src/oat/jni/x86/calling_convention_x86.cc \
 	src/greenland/ir_builder.cc \
-	src/greenland/intrinsic_helper.cc
-
-LIBART_COMPILER_arm_SRC_FILES += \
-	$(LIBART_COMPILER_COMMON_SRC_FILES) \
+	src/greenland/intrinsic_helper.cc \
 	src/compiler/codegen/arm/target_arm.cc \
 	src/compiler/codegen/arm/assemble_arm.cc \
 	src/compiler/codegen/arm/utility_arm.cc \
 	src/compiler/codegen/arm/call_arm.cc \
 	src/compiler/codegen/arm/fp_arm.cc \
 	src/compiler/codegen/arm/int_arm.cc \
-	src/oat/jni/arm/jni_internal_arm.cc
-
-LIBART_COMPILER_mips_SRC_FILES += \
-	$(LIBART_COMPILER_COMMON_SRC_FILES) \
 	src/compiler/codegen/mips/target_mips.cc \
 	src/compiler/codegen/mips/assemble_mips.cc \
 	src/compiler/codegen/mips/utility_mips.cc \
 	src/compiler/codegen/mips/call_mips.cc \
 	src/compiler/codegen/mips/fp_mips.cc \
 	src/compiler/codegen/mips/int_mips.cc \
-	src/oat/jni/mips/jni_internal_mips.cc
-
-LIBART_COMPILER_x86_SRC_FILES += \
-	$(LIBART_COMPILER_COMMON_SRC_FILES) \
 	src/compiler/codegen/x86/target_x86.cc \
 	src/compiler/codegen/x86/assemble_x86.cc \
 	src/compiler/codegen/x86/utility_x86.cc \
 	src/compiler/codegen/x86/call_x86.cc \
 	src/compiler/codegen/x86/fp_x86.cc \
 	src/compiler/codegen/x86/int_x86.cc \
-	src/oat/jni/x86/jni_internal_x86.cc
+	src/oat/jni/arm/jni_internal_arm.cc \
+	src/oat/jni/mips/jni_internal_mips.cc \
+	src/oat/jni/x86/jni_internal_x86.cc \
+
+LIBART_COMPILER_arm_SRC_FILES += \
+	$(LIBART_COMPILER_COMMON_SRC_FILES)
+
+LIBART_COMPILER_mips_SRC_FILES += \
+	$(LIBART_COMPILER_COMMON_SRC_FILES)
+
+LIBART_COMPILER_x86_SRC_FILES += \
+	$(LIBART_COMPILER_COMMON_SRC_FILES)
 
 # $(1): target or host
 # $(2): ndebug or debug
diff --git a/src/compiler.cc b/src/compiler.cc
index e730f9d..818b412 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -340,7 +340,28 @@
   init_compiler_context(*this);
 
   jni_compiler_ = FindFunction<JniCompilerFn>(compiler_so_name, compiler_library_, "ArtJniCompileMethod");
-  create_invoke_stub_ = FindFunction<CreateInvokeStubFn>(compiler_so_name, compiler_library_, "ArtCreateInvokeStub");
+  if ((compiler_backend_ == kPortable) || (compiler_backend_ == kIceland)){
+    create_invoke_stub_ =
+        FindFunction<CreateInvokeStubFn>(compiler_so_name, compiler_library_, "ArtCreateLLVMInvokeStub");
+  } else {
+    switch (instruction_set) {
+      case kArm:
+      case kThumb2:
+        create_invoke_stub_ =
+            FindFunction<CreateInvokeStubFn>(compiler_so_name, compiler_library_, "ArtCreateArmInvokeStub");
+        break;
+      case kMips:
+        create_invoke_stub_ =
+            FindFunction<CreateInvokeStubFn>(compiler_so_name, compiler_library_, "ArtCreateMipsInvokeStub");
+        break;
+      case kX86:
+        create_invoke_stub_ =
+            FindFunction<CreateInvokeStubFn>(compiler_so_name, compiler_library_, "ArtCreateX86InvokeStub");
+        break;
+      default:
+        LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
+    }
+  }
 
   if ((compiler_backend_ == kPortable) || (compiler_backend_ == kIceland)) {
     create_proxy_stub_ = FindFunction<CreateProxyStubFn>(
diff --git a/src/compiler/codegen/arm/arm_lir.h b/src/compiler/codegen/arm/arm_lir.h
index 7955b1b..09b45b8 100644
--- a/src/compiler/codegen/arm/arm_lir.h
+++ b/src/compiler/codegen/arm/arm_lir.h
@@ -93,18 +93,19 @@
  * +========================+
  */
 
-/* Offset to distingish FP regs */
+// Offset to distinguish FP regs.
 #define ARM_FP_REG_OFFSET 32
-/* Offset to distinguish DP FP regs */
+// Offset to distinguish DP FP regs.
 #define ARM_FP_DOUBLE 64
-/* First FP callee save */
+// First FP callee save.
 #define ARM_FP_CALLEE_SAVE_BASE 16
-/* Reg types */
+// Reg types.
 #define ARM_REGTYPE(x) (x & (ARM_FP_REG_OFFSET | ARM_FP_DOUBLE))
 #define ARM_FPREG(x) ((x & ARM_FP_REG_OFFSET) == ARM_FP_REG_OFFSET)
 #define ARM_LOWREG(x) ((x & 0x7) == x)
 #define ARM_DOUBLEREG(x) ((x & ARM_FP_DOUBLE) == ARM_FP_DOUBLE)
 #define ARM_SINGLEREG(x) (ARM_FPREG(x) && !ARM_DOUBLEREG(x))
+
 /*
  * Note: the low register of a floating point pair is sufficient to
  * create the name of a double, but require both names to be passed to
@@ -113,10 +114,10 @@
  * code that reg locations always describe doubles as a pair of singles.
  */
 #define ARM_S2D(x,y) ((x) | ARM_FP_DOUBLE)
-/* Mask to strip off fp flags */
+// Mask to strip off fp flags.
 #define ARM_FP_REG_MASK (ARM_FP_REG_OFFSET-1)
 
-/* RegisterLocation templates return values (r0, or r0/r1) */
+// RegisterLocation templates return values (r0, or r0/r1).
 #define ARM_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, r0, INVALID_REG,\
                           INVALID_SREG, INVALID_SREG}
 #define ARM_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1, \
@@ -210,7 +211,7 @@
   dr15 = fr30 + ARM_FP_DOUBLE,
 };
 
-/* Target-independent aliases */
+// Target-independent aliases.
 #define rARM_ARG0 r0
 #define rARM_ARG1 r1
 #define rARM_ARG2 r2
@@ -224,7 +225,6 @@
 #define rARM_INVOKE_TGT rARM_LR
 #define rARM_COUNT INVALID_REG
 
-/* Shift encodings */
 enum ArmShiftEncodings {
   kArmLsl = 0x0,
   kArmLsr = 0x1,
@@ -238,326 +238,216 @@
  * Assemble.cc.
  */
 enum ArmOpcode {
-  /************************************************************************/
   kArmFirst = 0,
-  kArm16BitData = kArmFirst, /* DATA   [0] rd[15..0] */
-  kThumbAdcRR,       /* adc   [0100000101] rm[5..3] rd[2..0] */
-  kThumbAddRRI3,     /* add(1)  [0001110] imm_3[8..6] rn[5..3] rd[2..0]*/
-  kThumbAddRI8,      /* add(2)  [00110] rd[10..8] imm_8[7..0] */
-  kThumbAddRRR,      /* add(3)  [0001100] rm[8..6] rn[5..3] rd[2..0] */
-  kThumbAddRRLH,     /* add(4)  [01000100] H12[01] rm[5..3] rd[2..0] */
-  kThumbAddRRHL,     /* add(4)  [01001000] H12[10] rm[5..3] rd[2..0] */
-  kThumbAddRRHH,     /* add(4)  [01001100] H12[11] rm[5..3] rd[2..0] */
-  kThumbAddPcRel,    /* add(5)  [10100] rd[10..8] imm_8[7..0] */
-  kThumbAddSpRel,    /* add(6)  [10101] rd[10..8] imm_8[7..0] */
-  kThumbAddSpI7,     /* add(7)  [101100000] imm_7[6..0] */
-  kThumbAndRR,       /* and   [0100000000] rm[5..3] rd[2..0] */
-  kThumbAsrRRI5,     /* asr(1)  [00010] imm_5[10..6] rm[5..3] rd[2..0] */
-  kThumbAsrRR,       /* asr(2)  [0100000100] rs[5..3] rd[2..0] */
-  kThumbBCond,       /* b(1)  [1101] cond[11..8] offset_8[7..0] */
-  kThumbBUncond,     /* b(2)  [11100] offset_11[10..0] */
-  kThumbBicRR,       /* bic   [0100001110] rm[5..3] rd[2..0] */
-  kThumbBkpt,        /* bkpt  [10111110] imm_8[7..0] */
-  kThumbBlx1,        /* blx(1)  [111] H[10] offset_11[10..0] */
-  kThumbBlx2,        /* blx(1)  [111] H[01] offset_11[10..0] */
-  kThumbBl1,         /* blx(1)  [111] H[10] offset_11[10..0] */
-  kThumbBl2,         /* blx(1)  [111] H[11] offset_11[10..0] */
-  kThumbBlxR,        /* blx(2)  [010001111] rm[6..3] [000] */
-  kThumbBx,          /* bx    [010001110] H2[6..6] rm[5..3] SBZ[000] */
-  kThumbCmnRR,       /* cmn   [0100001011] rm[5..3] rd[2..0] */
-  kThumbCmpRI8,      /* cmp(1)  [00101] rn[10..8] imm_8[7..0] */
-  kThumbCmpRR,       /* cmp(2)  [0100001010] rm[5..3] rd[2..0] */
-  kThumbCmpLH,       /* cmp(3)  [01000101] H12[01] rm[5..3] rd[2..0] */
-  kThumbCmpHL,       /* cmp(3)  [01000110] H12[10] rm[5..3] rd[2..0] */
-  kThumbCmpHH,       /* cmp(3)  [01000111] H12[11] rm[5..3] rd[2..0] */
-  kThumbEorRR,       /* eor   [0100000001] rm[5..3] rd[2..0] */
-  kThumbLdmia,       /* ldmia   [11001] rn[10..8] reglist [7..0] */
-  kThumbLdrRRI5,     /* ldr(1)  [01101] imm_5[10..6] rn[5..3] rd[2..0] */
-  kThumbLdrRRR,      /* ldr(2)  [0101100] rm[8..6] rn[5..3] rd[2..0] */
-  kThumbLdrPcRel,    /* ldr(3)  [01001] rd[10..8] imm_8[7..0] */
-  kThumbLdrSpRel,    /* ldr(4)  [10011] rd[10..8] imm_8[7..0] */
-  kThumbLdrbRRI5,    /* ldrb(1) [01111] imm_5[10..6] rn[5..3] rd[2..0] */
-  kThumbLdrbRRR,     /* ldrb(2) [0101110] rm[8..6] rn[5..3] rd[2..0] */
-  kThumbLdrhRRI5,    /* ldrh(1) [10001] imm_5[10..6] rn[5..3] rd[2..0] */
-  kThumbLdrhRRR,     /* ldrh(2) [0101101] rm[8..6] rn[5..3] rd[2..0] */
-  kThumbLdrsbRRR,    /* ldrsb   [0101011] rm[8..6] rn[5..3] rd[2..0] */
-  kThumbLdrshRRR,    /* ldrsh   [0101111] rm[8..6] rn[5..3] rd[2..0] */
-  kThumbLslRRI5,     /* lsl(1)  [00000] imm_5[10..6] rm[5..3] rd[2..0] */
-  kThumbLslRR,       /* lsl(2)  [0100000010] rs[5..3] rd[2..0] */
-  kThumbLsrRRI5,     /* lsr(1)  [00001] imm_5[10..6] rm[5..3] rd[2..0] */
-  kThumbLsrRR,       /* lsr(2)  [0100000011] rs[5..3] rd[2..0] */
-  kThumbMovImm,      /* mov(1)  [00100] rd[10..8] imm_8[7..0] */
-  kThumbMovRR,       /* mov(2)  [0001110000] rn[5..3] rd[2..0] */
-  kThumbMovRR_H2H,   /* mov(3)  [01000111] H12[11] rm[5..3] rd[2..0] */
-  kThumbMovRR_H2L,   /* mov(3)  [01000110] H12[01] rm[5..3] rd[2..0] */
-  kThumbMovRR_L2H,   /* mov(3)  [01000101] H12[10] rm[5..3] rd[2..0] */
-  kThumbMul,         /* mul   [0100001101] rm[5..3] rd[2..0] */
-  kThumbMvn,         /* mvn   [0100001111] rm[5..3] rd[2..0] */
-  kThumbNeg,         /* neg   [0100001001] rm[5..3] rd[2..0] */
-  kThumbOrr,         /* orr   [0100001100] rm[5..3] rd[2..0] */
-  kThumbPop,         /* pop   [1011110] r[8..8] rl[7..0] */
-  kThumbPush,        /* push  [1011010] r[8..8] rl[7..0] */
-  kThumbRorRR,       /* ror   [0100000111] rs[5..3] rd[2..0] */
-  kThumbSbc,         /* sbc   [0100000110] rm[5..3] rd[2..0] */
-  kThumbStmia,       /* stmia   [11000] rn[10..8] reglist [7.. 0] */
-  kThumbStrRRI5,     /* str(1)  [01100] imm_5[10..6] rn[5..3] rd[2..0] */
-  kThumbStrRRR,      /* str(2)  [0101000] rm[8..6] rn[5..3] rd[2..0] */
-  kThumbStrSpRel,    /* str(3)  [10010] rd[10..8] imm_8[7..0] */
-  kThumbStrbRRI5,    /* strb(1) [01110] imm_5[10..6] rn[5..3] rd[2..0] */
-  kThumbStrbRRR,     /* strb(2) [0101010] rm[8..6] rn[5..3] rd[2..0] */
-  kThumbStrhRRI5,    /* strh(1) [10000] imm_5[10..6] rn[5..3] rd[2..0] */
-  kThumbStrhRRR,     /* strh(2) [0101001] rm[8..6] rn[5..3] rd[2..0] */
-  kThumbSubRRI3,     /* sub(1)  [0001111] imm_3[8..6] rn[5..3] rd[2..0]*/
-  kThumbSubRI8,      /* sub(2)  [00111] rd[10..8] imm_8[7..0] */
-  kThumbSubRRR,      /* sub(3)  [0001101] rm[8..6] rn[5..3] rd[2..0] */
-  kThumbSubSpI7,     /* sub(4)  [101100001] imm_7[6..0] */
-  kThumbSwi,         /* swi   [11011111] imm_8[7..0] */
-  kThumbTst,         /* tst   [0100001000] rm[5..3] rn[2..0] */
-  kThumb2Vldrs,      /* vldr low  sx [111011011001] rn[19..16] rd[15-12]
-                       [1010] imm_8[7..0] */
-  kThumb2Vldrd,      /* vldr low  dx [111011011001] rn[19..16] rd[15-12]
-                        [1011] imm_8[7..0] */
-  kThumb2Vmuls,      /* vmul vd, vn, vm [111011100010] rn[19..16]
-                        rd[15-12] [10100000] rm[3..0] */
-  kThumb2Vmuld,      /* vmul vd, vn, vm [111011100010] rn[19..16]
-                        rd[15-12] [10110000] rm[3..0] */
-  kThumb2Vstrs,      /* vstr low  sx [111011011000] rn[19..16] rd[15-12]
-                        [1010] imm_8[7..0] */
-  kThumb2Vstrd,      /* vstr low  dx [111011011000] rn[19..16] rd[15-12]
-                        [1011] imm_8[7..0] */
-  kThumb2Vsubs,      /* vsub vd, vn, vm [111011100011] rn[19..16]
-                        rd[15-12] [10100040] rm[3..0] */
-  kThumb2Vsubd,      /* vsub vd, vn, vm [111011100011] rn[19..16]
-                        rd[15-12] [10110040] rm[3..0] */
-  kThumb2Vadds,      /* vadd vd, vn, vm [111011100011] rn[19..16]
-                        rd[15-12] [10100000] rm[3..0] */
-  kThumb2Vaddd,      /* vadd vd, vn, vm [111011100011] rn[19..16]
-                        rd[15-12] [10110000] rm[3..0] */
-  kThumb2Vdivs,      /* vdiv vd, vn, vm [111011101000] rn[19..16]
-                        rd[15-12] [10100000] rm[3..0] */
-  kThumb2Vdivd,      /* vdiv vd, vn, vm [111011101000] rn[19..16]
-                        rd[15-12] [10110000] rm[3..0] */
-  kThumb2VcvtIF,     /* vcvt.F32 vd, vm [1110111010111000] vd[15..12]
-                        [10101100] vm[3..0] */
-  kThumb2VcvtID,     /* vcvt.F64 vd, vm [1110111010111000] vd[15..12]
-                       [10111100] vm[3..0] */
-  kThumb2VcvtFI,     /* vcvt.S32.F32 vd, vm [1110111010111101] vd[15..12]
-                       [10101100] vm[3..0] */
-  kThumb2VcvtDI,     /* vcvt.S32.F32 vd, vm [1110111010111101] vd[15..12]
-                       [10111100] vm[3..0] */
-  kThumb2VcvtFd,     /* vcvt.F64.F32 vd, vm [1110111010110111] vd[15..12]
-                       [10101100] vm[3..0] */
-  kThumb2VcvtDF,     /* vcvt.F32.F64 vd, vm [1110111010110111] vd[15..12]
-                       [10111100] vm[3..0] */
-  kThumb2Vsqrts,     /* vsqrt.f32 vd, vm [1110111010110001] vd[15..12]
-                       [10101100] vm[3..0] */
-  kThumb2Vsqrtd,     /* vsqrt.f64 vd, vm [1110111010110001] vd[15..12]
-                       [10111100] vm[3..0] */
-  kThumb2MovImmShift,/* mov(T2) rd, #<const> [11110] i [00001001111]
-                       imm3 rd[11..8] imm8 */
-  kThumb2MovImm16,   /* mov(T3) rd, #<const> [11110] i [0010100] imm4 [0]
-                       imm3 rd[11..8] imm8 */
-  kThumb2StrRRI12,   /* str(Imm,T3) rd,[rn,#imm12] [111110001100]
-                       rn[19..16] rt[15..12] imm12[11..0] */
-  kThumb2LdrRRI12,   /* str(Imm,T3) rd,[rn,#imm12] [111110001100]
-                       rn[19..16] rt[15..12] imm12[11..0] */
-  kThumb2StrRRI8Predec, /* str(Imm,T4) rd,[rn,#-imm8] [111110000100]
-                       rn[19..16] rt[15..12] [1100] imm[7..0]*/
-  kThumb2LdrRRI8Predec, /* ldr(Imm,T4) rd,[rn,#-imm8] [111110000101]
-                       rn[19..16] rt[15..12] [1100] imm[7..0]*/
-  kThumb2Cbnz,       /* cbnz rd,<label> [101110] i [1] imm5[7..3]
-                       rn[2..0] */
-  kThumb2Cbz,        /* cbn rd,<label> [101100] i [1] imm5[7..3]
-                       rn[2..0] */
-  kThumb2AddRRI12,   /* add rd, rn, #imm12 [11110] i [100000] rn[19..16]
-                       [0] imm3[14..12] rd[11..8] imm8[7..0] */
-  kThumb2MovRR,      /* mov rd, rm [11101010010011110000] rd[11..8]
-                       [0000] rm[3..0] */
-  kThumb2Vmovs,      /* vmov.f32 vd, vm [111011101] D [110000]
-                       vd[15..12] 101001] M [0] vm[3..0] */
-  kThumb2Vmovd,      /* vmov.f64 vd, vm [111011101] D [110000]
-                       vd[15..12] 101101] M [0] vm[3..0] */
-  kThumb2Ldmia,      /* ldmia  [111010001001[ rn[19..16] mask[15..0] */
-  kThumb2Stmia,      /* stmia  [111010001000[ rn[19..16] mask[15..0] */
-  kThumb2AddRRR,     /* add [111010110000] rn[19..16] [0000] rd[11..8]
-                        [0000] rm[3..0] */
-  kThumb2SubRRR,     /* sub [111010111010] rn[19..16] [0000] rd[11..8]
-                       [0000] rm[3..0] */
-  kThumb2SbcRRR,     /* sbc [111010110110] rn[19..16] [0000] rd[11..8]
-                       [0000] rm[3..0] */
-  kThumb2CmpRR,      /* cmp [111010111011] rn[19..16] [0000] [1111]
-                       [0000] rm[3..0] */
-  kThumb2SubRRI12,   /* sub rd, rn, #imm12 [11110] i [01010] rn[19..16]
-                       [0] imm3[14..12] rd[11..8] imm8[7..0] */
-  kThumb2MvnImm12,   /* mov(T2) rd, #<const> [11110] i [00011011110]
-                       imm3 rd[11..8] imm8 */
-  kThumb2Sel,        /* sel rd, rn, rm [111110101010] rn[19-16] rd[11-8]
-                       rm[3-0] */
-  kThumb2Ubfx,       /* ubfx rd,rn,#lsb,#width [111100111100] rn[19..16]
-                       [0] imm3[14-12] rd[11-8] w[4-0] */
-  kThumb2Sbfx,       /* ubfx rd,rn,#lsb,#width [111100110100] rn[19..16]
-                       [0] imm3[14-12] rd[11-8] w[4-0] */
-  kThumb2LdrRRR,     /* ldr rt,[rn,rm,LSL #imm] [111110000101] rn[19-16]
-                       rt[15-12] [000000] imm[5-4] rm[3-0] */
-  kThumb2LdrhRRR,    /* ldrh rt,[rn,rm,LSL #imm] [111110000101] rn[19-16]
-                       rt[15-12] [000000] imm[5-4] rm[3-0] */
-  kThumb2LdrshRRR,   /* ldrsh rt,[rn,rm,LSL #imm] [111110000101] rn[19-16]
-                       rt[15-12] [000000] imm[5-4] rm[3-0] */
-  kThumb2LdrbRRR,    /* ldrb rt,[rn,rm,LSL #imm] [111110000101] rn[19-16]
-                       rt[15-12] [000000] imm[5-4] rm[3-0] */
-  kThumb2LdrsbRRR,   /* ldrsb rt,[rn,rm,LSL #imm] [111110000101] rn[19-16]
-                       rt[15-12] [000000] imm[5-4] rm[3-0] */
-  kThumb2StrRRR,     /* str rt,[rn,rm,LSL #imm] [111110000100] rn[19-16]
-                       rt[15-12] [000000] imm[5-4] rm[3-0] */
-  kThumb2StrhRRR,    /* str rt,[rn,rm,LSL #imm] [111110000010] rn[19-16]
-                       rt[15-12] [000000] imm[5-4] rm[3-0] */
-  kThumb2StrbRRR,    /* str rt,[rn,rm,LSL #imm] [111110000000] rn[19-16]
-                       rt[15-12] [000000] imm[5-4] rm[3-0] */
-  kThumb2LdrhRRI12,  /* ldrh rt,[rn,#imm12] [111110001011]
-                       rt[15..12] rn[19..16] imm12[11..0] */
-  kThumb2LdrshRRI12, /* ldrsh rt,[rn,#imm12] [111110011011]
-                       rt[15..12] rn[19..16] imm12[11..0] */
-  kThumb2LdrbRRI12,  /* ldrb rt,[rn,#imm12] [111110001001]
-                       rt[15..12] rn[19..16] imm12[11..0] */
-  kThumb2LdrsbRRI12, /* ldrsb rt,[rn,#imm12] [111110011001]
-                       rt[15..12] rn[19..16] imm12[11..0] */
-  kThumb2StrhRRI12,  /* strh rt,[rn,#imm12] [111110001010]
-                       rt[15..12] rn[19..16] imm12[11..0] */
-  kThumb2StrbRRI12,  /* strb rt,[rn,#imm12] [111110001000]
-                       rt[15..12] rn[19..16] imm12[11..0] */
-  kThumb2Pop,        /* pop   [1110100010111101] list[15-0]*/
-  kThumb2Push,       /* push  [1110100100101101] list[15-0]*/
-  kThumb2CmpRI8,     /* cmp rn, #<const> [11110] i [011011] rn[19-16] [0]
-                       imm3 [1111] imm8[7..0] */
-  kThumb2AdcRRR,     /* adc [111010110101] rn[19..16] [0000] rd[11..8]
-                       [0000] rm[3..0] */
-  kThumb2AndRRR,     /* and [111010100000] rn[19..16] [0000] rd[11..8]
-                       [0000] rm[3..0] */
-  kThumb2BicRRR,     /* bic [111010100010] rn[19..16] [0000] rd[11..8]
-                       [0000] rm[3..0] */
-  kThumb2CmnRR,      /* cmn [111010110001] rn[19..16] [0000] [1111]
-                       [0000] rm[3..0] */
-  kThumb2EorRRR,     /* eor [111010101000] rn[19..16] [0000] rd[11..8]
-                       [0000] rm[3..0] */
-  kThumb2MulRRR,     /* mul [111110110000] rn[19..16] [1111] rd[11..8]
-                       [0000] rm[3..0] */
-  kThumb2MnvRR,      /* mvn [11101010011011110] rd[11-8] [0000]
-                       rm[3..0] */
-  kThumb2RsubRRI8,   /* rsub [111100011100] rn[19..16] [0000] rd[11..8]
-                       imm8[7..0] */
-  kThumb2NegRR,      /* actually rsub rd, rn, #0 */
-  kThumb2OrrRRR,     /* orr [111010100100] rn[19..16] [0000] rd[11..8]
-                       [0000] rm[3..0] */
-  kThumb2TstRR,      /* tst [111010100001] rn[19..16] [0000] [1111]
-                       [0000] rm[3..0] */
-  kThumb2LslRRR,     /* lsl [111110100000] rn[19..16] [1111] rd[11..8]
-                       [0000] rm[3..0] */
-  kThumb2LsrRRR,     /* lsr [111110100010] rn[19..16] [1111] rd[11..8]
-                        [0000] rm[3..0] */
-  kThumb2AsrRRR,     /* asr [111110100100] rn[19..16] [1111] rd[11..8]
-                       [0000] rm[3..0] */
-  kThumb2RorRRR,     /* ror [111110100110] rn[19..16] [1111] rd[11..8]
-                       [0000] rm[3..0] */
-  kThumb2LslRRI5,    /* lsl [11101010010011110] imm[14.12] rd[11..8]
-                       [00] rm[3..0] */
-  kThumb2LsrRRI5,    /* lsr [11101010010011110] imm[14.12] rd[11..8]
-                       [01] rm[3..0] */
-  kThumb2AsrRRI5,    /* asr [11101010010011110] imm[14.12] rd[11..8]
-                       [10] rm[3..0] */
-  kThumb2RorRRI5,    /* ror [11101010010011110] imm[14.12] rd[11..8]
-                       [11] rm[3..0] */
-  kThumb2BicRRI8,    /* bic [111100000010] rn[19..16] [0] imm3
-                       rd[11..8] imm8 */
-  kThumb2AndRRI8,    /* bic [111100000000] rn[19..16] [0] imm3
-                       rd[11..8] imm8 */
-  kThumb2OrrRRI8,    /* orr [111100000100] rn[19..16] [0] imm3
-                       rd[11..8] imm8 */
-  kThumb2EorRRI8,    /* eor [111100001000] rn[19..16] [0] imm3
-                       rd[11..8] imm8 */
-  kThumb2AddRRI8,    /* add [111100001000] rn[19..16] [0] imm3
-                       rd[11..8] imm8 */
-  kThumb2AdcRRI8,    /* adc [111100010101] rn[19..16] [0] imm3
-                       rd[11..8] imm8 */
-  kThumb2SubRRI8,    /* sub [111100011011] rn[19..16] [0] imm3
-                       rd[11..8] imm8 */
-  kThumb2SbcRRI8,    /* sbc [111100010111] rn[19..16] [0] imm3
-                       rd[11..8] imm8 */
-  kThumb2It,         /* it [10111111] firstcond[7-4] mask[3-0] */
-  kThumb2Fmstat,     /* fmstat [11101110111100011111101000010000] */
-  kThumb2Vcmpd,      /* vcmp [111011101] D [11011] rd[15-12] [1011]
-                        E [1] M [0] rm[3-0] */
-  kThumb2Vcmps,      /* vcmp [111011101] D [11010] rd[15-12] [1011]
-                        E [1] M [0] rm[3-0] */
-  kThumb2LdrPcRel12, /* ldr rd,[pc,#imm12] [1111100011011111] rt[15-12]
-                        imm12[11-0] */
-  kThumb2BCond,      /* b<c> [1110] S cond[25-22] imm6[21-16] [10]
-                        J1 [0] J2 imm11[10..0] */
-  kThumb2Vmovd_RR,   /* vmov [111011101] D [110000] vd[15-12 [101101]
-                        M [0] vm[3-0] */
-  kThumb2Vmovs_RR,   /* vmov [111011101] D [110000] vd[15-12 [101001]
-                        M [0] vm[3-0] */
-  kThumb2Fmrs,       /* vmov [111011100000] vn[19-16] rt[15-12] [1010]
-                        N [0010000] */
-  kThumb2Fmsr,       /* vmov [111011100001] vn[19-16] rt[15-12] [1010]
-                        N [0010000] */
-  kThumb2Fmrrd,      /* vmov [111011000100] rt2[19-16] rt[15-12]
-                        [101100] M [1] vm[3-0] */
-  kThumb2Fmdrr,      /* vmov [111011000101] rt2[19-16] rt[15-12]
-                        [101100] M [1] vm[3-0] */
-  kThumb2Vabsd,      /* vabs.f64 [111011101] D [110000] rd[15-12]
-                        [1011110] M [0] vm[3-0] */
-  kThumb2Vabss,      /* vabs.f32 [111011101] D [110000] rd[15-12]
-                        [1010110] M [0] vm[3-0] */
-  kThumb2Vnegd,      /* vneg.f64 [111011101] D [110000] rd[15-12]
-                        [1011110] M [0] vm[3-0] */
-  kThumb2Vnegs,      /* vneg.f32 [111011101] D [110000] rd[15-12]
-                        [1010110] M [0] vm[3-0] */
-  kThumb2Vmovs_IMM8, /* vmov.f32 [111011101] D [11] imm4h[19-16] vd[15-12]
-                        [10100000] imm4l[3-0] */
-  kThumb2Vmovd_IMM8, /* vmov.f64 [111011101] D [11] imm4h[19-16] vd[15-12]
-                        [10110000] imm4l[3-0] */
-  kThumb2Mla,        /* mla [111110110000] rn[19-16] ra[15-12] rd[7-4]
-                        [0000] rm[3-0] */
-  kThumb2Umull,      /* umull [111110111010] rn[19-16], rdlo[15-12]
-                        rdhi[11-8] [0000] rm[3-0] */
-  kThumb2Ldrex,      /* ldrex [111010000101] rn[19-16] rt[11-8] [1111]
-                        imm8[7-0] */
-  kThumb2Strex,      /* strex [111010000100] rn[19-16] rt[11-8] rd[11-8]
-                        imm8[7-0] */
-  kThumb2Clrex,      /* clrex [111100111011111110000111100101111] */
-  kThumb2Bfi,        /* bfi [111100110110] rn[19-16] [0] imm3[14-12]
-                        rd[11-8] imm2[7-6] [0] msb[4-0] */
-  kThumb2Bfc,        /* bfc [11110011011011110] [0] imm3[14-12]
-                        rd[11-8] imm2[7-6] [0] msb[4-0] */
-  kThumb2Dmb,        /* dmb [1111001110111111100011110101] option[3-0] */
-  kThumb2LdrPcReln12,/* ldr rd,[pc,-#imm12] [1111100011011111] rt[15-12]
-                        imm12[11-0] */
-  kThumb2Stm,        /* stm <list> [111010010000] rn[19-16] 000 rl[12-0] */
-  kThumbUndefined,   /* undefined [11011110xxxxxxxx] */
-  kThumb2VPopCS,     /* vpop <list of callee save fp singles (s16+) */
-  kThumb2VPushCS,    /* vpush <list callee save fp singles (s16+) */
-  kThumb2Vldms,      /* vldms rd, <list> */
-  kThumb2Vstms,      /* vstms rd, <list> */
-  kThumb2BUncond,    /* b <label> */
-  kThumb2MovImm16H,  /* similar to kThumb2MovImm16, but target high hw */
-  kThumb2AddPCR,     /* Thumb2 2-operand add with hard-coded PC target */
-  kThumb2Adr,        /* Special purpose encoding of ADR for switch tables */
-  kThumb2MovImm16LST,/* Special purpose version for switch table use */
-  kThumb2MovImm16HST,/* Special purpose version for switch table use */
-  kThumb2LdmiaWB,    /* ldmia  [111010011001[ rn[19..16] mask[15..0] */
-  kThumb2SubsRRI12,  /* setflags encoding */
-  kThumb2OrrRRRs,    /* orrx [111010100101] rn[19..16] [0000] rd[11..8]
-                        [0000] rm[3..0] */
-  kThumb2Push1,      /* t3 encoding of push */
-  kThumb2Pop1,       /* t3 encoding of pop */
-  kThumb2RsubRRR,    /* rsb [111010111101] rn[19..16] [0000] rd[11..8]
-                       [0000] rm[3..0] */
-  kThumb2Smull,      /* smull [111110111000] rn[19-16], rdlo[15-12]
-                        rdhi[11-8] [0000] rm[3-0] */
+  kArm16BitData = kArmFirst, // DATA   [0] rd[15..0].
+  kThumbAdcRR,       // adc   [0100000101] rm[5..3] rd[2..0].
+  kThumbAddRRI3,     // add(1)  [0001110] imm_3[8..6] rn[5..3] rd[2..0].
+  kThumbAddRI8,      // add(2)  [00110] rd[10..8] imm_8[7..0].
+  kThumbAddRRR,      // add(3)  [0001100] rm[8..6] rn[5..3] rd[2..0].
+  kThumbAddRRLH,     // add(4)  [01000100] H12[01] rm[5..3] rd[2..0].
+  kThumbAddRRHL,     // add(4)  [01001000] H12[10] rm[5..3] rd[2..0].
+  kThumbAddRRHH,     // add(4)  [01001100] H12[11] rm[5..3] rd[2..0].
+  kThumbAddPcRel,    // add(5)  [10100] rd[10..8] imm_8[7..0].
+  kThumbAddSpRel,    // add(6)  [10101] rd[10..8] imm_8[7..0].
+  kThumbAddSpI7,     // add(7)  [101100000] imm_7[6..0].
+  kThumbAndRR,       // and   [0100000000] rm[5..3] rd[2..0].
+  kThumbAsrRRI5,     // asr(1)  [00010] imm_5[10..6] rm[5..3] rd[2..0].
+  kThumbAsrRR,       // asr(2)  [0100000100] rs[5..3] rd[2..0].
+  kThumbBCond,       // b(1)  [1101] cond[11..8] offset_8[7..0].
+  kThumbBUncond,     // b(2)  [11100] offset_11[10..0].
+  kThumbBicRR,       // bic   [0100001110] rm[5..3] rd[2..0].
+  kThumbBkpt,        // bkpt  [10111110] imm_8[7..0].
+  kThumbBlx1,        // blx(1)  [111] H[10] offset_11[10..0].
+  kThumbBlx2,        // blx(1)  [111] H[01] offset_11[10..0].
+  kThumbBl1,         // blx(1)  [111] H[10] offset_11[10..0].
+  kThumbBl2,         // blx(1)  [111] H[11] offset_11[10..0].
+  kThumbBlxR,        // blx(2)  [010001111] rm[6..3] [000].
+  kThumbBx,          // bx    [010001110] H2[6..6] rm[5..3] SBZ[000].
+  kThumbCmnRR,       // cmn   [0100001011] rm[5..3] rd[2..0].
+  kThumbCmpRI8,      // cmp(1)  [00101] rn[10..8] imm_8[7..0].
+  kThumbCmpRR,       // cmp(2)  [0100001010] rm[5..3] rd[2..0].
+  kThumbCmpLH,       // cmp(3)  [01000101] H12[01] rm[5..3] rd[2..0].
+  kThumbCmpHL,       // cmp(3)  [01000110] H12[10] rm[5..3] rd[2..0].
+  kThumbCmpHH,       // cmp(3)  [01000111] H12[11] rm[5..3] rd[2..0].
+  kThumbEorRR,       // eor   [0100000001] rm[5..3] rd[2..0].
+  kThumbLdmia,       // ldmia   [11001] rn[10..8] reglist [7..0].
+  kThumbLdrRRI5,     // ldr(1)  [01101] imm_5[10..6] rn[5..3] rd[2..0].
+  kThumbLdrRRR,      // ldr(2)  [0101100] rm[8..6] rn[5..3] rd[2..0].
+  kThumbLdrPcRel,    // ldr(3)  [01001] rd[10..8] imm_8[7..0].
+  kThumbLdrSpRel,    // ldr(4)  [10011] rd[10..8] imm_8[7..0].
+  kThumbLdrbRRI5,    // ldrb(1) [01111] imm_5[10..6] rn[5..3] rd[2..0].
+  kThumbLdrbRRR,     // ldrb(2) [0101110] rm[8..6] rn[5..3] rd[2..0].
+  kThumbLdrhRRI5,    // ldrh(1) [10001] imm_5[10..6] rn[5..3] rd[2..0].
+  kThumbLdrhRRR,     // ldrh(2) [0101101] rm[8..6] rn[5..3] rd[2..0].
+  kThumbLdrsbRRR,    // ldrsb   [0101011] rm[8..6] rn[5..3] rd[2..0].
+  kThumbLdrshRRR,    // ldrsh   [0101111] rm[8..6] rn[5..3] rd[2..0].
+  kThumbLslRRI5,     // lsl(1)  [00000] imm_5[10..6] rm[5..3] rd[2..0].
+  kThumbLslRR,       // lsl(2)  [0100000010] rs[5..3] rd[2..0].
+  kThumbLsrRRI5,     // lsr(1)  [00001] imm_5[10..6] rm[5..3] rd[2..0].
+  kThumbLsrRR,       // lsr(2)  [0100000011] rs[5..3] rd[2..0].
+  kThumbMovImm,      // mov(1)  [00100] rd[10..8] imm_8[7..0].
+  kThumbMovRR,       // mov(2)  [0001110000] rn[5..3] rd[2..0].
+  kThumbMovRR_H2H,   // mov(3)  [01000111] H12[11] rm[5..3] rd[2..0].
+  kThumbMovRR_H2L,   // mov(3)  [01000110] H12[01] rm[5..3] rd[2..0].
+  kThumbMovRR_L2H,   // mov(3)  [01000101] H12[10] rm[5..3] rd[2..0].
+  kThumbMul,         // mul   [0100001101] rm[5..3] rd[2..0].
+  kThumbMvn,         // mvn   [0100001111] rm[5..3] rd[2..0].
+  kThumbNeg,         // neg   [0100001001] rm[5..3] rd[2..0].
+  kThumbOrr,         // orr   [0100001100] rm[5..3] rd[2..0].
+  kThumbPop,         // pop   [1011110] r[8..8] rl[7..0].
+  kThumbPush,        // push  [1011010] r[8..8] rl[7..0].
+  kThumbRorRR,       // ror   [0100000111] rs[5..3] rd[2..0].
+  kThumbSbc,         // sbc   [0100000110] rm[5..3] rd[2..0].
+  kThumbStmia,       // stmia   [11000] rn[10..8] reglist [7.. 0].
+  kThumbStrRRI5,     // str(1)  [01100] imm_5[10..6] rn[5..3] rd[2..0].
+  kThumbStrRRR,      // str(2)  [0101000] rm[8..6] rn[5..3] rd[2..0].
+  kThumbStrSpRel,    // str(3)  [10010] rd[10..8] imm_8[7..0].
+  kThumbStrbRRI5,    // strb(1) [01110] imm_5[10..6] rn[5..3] rd[2..0].
+  kThumbStrbRRR,     // strb(2) [0101010] rm[8..6] rn[5..3] rd[2..0].
+  kThumbStrhRRI5,    // strh(1) [10000] imm_5[10..6] rn[5..3] rd[2..0].
+  kThumbStrhRRR,     // strh(2) [0101001] rm[8..6] rn[5..3] rd[2..0].
+  kThumbSubRRI3,     // sub(1)  [0001111] imm_3[8..6] rn[5..3] rd[2..0].
+  kThumbSubRI8,      // sub(2)  [00111] rd[10..8] imm_8[7..0].
+  kThumbSubRRR,      // sub(3)  [0001101] rm[8..6] rn[5..3] rd[2..0].
+  kThumbSubSpI7,     // sub(4)  [101100001] imm_7[6..0].
+  kThumbSwi,         // swi   [11011111] imm_8[7..0].
+  kThumbTst,         // tst   [0100001000] rm[5..3] rn[2..0].
+  kThumb2Vldrs,      // vldr low  sx [111011011001] rn[19..16] rd[15-12] [1010] imm_8[7..0].
+  kThumb2Vldrd,      // vldr low  dx [111011011001] rn[19..16] rd[15-12] [1011] imm_8[7..0].
+  kThumb2Vmuls,      // vmul vd, vn, vm [111011100010] rn[19..16] rd[15-12] [10100000] rm[3..0].
+  kThumb2Vmuld,      // vmul vd, vn, vm [111011100010] rn[19..16] rd[15-12] [10110000] rm[3..0].
+  kThumb2Vstrs,      // vstr low  sx [111011011000] rn[19..16] rd[15-12] [1010] imm_8[7..0].
+  kThumb2Vstrd,      // vstr low  dx [111011011000] rn[19..16] rd[15-12] [1011] imm_8[7..0].
+  kThumb2Vsubs,      // vsub vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10100100] rm[3..0].
+  kThumb2Vsubd,      // vsub vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10110100] rm[3..0].
+  kThumb2Vadds,      // vadd vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10100000] rm[3..0].
+  kThumb2Vaddd,      // vadd vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10110000] rm[3..0].
+  kThumb2Vdivs,      // vdiv vd, vn, vm [111011101000] rn[19..16] rd[15-12] [10100000] rm[3..0].
+  kThumb2Vdivd,      // vdiv vd, vn, vm [111011101000] rn[19..16] rd[15-12] [10110000] rm[3..0].
+  kThumb2VcvtIF,     // vcvt.F32 vd, vm [1110111010111000] vd[15..12] [10101100] vm[3..0].
+  kThumb2VcvtID,     // vcvt.F64 vd, vm [1110111010111000] vd[15..12] [10111100] vm[3..0].
+  kThumb2VcvtFI,     // vcvt.S32.F32 vd, vm [1110111010111101] vd[15..12] [10101100] vm[3..0].
+  kThumb2VcvtDI,     // vcvt.S32.F32 vd, vm [1110111010111101] vd[15..12] [10111100] vm[3..0].
+  kThumb2VcvtFd,     // vcvt.F64.F32 vd, vm [1110111010110111] vd[15..12] [10101100] vm[3..0].
+  kThumb2VcvtDF,     // vcvt.F32.F64 vd, vm [1110111010110111] vd[15..12] [10111100] vm[3..0].
+  kThumb2Vsqrts,     // vsqrt.f32 vd, vm [1110111010110001] vd[15..12] [10101100] vm[3..0].
+  kThumb2Vsqrtd,     // vsqrt.f64 vd, vm [1110111010110001] vd[15..12] [10111100] vm[3..0].
+  kThumb2MovImmShift,// mov(T2) rd, #<const> [11110] i [00001001111] imm3 rd[11..8] imm8.
+  kThumb2MovImm16,   // mov(T3) rd, #<const> [11110] i [0010100] imm4 [0] imm3 rd[11..8] imm8.
+  kThumb2StrRRI12,   // str(Imm,T3) rd,[rn,#imm12] [111110001100] rn[19..16] rt[15..12] imm12[11..0].
+  kThumb2LdrRRI12,   // ldr(Imm,T3) rd,[rn,#imm12] [111110001101] rn[19..16] rt[15..12] imm12[11..0].
+  kThumb2StrRRI8Predec, // str(Imm,T4) rd,[rn,#-imm8] [111110000100] rn[19..16] rt[15..12] [1100] imm[7..0].
+  kThumb2LdrRRI8Predec, // ldr(Imm,T4) rd,[rn,#-imm8] [111110000101] rn[19..16] rt[15..12] [1100] imm[7..0].
+  kThumb2Cbnz,       // cbnz rd,<label> [101110] i [1] imm5[7..3] rn[2..0].
+  kThumb2Cbz,        // cbz rd,<label> [101100] i [1] imm5[7..3] rn[2..0].
+  kThumb2AddRRI12,   // add rd, rn, #imm12 [11110] i [100000] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
+  kThumb2MovRR,      // mov rd, rm [11101010010011110000] rd[11..8] [0000] rm[3..0].
+  kThumb2Vmovs,      // vmov.f32 vd, vm [111011101] D [110000] vd[15..12] 101001] M [0] vm[3..0].
+  kThumb2Vmovd,      // vmov.f64 vd, vm [111011101] D [110000] vd[15..12] 101101] M [0] vm[3..0].
+  kThumb2Ldmia,      // ldmia  [111010001001] rn[19..16] mask[15..0].
+  kThumb2Stmia,      // stmia  [111010001000] rn[19..16] mask[15..0].
+  kThumb2AddRRR,     // add [111010110000] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2SubRRR,     // sub [111010111010] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2SbcRRR,     // sbc [111010110110] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2CmpRR,      // cmp [111010111011] rn[19..16] [0000] [1111] [0000] rm[3..0].
+  kThumb2SubRRI12,   // sub rd, rn, #imm12 [11110] i [01010] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
+  kThumb2MvnImm12,   // mov(T2) rd, #<const> [11110] i [00011011110] imm3 rd[11..8] imm8.
+  kThumb2Sel,        // sel rd, rn, rm [111110101010] rn[19-16] rd[11-8] rm[3-0].
+  kThumb2Ubfx,       // ubfx rd,rn,#lsb,#width [111100111100] rn[19..16] [0] imm3[14-12] rd[11-8] w[4-0].
+  kThumb2Sbfx,       // sbfx rd,rn,#lsb,#width [111100110100] rn[19..16] [0] imm3[14-12] rd[11-8] w[4-0].
+  kThumb2LdrRRR,     // ldr rt,[rn,rm,LSL #imm] [111110000101] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+  kThumb2LdrhRRR,    // ldrh rt,[rn,rm,LSL #imm] [111110000011] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+  kThumb2LdrshRRR,   // ldrsh rt,[rn,rm,LSL #imm] [111110010011] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+  kThumb2LdrbRRR,    // ldrb rt,[rn,rm,LSL #imm] [111110000001] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+  kThumb2LdrsbRRR,   // ldrsb rt,[rn,rm,LSL #imm] [111110010001] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+  kThumb2StrRRR,     // str rt,[rn,rm,LSL #imm] [111110000100] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+  kThumb2StrhRRR,    // strh rt,[rn,rm,LSL #imm] [111110000010] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+  kThumb2StrbRRR,    // strb rt,[rn,rm,LSL #imm] [111110000000] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+  kThumb2LdrhRRI12,  // ldrh rt,[rn,#imm12] [111110001011] rt[15..12] rn[19..16] imm12[11..0].
+  kThumb2LdrshRRI12, // ldrsh rt,[rn,#imm12] [111110011011] rt[15..12] rn[19..16] imm12[11..0].
+  kThumb2LdrbRRI12,  // ldrb rt,[rn,#imm12] [111110001001] rt[15..12] rn[19..16] imm12[11..0].
+  kThumb2LdrsbRRI12, // ldrsb rt,[rn,#imm12] [111110011001] rt[15..12] rn[19..16] imm12[11..0].
+  kThumb2StrhRRI12,  // strh rt,[rn,#imm12] [111110001010] rt[15..12] rn[19..16] imm12[11..0].
+  kThumb2StrbRRI12,  // strb rt,[rn,#imm12] [111110001000] rt[15..12] rn[19..16] imm12[11..0].
+  kThumb2Pop,        // pop   [1110100010111101] list[15-0].
+  kThumb2Push,       // push  [1110100100101101] list[15-0].
+  kThumb2CmpRI8,     // cmp rn, #<const> [11110] i [011011] rn[19-16] [0] imm3 [1111] imm8[7..0].
+  kThumb2AdcRRR,     // adc [111010110101] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2AndRRR,     // and [111010100000] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2BicRRR,     // bic [111010100010] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2CmnRR,      // cmn [111010110001] rn[19..16] [0000] [1111] [0000] rm[3..0].
+  kThumb2EorRRR,     // eor [111010101000] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2MulRRR,     // mul [111110110000] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
+  kThumb2MnvRR,      // mvn [11101010011011110] rd[11-8] [0000] rm[3..0].
+  kThumb2RsubRRI8,   // rsub [111100011100] rn[19..16] [0000] rd[11..8] imm8[7..0].
+  kThumb2NegRR,      // actually rsub rd, rn, #0.
+  kThumb2OrrRRR,     // orr [111010100100] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2TstRR,      // tst [111010100001] rn[19..16] [0000] [1111] [0000] rm[3..0].
+  kThumb2LslRRR,     // lsl [111110100000] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
+  kThumb2LsrRRR,     // lsr [111110100010] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
+  kThumb2AsrRRR,     // asr [111110100100] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
+  kThumb2RorRRR,     // ror [111110100110] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
+  kThumb2LslRRI5,    // lsl [11101010010011110] imm[14.12] rd[11..8] [00] rm[3..0].
+  kThumb2LsrRRI5,    // lsr [11101010010011110] imm[14.12] rd[11..8] [01] rm[3..0].
+  kThumb2AsrRRI5,    // asr [11101010010011110] imm[14.12] rd[11..8] [10] rm[3..0].
+  kThumb2RorRRI5,    // ror [11101010010011110] imm[14.12] rd[11..8] [11] rm[3..0].
+  kThumb2BicRRI8,    // bic [111100000010] rn[19..16] [0] imm3 rd[11..8] imm8.
+  kThumb2AndRRI8,    // and [111100000000] rn[19..16] [0] imm3 rd[11..8] imm8.
+  kThumb2OrrRRI8,    // orr [111100000100] rn[19..16] [0] imm3 rd[11..8] imm8.
+  kThumb2EorRRI8,    // eor [111100001000] rn[19..16] [0] imm3 rd[11..8] imm8.
+  kThumb2AddRRI8,    // add [111100001000] rn[19..16] [0] imm3 rd[11..8] imm8.
+  kThumb2AdcRRI8,    // adc [111100010101] rn[19..16] [0] imm3 rd[11..8] imm8.
+  kThumb2SubRRI8,    // sub [111100011011] rn[19..16] [0] imm3 rd[11..8] imm8.
+  kThumb2SbcRRI8,    // sbc [111100010111] rn[19..16] [0] imm3 rd[11..8] imm8.
+  kThumb2It,         // it [10111111] firstcond[7-4] mask[3-0].
+  kThumb2Fmstat,     // fmstat [11101110111100011111101000010000].
+  kThumb2Vcmpd,      // vcmp [111011101] D [11011] rd[15-12] [1011] E [1] M [0] rm[3-0].
+  kThumb2Vcmps,      // vcmp [111011101] D [11010] rd[15-12] [1011] E [1] M [0] rm[3-0].
+  kThumb2LdrPcRel12, // ldr rd,[pc,#imm12] [1111100011011111] rt[15-12] imm12[11-0].
+  kThumb2BCond,      // b<c> [1110] S cond[25-22] imm6[21-16] [10] J1 [0] J2 imm11[10..0].
+  kThumb2Vmovd_RR,   // vmov [111011101] D [110000] vd[15-12 [101101] M [0] vm[3-0].
+  kThumb2Vmovs_RR,   // vmov [111011101] D [110000] vd[15-12 [101001] M [0] vm[3-0].
+  kThumb2Fmrs,       // vmov [111011100000] vn[19-16] rt[15-12] [1010] N [0010000].
+  kThumb2Fmsr,       // vmov [111011100001] vn[19-16] rt[15-12] [1010] N [0010000].
+  kThumb2Fmrrd,      // vmov [111011000100] rt2[19-16] rt[15-12] [101100] M [1] vm[3-0].
+  kThumb2Fmdrr,      // vmov [111011000101] rt2[19-16] rt[15-12] [101100] M [1] vm[3-0].
+  kThumb2Vabsd,      // vabs.f64 [111011101] D [110000] rd[15-12] [1011110] M [0] vm[3-0].
+  kThumb2Vabss,      // vabs.f32 [111011101] D [110000] rd[15-12] [1010110] M [0] vm[3-0].
+  kThumb2Vnegd,      // vneg.f64 [111011101] D [110000] rd[15-12] [1011110] M [0] vm[3-0].
+  kThumb2Vnegs,      // vneg.f32 [111011101] D [110000] rd[15-12] [1010110] M [0] vm[3-0].
+  kThumb2Vmovs_IMM8, // vmov.f32 [111011101] D [11] imm4h[19-16] vd[15-12] [10100000] imm4l[3-0].
+  kThumb2Vmovd_IMM8, // vmov.f64 [111011101] D [11] imm4h[19-16] vd[15-12] [10110000] imm4l[3-0].
+  kThumb2Mla,        // mla [111110110000] rn[19-16] ra[15-12] rd[7-4] [0000] rm[3-0].
+  kThumb2Umull,      // umull [111110111010] rn[19-16], rdlo[15-12] rdhi[11-8] [0000] rm[3-0].
+  kThumb2Ldrex,      // ldrex [111010000101] rn[19-16] rt[15-12] [1111] imm8[7-0].
+  kThumb2Strex,      // strex [111010000100] rn[19-16] rt[15-12] rd[11-8] imm8[7-0].
+  kThumb2Clrex,      // clrex [111100111011111110000111100101111].
+  kThumb2Bfi,        // bfi [111100110110] rn[19-16] [0] imm3[14-12] rd[11-8] imm2[7-6] [0] msb[4-0].
+  kThumb2Bfc,        // bfc [11110011011011110] [0] imm3[14-12] rd[11-8] imm2[7-6] [0] msb[4-0].
+  kThumb2Dmb,        // dmb [1111001110111111100011110101] option[3-0].
+  kThumb2LdrPcReln12,// ldr rd,[pc,-#imm12] [1111100011011111] rt[15-12] imm12[11-0].
+  kThumb2Stm,        // stm <list> [111010010000] rn[19-16] 000 rl[12-0].
+  kThumbUndefined,   // undefined [11011110xxxxxxxx].
+  kThumb2VPopCS,     // vpop <list of callee save fp singles (s16+)>.
+  kThumb2VPushCS,    // vpush <list of callee save fp singles (s16+)>.
+  kThumb2Vldms,      // vldms rd, <list>.
+  kThumb2Vstms,      // vstms rd, <list>.
+  kThumb2BUncond,    // b <label>.
+  kThumb2MovImm16H,  // similar to kThumb2MovImm16, but target high hw.
+  kThumb2AddPCR,     // Thumb2 2-operand add with hard-coded PC target.
+  kThumb2Adr,        // Special purpose encoding of ADR for switch tables.
+  kThumb2MovImm16LST,// Special purpose version for switch table use.
+  kThumb2MovImm16HST,// Special purpose version for switch table use.
+  kThumb2LdmiaWB,    // ldmia  [111010011001] rn[19..16] mask[15..0].
+  kThumb2SubsRRI12,  // setflags encoding.
+  kThumb2OrrRRRs,    // orrx [111010100101] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2Push1,      // t3 encoding of push.
+  kThumb2Pop1,       // t3 encoding of pop.
+  kThumb2RsubRRR,    // rsb [111010111101] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+  kThumb2Smull,      // smull [111110111000] rn[19-16], rdlo[15-12] rdhi[11-8] [0000] rm[3-0].
   kArmLast,
 };
 
-/* DMB option encodings */
 enum ArmOpDmbOptions {
   kSY = 0xf,
   kST = 0xe,
@@ -567,43 +457,40 @@
   kNSHST = 0x6
 };
 
-/* Bit flags describing the behavior of each native opcode */
-/* Instruction assembly field_loc kind */
+// Instruction assembly field_loc kind.
 enum ArmEncodingKind {
   kFmtUnused,
-  kFmtBitBlt,    /* Bit string using end/start */
-  kFmtDfp,       /* Double FP reg */
-  kFmtSfp,       /* Single FP reg */
-  kFmtModImm,    /* Shifted 8-bit immed using [26,14..12,7..0] */
-  kFmtImm16,     /* Zero-extended immed using [26,19..16,14..12,7..0] */
-  kFmtImm6,      /* Encoded branch target using [9,7..3]0 */
-  kFmtImm12,     /* Zero-extended immediate using [26,14..12,7..0] */
-  kFmtShift,     /* Shift descriptor, [14..12,7..4] */
-  kFmtLsb,       /* least significant bit using [14..12][7..6] */
-  kFmtBWidth,    /* bit-field width, encoded as width-1 */
-  kFmtShift5,    /* Shift count, [14..12,7..6] */
-  kFmtBrOffset,  /* Signed extended [26,11,13,21-16,10-0]:0 */
-  kFmtFPImm,     /* Encoded floating point immediate */
-  kFmtOff24,     /* 24-bit Thumb2 unconditional branch encoding */
+  kFmtBitBlt,    // Bit string using end/start.
+  kFmtDfp,       // Double FP reg.
+  kFmtSfp,       // Single FP reg.
+  kFmtModImm,    // Shifted 8-bit immed using [26,14..12,7..0].
+  kFmtImm16,     // Zero-extended immed using [26,19..16,14..12,7..0].
+  kFmtImm6,      // Encoded branch target using [9,7..3]0.
+  kFmtImm12,     // Zero-extended immediate using [26,14..12,7..0].
+  kFmtShift,     // Shift descriptor, [14..12,7..4].
+  kFmtLsb,       // least significant bit using [14..12][7..6].
+  kFmtBWidth,    // bit-field width, encoded as width-1.
+  kFmtShift5,    // Shift count, [14..12,7..6].
+  kFmtBrOffset,  // Signed extended [26,11,13,21-16,10-0]:0.
+  kFmtFPImm,     // Encoded floating point immediate.
+  kFmtOff24,     // 24-bit Thumb2 unconditional branch encoding.
 };
 
-/* Struct used to define the snippet positions for each Thumb opcode */
+// Struct used to define the snippet positions for each Thumb opcode.
 struct ArmEncodingMap {
   uint32_t skeleton;
   struct {
     ArmEncodingKind kind;
-    int end;   /* end for kFmtBitBlt, 1-bit slice end for FP regs */
-    int start; /* start for kFmtBitBlt, 4-bit slice end for FP regs */
+    int end;   // end for kFmtBitBlt, 1-bit slice end for FP regs.
+    int start; // start for kFmtBitBlt, 4-bit slice end for FP regs.
   } field_loc[4];
   ArmOpcode opcode;
   uint64_t flags;
   const char* name;
   const char* fmt;
-  int size;   /* Size in bytes */
+  int size;   // Note: size is in bytes.
 };
 
-extern const ArmEncodingMap EncodingMap[kArmLast];
-
 }  // namespace art
 
 #endif  // ART_SRC_COMPILER_CODEGEN_ARM_ARMLIR_H_
diff --git a/src/compiler/codegen/arm/assemble_arm.cc b/src/compiler/codegen/arm/assemble_arm.cc
index 8cb0b97..93c979b 100644
--- a/src/compiler/codegen/arm/assemble_arm.cc
+++ b/src/compiler/codegen/arm/assemble_arm.cc
@@ -15,6 +15,7 @@
  */
 
 #include "arm_lir.h"
+#include "codegen_arm.h"
 #include "../codegen_util.h"
 
 namespace art {
@@ -75,7 +76,7 @@
  *  [!] escape.  To insert "!", use "!!"
  */
 /* NOTE: must be kept in sync with enum ArmOpcode from LIR.h */
-const ArmEncodingMap EncodingMap[kArmLast] = {
+const ArmEncodingMap ArmCodegen::EncodingMap[kArmLast] = {
     ENCODING_MAP(kArm16BitData,    0x0000,
                  kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_UNARY_OP, "data", "0x!0h(!0d)", 2),
@@ -987,8 +988,7 @@
  * discover that pc-relative displacements may not fit the selected
  * instruction.
  */
-AssemblerStatus AssembleInstructions(CompilationUnit* cu,
-                    uintptr_t start_addr)
+AssemblerStatus ArmCodegen::AssembleInstructions(CompilationUnit* cu, uintptr_t start_addr)
 {
   LIR* lir;
   AssemblerStatus res = kSuccess;  // Assume success
@@ -1361,7 +1361,7 @@
   return res;
 }
 
-int GetInsnSize(LIR* lir)
+int ArmCodegen::GetInsnSize(LIR* lir)
 {
   return EncodingMap[lir->opcode].size;
 }
@@ -1369,7 +1369,7 @@
 /*
  * Target-dependent offset assignment.
  */
-int AssignInsnOffsets(CompilationUnit* cu)
+int ArmCodegen::AssignInsnOffsets(CompilationUnit* cu)
 {
   LIR* arm_lir;
   int offset = 0;
diff --git a/src/compiler/codegen/arm/call_arm.cc b/src/compiler/codegen/arm/call_arm.cc
index 775b25d..950105c 100644
--- a/src/compiler/codegen/arm/call_arm.cc
+++ b/src/compiler/codegen/arm/call_arm.cc
@@ -19,6 +19,7 @@
 #include "oat_compilation_unit.h"
 #include "oat/runtime/oat_support_entrypoints.h"
 #include "arm_lir.h"
+#include "codegen_arm.h"
 #include "../codegen_util.h"
 #include "../ralloc_util.h"
 
@@ -37,7 +38,7 @@
  * there.  NOTE: all live arg registers must be locked prior to this call
  * to avoid having them allocated as a temp by downstream utilities.
  */
-RegLocation ArgLoc(CompilationUnit* cu, RegLocation loc)
+RegLocation ArmCodegen::ArgLoc(CompilationUnit* cu, RegLocation loc)
 {
   int arg_num = InPosition(cu, loc.s_reg_low);
   if (loc.wide) {
@@ -67,15 +68,16 @@
  * the frame, we can't use the normal LoadValue() because it assumed
  * a proper frame - and we're frameless.
  */
-RegLocation LoadArg(CompilationUnit* cu, RegLocation loc)
+static RegLocation LoadArg(CompilationUnit* cu, RegLocation loc)
 {
+  Codegen* cg = cu->cg.get();
   if (loc.location == kLocDalvikFrame) {
     int start = (InPosition(cu, loc.s_reg_low) + 1) * sizeof(uint32_t);
     loc.low_reg = AllocTemp(cu);
-    LoadWordDisp(cu, rARM_SP, start, loc.low_reg);
+    cg->LoadWordDisp(cu, rARM_SP, start, loc.low_reg);
     if (loc.wide) {
       loc.high_reg = AllocTemp(cu);
-      LoadWordDisp(cu, rARM_SP, start + sizeof(uint32_t), loc.high_reg);
+      cg->LoadWordDisp(cu, rARM_SP, start + sizeof(uint32_t), loc.high_reg);
     }
     loc.location = kLocPhysReg;
   }
@@ -122,7 +124,8 @@
 }
 
 /* Used for the "verbose" listing */
-void GenPrintLabel(CompilationUnit *cu, MIR* mir)
+// TODO: move to common code.
+void ArmCodegen::GenPrintLabel(CompilationUnit *cu, MIR* mir)
 {
   /* Mark the beginning of a Dalvik instruction for line tracking */
   char* inst_str = cu->verbose ?
@@ -138,6 +141,7 @@
 static MIR* SpecialIGet(CompilationUnit* cu, BasicBlock** bb, MIR* mir,
                         OpSize size, bool long_or_double, bool is_object)
 {
+  Codegen* cg = cu->cg.get();
   int field_offset;
   bool is_volatile;
   uint32_t field_idx = mir->dalvikInsn.vC;
@@ -147,7 +151,7 @@
   }
   RegLocation rl_obj = GetSrc(cu, mir, 0);
   LockLiveArgs(cu, mir);
-  rl_obj = ArgLoc(cu, rl_obj);
+  rl_obj = ArmCodegen::ArgLoc(cu, rl_obj);
   RegLocation rl_dest;
   if (long_or_double) {
     rl_dest = GetReturnWide(cu, false);
@@ -155,16 +159,17 @@
     rl_dest = GetReturn(cu, false);
   }
   // Point of no return - no aborts after this
-  GenPrintLabel(cu, mir);
+  ArmCodegen::GenPrintLabel(cu, mir);
   rl_obj = LoadArg(cu, rl_obj);
-  GenIGet(cu, field_idx, mir->optimization_flags, size, rl_dest, rl_obj,
-          long_or_double, is_object);
+  cg->GenIGet(cu, field_idx, mir->optimization_flags, size, rl_dest, rl_obj,
+              long_or_double, is_object);
   return GetNextMir(cu, bb, mir);
 }
 
 static MIR* SpecialIPut(CompilationUnit* cu, BasicBlock** bb, MIR* mir,
                         OpSize size, bool long_or_double, bool is_object)
 {
+  Codegen* cg = cu->cg.get();
   int field_offset;
   bool is_volatile;
   uint32_t field_idx = mir->dalvikInsn.vC;
@@ -182,24 +187,25 @@
     rl_src = GetSrc(cu, mir, 0);
     rl_obj = GetSrc(cu, mir, 1);
   }
-  rl_src = ArgLoc(cu, rl_src);
-  rl_obj = ArgLoc(cu, rl_obj);
+  rl_src = ArmCodegen::ArgLoc(cu, rl_src);
+  rl_obj = ArmCodegen::ArgLoc(cu, rl_obj);
   // Reject if source is split across registers & frame
   if (rl_obj.location == kLocInvalid) {
     ResetRegPool(cu);
     return NULL;
   }
   // Point of no return - no aborts after this
-  GenPrintLabel(cu, mir);
+  ArmCodegen::GenPrintLabel(cu, mir);
   rl_obj = LoadArg(cu, rl_obj);
   rl_src = LoadArg(cu, rl_src);
-  GenIPut(cu, field_idx, mir->optimization_flags, size, rl_src, rl_obj,
-          long_or_double, is_object);
+  cg->GenIPut(cu, field_idx, mir->optimization_flags, size, rl_src, rl_obj,
+              long_or_double, is_object);
   return GetNextMir(cu, bb, mir);
 }
 
 static MIR* SpecialIdentity(CompilationUnit* cu, MIR* mir)
 {
+  Codegen* cg = cu->cg.get();
   RegLocation rl_src;
   RegLocation rl_dest;
   bool wide = (mir->ssa_rep->num_uses == 2);
@@ -211,18 +217,18 @@
     rl_dest = GetReturn(cu, false);
   }
   LockLiveArgs(cu, mir);
-  rl_src = ArgLoc(cu, rl_src);
+  rl_src = ArmCodegen::ArgLoc(cu, rl_src);
   if (rl_src.location == kLocInvalid) {
     ResetRegPool(cu);
     return NULL;
   }
   // Point of no return - no aborts after this
-  GenPrintLabel(cu, mir);
+  ArmCodegen::GenPrintLabel(cu, mir);
   rl_src = LoadArg(cu, rl_src);
   if (wide) {
-    StoreValueWide(cu, rl_dest, rl_src);
+    cg->StoreValueWide(cu, rl_dest, rl_src);
   } else {
-    StoreValue(cu, rl_dest, rl_src);
+    cg->StoreValue(cu, rl_dest, rl_src);
   }
   return mir;
 }
@@ -230,8 +236,8 @@
 /*
  * Special-case code genration for simple non-throwing leaf methods.
  */
-void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
-                    SpecialCaseHandler special_case)
+void ArmCodegen::GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+                                SpecialCaseHandler special_case)
 {
    cu->current_dalvik_offset = mir->offset;
    MIR* next_mir = NULL;
@@ -241,7 +247,7 @@
        next_mir = mir;
        break;
      case kConstFunction:
-       GenPrintLabel(cu, mir);
+       ArmCodegen::GenPrintLabel(cu, mir);
        LoadConstant(cu, rARM_RET0, mir->dalvikInsn.vB);
        next_mir = GetNextMir(cu, &bb, mir);
        break;
@@ -292,7 +298,7 @@
    if (next_mir != NULL) {
     cu->current_dalvik_offset = next_mir->offset;
     if (special_case != kIdentity) {
-      GenPrintLabel(cu, next_mir);
+      ArmCodegen::GenPrintLabel(cu, next_mir);
     }
     NewLIR1(cu, kThumbBx, rARM_LR);
     cu->core_spill_mask = 0;
@@ -324,8 +330,7 @@
  *   add   rARM_PC, r_disp   ; This is the branch from which we compute displacement
  *   cbnz  r_idx, lp
  */
-void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
-                     RegLocation rl_src)
+void ArmCodegen::GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   if (cu->verbose) {
@@ -363,7 +368,7 @@
   NewLIR2(cu, kThumb2LdmiaWB, rBase, (1 << r_key) | (1 << r_disp));
   OpRegReg(cu, kOpCmp, r_key, rl_src.low_reg);
   // Go if match. NOTE: No instruction set switch here - must stay Thumb2
-  OpIT(cu, kArmCondEq, "");
+  OpIT(cu, kCondEq, "");
   LIR* switch_branch = NewLIR1(cu, kThumb2AddPCR, r_disp);
   tab_rec->anchor = switch_branch;
   // Needs to use setflags encoding here
@@ -372,8 +377,7 @@
 }
 
 
-void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
-                     RegLocation rl_src)
+void ArmCodegen::GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   if (cu->verbose) {
@@ -429,7 +433,7 @@
  *
  * Total size is 4+(width * size + 1)/2 16-bit code units.
  */
-void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+void ArmCodegen::GenFillArrayData(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   // Add the table to the list - we'll process it later
@@ -481,7 +485,7 @@
  * preserved.
  *
  */
-void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+void ArmCodegen::GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
 {
   FlushAllRegs(cu);
   DCHECK_EQ(LW_SHAPE_THIN, 0);
@@ -497,11 +501,11 @@
   NewLIR4(cu, kThumb2Bfi, r2, r1, 0, LW_LOCK_OWNER_SHIFT - 1);
   NewLIR3(cu, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
   OpRegImm(cu, kOpCmp, r1, 0);
-  OpIT(cu, kArmCondEq, "");
+  OpIT(cu, kCondEq, "");
   NewLIR4(cu, kThumb2Strex, r1, r2, r0,
           Object::MonitorOffset().Int32Value() >> 2);
   OpRegImm(cu, kOpCmp, r1, 0);
-  OpIT(cu, kArmCondNe, "T");
+  OpIT(cu, kCondNe, "T");
   // Go expensive route - artLockObjectFromCode(self, obj);
   LoadWordDisp(cu, rARM_SELF, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR);
   ClobberCalleeSave(cu);
@@ -516,7 +520,7 @@
  * a zero recursion count, it's safe to punch it back to the
  * initial, unlock thin state with a store word.
  */
-void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+void ArmCodegen::GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
 {
   DCHECK_EQ(LW_SHAPE_THIN, 0);
   FlushAllRegs(cu);
@@ -532,7 +536,7 @@
   OpRegImm(cu, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
   NewLIR3(cu, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
   OpRegReg(cu, kOpSub, r1, r2);
-  OpIT(cu, kArmCondEq, "EE");
+  OpIT(cu, kCondEq, "EE");
   StoreWordDisp(cu, r0, Object::MonitorOffset().Int32Value(), r3);
   // Go expensive route - UnlockObjectFromCode(obj);
   LoadWordDisp(cu, rARM_SELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR);
@@ -545,7 +549,7 @@
 /*
  * Mark garbage collection card. Skip if the value we're storing is null.
  */
-void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
+void ArmCodegen::MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
 {
   int reg_card_base = AllocTemp(cu);
   int reg_card_no = AllocTemp(cu);
@@ -560,8 +564,7 @@
   FreeTemp(cu, reg_card_no);
 }
 
-void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
-                      RegLocation rl_method)
+void ArmCodegen::GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method)
 {
   int spill_count = cu->num_core_spills + cu->num_fp_spills;
   /*
@@ -614,7 +617,7 @@
   FreeTemp(cu, r3);
 }
 
-void GenExitSequence(CompilationUnit* cu)
+void ArmCodegen::GenExitSequence(CompilationUnit* cu)
 {
   int spill_count = cu->num_core_spills + cu->num_fp_spills;
   /*
diff --git a/src/compiler/codegen/arm/codegen_arm.h b/src/compiler/codegen/arm/codegen_arm.h
new file mode 100644
index 0000000..4737d8c
--- /dev/null
+++ b/src/compiler/codegen/arm/codegen_arm.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_CODEGEN_ARM_CODEGENARM_H_
+#define ART_SRC_COMPILER_CODEGEN_ARM_CODEGENARM_H_
+
+#include "../../compiler_internals.h"
+
+namespace art {
+
+class ArmCodegen : public Codegen {
+  public:
+    // Required for target - codegen helpers.
+    virtual bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+                                    RegLocation rl_src, RegLocation rl_dest, int lit);
+    virtual int LoadHelper(CompilationUnit* cu, int offset);
+    virtual LIR* LoadBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest,
+                              OpSize size, int s_reg);
+    virtual LIR* LoadBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_dest_lo,
+                                  int r_dest_hi, int s_reg);
+    virtual LIR* LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest, int scale,
+                                 OpSize size);
+    virtual LIR* LoadBaseIndexedDisp(CompilationUnit* cu, int rBase, int r_index, int scale,
+                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
+                                     int s_reg);
+    virtual LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value);
+    virtual LIR* LoadConstantValueWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi,
+                                       int val_lo, int val_hi);
+    virtual void LoadPair(CompilationUnit* cu, int base, int low_reg, int high_reg);
+    virtual LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
+                               OpSize size);
+    virtual LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_src_lo,
+                                   int r_src_hi);
+    virtual LIR* StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src, int scale,
+                                  OpSize size);
+    virtual LIR* StoreBaseIndexedDisp(CompilationUnit* cu, int rBase, int r_index, int scale,
+                                      int displacement, int r_src, int r_src_hi, OpSize size,
+                                      int s_reg);
+    virtual void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg);
+
+    // Required for target - register utilities.
+    virtual bool IsFpReg(int reg);
+    virtual bool SameRegType(int reg1, int reg2);
+    virtual int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class);
+    virtual int AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class);
+    virtual int S2d(int low_reg, int high_reg);
+    virtual int TargetReg(SpecialTargetRegister reg);
+    virtual RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg);
+    virtual RegLocation GetReturnAlt(CompilationUnit* cu);
+    virtual RegLocation GetReturnWideAlt(CompilationUnit* cu);
+    virtual RegLocation LocCReturn();
+    virtual RegLocation LocCReturnDouble();
+    virtual RegLocation LocCReturnFloat();
+    virtual RegLocation LocCReturnWide();
+    virtual uint32_t FpRegMask();
+    virtual uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg);
+    virtual void AdjustSpillMask(CompilationUnit* cu);
+    virtual void ClobberCalleeSave(CompilationUnit* cu);
+    virtual void FlushReg(CompilationUnit* cu, int reg);
+    virtual void FlushRegWide(CompilationUnit* cu, int reg1, int reg2);
+    virtual void FreeCallTemps(CompilationUnit* cu);
+    virtual void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep, RegLocation rl_free);
+    virtual void LockCallTemps(CompilationUnit* cu);
+    virtual void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg);
+    virtual void CompilerInitializeRegAlloc(CompilationUnit* cu);
+
+    // Required for target - miscellaneous.
+    virtual AssemblerStatus AssembleInstructions(CompilationUnit* cu, uintptr_t start_addr);
+    virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
+    virtual void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir);
+    virtual const char* GetTargetInstFmt(int opcode);
+    virtual const char* GetTargetInstName(int opcode);
+    virtual int AssignInsnOffsets(CompilationUnit* cu);
+    virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+    virtual uint64_t GetPCUseDefEncoding();
+    virtual uint64_t GetTargetInstFlags(int opcode);
+    virtual int GetInsnSize(LIR* lir);
+    virtual bool IsUnconditionalBranch(LIR* lir);
+
+    // Required for target - Dalvik-level generators.
+    virtual bool GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2);
+    virtual bool GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2);
+    virtual bool GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode,
+                                  RegLocation rl_dest, RegLocation rl_src1,
+                                  RegLocation rl_src2);
+    virtual bool GenArithOpFloat(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                                 RegLocation rl_src1, RegLocation rl_src2);
+    virtual bool GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                          RegLocation rl_src1, RegLocation rl_src2);
+    virtual bool GenConversion(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                               RegLocation rl_src);
+    virtual bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier);
+    virtual bool GenInlinedMinMaxInt(CompilationUnit* cu, CallInfo* info, bool is_min);
+    virtual bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info);
+    virtual bool GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+    virtual bool GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                           RegLocation rl_src2);
+    virtual bool GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2);
+    virtual bool GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2);
+    virtual LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int base,
+                                int offset, ThrowKind kind);
+    virtual RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int reg_hi,
+                                  bool is_div);
+    virtual RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int lit,
+                                     bool is_div);
+    virtual void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2);
+    virtual void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi);
+    virtual void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
+                                  RegLocation rl_method);
+    virtual void GenExitSequence(CompilationUnit* cu);
+    virtual void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset,
+                                  RegLocation rl_src);
+    virtual void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
+                                     bool is_double);
+    virtual void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
+    virtual void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind);
+    virtual void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
+    virtual void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
+    virtual void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+                                               RegLocation rl_result, int lit, int first_bit,
+                                               int second_bit);
+    virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+    virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+    virtual void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+                                 RegLocation rl_src);
+    virtual void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+                                 RegLocation rl_src);
+    virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+                                SpecialCaseHandler special_case);
+
+    // Required for target - single operation generators.
+    virtual LIR* OpUnconditionalBranch(CompilationUnit* cu, LIR* target);
+    virtual LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
+                             LIR* target);
+    virtual LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg, int check_value,
+                                LIR* target);
+    virtual LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target);
+    virtual LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg,
+                                LIR* target);
+    virtual LIR* OpFpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
+    virtual LIR* OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide);
+    virtual LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp);
+    virtual LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target);
+    virtual LIR* OpReg(CompilationUnit* cu, OpKind op, int r_dest_src);
+    virtual LIR* OpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
+    virtual LIR* OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src);
+    virtual LIR* OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value);
+    virtual LIR* OpRegMem(CompilationUnit* cu, OpKind op, int r_dest, int rBase, int offset);
+    virtual LIR* OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2);
+    virtual LIR* OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int value);
+    virtual LIR* OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
+                             int r_src2);
+    virtual LIR* OpTestSuspend(CompilationUnit* cu, LIR* target);
+    virtual LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset);
+    virtual LIR* OpVldm(CompilationUnit* cu, int rBase, int count);
+    virtual LIR* OpVstm(CompilationUnit* cu, int rBase, int count);
+    virtual void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale,
+                       int offset);
+    virtual void OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo,
+                               int src_hi);
+    virtual void OpTlsCmp(CompilationUnit* cu, int offset, int val);
+
+    static RegLocation ArgLoc(CompilationUnit* cu, RegLocation loc);
+    LIR* LoadBaseDispBody(CompilationUnit* cu, int rBase, int displacement, int r_dest,
+                          int r_dest_hi, OpSize size, int s_reg);
+    LIR* StoreBaseDispBody(CompilationUnit* cu, int rBase, int displacement, int r_src,
+                           int r_src_hi, OpSize size);
+    static void GenPrintLabel(CompilationUnit* cu, MIR* mir);
+    static LIR* OpRegRegRegShift(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
+                                 int r_src2, int shift);
+    static LIR* OpRegRegShift(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2,
+                              int shift);
+    static const ArmEncodingMap EncodingMap[kArmLast];
+    static int EncodeShift(int code, int amount);
+    static int ModifiedImmediate(uint32_t value);
+    static ArmConditionCode ArmConditionEncoding(ConditionCode code);
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_CODEGEN_ARM_CODEGENARM_H_
diff --git a/src/compiler/codegen/arm/fp_arm.cc b/src/compiler/codegen/arm/fp_arm.cc
index 46695b9..a9ea916 100644
--- a/src/compiler/codegen/arm/fp_arm.cc
+++ b/src/compiler/codegen/arm/fp_arm.cc
@@ -15,13 +15,14 @@
  */
 
 #include "arm_lir.h"
+#include "codegen_arm.h"
 #include "../codegen_util.h"
 #include "../ralloc_util.h"
 
 namespace art {
 
-bool GenArithOpFloat(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
-                     RegLocation rl_src1, RegLocation rl_src2)
+bool ArmCodegen::GenArithOpFloat(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                                 RegLocation rl_src1, RegLocation rl_src2)
 {
   int op = kThumbBkpt;
   RegLocation rl_result;
@@ -63,8 +64,8 @@
   return false;
 }
 
-bool GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode,
-                      RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
+bool ArmCodegen::GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode,
+                                  RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
 {
   int op = kThumbBkpt;
   RegLocation rl_result;
@@ -108,8 +109,8 @@
   return false;
 }
 
-bool GenConversion(CompilationUnit* cu, Instruction::Code opcode,
-                   RegLocation rl_dest, RegLocation rl_src)
+bool ArmCodegen::GenConversion(CompilationUnit* cu, Instruction::Code opcode,
+                               RegLocation rl_dest, RegLocation rl_src)
 {
   int op = kThumbBkpt;
   int src_reg;
@@ -161,8 +162,8 @@
   return false;
 }
 
-void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
-                         bool gt_bias, bool is_double)
+void ArmCodegen::GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
+                                     bool is_double)
 {
   LIR* label_list = cu->block_label_list;
   LIR* target = &label_list[bb->taken->id];
@@ -215,8 +216,8 @@
 }
 
 
-bool GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
-        RegLocation rl_src1, RegLocation rl_src2)
+bool ArmCodegen::GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                          RegLocation rl_src1, RegLocation rl_src2)
 {
   bool is_double;
   int default_result;
@@ -261,12 +262,12 @@
   DCHECK(!ARM_FPREG(rl_result.low_reg));
   NewLIR0(cu, kThumb2Fmstat);
 
-  OpIT(cu, (default_result == -1) ? kArmCondGt : kArmCondMi, "");
+  OpIT(cu, (default_result == -1) ? kCondGt : kCondMi, "");
   NewLIR2(cu, kThumb2MovImmShift, rl_result.low_reg,
           ModifiedImmediate(-default_result)); // Must not alter ccodes
   GenBarrier(cu);
 
-  OpIT(cu, kArmCondEq, "");
+  OpIT(cu, kCondEq, "");
   LoadConstant(cu, rl_result.low_reg, 0);
   GenBarrier(cu);
 
@@ -274,7 +275,7 @@
   return false;
 }
 
-void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+void ArmCodegen::GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
 {
   RegLocation rl_result;
   rl_src = LoadValue(cu, rl_src, kFPReg);
@@ -283,7 +284,7 @@
   StoreValue(cu, rl_dest, rl_result);
 }
 
-void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+void ArmCodegen::GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
 {
   RegLocation rl_result;
   rl_src = LoadValueWide(cu, rl_src, kFPReg);
@@ -293,7 +294,7 @@
   StoreValueWide(cu, rl_dest, rl_result);
 }
 
-bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) {
+bool ArmCodegen::GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) {
   DCHECK_EQ(cu->instruction_set, kThumb2);
   LIR *branch;
   RegLocation rl_src = info->args[0];
diff --git a/src/compiler/codegen/arm/int_arm.cc b/src/compiler/codegen/arm/int_arm.cc
index 45fe807..0a6abd2 100644
--- a/src/compiler/codegen/arm/int_arm.cc
+++ b/src/compiler/codegen/arm/int_arm.cc
@@ -19,12 +19,13 @@
 #include "oat_compilation_unit.h"
 #include "oat/runtime/oat_support_entrypoints.h"
 #include "arm_lir.h"
+#include "codegen_arm.h"
 #include "../codegen_util.h"
 #include "../ralloc_util.h"
 
 namespace art {
 
-LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1,
+LIR* ArmCodegen::OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1,
          int src2, LIR* target)
 {
   OpRegReg(cu, kOpCmp, src1, src2);
@@ -41,14 +42,15 @@
  * met, and an "E" means the instruction is executed if the condition
  * is not met.
  */
-LIR* OpIT(CompilationUnit* cu, ArmConditionCode code, const char* guide)
+LIR* ArmCodegen::OpIT(CompilationUnit* cu, ConditionCode ccode, const char* guide)
 {
   int mask;
-  int cond_bit = code & 1;
-  int alt_bit = cond_bit ^ 1;
   int mask3 = 0;
   int mask2 = 0;
   int mask1 = 0;
+  ArmConditionCode code = ArmConditionEncoding(ccode);
+  int cond_bit = code & 1;
+  int alt_bit = cond_bit ^ 1;
 
   //Note: case fallthroughs intentional
   switch (strlen(guide)) {
@@ -84,8 +86,8 @@
  *     neg   rX
  * done:
  */
-void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest,
-        RegLocation rl_src1, RegLocation rl_src2)
+void ArmCodegen::GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
 {
   LIR* target1;
   LIR* target2;
@@ -99,7 +101,7 @@
   OpRegRegReg(cu, kOpSub, t_reg, rl_src1.low_reg, rl_src2.low_reg);
   LIR* branch3 = OpCondBranch(cu, kCondEq, NULL);
 
-  OpIT(cu, kArmCondHi, "E");
+  OpIT(cu, kCondHi, "E");
   NewLIR2(cu, kThumb2MovImmShift, t_reg, ModifiedImmediate(-1));
   LoadConstant(cu, t_reg, 1);
   GenBarrier(cu);
@@ -119,7 +121,7 @@
   branch3->target = branch1->target;
 }
 
-void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
+void ArmCodegen::GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
 {
   LIR* label_list = cu->block_label_list;
   LIR* taken = &label_list[bb->taken->id];
@@ -168,8 +170,8 @@
  * Generate a register comparison to an immediate and branch.  Caller
  * is responsible for setting branch target field.
  */
-LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg,
-          int check_value, LIR* target)
+LIR* ArmCodegen::OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg, int check_value,
+                                LIR* target)
 {
   LIR* branch;
   int mod_imm;
@@ -194,12 +196,13 @@
   branch->target = target;
   return branch;
 }
-LIR* OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src)
+
+LIR* ArmCodegen::OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src)
 {
   LIR* res;
   int opcode;
   if (ARM_FPREG(r_dest) || ARM_FPREG(r_src))
-    return FpRegCopy(cu, r_dest, r_src);
+    return OpFpRegCopy(cu, r_dest, r_src);
   if (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src))
     opcode = kThumbMovRR;
   else if (!ARM_LOWREG(r_dest) && !ARM_LOWREG(r_src))
@@ -215,15 +218,15 @@
   return res;
 }
 
-LIR* OpRegCopy(CompilationUnit* cu, int r_dest, int r_src)
+LIR* ArmCodegen::OpRegCopy(CompilationUnit* cu, int r_dest, int r_src)
 {
   LIR* res = OpRegCopyNoInsert(cu, r_dest, r_src);
   AppendLIR(cu, res);
   return res;
 }
 
-void OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi,
-               int src_lo, int src_hi)
+void ArmCodegen::OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo,
+                               int src_hi)
 {
   bool dest_fp = ARM_FPREG(dest_lo) && ARM_FPREG(dest_hi);
   bool src_fp = ARM_FPREG(src_lo) && ARM_FPREG(src_hi);
@@ -278,8 +281,8 @@
 };
 
 // Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
-bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
-                        RegLocation rl_src, RegLocation rl_dest, int lit)
+bool ArmCodegen::SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+                                    RegLocation rl_src, RegLocation rl_dest, int lit)
 {
   if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
     return false;
@@ -323,26 +326,28 @@
   return true;
 }
 
-LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code,
+LIR* ArmCodegen::GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code,
                     int reg1, int base, int offset, ThrowKind kind)
 {
   LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
   return NULL;
 }
 
-RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg1, int lit, bool is_div)
+RegLocation ArmCodegen::GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg1, int lit,
+                                     bool is_div)
 {
   LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
   return rl_dest;
 }
 
-RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg1, int reg2, bool is_div)
+RegLocation ArmCodegen::GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg1, int reg2,
+                                  bool is_div)
 {
   LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
   return rl_dest;
 }
 
-bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min)
+bool ArmCodegen::GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min)
 {
   DCHECK_EQ(cu->instruction_set, kThumb2);
   RegLocation rl_src1 = info->args[0];
@@ -352,7 +357,7 @@
   RegLocation rl_dest = InlineTarget(cu, info);
   RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
   OpRegReg(cu, kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
-  OpIT(cu, (is_min) ? kArmCondGt : kArmCondLt, "E");
+  OpIT(cu, (is_min) ? kCondGt : kCondLt, "E");
   OpRegReg(cu, kOpMov, rl_result.low_reg, rl_src2.low_reg);
   OpRegReg(cu, kOpMov, rl_result.low_reg, rl_src1.low_reg);
   GenBarrier(cu);
@@ -360,17 +365,17 @@
   return true;
 }
 
-void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale, int offset)
+void ArmCodegen::OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale, int offset)
 {
   LOG(FATAL) << "Unexpected use of OpLea for Arm";
 }
 
-void OpTlsCmp(CompilationUnit* cu, int offset, int val)
+void ArmCodegen::OpTlsCmp(CompilationUnit* cu, int offset, int val)
 {
   LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
 }
 
-bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) {
+bool ArmCodegen::GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) {
   DCHECK_EQ(cu->instruction_set, kThumb2);
   // Unused - RegLocation rl_src_unsafe = info->args[0];
   RegLocation rl_src_obj= info->args[1];  // Object - known non-null
@@ -417,7 +422,7 @@
   OpRegReg(cu, kOpCmp, r_old_value, rl_expected.low_reg);
   FreeTemp(cu, r_old_value);  // Now unneeded.
   RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  OpIT(cu, kArmCondEq, "TE");
+  OpIT(cu, kCondEq, "TE");
   NewLIR4(cu, kThumb2Strex, rl_result.low_reg, rl_new_value.low_reg, r_ptr, 0);
   FreeTemp(cu, r_ptr);  // Now unneeded.
   OpRegImm(cu, kOpXor, rl_result.low_reg, 1);
@@ -428,24 +433,24 @@
   return true;
 }
 
-LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target)
+LIR* ArmCodegen::OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target)
 {
   return RawLIR(cu, cu->current_dalvik_offset, kThumb2LdrPcRel12, reg, 0, 0, 0, 0, target);
 }
 
-LIR* OpVldm(CompilationUnit* cu, int rBase, int count)
+LIR* ArmCodegen::OpVldm(CompilationUnit* cu, int rBase, int count)
 {
   return NewLIR3(cu, kThumb2Vldms, rBase, fr0, count);
 }
 
-LIR* OpVstm(CompilationUnit* cu, int rBase, int count)
+LIR* ArmCodegen::OpVstm(CompilationUnit* cu, int rBase, int count)
 {
   return NewLIR3(cu, kThumb2Vstms, rBase, fr0, count);
 }
 
-void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
-                                   RegLocation rl_result, int lit,
-                                   int first_bit, int second_bit)
+void ArmCodegen::GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+                                               RegLocation rl_result, int lit,
+                                               int first_bit, int second_bit)
 {
   OpRegRegRegShift(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg,
                    EncodeShift(kArmLsl, second_bit - first_bit));
@@ -454,7 +459,7 @@
   }
 }
 
-void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi)
+void ArmCodegen::GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi)
 {
   int t_reg = AllocTemp(cu);
   NewLIR4(cu, kThumb2OrrRRRs, t_reg, reg_lo, reg_hi, 0);
@@ -463,21 +468,21 @@
 }
 
 // Test suspend flag, return target of taken suspend branch
-LIR* OpTestSuspend(CompilationUnit* cu, LIR* target)
+LIR* ArmCodegen::OpTestSuspend(CompilationUnit* cu, LIR* target)
 {
   NewLIR2(cu, kThumbSubRI8, rARM_SUSPEND, 1);
   return OpCondBranch(cu, (target == NULL) ? kCondEq : kCondNe, target);
 }
 
 // Decrement register and branch on condition
-LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target)
+LIR* ArmCodegen::OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target)
 {
   // Combine sub & test using sub setflags encoding here
   NewLIR3(cu, kThumb2SubsRRI12, reg, reg, 1);
   return OpCondBranch(cu, c_code, target);
 }
 
-void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind)
+void ArmCodegen::GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind)
 {
 #if ANDROID_SMP != 0
   int dmb_flavor;
@@ -497,8 +502,7 @@
 #endif
 }
 
-bool GenNegLong(CompilationUnit* cu, RegLocation rl_dest,
-                RegLocation rl_src)
+bool ArmCodegen::GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
 {
   rl_src = LoadValueWide(cu, rl_src, kCoreReg);
   RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
@@ -519,36 +523,36 @@
   return false;
 }
 
-bool GenAddLong(CompilationUnit* cu, RegLocation rl_dest,
-                RegLocation rl_src1, RegLocation rl_src2)
+bool ArmCodegen::GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
 {
   LOG(FATAL) << "Unexpected use of GenAddLong for Arm";
   return false;
 }
 
-bool GenSubLong(CompilationUnit* cu, RegLocation rl_dest,
-                RegLocation rl_src1, RegLocation rl_src2)
+bool ArmCodegen::GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
 {
   LOG(FATAL) << "Unexpected use of GenSubLong for Arm";
   return false;
 }
 
-bool GenAndLong(CompilationUnit* cu, RegLocation rl_dest,
-                RegLocation rl_src1, RegLocation rl_src2)
+bool ArmCodegen::GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
 {
   LOG(FATAL) << "Unexpected use of GenAndLong for Arm";
   return false;
 }
 
-bool GenOrLong(CompilationUnit* cu, RegLocation rl_dest,
-               RegLocation rl_src1, RegLocation rl_src2)
+bool ArmCodegen::GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                           RegLocation rl_src2)
 {
   LOG(FATAL) << "Unexpected use of GenOrLong for Arm";
   return false;
 }
 
-bool GenXorLong(CompilationUnit* cu, RegLocation rl_dest,
-               RegLocation rl_src1, RegLocation rl_src2)
+bool ArmCodegen::GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
 {
   LOG(FATAL) << "Unexpected use of genXoLong for Arm";
   return false;
diff --git a/src/compiler/codegen/arm/target_arm.cc b/src/compiler/codegen/arm/target_arm.cc
index 9c12237..272dc46 100644
--- a/src/compiler/codegen/arm/target_arm.cc
+++ b/src/compiler/codegen/arm/target_arm.cc
@@ -16,6 +16,7 @@
 
 #include "../../compiler_internals.h"
 #include "arm_lir.h"
+#include "codegen_arm.h"
 #include "../ralloc_util.h"
 #include "../codegen_util.h"
 
@@ -34,32 +35,32 @@
 static int fp_temps[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
                         fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15};
 
-RegLocation LocCReturn()
+RegLocation ArmCodegen::LocCReturn()
 {
   RegLocation res = ARM_LOC_C_RETURN;
   return res;
 }
 
-RegLocation LocCReturnWide()
+RegLocation ArmCodegen::LocCReturnWide()
 {
   RegLocation res = ARM_LOC_C_RETURN_WIDE;
   return res;
 }
 
-RegLocation LocCReturnFloat()
+RegLocation ArmCodegen::LocCReturnFloat()
 {
   RegLocation res = ARM_LOC_C_RETURN_FLOAT;
   return res;
 }
 
-RegLocation LocCReturnDouble()
+RegLocation ArmCodegen::LocCReturnDouble()
 {
   RegLocation res = ARM_LOC_C_RETURN_DOUBLE;
   return res;
 }
 
 // Return a target-dependent special register.
-int TargetReg(SpecialTargetRegister reg) {
+int ArmCodegen::TargetReg(SpecialTargetRegister reg) {
   int res = INVALID_REG;
   switch (reg) {
     case kSelf: res = rARM_SELF; break;
@@ -85,37 +86,19 @@
 
 
 // Create a double from a pair of singles.
-int S2d(int low_reg, int high_reg)
+int ArmCodegen::S2d(int low_reg, int high_reg)
 {
   return ARM_S2D(low_reg, high_reg);
 }
 
-// Is reg a single or double?
-bool FpReg(int reg)
-{
-  return ARM_FPREG(reg);
-}
-
-// Is reg a single?
-bool SingleReg(int reg)
-{
-  return ARM_SINGLEREG(reg);
-}
-
-// Is reg a double?
-bool DoubleReg(int reg)
-{
-  return ARM_DOUBLEREG(reg);
-}
-
 // Return mask to strip off fp reg flags and bias.
-uint32_t FpRegMask()
+uint32_t ArmCodegen::FpRegMask()
 {
   return ARM_FP_REG_MASK;
 }
 
 // True if both regs single, both core or both double.
-bool SameRegType(int reg1, int reg2)
+bool ArmCodegen::SameRegType(int reg1, int reg2)
 {
   return (ARM_REGTYPE(reg1) == ARM_REGTYPE(reg2));
 }
@@ -123,7 +106,7 @@
 /*
  * Decode the register id.
  */
-uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg)
+uint64_t ArmCodegen::GetRegMaskCommon(CompilationUnit* cu, int reg)
 {
   uint64_t seed;
   int shift;
@@ -140,17 +123,17 @@
   return (seed << shift);
 }
 
-uint64_t GetPCUseDefEncoding()
+uint64_t ArmCodegen::GetPCUseDefEncoding()
 {
   return ENCODE_ARM_REG_PC;
 }
 
-void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir)
+void ArmCodegen::SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir)
 {
   DCHECK_EQ(cu->instruction_set, kThumb2);
 
   // Thumb2 specific setup
-  uint64_t flags = EncodingMap[lir->opcode].flags;
+  uint64_t flags = ArmCodegen::EncodingMap[lir->opcode].flags;
   int opcode = lir->opcode;
 
   if (flags & REG_DEF_SP) {
@@ -221,7 +204,7 @@
   }
 }
 
-ArmConditionCode ArmConditionEncoding(ConditionCode ccode)
+ArmConditionCode ArmCodegen::ArmConditionEncoding(ConditionCode ccode)
 {
   ArmConditionCode res;
   switch (ccode) {
@@ -334,7 +317,7 @@
  * Interpret a format string and build a string no longer than size
  * See format key in Assemble.c.
  */
-std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr)
+std::string ArmCodegen::BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr)
 {
   std::string buf;
   int i;
@@ -473,7 +456,7 @@
   return buf;
 }
 
-void DumpResourceMask(LIR* arm_lir, uint64_t mask, const char* prefix)
+void ArmCodegen::DumpResourceMask(LIR* arm_lir, uint64_t mask, const char* prefix)
 {
   char buf[256];
   buf[0] = 0;
@@ -519,30 +502,21 @@
   }
 }
 
-bool BranchUnconditional(LIR* lir)
+bool ArmCodegen::IsUnconditionalBranch(LIR* lir)
 {
   return ((lir->opcode == kThumbBUncond) || (lir->opcode == kThumb2BUncond));
 }
 
-/* Common initialization routine for an architecture family */
-bool ArchInit()
+bool InitArmCodegen(CompilationUnit* cu)
 {
-  int i;
-
-  for (i = 0; i < kArmLast; i++) {
-    if (EncodingMap[i].opcode != i) {
-      LOG(FATAL) << "Encoding order for " << EncodingMap[i].name
+  cu->cg.reset(new ArmCodegen());
+  for (int i = 0; i < kArmLast; i++) {
+    if (ArmCodegen::EncodingMap[i].opcode != i) {
+      LOG(FATAL) << "Encoding order for " << ArmCodegen::EncodingMap[i].name
                  << " is wrong: expecting " << i << ", seeing "
-                 << static_cast<int>(EncodingMap[i].opcode);
+                 << static_cast<int>(ArmCodegen::EncodingMap[i].opcode);
     }
   }
-
-  return ArchVariantInit();
-}
-
-/* Architecture-specific initializations and checks go here */
-bool ArchVariantInit(void)
-{
   return true;
 }
 
@@ -550,7 +524,7 @@
  * Alloc a pair of core registers, or a double.  Low reg in low byte,
  * high reg in next byte.
  */
-int AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class)
+int ArmCodegen::AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class)
 {
   int high_reg;
   int low_reg;
@@ -567,14 +541,14 @@
   return res;
 }
 
-int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class)
+int ArmCodegen::AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class)
 {
   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg))
     return AllocTempFloat(cu);
   return AllocTemp(cu);
 }
 
-void CompilerInitializeRegAlloc(CompilationUnit* cu)
+void ArmCodegen::CompilerInitializeRegAlloc(CompilationUnit* cu)
 {
   int num_regs = sizeof(core_regs)/sizeof(*core_regs);
   int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
@@ -629,7 +603,7 @@
   }
 }
 
-void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep,
+void ArmCodegen::FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep,
                      RegLocation rl_free)
 {
   if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
@@ -645,7 +619,7 @@
  * machinery is in place, always spill lr.
  */
 
-void AdjustSpillMask(CompilationUnit* cu)
+void ArmCodegen::AdjustSpillMask(CompilationUnit* cu)
 {
   cu->core_spill_mask |= (1 << rARM_LR);
   cu->num_core_spills++;
@@ -657,7 +631,7 @@
  * include any holes in the mask.  Associate holes with
  * Dalvik register INVALID_VREG (0xFFFFU).
  */
-void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg)
+void ArmCodegen::MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg)
 {
   DCHECK_GE(reg, ARM_FP_REG_MASK + ARM_FP_CALLEE_SAVE_BASE);
   reg = (reg & ARM_FP_REG_MASK) - ARM_FP_CALLEE_SAVE_BASE;
@@ -673,7 +647,7 @@
   cu->fp_spill_mask = ((1 << cu->num_fp_spills) - 1) << ARM_FP_CALLEE_SAVE_BASE;
 }
 
-void FlushRegWide(CompilationUnit* cu, int reg1, int reg2)
+void ArmCodegen::FlushRegWide(CompilationUnit* cu, int reg1, int reg2)
 {
   RegisterInfo* info1 = GetRegInfo(cu, reg1);
   RegisterInfo* info2 = GetRegInfo(cu, reg2);
@@ -696,7 +670,7 @@
   }
 }
 
-void FlushReg(CompilationUnit* cu, int reg)
+void ArmCodegen::FlushReg(CompilationUnit* cu, int reg)
 {
   RegisterInfo* info = GetRegInfo(cu, reg);
   if (info->live && info->dirty) {
@@ -707,12 +681,12 @@
 }
 
 /* Give access to the target-dependent FP register encoding to common code */
-bool IsFpReg(int reg) {
+bool ArmCodegen::IsFpReg(int reg) {
   return ARM_FPREG(reg);
 }
 
 /* Clobber all regs that might be used by an external C call */
-void ClobberCalleeSave(CompilationUnit *cu)
+void ArmCodegen::ClobberCalleeSave(CompilationUnit *cu)
 {
   Clobber(cu, r0);
   Clobber(cu, r1);
@@ -738,7 +712,7 @@
   Clobber(cu, fr15);
 }
 
-RegLocation GetReturnWideAlt(CompilationUnit* cu)
+RegLocation ArmCodegen::GetReturnWideAlt(CompilationUnit* cu)
 {
   RegLocation res = LocCReturnWide();
   res.low_reg = r2;
@@ -751,7 +725,7 @@
   return res;
 }
 
-RegLocation GetReturnAlt(CompilationUnit* cu)
+RegLocation ArmCodegen::GetReturnAlt(CompilationUnit* cu)
 {
   RegLocation res = LocCReturn();
   res.low_reg = r1;
@@ -760,14 +734,14 @@
   return res;
 }
 
-RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg)
+RegisterInfo* ArmCodegen::GetRegInfo(CompilationUnit* cu, int reg)
 {
   return ARM_FPREG(reg) ? &cu->reg_pool->FPRegs[reg & ARM_FP_REG_MASK]
       : &cu->reg_pool->core_regs[reg];
 }
 
 /* To be used when explicitly managing register use */
-void LockCallTemps(CompilationUnit* cu)
+void ArmCodegen::LockCallTemps(CompilationUnit* cu)
 {
   LockTemp(cu, r0);
   LockTemp(cu, r1);
@@ -776,7 +750,7 @@
 }
 
 /* To be used when explicitly managing register use */
-void FreeCallTemps(CompilationUnit* cu)
+void ArmCodegen::FreeCallTemps(CompilationUnit* cu)
 {
   FreeTemp(cu, r0);
   FreeTemp(cu, r1);
@@ -784,25 +758,25 @@
   FreeTemp(cu, r3);
 }
 
-int LoadHelper(CompilationUnit* cu, int offset)
+int ArmCodegen::LoadHelper(CompilationUnit* cu, int offset)
 {
   LoadWordDisp(cu, rARM_SELF, offset, rARM_LR);
   return rARM_LR;
 }
 
-uint64_t GetTargetInstFlags(int opcode)
+uint64_t ArmCodegen::GetTargetInstFlags(int opcode)
 {
-  return EncodingMap[opcode].flags;
+  return ArmCodegen::EncodingMap[opcode].flags;
 }
 
-const char* GetTargetInstName(int opcode)
+const char* ArmCodegen::GetTargetInstName(int opcode)
 {
-  return EncodingMap[opcode].name;
+  return ArmCodegen::EncodingMap[opcode].name;
 }
 
-const char* GetTargetInstFmt(int opcode)
+const char* ArmCodegen::GetTargetInstFmt(int opcode)
 {
-  return EncodingMap[opcode].fmt;
+  return ArmCodegen::EncodingMap[opcode].fmt;
 }
 
 }  // namespace art
diff --git a/src/compiler/codegen/arm/utility_arm.cc b/src/compiler/codegen/arm/utility_arm.cc
index b064135..d1bf14e 100644
--- a/src/compiler/codegen/arm/utility_arm.cc
+++ b/src/compiler/codegen/arm/utility_arm.cc
@@ -15,6 +15,7 @@
  */
 
 #include "arm_lir.h"
+#include "codegen_arm.h"
 #include "../codegen_util.h"
 #include "../ralloc_util.h"
 
@@ -57,7 +58,7 @@
   }
   LIR* load_pc_rel = RawLIR(cu, cu->current_dalvik_offset, kThumb2Vldrs,
                           r_dest, r15pc, 0, 0, 0, data_target);
-  SetMemRefType(load_pc_rel, true, kLiteral);
+  SetMemRefType(cu, load_pc_rel, true, kLiteral);
   load_pc_rel->alias_info = reinterpret_cast<uintptr_t>(data_target);
   AppendLIR(cu, load_pc_rel);
   return load_pc_rel;
@@ -86,7 +87,7 @@
  * Determine whether value can be encoded as a Thumb2 modified
  * immediate.  If not, return -1.  If so, return i:imm3:a:bcdefgh form.
  */
-int ModifiedImmediate(uint32_t value)
+int ArmCodegen::ModifiedImmediate(uint32_t value)
 {
    int z_leading;
    int z_trailing;
@@ -124,7 +125,7 @@
  * 1) r_dest is freshly returned from AllocTemp or
  * 2) The codegen is under fixed register usage
  */
-LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value)
+LIR* ArmCodegen::LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value)
 {
   LIR* res;
   int mod_imm;
@@ -160,7 +161,7 @@
   }
   LIR* load_pc_rel = RawLIR(cu, cu->current_dalvik_offset,
                           kThumb2LdrPcRel12, r_dest, 0, 0, 0, 0, data_target);
-  SetMemRefType(load_pc_rel, true, kLiteral);
+  SetMemRefType(cu, load_pc_rel, true, kLiteral);
   load_pc_rel->alias_info = reinterpret_cast<uintptr_t>(data_target);
   res = load_pc_rel;
   AppendLIR(cu, load_pc_rel);
@@ -175,13 +176,14 @@
   return res;
 }
 
-LIR* OpBranchUnconditional(CompilationUnit* cu, OpKind op)
+LIR* ArmCodegen::OpUnconditionalBranch(CompilationUnit* cu, LIR* target)
 {
-  DCHECK_EQ(op, kOpUncondBr);
-  return NewLIR1(cu, kThumbBUncond, 0 /* offset to be patched */);
+  LIR* res = NewLIR1(cu, kThumbBUncond, 0 /* offset to be patched during assembly */);
+  res->target = target;
+  return res;
 }
 
-LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target)
+LIR* ArmCodegen::OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target)
 {
   LIR* branch = NewLIR2(cu, kThumb2BCond, 0 /* offset to be patched */,
                         ArmConditionEncoding(cc));
@@ -189,7 +191,7 @@
   return branch;
 }
 
-LIR* OpReg(CompilationUnit* cu, OpKind op, int r_dest_src)
+LIR* ArmCodegen::OpReg(CompilationUnit* cu, OpKind op, int r_dest_src)
 {
   ArmOpcode opcode = kThumbBkpt;
   switch (op) {
@@ -202,8 +204,8 @@
   return NewLIR1(cu, opcode, r_dest_src);
 }
 
-LIR* OpRegRegShift(CompilationUnit* cu, OpKind op, int r_dest_src1,
-                   int r_src2, int shift)
+LIR* ArmCodegen::OpRegRegShift(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2,
+                               int shift)
 {
   bool thumb_form = ((shift == 0) && ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2));
   ArmOpcode opcode = kThumbBkpt;
@@ -318,13 +320,13 @@
   }
 }
 
-LIR* OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2)
+LIR* ArmCodegen::OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2)
 {
   return OpRegRegShift(cu, op, r_dest_src1, r_src2, 0);
 }
 
-LIR* OpRegRegRegShift(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
-            int r_src2, int shift)
+LIR* ArmCodegen::OpRegRegRegShift(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
+                                  int r_src2, int shift)
 {
   ArmOpcode opcode = kThumbBkpt;
   bool thumb_form = (shift == 0) && ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1) &&
@@ -390,14 +392,12 @@
   }
 }
 
-LIR* OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
-                 int r_src2)
+LIR* ArmCodegen::OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int r_src2)
 {
   return OpRegRegRegShift(cu, op, r_dest, r_src1, r_src2, 0);
 }
 
-LIR* OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
-                 int value)
+LIR* ArmCodegen::OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int value)
 {
   LIR* res;
   bool neg = (value < 0);
@@ -518,7 +518,7 @@
 }
 
 /* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
-LIR* OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value)
+LIR* ArmCodegen::OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value)
 {
   bool neg = (value < 0);
   int abs_value = (neg) ? -value : value;
@@ -597,8 +597,8 @@
   return res;
 }
 
-LIR* LoadConstantValueWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi,
-               int val_lo, int val_hi)
+LIR* ArmCodegen::LoadConstantValueWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi,
+                                       int val_lo, int val_hi)
 {
   int encoded_imm = EncodeImmDouble(val_lo, val_hi);
   LIR* res;
@@ -614,7 +614,7 @@
       LIR* load_pc_rel =
           RawLIR(cu, cu->current_dalvik_offset, kThumb2Vldrd,
                  S2d(r_dest_lo, r_dest_hi), r15pc, 0, 0, 0, data_target);
-      SetMemRefType(load_pc_rel, true, kLiteral);
+      SetMemRefType(cu, load_pc_rel, true, kLiteral);
       load_pc_rel->alias_info = reinterpret_cast<uintptr_t>(data_target);
       AppendLIR(cu, load_pc_rel);
       res = load_pc_rel;
@@ -626,12 +626,12 @@
   return res;
 }
 
-int EncodeShift(int code, int amount) {
+int ArmCodegen::EncodeShift(int code, int amount) {
   return ((amount & 0x1f) << 2) | code;
 }
 
-LIR* LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest,
-                     int scale, OpSize size)
+LIR* ArmCodegen::LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest,
+                                 int scale, OpSize size)
 {
   bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_dest);
   LIR* load;
@@ -695,8 +695,8 @@
   return load;
 }
 
-LIR* StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src,
-                      int scale, OpSize size)
+LIR* ArmCodegen::StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src,
+                                  int scale, OpSize size)
 {
   bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_src);
   LIR* store;
@@ -761,10 +761,10 @@
  * on base (which must have an associated s_reg and MIR).  If not
  * performing null check, incoming MIR can be null.
  */
-LIR* LoadBaseDispBody(CompilationUnit* cu, int rBase,
-                      int displacement, int r_dest, int r_dest_hi, OpSize size,
-                      int s_reg)
+LIR* ArmCodegen::LoadBaseDispBody(CompilationUnit* cu, int rBase, int displacement, int r_dest,
+                                  int r_dest_hi, OpSize size, int s_reg)
 {
+  Codegen* cg = cu->cg.get();
   LIR* res;
   LIR* load;
   ArmOpcode opcode = kThumbBkpt;
@@ -780,7 +780,7 @@
       if (ARM_FPREG(r_dest)) {
         if (ARM_SINGLEREG(r_dest)) {
           DCHECK(ARM_FPREG(r_dest_hi));
-          r_dest = S2d(r_dest, r_dest_hi);
+          r_dest = cg->S2d(r_dest, r_dest_hi);
         }
         opcode = kThumb2Vldrd;
         if (displacement <= 1020) {
@@ -865,36 +865,34 @@
     load = res = NewLIR3(cu, opcode, r_dest, rBase, encoded_disp);
   } else {
     int reg_offset = AllocTemp(cu);
-    res = LoadConstant(cu, reg_offset, encoded_disp);
-    load = LoadBaseIndexed(cu, rBase, reg_offset, r_dest, 0, size);
+    res = cg->LoadConstant(cu, reg_offset, encoded_disp);
+    load = cg->LoadBaseIndexed(cu, rBase, reg_offset, r_dest, 0, size);
     FreeTemp(cu, reg_offset);
   }
 
   // TODO: in future may need to differentiate Dalvik accesses w/ spills
   if (rBase == rARM_SP) {
-    AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, is64bit);
+    AnnotateDalvikRegAccess(cu, load, displacement >> 2, true /* is_load */, is64bit);
   }
   return load;
 }
 
-LIR* LoadBaseDisp(CompilationUnit* cu, int rBase,
-                  int displacement, int r_dest, OpSize size, int s_reg)
+LIR* ArmCodegen::LoadBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest,
+                              OpSize size, int s_reg)
 {
-  return LoadBaseDispBody(cu, rBase, displacement, r_dest, -1, size,
-                          s_reg);
+  return LoadBaseDispBody(cu, rBase, displacement, r_dest, -1, size, s_reg);
 }
 
- LIR* LoadBaseDispWide(CompilationUnit* cu, int rBase,
-                       int displacement, int r_dest_lo, int r_dest_hi, int s_reg)
+LIR* ArmCodegen::LoadBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_dest_lo,
+                                  int r_dest_hi, int s_reg)
 {
-  return LoadBaseDispBody(cu, rBase, displacement, r_dest_lo, r_dest_hi,
-                          kLong, s_reg);
+  return LoadBaseDispBody(cu, rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg);
 }
 
 
-LIR* StoreBaseDispBody(CompilationUnit* cu, int rBase, int displacement,
-                       int r_src, int r_src_hi, OpSize size)
-{
+LIR* ArmCodegen::StoreBaseDispBody(CompilationUnit* cu, int rBase, int displacement,
+                                   int r_src, int r_src_hi, OpSize size) {
+  Codegen* cg = cu->cg.get();
   LIR* res, *store;
   ArmOpcode opcode = kThumbBkpt;
   bool short_form = false;
@@ -913,7 +911,7 @@
       }
       if (ARM_SINGLEREG(r_src)) {
         DCHECK(ARM_FPREG(r_src_hi));
-        r_src = S2d(r_src, r_src_hi);
+        r_src = cg->S2d(r_src, r_src_hi);
       }
       opcode = kThumb2Vstrd;
       if (displacement <= 1020) {
@@ -971,37 +969,36 @@
     store = res = NewLIR3(cu, opcode, r_src, rBase, encoded_disp);
   } else {
     int r_scratch = AllocTemp(cu);
-    res = LoadConstant(cu, r_scratch, encoded_disp);
-    store = StoreBaseIndexed(cu, rBase, r_scratch, r_src, 0, size);
+    res = cg->LoadConstant(cu, r_scratch, encoded_disp);
+    store = cg->StoreBaseIndexed(cu, rBase, r_scratch, r_src, 0, size);
     FreeTemp(cu, r_scratch);
   }
 
   // TODO: In future, may need to differentiate Dalvik & spill accesses
   if (rBase == rARM_SP) {
-    AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */,
-                            is64bit);
+    AnnotateDalvikRegAccess(cu, store, displacement >> 2, false /* is_load */, is64bit);
   }
   return res;
 }
 
-LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement,
-                   int r_src, OpSize size)
+LIR* ArmCodegen::StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
+                               OpSize size)
 {
   return StoreBaseDispBody(cu, rBase, displacement, r_src, -1, size);
 }
 
-LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement,
-                       int r_src_lo, int r_src_hi)
+LIR* ArmCodegen::StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement,
+                                   int r_src_lo, int r_src_hi)
 {
   return StoreBaseDispBody(cu, rBase, displacement, r_src_lo, r_src_hi, kLong);
 }
 
-void LoadPair(CompilationUnit* cu, int base, int low_reg, int high_reg)
+void ArmCodegen::LoadPair(CompilationUnit* cu, int base, int low_reg, int high_reg)
 {
   LoadBaseDispWide(cu, base, 0, low_reg, high_reg, INVALID_SREG);
 }
 
-LIR* FpRegCopy(CompilationUnit* cu, int r_dest, int r_src)
+LIR* ArmCodegen::OpFpRegCopy(CompilationUnit* cu, int r_dest, int r_src)
 {
   int opcode;
   DCHECK_EQ(ARM_DOUBLEREG(r_dest), ARM_DOUBLEREG(r_src));
@@ -1022,38 +1019,35 @@
   return res;
 }
 
-LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset)
+LIR* ArmCodegen::OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset)
 {
   LOG(FATAL) << "Unexpected use of OpThreadMem for Arm";
   return NULL;
 }
 
-LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp)
+LIR* ArmCodegen::OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp)
 {
   LOG(FATAL) << "Unexpected use of OpMem for Arm";
   return NULL;
 }
 
-LIR* StoreBaseIndexedDisp(CompilationUnit *cu,
-                          int rBase, int r_index, int scale, int displacement,
-                          int r_src, int r_src_hi,
-                          OpSize size, int s_reg)
+LIR* ArmCodegen::StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+                                      int displacement, int r_src, int r_src_hi, OpSize size,
+                                      int s_reg)
 {
   LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm";
   return NULL;
 }
 
-LIR* OpRegMem(CompilationUnit *cu, OpKind op, int r_dest, int rBase,
-              int offset)
+LIR* ArmCodegen::OpRegMem(CompilationUnit *cu, OpKind op, int r_dest, int rBase, int offset)
 {
   LOG(FATAL) << "Unexpected use of OpRegMem for Arm";
   return NULL;
 }
 
-LIR* LoadBaseIndexedDisp(CompilationUnit *cu,
-                         int rBase, int r_index, int scale, int displacement,
-                         int r_dest, int r_dest_hi,
-                         OpSize size, int s_reg)
+LIR* ArmCodegen::LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
+                                     int s_reg)
 {
   LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm";
   return NULL;
diff --git a/src/compiler/codegen/codegen.h b/src/compiler/codegen/codegen.h
new file mode 100644
index 0000000..9bc306d
--- /dev/null
+++ b/src/compiler/codegen/codegen.h
@@ -0,0 +1,395 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_CODEGEN_CODEGEN_H_
+#define ART_SRC_COMPILER_CODEGEN_CODEGEN_H_
+
+#include "../compiler_ir.h"
+
+namespace art {
+
+// Set to 1 to measure cost of suspend check.
+#define NO_SUSPEND 0
+
+#define IS_BINARY_OP         (1ULL << kIsBinaryOp)
+#define IS_BRANCH            (1ULL << kIsBranch)
+#define IS_IT                (1ULL << kIsIT)
+#define IS_LOAD              (1ULL << kMemLoad)
+#define IS_QUAD_OP           (1ULL << kIsQuadOp)
+#define IS_QUIN_OP           (1ULL << kIsQuinOp)
+#define IS_SEXTUPLE_OP       (1ULL << kIsSextupleOp)
+#define IS_STORE             (1ULL << kMemStore)
+#define IS_TERTIARY_OP       (1ULL << kIsTertiaryOp)
+#define IS_UNARY_OP          (1ULL << kIsUnaryOp)
+#define NEEDS_FIXUP          (1ULL << kPCRelFixup)
+#define NO_OPERAND           (1ULL << kNoOperand)
+#define REG_DEF0             (1ULL << kRegDef0)
+#define REG_DEF1             (1ULL << kRegDef1)
+#define REG_DEFA             (1ULL << kRegDefA)
+#define REG_DEFD             (1ULL << kRegDefD)
+#define REG_DEF_FPCS_LIST0   (1ULL << kRegDefFPCSList0)
+#define REG_DEF_FPCS_LIST2   (1ULL << kRegDefFPCSList2)
+#define REG_DEF_LIST0        (1ULL << kRegDefList0)
+#define REG_DEF_LIST1        (1ULL << kRegDefList1)
+#define REG_DEF_LR           (1ULL << kRegDefLR)
+#define REG_DEF_SP           (1ULL << kRegDefSP)
+#define REG_USE0             (1ULL << kRegUse0)
+#define REG_USE1             (1ULL << kRegUse1)
+#define REG_USE2             (1ULL << kRegUse2)
+#define REG_USE3             (1ULL << kRegUse3)
+#define REG_USE4             (1ULL << kRegUse4)
+#define REG_USEA             (1ULL << kRegUseA)
+#define REG_USEC             (1ULL << kRegUseC)
+#define REG_USED             (1ULL << kRegUseD)
+#define REG_USE_FPCS_LIST0   (1ULL << kRegUseFPCSList0)
+#define REG_USE_FPCS_LIST2   (1ULL << kRegUseFPCSList2)
+#define REG_USE_LIST0        (1ULL << kRegUseList0)
+#define REG_USE_LIST1        (1ULL << kRegUseList1)
+#define REG_USE_LR           (1ULL << kRegUseLR)
+#define REG_USE_PC           (1ULL << kRegUsePC)
+#define REG_USE_SP           (1ULL << kRegUseSP)
+#define SETS_CCODES          (1ULL << kSetsCCodes)
+#define USES_CCODES          (1ULL << kUsesCCodes)
+
+// Common combo register usage patterns.
+#define REG_DEF01            (REG_DEF0 | REG_DEF1)
+#define REG_DEF01_USE2       (REG_DEF0 | REG_DEF1 | REG_USE2)
+#define REG_DEF0_USE01       (REG_DEF0 | REG_USE01)
+#define REG_DEF0_USE0        (REG_DEF0 | REG_USE0)
+#define REG_DEF0_USE12       (REG_DEF0 | REG_USE12)
+#define REG_DEF0_USE1        (REG_DEF0 | REG_USE1)
+#define REG_DEF0_USE2        (REG_DEF0 | REG_USE2)
+#define REG_DEFAD_USEAD      (REG_DEFAD_USEA | REG_USED)
+#define REG_DEFAD_USEA       (REG_DEFA_USEA | REG_DEFD)
+#define REG_DEFA_USEA        (REG_DEFA | REG_USEA)
+#define REG_USE012           (REG_USE01 | REG_USE2)
+#define REG_USE014           (REG_USE01 | REG_USE4)
+#define REG_USE01            (REG_USE0 | REG_USE1)
+#define REG_USE02            (REG_USE0 | REG_USE2)
+#define REG_USE12            (REG_USE1 | REG_USE2)
+#define REG_USE23            (REG_USE2 | REG_USE3)
+
+typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int, uint32_t dex_idx,
+                            uint32_t method_idx, uintptr_t direct_code,
+                            uintptr_t direct_method, InvokeType type);
+
+// Target-specific initialization.
+bool InitArmCodegen(CompilationUnit* cu);
+bool InitMipsCodegen(CompilationUnit* cu);
+bool InitX86Codegen(CompilationUnit* cu);
+
+class Codegen {
+
+  public:
+
+    virtual ~Codegen() {}
+
+    // Shared by all targets - implemented in gen_common.cc.
+    void HandleSuspendLaunchPads(CompilationUnit *cu);
+    void HandleIntrinsicLaunchPads(CompilationUnit *cu);
+    void HandleThrowLaunchPads(CompilationUnit *cu);
+    void GenBarrier(CompilationUnit* cu);
+    LIR* GenCheck(CompilationUnit* cu, ConditionCode c_code, ThrowKind kind);
+    LIR* GenImmedCheck(CompilationUnit* cu, ConditionCode c_code, int reg, int imm_val,
+                       ThrowKind kind);
+    LIR* GenNullCheck(CompilationUnit* cu, int s_reg, int m_reg, int opt_flags);
+    LIR* GenRegRegCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int reg2,
+                        ThrowKind kind);
+    void GenCompareAndBranch(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_src1,
+                             RegLocation rl_src2, LIR* taken, LIR* fall_through);
+    void GenCompareZeroAndBranch(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_src,
+                                 LIR* taken, LIR* fall_through);
+    void GenIntToLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+    void GenIntNarrowing(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                         RegLocation rl_src);
+    void GenNewArray(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest,
+                     RegLocation rl_src);
+    void GenFilledNewArray(CompilationUnit* cu, CallInfo* info);
+    void GenSput(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_src,
+                 bool is_long_or_double, bool is_object);
+    void GenSget(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_dest,
+                 bool is_long_or_double, bool is_object);
+    void GenShowTarget(CompilationUnit* cu);
+    void GenIGet(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size,
+                 RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
+    void GenIPut(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size,
+                 RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
+    void GenConstClass(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest);
+    void GenConstString(CompilationUnit* cu, uint32_t string_idx, RegLocation rl_dest);
+    void GenNewInstance(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest);
+    void GenMoveException(CompilationUnit* cu, RegLocation rl_dest);
+    void GenThrow(CompilationUnit* cu, RegLocation rl_src);
+    void GenInstanceof(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest,
+                       RegLocation rl_src);
+    void GenCheckCast(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_src);
+    void GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+                        RegLocation rl_index, RegLocation rl_src, int scale);
+    void GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+                     RegLocation rl_index, RegLocation rl_dest, int scale);
+    void GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+                     RegLocation rl_index, RegLocation rl_src, int scale);
+    void GenLong3Addr(CompilationUnit* cu, OpKind first_op, OpKind second_op, RegLocation rl_dest,
+                      RegLocation rl_src1, RegLocation rl_src2);
+    bool GenShiftOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                        RegLocation rl_src1, RegLocation rl_shift);
+    bool GenArithOpInt(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                       RegLocation rl_src1, RegLocation rl_src2);
+    bool GenArithOpIntLit(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                          RegLocation rl_src, int lit);
+    bool GenArithOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                        RegLocation rl_src1, RegLocation rl_src2);
+    bool GenConversionCall(CompilationUnit* cu, int func_offset, RegLocation rl_dest,
+                           RegLocation rl_src);
+    bool GenArithOpFloatPortable(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                                 RegLocation rl_src1, RegLocation rl_src2);
+    bool GenArithOpDoublePortable(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                                  RegLocation rl_src1, RegLocation rl_src2);
+    bool GenConversionPortable(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                               RegLocation rl_src);
+    void GenSuspendTest(CompilationUnit* cu, int opt_flags);
+    void GenSuspendTestAndBranch(CompilationUnit* cu, int opt_flags, LIR* target);
+
+    // Shared by all targets - implemented in gen_invoke.cc.
+    int CallHelperSetup(CompilationUnit* cu, int helper_offset);
+    LIR* CallHelper(CompilationUnit* cu, int r_tgt, int helper_offset, bool safepoint_pc);
+    void CallRuntimeHelperImm(CompilationUnit* cu, int helper_offset, int arg0, bool safepoint_pc);
+    void CallRuntimeHelperReg(CompilationUnit* cu, int helper_offset, int arg0, bool safepoint_pc);
+    void CallRuntimeHelperRegLocation(CompilationUnit* cu, int helper_offset, RegLocation arg0,
+                                       bool safepoint_pc);
+    void CallRuntimeHelperImmImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+                                 bool safepoint_pc);
+    void CallRuntimeHelperImmRegLocation(CompilationUnit* cu, int helper_offset, int arg0,
+                                         RegLocation arg1, bool safepoint_pc);
+    void CallRuntimeHelperRegLocationImm(CompilationUnit* cu, int helper_offset, RegLocation arg0,
+                                         int arg1, bool safepoint_pc);
+    void CallRuntimeHelperImmReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+                                 bool safepoint_pc);
+    void CallRuntimeHelperRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+                                 bool safepoint_pc);
+    void CallRuntimeHelperImmMethod(CompilationUnit* cu, int helper_offset, int arg0,
+                                    bool safepoint_pc);
+    void CallRuntimeHelperRegLocationRegLocation(CompilationUnit* cu, int helper_offset,
+                                                 RegLocation arg0, RegLocation arg1,
+                                                 bool safepoint_pc);
+    void CallRuntimeHelperRegReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+                                 bool safepoint_pc);
+    void CallRuntimeHelperRegRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+                                    int arg2, bool safepoint_pc);
+    void CallRuntimeHelperImmMethodRegLocation(CompilationUnit* cu, int helper_offset, int arg0,
+                                               RegLocation arg2, bool safepoint_pc);
+    void CallRuntimeHelperImmMethodImm(CompilationUnit* cu, int helper_offset, int arg0, int arg2,
+                                       bool safepoint_pc);
+    void CallRuntimeHelperImmRegLocationRegLocation(CompilationUnit* cu, int helper_offset,
+                                                    int arg0, RegLocation arg1, RegLocation arg2,
+                                                    bool safepoint_pc);
+    void GenInvoke(CompilationUnit* cu, CallInfo* info);
+    void FlushIns(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method);
+    int GenDalvikArgsNoRange(CompilationUnit* cu, CallInfo* info, int call_state, LIR** pcrLabel,
+                             NextCallInsn next_call_insn, uint32_t dex_idx, uint32_t method_idx,
+                             uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+                             bool skip_this);
+    int GenDalvikArgsRange(CompilationUnit* cu, CallInfo* info, int call_state, LIR** pcrLabel,
+                           NextCallInsn next_call_insn, uint32_t dex_idx, uint32_t method_idx,
+                           uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+                           bool skip_this);
+    RegLocation InlineTarget(CompilationUnit* cu, CallInfo* info);
+    RegLocation InlineTargetWide(CompilationUnit* cu, CallInfo* info);
+    CallInfo* NewMemCallInfo(CompilationUnit* cu, BasicBlock* bb, MIR* mir, InvokeType type,
+                             bool is_range);
+    bool GenInlinedCharAt(CompilationUnit* cu, CallInfo* info);
+    bool GenInlinedStringIsEmptyOrLength(CompilationUnit* cu, CallInfo* info, bool is_empty);
+    bool GenInlinedAbsInt(CompilationUnit *cu, CallInfo* info);
+    bool GenInlinedAbsLong(CompilationUnit *cu, CallInfo* info);
+    bool GenInlinedFloatCvt(CompilationUnit *cu, CallInfo* info);
+    bool GenInlinedDoubleCvt(CompilationUnit *cu, CallInfo* info);
+    bool GenInlinedIndexOf(CompilationUnit* cu, CallInfo* info, bool zero_based);
+    bool GenInlinedStringCompareTo(CompilationUnit* cu, CallInfo* info);
+    bool GenIntrinsic(CompilationUnit* cu, CallInfo* info);
+
+    // Shared by all targets - implemented in gen_loadstore.cc.
+    RegLocation LoadCurrMethod(CompilationUnit *cu);
+    void LoadCurrMethodDirect(CompilationUnit *cu, int r_tgt);
+    LIR* LoadConstant(CompilationUnit* cu, int r_dest, int value);
+    LIR* LoadWordDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest);
+    RegLocation LoadValue(CompilationUnit* cu, RegLocation rl_src, RegisterClass op_kind);
+    RegLocation LoadValueWide(CompilationUnit* cu, RegLocation rl_src, RegisterClass op_kind);
+    void LoadValueDirect(CompilationUnit* cu, RegLocation rl_src, int r_dest);
+    void LoadValueDirectFixed(CompilationUnit* cu, RegLocation rl_src, int r_dest);
+    void LoadValueDirectWide(CompilationUnit* cu, RegLocation rl_src, int reg_lo, int reg_hi);
+    void LoadValueDirectWideFixed(CompilationUnit* cu, RegLocation rl_src, int reg_lo, int reg_hi);
+    LIR* StoreWordDisp(CompilationUnit* cu, int rBase, int displacement, int r_src);
+    void StoreValue(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+    void StoreValueWide(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+
+    // Required for target - codegen helpers.
+    virtual bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+                                    RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
+    virtual int LoadHelper(CompilationUnit* cu, int offset) = 0;
+    virtual LIR* LoadBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest,
+                              OpSize size, int s_reg) = 0;
+    virtual LIR* LoadBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_dest_lo,
+                                  int r_dest_hi, int s_reg) = 0;
+    virtual LIR* LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest, int scale,
+                                 OpSize size) = 0;
+    virtual LIR* LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
+                                     int s_reg) = 0;
+    virtual LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value) = 0;
+    virtual LIR* LoadConstantValueWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi,
+                                       int val_lo, int val_hi) = 0;
+    virtual void LoadPair(CompilationUnit* cu, int base, int low_reg, int high_reg) = 0;
+    virtual LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
+                               OpSize size) = 0;
+    virtual LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_src_lo,
+                                   int r_src_hi) = 0;
+    virtual LIR* StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src, int scale,
+                                  OpSize size) = 0;
+    virtual LIR* StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+                                      int displacement, int r_src, int r_src_hi, OpSize size,
+                                      int s_reg) = 0;
+    virtual void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg) = 0;
+
+    // Required for target - register utilities.
+    virtual bool IsFpReg(int reg) = 0;
+    virtual bool SameRegType(int reg1, int reg2) = 0;
+    virtual int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class) = 0;
+    virtual int AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class) = 0;
+    virtual int S2d(int low_reg, int high_reg) = 0;
+    virtual int TargetReg(SpecialTargetRegister reg) = 0;
+    virtual RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg) = 0;
+    virtual RegLocation GetReturnAlt(CompilationUnit* cu) = 0;
+    virtual RegLocation GetReturnWideAlt(CompilationUnit* cu) = 0;
+    virtual RegLocation LocCReturn() = 0;
+    virtual RegLocation LocCReturnDouble() = 0;
+    virtual RegLocation LocCReturnFloat() = 0;
+    virtual RegLocation LocCReturnWide() = 0;
+    virtual uint32_t FpRegMask() = 0;
+    virtual uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg) = 0;
+    virtual void AdjustSpillMask(CompilationUnit* cu) = 0;
+    virtual void ClobberCalleeSave(CompilationUnit *cu) = 0;
+    virtual void FlushReg(CompilationUnit* cu, int reg) = 0;
+    virtual void FlushRegWide(CompilationUnit* cu, int reg1, int reg2) = 0;
+    virtual void FreeCallTemps(CompilationUnit* cu) = 0;
+    virtual void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep, RegLocation rl_free) = 0;
+    virtual void LockCallTemps(CompilationUnit* cu) = 0;
+    virtual void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg) = 0;
+    virtual void CompilerInitializeRegAlloc(CompilationUnit* cu) = 0;
+
+    // Required for target - miscellaneous.
+    virtual AssemblerStatus AssembleInstructions(CompilationUnit* cu, uintptr_t start_addr) = 0;
+    virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix) = 0;
+    virtual void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir) = 0;
+    virtual const char* GetTargetInstFmt(int opcode) = 0;
+    virtual const char* GetTargetInstName(int opcode) = 0;
+    virtual int AssignInsnOffsets(CompilationUnit* cu) = 0;
+    virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) = 0;
+    virtual uint64_t GetPCUseDefEncoding() = 0;
+    virtual uint64_t GetTargetInstFlags(int opcode) = 0;
+    virtual int GetInsnSize(LIR* lir) = 0;
+    virtual bool IsUnconditionalBranch(LIR* lir) = 0;
+
+    // Required for target - Dalvik-level generators.
+    virtual bool GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) = 0;
+    virtual bool GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) = 0;
+    virtual bool GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode,
+                                  RegLocation rl_dest, RegLocation rl_src1,
+                                  RegLocation rl_src2) = 0;
+    virtual bool GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+                                 RegLocation rl_src1, RegLocation rl_src2) = 0;
+    virtual bool GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                          RegLocation rl_src1, RegLocation rl_src2) = 0;
+    virtual bool GenConversion(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                               RegLocation rl_src) = 0;
+    virtual bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) = 0;
+    virtual bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min) = 0;
+    virtual bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) = 0;
+    virtual bool GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src) = 0;
+    virtual bool GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                           RegLocation rl_src2) = 0;
+    virtual bool GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) = 0;
+    virtual bool GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) = 0;
+    virtual LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int base,
+                                int offset, ThrowKind kind) = 0;
+    virtual RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int reg_hi,
+                                  bool is_div) = 0;
+    virtual RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int lit,
+                                     bool is_div) = 0;
+    virtual void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) = 0;
+    virtual void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi) = 0;
+    virtual void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
+                                  RegLocation rl_method) = 0;
+    virtual void GenExitSequence(CompilationUnit* cu) = 0;
+    virtual void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset,
+                                  RegLocation rl_src) = 0;
+    virtual void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
+                                     bool is_double) = 0;
+    virtual void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir) = 0;
+    virtual void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind) = 0;
+    virtual void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src) = 0;
+    virtual void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src) = 0;
+    virtual void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+                                               RegLocation rl_result, int lit, int first_bit,
+                                               int second_bit) = 0;
+    virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src) = 0;
+    virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src) = 0;
+    virtual void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+                                 RegLocation rl_src) = 0;
+    virtual void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+                                 RegLocation rl_src) = 0;
+    virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+                                SpecialCaseHandler special_case) = 0;
+
+    // Required for target - single operation generators.
+    virtual LIR* OpUnconditionalBranch(CompilationUnit* cu, LIR* target) = 0;
+    virtual LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
+                             LIR* target) = 0;
+    virtual LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg, int check_value,
+                                LIR* target) = 0;
+    virtual LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target) = 0;
+    virtual LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg,
+                                LIR* target) = 0;
+    virtual LIR* OpFpRegCopy(CompilationUnit* cu, int r_dest, int r_src) = 0;
+    virtual LIR* OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide) = 0;
+    virtual LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp) = 0;
+    virtual LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target) = 0;
+    virtual LIR* OpReg(CompilationUnit* cu, OpKind op, int r_dest_src) = 0;
+    virtual LIR* OpRegCopy(CompilationUnit* cu, int r_dest, int r_src) = 0;
+    virtual LIR* OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src) = 0;
+    virtual LIR* OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value) = 0;
+    virtual LIR* OpRegMem(CompilationUnit* cu, OpKind op, int r_dest, int rBase, int offset) = 0;
+    virtual LIR* OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2) = 0;
+    virtual LIR* OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int value) = 0;
+    virtual LIR* OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
+                             int r_src2) = 0;
+    virtual LIR* OpTestSuspend(CompilationUnit* cu, LIR* target) = 0;
+    virtual LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset) = 0;
+    virtual LIR* OpVldm(CompilationUnit* cu, int rBase, int count) = 0;
+    virtual LIR* OpVstm(CompilationUnit* cu, int rBase, int count) = 0;
+    virtual void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale,
+                       int offset) = 0;
+    virtual void OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo,
+                               int src_hi) = 0;
+    virtual void OpTlsCmp(CompilationUnit* cu, int offset, int val) = 0;
+    };  // Class Codegen
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_CODEGEN_CODEGEN_H_
diff --git a/src/compiler/codegen/codegen_util.cc b/src/compiler/codegen/codegen_util.cc
index 9af5578..cf69ff9 100644
--- a/src/compiler/codegen/codegen_util.cc
+++ b/src/compiler/codegen/codegen_util.cc
@@ -23,17 +23,36 @@
 
 namespace art {
 
+void MarkSafepointPC(CompilationUnit* cu, LIR* inst)
+{
+  inst->def_mask = ENCODE_ALL;
+  LIR* safepoint_pc = NewLIR0(cu, kPseudoSafepointPC);
+  DCHECK_EQ(safepoint_pc->def_mask, ENCODE_ALL);
+}
+
+bool FastInstance(CompilationUnit* cu, uint32_t field_idx,
+                  int& field_offset, bool& is_volatile, bool is_put)
+{
+  OatCompilationUnit m_unit(cu->class_loader, cu->class_linker,
+               *cu->dex_file,
+               cu->code_item, cu->method_idx,
+               cu->access_flags);
+  return cu->compiler->ComputeInstanceFieldInfo(field_idx, &m_unit,
+           field_offset, is_volatile, is_put);
+}
+
 /* Convert an instruction to a NOP */
 void NopLIR( LIR* lir)
 {
   lir->flags.is_nop = true;
 }
 
-void SetMemRefType(LIR* lir, bool is_load, int mem_type)
+void SetMemRefType(CompilationUnit* cu, LIR* lir, bool is_load, int mem_type)
 {
   uint64_t *mask_ptr;
   uint64_t mask = ENCODE_MEM;;
-  DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE));
+  Codegen* cg = cu->cg.get();
+  DCHECK(cg->GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE));
   if (is_load) {
     mask_ptr = &lir->use_mask;
   } else {
@@ -55,7 +74,7 @@
       break;
     case kMustNotAlias:
       /* Currently only loads can be marked as kMustNotAlias */
-      DCHECK(!(GetTargetInstFlags(lir->opcode) & IS_STORE));
+      DCHECK(!(cg->GetTargetInstFlags(lir->opcode) & IS_STORE));
       *mask_ptr |= ENCODE_MUST_NOT_ALIAS;
       break;
     default:
@@ -66,9 +85,9 @@
 /*
  * Mark load/store instructions that access Dalvik registers through the stack.
  */
-void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit)
+void AnnotateDalvikRegAccess(CompilationUnit* cu, LIR* lir, int reg_id, bool is_load, bool is64bit)
 {
-  SetMemRefType(lir, is_load, kDalvikReg);
+  SetMemRefType(cu, lir, is_load, kDalvikReg);
 
   /*
    * Store the Dalvik register id in alias_info. Mark the MSB if it is a 64-bit
@@ -82,7 +101,8 @@
  */
 void SetupRegMask(CompilationUnit* cu, uint64_t* mask, int reg)
 {
-  *mask |= GetRegMaskCommon(cu, reg);
+  Codegen* cg = cu->cg.get();
+  *mask |= cg->GetRegMaskCommon(cu, reg);
 }
 
 /*
@@ -91,25 +111,26 @@
 void SetupResourceMasks(CompilationUnit* cu, LIR* lir)
 {
   int opcode = lir->opcode;
+  Codegen* cg = cu->cg.get();
 
   if (opcode <= 0) {
     lir->use_mask = lir->def_mask = 0;
     return;
   }
 
-  uint64_t flags = GetTargetInstFlags(opcode);
+  uint64_t flags = cg->GetTargetInstFlags(opcode);
 
   if (flags & NEEDS_FIXUP) {
     lir->flags.pcRelFixup = true;
   }
 
   /* Get the starting size of the instruction's template */
-  lir->flags.size = GetInsnSize(lir);
+  lir->flags.size = cg->GetInsnSize(lir);
 
   /* Set up the mask for resources that are updated */
   if (flags & (IS_LOAD | IS_STORE)) {
     /* Default to heap - will catch specialized classes later */
-    SetMemRefType(lir, flags & IS_LOAD, kHeapRef);
+    SetMemRefType(cu, lir, flags & IS_LOAD, kHeapRef);
   }
 
   /*
@@ -149,7 +170,7 @@
   }
 
   // Handle target-specific actions
-  SetupTargetResourceMasks(cu, lir);
+  cg->SetupTargetResourceMasks(cu, lir);
 }
 
 /*
@@ -164,6 +185,7 @@
   int offset = lir->offset;
   int dest = lir->operands[0];
   const bool dump_nop = (cu->enable_debug & (1 << kDebugShowNops));
+  Codegen* cg = cu->cg.get();
 
   /* Handle pseudo-ops individually, and all regular insns as a group */
   switch (lir->opcode) {
@@ -228,10 +250,10 @@
       if (lir->flags.is_nop && !dump_nop) {
         break;
       } else {
-        std::string op_name(BuildInsnString(GetTargetInstName(lir->opcode),
-                                            lir, base_addr));
-        std::string op_operands(BuildInsnString(GetTargetInstFmt(lir->opcode),
-                                                lir, base_addr));
+        std::string op_name(cg->BuildInsnString(cg->GetTargetInstName(lir->opcode),
+                                               lir, base_addr));
+        std::string op_operands(cg->BuildInsnString(cg->GetTargetInstFmt(lir->opcode),
+                                                    lir, base_addr));
         LOG(INFO) << StringPrintf("%05x: %-9s%s%s",
                                   reinterpret_cast<unsigned int>(base_addr + offset),
                                   op_name.c_str(), op_operands.c_str(),
@@ -250,12 +272,13 @@
 
 void DumpPromotionMap(CompilationUnit *cu)
 {
+  Codegen* cg = cu->cg.get();
   int num_regs = cu->num_dalvik_registers + cu->num_compiler_temps + 1;
   for (int i = 0; i < num_regs; i++) {
     PromotionMap v_reg_map = cu->promotion_map[i];
     std::string buf;
     if (v_reg_map.fp_location == kLocPhysReg) {
-      StringAppendF(&buf, " : s%d", v_reg_map.FpReg & FpRegMask());
+      StringAppendF(&buf, " : s%d", v_reg_map.FpReg & cg->FpRegMask());
     }
 
     std::string buf3;
@@ -359,8 +382,9 @@
  */
 LIR* NewLIR0(CompilationUnit* cu, int opcode)
 {
-  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & NO_OPERAND))
-      << GetTargetInstName(opcode) << " " << opcode << " "
+  Codegen* cg = cu->cg.get();
+  DCHECK(is_pseudo_opcode(opcode) || (cg->GetTargetInstFlags(opcode) & NO_OPERAND))
+      << cg->GetTargetInstName(opcode) << " " << opcode << " "
       << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
       << cu->current_dalvik_offset;
   LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode);
@@ -371,8 +395,9 @@
 LIR* NewLIR1(CompilationUnit* cu, int opcode,
                int dest)
 {
-  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_UNARY_OP))
-      << GetTargetInstName(opcode) << " " << opcode << " "
+  Codegen* cg = cu->cg.get();
+  DCHECK(is_pseudo_opcode(opcode) || (cg->GetTargetInstFlags(opcode) & IS_UNARY_OP))
+      << cg->GetTargetInstName(opcode) << " " << opcode << " "
       << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
       << cu->current_dalvik_offset;
   LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest);
@@ -383,8 +408,9 @@
 LIR* NewLIR2(CompilationUnit* cu, int opcode,
                int dest, int src1)
 {
-  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_BINARY_OP))
-      << GetTargetInstName(opcode) << " " << opcode << " "
+  Codegen* cg = cu->cg.get();
+  DCHECK(is_pseudo_opcode(opcode) || (cg->GetTargetInstFlags(opcode) & IS_BINARY_OP))
+      << cg->GetTargetInstName(opcode) << " " << opcode << " "
       << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
       << cu->current_dalvik_offset;
   LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest, src1);
@@ -395,8 +421,9 @@
 LIR* NewLIR3(CompilationUnit* cu, int opcode,
                int dest, int src1, int src2)
 {
-  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_TERTIARY_OP))
-      << GetTargetInstName(opcode) << " " << opcode << " "
+  Codegen* cg = cu->cg.get();
+  DCHECK(is_pseudo_opcode(opcode) || (cg->GetTargetInstFlags(opcode) & IS_TERTIARY_OP))
+      << cg->GetTargetInstName(opcode) << " " << opcode << " "
       << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
       << cu->current_dalvik_offset;
   LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest, src1, src2);
@@ -407,8 +434,9 @@
 LIR* NewLIR4(CompilationUnit* cu, int opcode,
       int dest, int src1, int src2, int info)
 {
-  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_QUAD_OP))
-      << GetTargetInstName(opcode) << " " << opcode << " "
+  Codegen* cg = cu->cg.get();
+  DCHECK(is_pseudo_opcode(opcode) || (cg->GetTargetInstFlags(opcode) & IS_QUAD_OP))
+      << cg->GetTargetInstName(opcode) << " " << opcode << " "
       << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
       << cu->current_dalvik_offset;
   LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest, src1, src2, info);
@@ -419,8 +447,9 @@
 LIR* NewLIR5(CompilationUnit* cu, int opcode,
        int dest, int src1, int src2, int info1, int info2)
 {
-  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_QUIN_OP))
-      << GetTargetInstName(opcode) << " " << opcode << " "
+  Codegen* cg = cu->cg.get();
+  DCHECK(is_pseudo_opcode(opcode) || (cg->GetTargetInstFlags(opcode) & IS_QUIN_OP))
+      << cg->GetTargetInstName(opcode) << " " << opcode << " "
       << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
       << cu->current_dalvik_offset;
   LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest, src1, src2, info1, info2);
@@ -840,7 +869,8 @@
  */
 static void AssignOffsets(CompilationUnit* cu)
 {
-  int offset = AssignInsnOffsets(cu);
+  Codegen* cg = cu->cg.get();
+  int offset = cg->AssignInsnOffsets(cu);
 
   /* Const values have to be word aligned */
   offset = (offset + 3) & ~3;
@@ -864,6 +894,7 @@
  */
 void AssembleLIR(CompilationUnit* cu)
 {
+  Codegen* cg = cu->cg.get();
   AssignOffsets(cu);
   /*
    * Assemble here.  Note that we generate code with optimistic assumptions
@@ -871,7 +902,7 @@
    */
 
   while (true) {
-    AssemblerStatus res = AssembleInstructions(cu, 0);
+    AssemblerStatus res = cg->AssembleInstructions(cu, 0);
     if (res == kSuccess) {
       break;
     } else {
diff --git a/src/compiler/codegen/codegen_util.h b/src/compiler/codegen/codegen_util.h
index 380203a..6a9b6cd 100644
--- a/src/compiler/codegen/codegen_util.h
+++ b/src/compiler/codegen/codegen_util.h
@@ -19,19 +19,23 @@
 
 namespace art {
 
+void MarkSafepointPC(CompilationUnit* cu, LIR* inst);
+bool FastInstance(CompilationUnit* cu, uint32_t field_idx,
+                  int& field_offset, bool& is_volatile, bool is_put);
+void SetupResourceMasks(CompilationUnit* cu, LIR* lir);
 inline int32_t s4FromSwitchData(const void* switch_data) { return *reinterpret_cast<const int32_t*>(switch_data); }
 inline RegisterClass oat_reg_class_by_size(OpSize size) { return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte || size == kSignedByte ) ? kCoreReg : kAnyReg; }
 void AssembleLIR(CompilationUnit* cu);
-void SetMemRefType(LIR* lir, bool is_load, int mem_type);
-void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
+void SetMemRefType(CompilationUnit* cu, LIR* lir, bool is_load, int mem_type);
+void AnnotateDalvikRegAccess(CompilationUnit* cu, LIR* lir, int reg_id, bool is_load, bool is64bit);
 uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg);
 void SetupRegMask(CompilationUnit* cu, uint64_t* mask, int reg);
 void SetupResourceMasks(CompilationUnit* cu, LIR* lir);
 void DumpLIRInsn(CompilationUnit* cu, LIR* arg, unsigned char* base_addr);
 void DumpPromotionMap(CompilationUnit *cu);
 void CodegenDump(CompilationUnit* cu);
-// TODO: remove default parameters
-LIR* RawLIR(CompilationUnit* cu, int dalvik_offset, int opcode, int op0 = 0, int op1 = 0, int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
+LIR* RawLIR(CompilationUnit* cu, int dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
+            int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
 LIR* NewLIR0(CompilationUnit* cu, int opcode);
 LIR* NewLIR1(CompilationUnit* cu, int opcode, int dest);
 LIR* NewLIR2(CompilationUnit* cu, int opcode, int dest, int src1);
@@ -46,6 +50,7 @@
 void DumpSparseSwitchTable(const uint16_t* table);
 void DumpPackedSwitchTable(const uint16_t* table);
 LIR* MarkBoundary(CompilationUnit* cu, int offset, const char* inst_str);
+void NopLIR(LIR* lir);
 
 }  // namespace art
 
diff --git a/src/compiler/codegen/compiler_codegen.h b/src/compiler/codegen/compiler_codegen.h
deleted file mode 100644
index 2d13965..0000000
--- a/src/compiler/codegen/compiler_codegen.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_COMPILER_COMPILERCODEGEN_H_
-#define ART_SRC_COMPILER_COMPILERCODEGEN_H_
-
-#include "../compiler_ir.h"
-
-namespace art {
-
-// Set to 1 to measure cost of suspend check
-#define NO_SUSPEND 0
-
-/* Bit flags describing the behavior of native opcodes (Arm/Mips/x86 combined) */
-enum OpFeatureFlags {
-  kIsBranch = 0,
-  kNoOperand,
-  kIsUnaryOp,
-  kIsBinaryOp,
-  kIsTertiaryOp,
-  kIsQuadOp,
-  kIsQuinOp,
-  kIsSextupleOp,
-  kIsIT,
-  kMemLoad,
-  kMemStore,
-  kPCRelFixup, // x86 FIXME: add NEEDS_FIXUP to instruction attributes
-  kRegDef0,
-  kRegDef1,
-  kRegDefA,
-  kRegDefD,
-  kRegDefFPCSList0,
-  kRegDefFPCSList2,
-  kRegDefList0,
-  kRegDefList1,
-  kRegDefList2,
-  kRegDefLR,
-  kRegDefSP,
-  kRegUse0,
-  kRegUse1,
-  kRegUse2,
-  kRegUse3,
-  kRegUse4,
-  kRegUseA,
-  kRegUseC,
-  kRegUseD,
-  kRegUseFPCSList0,
-  kRegUseFPCSList2,
-  kRegUseList0,
-  kRegUseList1,
-  kRegUseLR,
-  kRegUsePC,
-  kRegUseSP,
-  kSetsCCodes,
-  kUsesCCodes
-};
-
-#define IS_BINARY_OP         (1ULL << kIsBinaryOp)
-#define IS_BRANCH            (1ULL << kIsBranch)
-#define IS_IT                (1ULL << kIsIT)
-#define IS_LOAD              (1ULL << kMemLoad)
-#define IS_QUAD_OP           (1ULL << kIsQuadOp)
-#define IS_QUIN_OP           (1ULL << kIsQuinOp)
-#define IS_SEXTUPLE_OP       (1ULL << kIsSextupleOp)
-#define IS_STORE             (1ULL << kMemStore)
-#define IS_TERTIARY_OP       (1ULL << kIsTertiaryOp)
-#define IS_UNARY_OP          (1ULL << kIsUnaryOp)
-#define NEEDS_FIXUP          (1ULL << kPCRelFixup)
-#define NO_OPERAND           (1ULL << kNoOperand)
-#define REG_DEF0             (1ULL << kRegDef0)
-#define REG_DEF1             (1ULL << kRegDef1)
-#define REG_DEFA             (1ULL << kRegDefA)
-#define REG_DEFD             (1ULL << kRegDefD)
-#define REG_DEF_FPCS_LIST0   (1ULL << kRegDefFPCSList0)
-#define REG_DEF_FPCS_LIST2   (1ULL << kRegDefFPCSList2)
-#define REG_DEF_LIST0        (1ULL << kRegDefList0)
-#define REG_DEF_LIST1        (1ULL << kRegDefList1)
-#define REG_DEF_LR           (1ULL << kRegDefLR)
-#define REG_DEF_SP           (1ULL << kRegDefSP)
-#define REG_USE0             (1ULL << kRegUse0)
-#define REG_USE1             (1ULL << kRegUse1)
-#define REG_USE2             (1ULL << kRegUse2)
-#define REG_USE3             (1ULL << kRegUse3)
-#define REG_USE4             (1ULL << kRegUse4)
-#define REG_USEA             (1ULL << kRegUseA)
-#define REG_USEC             (1ULL << kRegUseC)
-#define REG_USED             (1ULL << kRegUseD)
-#define REG_USE_FPCS_LIST0   (1ULL << kRegUseFPCSList0)
-#define REG_USE_FPCS_LIST2   (1ULL << kRegUseFPCSList2)
-#define REG_USE_LIST0        (1ULL << kRegUseList0)
-#define REG_USE_LIST1        (1ULL << kRegUseList1)
-#define REG_USE_LR           (1ULL << kRegUseLR)
-#define REG_USE_PC           (1ULL << kRegUsePC)
-#define REG_USE_SP           (1ULL << kRegUseSP)
-#define SETS_CCODES          (1ULL << kSetsCCodes)
-#define USES_CCODES          (1ULL << kUsesCCodes)
-
-/* Common combo register usage patterns */
-#define REG_DEF01            (REG_DEF0 | REG_DEF1)
-#define REG_DEF01_USE2       (REG_DEF0 | REG_DEF1 | REG_USE2)
-#define REG_DEF0_USE01       (REG_DEF0 | REG_USE01)
-#define REG_DEF0_USE0        (REG_DEF0 | REG_USE0)
-#define REG_DEF0_USE12       (REG_DEF0 | REG_USE12)
-#define REG_DEF0_USE1        (REG_DEF0 | REG_USE1)
-#define REG_DEF0_USE2        (REG_DEF0 | REG_USE2)
-#define REG_DEFAD_USEAD      (REG_DEFAD_USEA | REG_USED)
-#define REG_DEFAD_USEA       (REG_DEFA_USEA | REG_DEFD)
-#define REG_DEFA_USEA        (REG_DEFA | REG_USEA)
-#define REG_USE012           (REG_USE01 | REG_USE2)
-#define REG_USE014           (REG_USE01 | REG_USE4)
-#define REG_USE01            (REG_USE0 | REG_USE1)
-#define REG_USE02            (REG_USE0 | REG_USE2)
-#define REG_USE12            (REG_USE1 | REG_USE2)
-#define REG_USE23            (REG_USE2 | REG_USE3)
-
-// TEMP
-#include "gen_loadstore.h"
-#include "gen_common.h"
-#include "gen_invoke.h"
-#include "target_list.h"
-
-}  // namespace art
-
-#endif  // ART_SRC_COMPILER_COMPILERCODEGEN_H_
diff --git a/src/compiler/codegen/gen_common.cc b/src/compiler/codegen/gen_common.cc
index 22b919a..8605b80 100644
--- a/src/compiler/codegen/gen_common.cc
+++ b/src/compiler/codegen/gen_common.cc
@@ -21,242 +21,26 @@
 
 namespace art {
 
-//TODO: remove decl.
-void GenInvoke(CompilationUnit* cu, CallInfo* info);
-
 /*
  * This source files contains "gen" codegen routines that should
  * be applicable to most targets.  Only mid-level support utilities
  * and "op" calls may be used here.
  */
 
-void MarkSafepointPC(CompilationUnit* cu, LIR* inst)
-{
-  inst->def_mask = ENCODE_ALL;
-  LIR* safepoint_pc = NewLIR0(cu, kPseudoSafepointPC);
-  DCHECK_EQ(safepoint_pc->def_mask, ENCODE_ALL);
-}
-
-/*
- * To save scheduling time, helper calls are broken into two parts: generation of
- * the helper target address, and the actuall call to the helper.  Because x86
- * has a memory call operation, part 1 is a NOP for x86.  For other targets,
- * load arguments between the two parts.
- */
-int CallHelperSetup(CompilationUnit* cu, int helper_offset)
-{
-  return (cu->instruction_set == kX86) ? 0 : LoadHelper(cu, helper_offset);
-}
-
-/* NOTE: if r_tgt is a temp, it will be freed following use */
-LIR* CallHelper(CompilationUnit* cu, int r_tgt, int helper_offset, bool safepoint_pc)
-{
-  LIR* call_inst;
-  if (cu->instruction_set == kX86) {
-    call_inst = OpThreadMem(cu, kOpBlx, helper_offset);
-  } else {
-    call_inst = OpReg(cu, kOpBlx, r_tgt);
-    FreeTemp(cu, r_tgt);
-  }
-  if (safepoint_pc) {
-    MarkSafepointPC(cu, call_inst);
-  }
-  return call_inst;
-}
-
-void CallRuntimeHelperImm(CompilationUnit* cu, int helper_offset, int arg0, bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  LoadConstant(cu, TargetReg(kArg0), arg0);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
-}
-
-void CallRuntimeHelperReg(CompilationUnit* cu, int helper_offset, int arg0, bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  OpRegCopy(cu, TargetReg(kArg0), arg0);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
-}
-
-void CallRuntimeHelperRegLocation(CompilationUnit* cu, int helper_offset, RegLocation arg0,
-                                  bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  if (arg0.wide == 0) {
-    LoadValueDirectFixed(cu, arg0, TargetReg(kArg0));
-  } else {
-    LoadValueDirectWideFixed(cu, arg0, TargetReg(kArg0), TargetReg(kArg1));
-  }
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
-}
-
-void CallRuntimeHelperImmImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
-                             bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  LoadConstant(cu, TargetReg(kArg0), arg0);
-  LoadConstant(cu, TargetReg(kArg1), arg1);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
-}
-
-void CallRuntimeHelperImmRegLocation(CompilationUnit* cu, int helper_offset, int arg0,
-                                     RegLocation arg1, bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  if (arg1.wide == 0) {
-    LoadValueDirectFixed(cu, arg1, TargetReg(kArg1));
-  } else {
-    LoadValueDirectWideFixed(cu, arg1, TargetReg(kArg1), TargetReg(kArg2));
-  }
-  LoadConstant(cu, TargetReg(kArg0), arg0);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
-}
-
-void CallRuntimeHelperRegLocationImm(CompilationUnit* cu, int helper_offset, RegLocation arg0,
-                                     int arg1, bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  LoadValueDirectFixed(cu, arg0, TargetReg(kArg0));
-  LoadConstant(cu, TargetReg(kArg1), arg1);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
-}
-
-void CallRuntimeHelperImmReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
-                             bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  OpRegCopy(cu, TargetReg(kArg1), arg1);
-  LoadConstant(cu, TargetReg(kArg0), arg0);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
-}
-
-void CallRuntimeHelperRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
-                             bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  OpRegCopy(cu, TargetReg(kArg0), arg0);
-  LoadConstant(cu, TargetReg(kArg1), arg1);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
-}
-
-void CallRuntimeHelperImmMethod(CompilationUnit* cu, int helper_offset, int arg0, bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  LoadCurrMethodDirect(cu, TargetReg(kArg1));
-  LoadConstant(cu, TargetReg(kArg0), arg0);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
-}
-
-void CallRuntimeHelperRegLocationRegLocation(CompilationUnit* cu, int helper_offset,
-                                             RegLocation arg0, RegLocation arg1, bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  if (arg0.wide == 0) {
-    LoadValueDirectFixed(cu, arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
-    if (arg1.wide == 0) {
-      if (cu->instruction_set == kMips) {
-        LoadValueDirectFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
-      } else {
-        LoadValueDirectFixed(cu, arg1, TargetReg(kArg1));
-      }
-    } else {
-      if (cu->instruction_set == kMips) {
-        LoadValueDirectWideFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg2));
-      } else {
-        LoadValueDirectWideFixed(cu, arg1, TargetReg(kArg1), TargetReg(kArg2));
-      }
-    }
-  } else {
-    LoadValueDirectWideFixed(cu, arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0), arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
-    if (arg1.wide == 0) {
-      LoadValueDirectFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
-    } else {
-      LoadValueDirectWideFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3));
-    }
-  }
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
-}
-
-void CallRuntimeHelperRegReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
-                             bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
-  OpRegCopy(cu, TargetReg(kArg0), arg0);
-  OpRegCopy(cu, TargetReg(kArg1), arg1);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
-}
-
-void CallRuntimeHelperRegRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
-                                int arg2, bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
-  OpRegCopy(cu, TargetReg(kArg0), arg0);
-  OpRegCopy(cu, TargetReg(kArg1), arg1);
-  LoadConstant(cu, TargetReg(kArg2), arg2);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
-}
-
-void CallRuntimeHelperImmMethodRegLocation(CompilationUnit* cu, int helper_offset, int arg0,
-                                           RegLocation arg2, bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  LoadValueDirectFixed(cu, arg2, TargetReg(kArg2));
-  LoadCurrMethodDirect(cu, TargetReg(kArg1));
-  LoadConstant(cu, TargetReg(kArg0), arg0);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
-}
-
-void CallRuntimeHelperImmMethodImm(CompilationUnit* cu, int helper_offset, int arg0, int arg2,
-                                   bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  LoadCurrMethodDirect(cu, TargetReg(kArg1));
-  LoadConstant(cu, TargetReg(kArg2), arg2);
-  LoadConstant(cu, TargetReg(kArg0), arg0);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
-}
-
-void CallRuntimeHelperImmRegLocationRegLocation(CompilationUnit* cu, int helper_offset,
-                                                int arg0, RegLocation arg1, RegLocation arg2,
-                                                bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  LoadValueDirectFixed(cu, arg1, TargetReg(kArg1));
-  if (arg2.wide == 0) {
-    LoadValueDirectFixed(cu, arg2, TargetReg(kArg2));
-  } else {
-    LoadValueDirectWideFixed(cu, arg2, TargetReg(kArg2), TargetReg(kArg3));
-  }
-  LoadConstant(cu, TargetReg(kArg0), arg0);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
-}
-
 /*
  * Generate an kPseudoBarrier marker to indicate the boundary of special
  * blocks.
  */
-void GenBarrier(CompilationUnit* cu)
+void Codegen::GenBarrier(CompilationUnit* cu)
 {
   LIR* barrier = NewLIR0(cu, kPseudoBarrier);
   /* Mark all resources as being clobbered */
   barrier->def_mask = -1;
 }
 
-
-/* Generate unconditional branch instructions */
-LIR* OpUnconditionalBranch(CompilationUnit* cu, LIR* target)
-{
-  LIR* branch = OpBranchUnconditional(cu, kOpUncondBr);
-  branch->target = target;
-  return branch;
-}
-
 // FIXME: need to do some work to split out targets with
 // condition codes and those without
-LIR* GenCheck(CompilationUnit* cu, ConditionCode c_code,
-              ThrowKind kind)
+LIR* Codegen::GenCheck(CompilationUnit* cu, ConditionCode c_code, ThrowKind kind)
 {
   DCHECK_NE(cu->instruction_set, kMips);
   LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kind,
@@ -267,8 +51,8 @@
   return branch;
 }
 
-LIR* GenImmedCheck(CompilationUnit* cu, ConditionCode c_code,
-                   int reg, int imm_val, ThrowKind kind)
+LIR* Codegen::GenImmedCheck(CompilationUnit* cu, ConditionCode c_code, int reg, int imm_val,
+                            ThrowKind kind)
 {
   LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kind,
                     cu->current_dalvik_offset);
@@ -284,7 +68,7 @@
 }
 
 /* Perform null-check on a register.  */
-LIR* GenNullCheck(CompilationUnit* cu, int s_reg, int m_reg, int opt_flags)
+LIR* Codegen::GenNullCheck(CompilationUnit* cu, int s_reg, int m_reg, int opt_flags)
 {
   if (!(cu->disable_opt & (1 << kNullCheckElimination)) &&
     opt_flags & MIR_IGNORE_NULL_CHECK) {
@@ -294,8 +78,8 @@
 }
 
 /* Perform check on two registers */
-LIR* GenRegRegCheck(CompilationUnit* cu, ConditionCode c_code,
-                    int reg1, int reg2, ThrowKind kind)
+LIR* Codegen::GenRegRegCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int reg2,
+                             ThrowKind kind)
 {
   LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kind,
                     cu->current_dalvik_offset, reg1, reg2);
@@ -305,9 +89,9 @@
   return branch;
 }
 
-void GenCompareAndBranch(CompilationUnit* cu, Instruction::Code opcode,
-                         RegLocation rl_src1, RegLocation rl_src2, LIR* taken,
-                         LIR* fall_through)
+void Codegen::GenCompareAndBranch(CompilationUnit* cu, Instruction::Code opcode,
+                                  RegLocation rl_src1, RegLocation rl_src2, LIR* taken,
+                                  LIR* fall_through)
 {
   ConditionCode cond;
   rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
@@ -339,8 +123,8 @@
   OpUnconditionalBranch(cu, fall_through);
 }
 
-void GenCompareZeroAndBranch(CompilationUnit* cu, Instruction::Code opcode,
-                             RegLocation rl_src, LIR* taken, LIR* fall_through)
+void Codegen::GenCompareZeroAndBranch(CompilationUnit* cu, Instruction::Code opcode,
+                                      RegLocation rl_src, LIR* taken, LIR* fall_through)
 {
   ConditionCode cond;
   rl_src = LoadValue(cu, rl_src, kCoreReg);
@@ -376,8 +160,7 @@
   OpUnconditionalBranch(cu, fall_through);
 }
 
-void GenIntToLong(CompilationUnit* cu, RegLocation rl_dest,
-                  RegLocation rl_src)
+void Codegen::GenIntToLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
 {
   RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
   if (rl_src.location == kLocPhysReg) {
@@ -389,8 +172,8 @@
   StoreValueWide(cu, rl_dest, rl_result);
 }
 
-void GenIntNarrowing(CompilationUnit* cu, Instruction::Code opcode,
-                     RegLocation rl_dest, RegLocation rl_src)
+void Codegen::GenIntNarrowing(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                              RegLocation rl_src)
 {
    rl_src = LoadValue(cu, rl_src, kCoreReg);
    RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
@@ -417,8 +200,8 @@
  * Array::AllocFromCode(type_idx, method, count);
  * Note: AllocFromCode will handle checks for errNegativeArraySize.
  */
-void GenNewArray(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest,
-                 RegLocation rl_src)
+void Codegen::GenNewArray(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest,
+                          RegLocation rl_src)
 {
   FlushAllRegs(cu);  /* Everything to home location */
   int func_offset;
@@ -440,7 +223,7 @@
  * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
  * Current code also throws internal unimp if not 'L', '[' or 'I'.
  */
-void GenFilledNewArray(CompilationUnit* cu, CallInfo* info)
+void Codegen::GenFilledNewArray(CompilationUnit* cu, CallInfo* info)
 {
   int elems = info->num_arg_words;
   int type_idx = info->index;
@@ -546,8 +329,8 @@
   }
 }
 
-void GenSput(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_src,
-       bool is_long_or_double, bool is_object)
+void Codegen::GenSput(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_src,
+                      bool is_long_or_double, bool is_object)
 {
   int field_offset;
   int ssb_index;
@@ -638,8 +421,8 @@
   }
 }
 
-void GenSget(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_dest,
-       bool is_long_or_double, bool is_object)
+void Codegen::GenSget(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_dest,
+                      bool is_long_or_double, bool is_object)
 {
   int field_offset;
   int ssb_index;
@@ -732,7 +515,7 @@
 
 
 // Debugging routine - if null target, branch to DebugMe
-void GenShowTarget(CompilationUnit* cu)
+void Codegen::GenShowTarget(CompilationUnit* cu)
 {
   DCHECK_NE(cu->instruction_set, kX86) << "unimplemented GenShowTarget";
   LIR* branch_over = OpCmpImmBranch(cu, kCondNe, TargetReg(kInvokeTgt), 0, NULL);
@@ -741,7 +524,7 @@
   branch_over->target = target;
 }
 
-void HandleSuspendLaunchPads(CompilationUnit *cu)
+void Codegen::HandleSuspendLaunchPads(CompilationUnit *cu)
 {
   LIR** suspend_label = reinterpret_cast<LIR**>(cu->suspend_launchpads.elem_list);
   int num_elems = cu->suspend_launchpads.num_used;
@@ -759,7 +542,7 @@
   }
 }
 
-void HandleIntrinsicLaunchPads(CompilationUnit *cu)
+void Codegen::HandleIntrinsicLaunchPads(CompilationUnit *cu)
 {
   LIR** intrinsic_label = reinterpret_cast<LIR**>(cu->intrinsic_launchpads.elem_list);
   int num_elems = cu->intrinsic_launchpads.num_used;
@@ -779,7 +562,7 @@
   }
 }
 
-void HandleThrowLaunchPads(CompilationUnit *cu)
+void Codegen::HandleThrowLaunchPads(CompilationUnit *cu)
 {
   LIR** throw_label = reinterpret_cast<LIR**>(cu->throw_launchpads.elem_list);
   int num_elems = cu->throw_launchpads.num_used;
@@ -856,20 +639,9 @@
   }
 }
 
-bool FastInstance(CompilationUnit* cu,  uint32_t field_idx,
-                  int& field_offset, bool& is_volatile, bool is_put)
-{
-  OatCompilationUnit m_unit(cu->class_loader, cu->class_linker,
-               *cu->dex_file,
-               cu->code_item, cu->method_idx,
-               cu->access_flags);
-  return cu->compiler->ComputeInstanceFieldInfo(field_idx, &m_unit,
-           field_offset, is_volatile, is_put);
-}
-
-void GenIGet(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size,
-             RegLocation rl_dest, RegLocation rl_obj,
-             bool is_long_or_double, bool is_object)
+void Codegen::GenIGet(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size,
+                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
+                      bool is_object)
 {
   int field_offset;
   bool is_volatile;
@@ -928,8 +700,9 @@
   }
 }
 
-void GenIPut(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size,
-             RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object)
+void Codegen::GenIPut(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size,
+                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
+                      bool is_object)
 {
   int field_offset;
   bool is_volatile;
@@ -976,8 +749,7 @@
   }
 }
 
-void GenConstClass(CompilationUnit* cu, uint32_t type_idx,
-                   RegLocation rl_dest)
+void Codegen::GenConstClass(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest)
 {
   RegLocation rl_method = LoadCurrMethod(cu);
   int res_reg = AllocTemp(cu);
@@ -1036,8 +808,7 @@
   }
 }
 
-void GenConstString(CompilationUnit* cu, uint32_t string_idx,
-                    RegLocation rl_dest)
+void Codegen::GenConstString(CompilationUnit* cu, uint32_t string_idx, RegLocation rl_dest)
 {
   /* NOTE: Most strings should be available at compile time */
   int32_t offset_of_string = Array::DataOffset(sizeof(String*)).Int32Value() +
@@ -1059,7 +830,7 @@
       GenBarrier(cu);
       // For testing, always force through helper
       if (!EXERCISE_SLOWEST_STRING_PATH) {
-        OpIT(cu, kArmCondEq, "T");
+        OpIT(cu, kCondEq, "T");
       }
       OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2));   // .eq
       LIR* call_inst = OpReg(cu, kOpBlx, r_tgt);    // .eq, helper(Method*, string_idx)
@@ -1094,7 +865,7 @@
  * Let helper function take care of everything.  Will
  * call Class::NewInstanceFromCode(type_idx, method);
  */
-void GenNewInstance(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest)
+void Codegen::GenNewInstance(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest)
 {
   FlushAllRegs(cu);  /* Everything to home location */
   // alloc will always check for resolution, do we also need to verify
@@ -1111,7 +882,7 @@
   StoreValue(cu, rl_dest, rl_result);
 }
 
-void GenMoveException(CompilationUnit* cu, RegLocation rl_dest)
+void Codegen::GenMoveException(CompilationUnit* cu, RegLocation rl_dest)
 {
   FlushAllRegs(cu);  /* Everything to home location */
   int func_offset = ENTRYPOINT_OFFSET(pGetAndClearException);
@@ -1125,14 +896,14 @@
   StoreValue(cu, rl_dest, rl_result);
 }
 
-void GenThrow(CompilationUnit* cu, RegLocation rl_src)
+void Codegen::GenThrow(CompilationUnit* cu, RegLocation rl_src)
 {
   FlushAllRegs(cu);
   CallRuntimeHelperRegLocation(cu, ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
 }
 
-void GenInstanceof(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest,
-                   RegLocation rl_src)
+void Codegen::GenInstanceof(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest,
+                            RegLocation rl_src)
 {
   FlushAllRegs(cu);
   // May generate a call - use explicit registers
@@ -1187,7 +958,7 @@
     /* Uses conditional nullification */
     int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
     OpRegReg(cu, kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
-    OpIT(cu, kArmCondEq, "EE");   // if-convert the test
+    OpIT(cu, kCondEq, "EE");   // if-convert the test
     LoadConstant(cu, TargetReg(kArg0), 1);     // .eq case - load true
     OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
     call_inst = OpReg(cu, kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
@@ -1217,7 +988,7 @@
   }
 }
 
-void GenCheckCast(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_src)
+void Codegen::GenCheckCast(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_src)
 {
   FlushAllRegs(cu);
   // May generate a call - use explicit registers
@@ -1289,8 +1060,8 @@
  * Generate array store
  *
  */
-void GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
-          RegLocation rl_index, RegLocation rl_src, int scale)
+void Codegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+                             RegLocation rl_index, RegLocation rl_src, int scale)
 {
   int len_offset = Array::LengthOffset().Int32Value();
   int data_offset = Array::DataOffset(sizeof(Object*)).Int32Value();
@@ -1358,9 +1129,8 @@
 /*
  * Generate array load
  */
-void GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size,
-                 RegLocation rl_array, RegLocation rl_index,
-                 RegLocation rl_dest, int scale)
+void Codegen::GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+                          RegLocation rl_index, RegLocation rl_dest, int scale)
 {
   RegisterClass reg_class = oat_reg_class_by_size(size);
   int len_offset = Array::LengthOffset().Int32Value();
@@ -1457,9 +1227,8 @@
  * Generate array store
  *
  */
-void GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size,
-                 RegLocation rl_array, RegLocation rl_index,
-                 RegLocation rl_src, int scale)
+void Codegen::GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+                          RegLocation rl_index, RegLocation rl_src, int scale)
 {
   RegisterClass reg_class = oat_reg_class_by_size(size);
   int len_offset = Array::LengthOffset().Int32Value();
@@ -1551,9 +1320,8 @@
   }
 }
 
-void GenLong3Addr(CompilationUnit* cu, OpKind first_op,
-                  OpKind second_op, RegLocation rl_dest,
-                  RegLocation rl_src1, RegLocation rl_src2)
+void Codegen::GenLong3Addr(CompilationUnit* cu, OpKind first_op, OpKind second_op,
+                           RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
 {
   RegLocation rl_result;
   if (cu->instruction_set == kThumb2) {
@@ -1600,8 +1368,8 @@
 }
 
 
-bool GenShiftOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
-                    RegLocation rl_src1, RegLocation rl_shift)
+bool Codegen::GenShiftOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                             RegLocation rl_src1, RegLocation rl_shift)
 {
   int func_offset;
 
@@ -1630,8 +1398,8 @@
 }
 
 
-bool GenArithOpInt(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
-           RegLocation rl_src1, RegLocation rl_src2)
+bool Codegen::GenArithOpInt(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                            RegLocation rl_src1, RegLocation rl_src2)
 {
   OpKind op = kOpBkpt;
   bool is_div_rem = false;
@@ -1801,9 +1569,10 @@
   if ((lit < 2) || ((cu->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
     return false;
   }
+  Codegen* cg = cu->cg.get();
   // No divide instruction for Arm, so check for more special cases
   if ((cu->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
-    return SmallLiteralDivide(cu, dalvik_opcode, rl_src, rl_dest, lit);
+    return cg->SmallLiteralDivide(cu, dalvik_opcode, rl_src, rl_dest, lit);
   }
   int k = LowestSetBit(lit);
   if (k >= 30) {
@@ -1812,38 +1581,38 @@
   }
   bool div = (dalvik_opcode == Instruction::DIV_INT_LIT8 ||
       dalvik_opcode == Instruction::DIV_INT_LIT16);
-  rl_src = LoadValue(cu, rl_src, kCoreReg);
+  rl_src = cg->LoadValue(cu, rl_src, kCoreReg);
   RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
   if (div) {
     int t_reg = AllocTemp(cu);
     if (lit == 2) {
       // Division by 2 is by far the most common division by constant.
-      OpRegRegImm(cu, kOpLsr, t_reg, rl_src.low_reg, 32 - k);
-      OpRegRegReg(cu, kOpAdd, t_reg, t_reg, rl_src.low_reg);
-      OpRegRegImm(cu, kOpAsr, rl_result.low_reg, t_reg, k);
+      cg->OpRegRegImm(cu, kOpLsr, t_reg, rl_src.low_reg, 32 - k);
+      cg->OpRegRegReg(cu, kOpAdd, t_reg, t_reg, rl_src.low_reg);
+      cg->OpRegRegImm(cu, kOpAsr, rl_result.low_reg, t_reg, k);
     } else {
-      OpRegRegImm(cu, kOpAsr, t_reg, rl_src.low_reg, 31);
-      OpRegRegImm(cu, kOpLsr, t_reg, t_reg, 32 - k);
-      OpRegRegReg(cu, kOpAdd, t_reg, t_reg, rl_src.low_reg);
-      OpRegRegImm(cu, kOpAsr, rl_result.low_reg, t_reg, k);
+      cg->OpRegRegImm(cu, kOpAsr, t_reg, rl_src.low_reg, 31);
+      cg->OpRegRegImm(cu, kOpLsr, t_reg, t_reg, 32 - k);
+      cg->OpRegRegReg(cu, kOpAdd, t_reg, t_reg, rl_src.low_reg);
+      cg->OpRegRegImm(cu, kOpAsr, rl_result.low_reg, t_reg, k);
     }
   } else {
     int t_reg1 = AllocTemp(cu);
     int t_reg2 = AllocTemp(cu);
     if (lit == 2) {
-      OpRegRegImm(cu, kOpLsr, t_reg1, rl_src.low_reg, 32 - k);
-      OpRegRegReg(cu, kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
-      OpRegRegImm(cu, kOpAnd, t_reg2, t_reg2, lit -1);
-      OpRegRegReg(cu, kOpSub, rl_result.low_reg, t_reg2, t_reg1);
+      cg->OpRegRegImm(cu, kOpLsr, t_reg1, rl_src.low_reg, 32 - k);
+      cg->OpRegRegReg(cu, kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
+      cg->OpRegRegImm(cu, kOpAnd, t_reg2, t_reg2, lit -1);
+      cg->OpRegRegReg(cu, kOpSub, rl_result.low_reg, t_reg2, t_reg1);
     } else {
-      OpRegRegImm(cu, kOpAsr, t_reg1, rl_src.low_reg, 31);
-      OpRegRegImm(cu, kOpLsr, t_reg1, t_reg1, 32 - k);
-      OpRegRegReg(cu, kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
-      OpRegRegImm(cu, kOpAnd, t_reg2, t_reg2, lit - 1);
-      OpRegRegReg(cu, kOpSub, rl_result.low_reg, t_reg2, t_reg1);
+      cg->OpRegRegImm(cu, kOpAsr, t_reg1, rl_src.low_reg, 31);
+      cg->OpRegRegImm(cu, kOpLsr, t_reg1, t_reg1, 32 - k);
+      cg->OpRegRegReg(cu, kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
+      cg->OpRegRegImm(cu, kOpAnd, t_reg2, t_reg2, lit - 1);
+      cg->OpRegRegReg(cu, kOpSub, rl_result.low_reg, t_reg2, t_reg1);
     }
   }
-  StoreValue(cu, rl_dest, rl_result);
+  cg->StoreValue(cu, rl_dest, rl_result);
   return true;
 }
 
@@ -1868,32 +1637,31 @@
   } else {
     return false;
   }
-  rl_src = LoadValue(cu, rl_src, kCoreReg);
+  Codegen* cg = cu->cg.get();
+  rl_src = cg->LoadValue(cu, rl_src, kCoreReg);
   RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
   if (power_of_two) {
     // Shift.
-    OpRegRegImm(cu, kOpLsl, rl_result.low_reg, rl_src.low_reg,
-                LowestSetBit(lit));
+    cg->OpRegRegImm(cu, kOpLsl, rl_result.low_reg, rl_src.low_reg, LowestSetBit(lit));
   } else if (pop_count_le2) {
     // Shift and add and shift.
     int first_bit = LowestSetBit(lit);
     int second_bit = LowestSetBit(lit ^ (1 << first_bit));
-    GenMultiplyByTwoBitMultiplier(cu, rl_src, rl_result, lit,
-                                  first_bit, second_bit);
+    cg->GenMultiplyByTwoBitMultiplier(cu, rl_src, rl_result, lit, first_bit, second_bit);
   } else {
     // Reverse subtract: (src << (shift + 1)) - src.
     DCHECK(power_of_two_minus_one);
     // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
     int t_reg = AllocTemp(cu);
-    OpRegRegImm(cu, kOpLsl, t_reg, rl_src.low_reg, LowestSetBit(lit + 1));
-    OpRegRegReg(cu, kOpSub, rl_result.low_reg, t_reg, rl_src.low_reg);
+    cg->OpRegRegImm(cu, kOpLsl, t_reg, rl_src.low_reg, LowestSetBit(lit + 1));
+    cg->OpRegRegReg(cu, kOpSub, rl_result.low_reg, t_reg, rl_src.low_reg);
   }
-  StoreValue(cu, rl_dest, rl_result);
+  cg->StoreValue(cu, rl_dest, rl_result);
   return true;
 }
 
-bool GenArithOpIntLit(CompilationUnit* cu, Instruction::Code opcode,
-                      RegLocation rl_dest, RegLocation rl_src, int lit)
+bool Codegen::GenArithOpIntLit(CompilationUnit* cu, Instruction::Code opcode,
+                               RegLocation rl_dest, RegLocation rl_src, int lit)
 {
   RegLocation rl_result;
   OpKind op = static_cast<OpKind>(0);    /* Make gcc happy */
@@ -2008,8 +1776,8 @@
   return false;
 }
 
-bool GenArithOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
-          RegLocation rl_src1, RegLocation rl_src2)
+bool Codegen::GenArithOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                             RegLocation rl_src1, RegLocation rl_src2)
 {
   RegLocation rl_result;
   OpKind first_op = kOpBkpt;
@@ -2129,8 +1897,8 @@
   return false;
 }
 
-bool GenConversionCall(CompilationUnit* cu, int func_offset,
-                       RegLocation rl_dest, RegLocation rl_src)
+bool Codegen::GenConversionCall(CompilationUnit* cu, int func_offset,
+                                RegLocation rl_dest, RegLocation rl_src)
 {
   /*
    * Don't optimize the register usage since it calls out to support
@@ -2156,9 +1924,9 @@
   return false;
 }
 
-bool GenArithOpFloatPortable(CompilationUnit* cu, Instruction::Code opcode,
-                             RegLocation rl_dest, RegLocation rl_src1,
-                             RegLocation rl_src2)
+bool Codegen::GenArithOpFloatPortable(CompilationUnit* cu, Instruction::Code opcode,
+                                      RegLocation rl_dest, RegLocation rl_src1,
+                                      RegLocation rl_src2)
 {
   RegLocation rl_result;
   int func_offset;
@@ -2198,9 +1966,9 @@
   return false;
 }
 
-bool GenArithOpDoublePortable(CompilationUnit* cu, Instruction::Code opcode,
-                              RegLocation rl_dest, RegLocation rl_src1,
-                              RegLocation rl_src2)
+bool Codegen::GenArithOpDoublePortable(CompilationUnit* cu, Instruction::Code opcode,
+                                       RegLocation rl_dest, RegLocation rl_src1,
+                                       RegLocation rl_src2)
 {
   RegLocation rl_result;
   int func_offset;
@@ -2240,8 +2008,8 @@
   return false;
 }
 
-bool GenConversionPortable(CompilationUnit* cu, Instruction::Code opcode,
-                           RegLocation rl_dest, RegLocation rl_src)
+bool Codegen::GenConversionPortable(CompilationUnit* cu, Instruction::Code opcode,
+                                    RegLocation rl_dest, RegLocation rl_src)
 {
 
   switch (opcode) {
@@ -2282,7 +2050,7 @@
 }
 
 /* Check if we need to check for pending suspend request */
-void GenSuspendTest(CompilationUnit* cu, int opt_flags)
+void Codegen::GenSuspendTest(CompilationUnit* cu, int opt_flags)
 {
   if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
     return;
@@ -2297,7 +2065,7 @@
 }
 
 /* Check if we need to check for pending suspend request */
-void GenSuspendTestAndBranch(CompilationUnit* cu, int opt_flags, LIR* target)
+void Codegen::GenSuspendTestAndBranch(CompilationUnit* cu, int opt_flags, LIR* target)
 {
   if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
     OpUnconditionalBranch(cu, target);
diff --git a/src/compiler/codegen/gen_common.h b/src/compiler/codegen/gen_common.h
deleted file mode 100644
index 413d828..0000000
--- a/src/compiler/codegen/gen_common.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_COMPILER_CODEGEN_GENCOMMON_H_
-#define ART_SRC_COMPILER_CODEGEN_GENCOMMON_H_
-
-void MarkSafepointPC(CompilationUnit* cu, LIR* inst);
-void CallRuntimeHelperImm(CompilationUnit* cu, int helper_offset, int arg0, bool safepoint_pc);
-void CallRuntimeHelperReg(CompilationUnit* cu, int helper_offset, int arg0, bool safepoint_pc);
-void CallRuntimeHelperRegLocation(CompilationUnit* cu, int helper_offset, RegLocation arg0, bool safepoint_pc);
-void CallRuntimeHelperImmImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1, bool safepoint_pc);
-void CallRuntimeHelperImmRegLocation(CompilationUnit* cu, int helper_offset, int arg0, RegLocation arg1, bool safepoint_pc);
-void CallRuntimeHelperRegLocationImm(CompilationUnit* cu, int helper_offset, RegLocation arg0, int arg1, bool safepoint_pc);
-void CallRuntimeHelperImmReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1, bool safepoint_pc);
-void CallRuntimeHelperRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1, bool safepoint_pc);
-void CallRuntimeHelperImmMethod(CompilationUnit* cu, int helper_offset, int arg0, bool safepoint_pc);
-void CallRuntimeHelperRegLocationRegLocation(CompilationUnit* cu, int helper_offset, RegLocation arg0, RegLocation arg1, bool safepoint_pc);
-void CallRuntimeHelperRegReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1, bool safepoint_pc);
-void CallRuntimeHelperRegRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1, int arg2, bool safepoint_pc);
-void CallRuntimeHelperImmMethodRegLocation(CompilationUnit* cu, int helper_offset, int arg0, RegLocation arg2, bool safepoint_pc);
-void CallRuntimeHelperImmMethodImm(CompilationUnit* cu, int helper_offset, int arg0, int arg2, bool safepoint_pc);
-void CallRuntimeHelperImmRegLocationRegLocation(CompilationUnit* cu, int helper_offset, int arg0, RegLocation arg1, RegLocation arg2, bool safepoint_pc);
-void GenBarrier(CompilationUnit* cu);
-LIR* OpUnconditionalBranch(CompilationUnit* cu, LIR* target);
-LIR* GenCheck(CompilationUnit* cu, ConditionCode c_code, ThrowKind kind);
-LIR* GenImmedCheck(CompilationUnit* cu, ConditionCode c_code, int reg, int imm_val, ThrowKind kind);
-LIR* GenNullCheck(CompilationUnit* cu, int s_reg, int m_reg, int opt_flags);
-LIR* GenRegRegCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int reg2, ThrowKind kind);
-void GenCompareAndBranch(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_src1, RegLocation rl_src2, LIR* taken, LIR* fall_through);
-void GenCompareZeroAndBranch(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_src, LIR* taken, LIR* fall_through);
-void GenIntToLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-void GenIntNarrowing(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
-void GenNewArray(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);
-void GenFilledNewArray(CompilationUnit* cu, CallInfo* info);
-void GenSput(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_src, bool is_long_or_double, bool is_object);
-void GenSget(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_dest, bool is_long_or_double, bool is_object);
-void GenShowTarget(CompilationUnit* cu);
-void HandleSuspendLaunchPads(CompilationUnit *cu);
-void HandleIntrinsicLaunchPads(CompilationUnit *cu);
-void HandleThrowLaunchPads(CompilationUnit *cu);
-void SetupResourceMasks(CompilationUnit* cu, LIR* lir);
-bool FastInstance(CompilationUnit* cu,  uint32_t field_idx, int& field_offset, bool& is_volatile, bool is_put);
-void GenIGet(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size, RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
-void GenIPut(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size, RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
-void GenConstClass(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest);
-void GenConstString(CompilationUnit* cu, uint32_t string_idx, RegLocation rl_dest);
-void GenNewInstance(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest);
-void GenMoveException(CompilationUnit* cu, RegLocation rl_dest);
-void GenThrow(CompilationUnit* cu, RegLocation rl_src);
-void GenInstanceof(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);
-void GenCheckCast(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_src);
-void GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array, RegLocation rl_index, RegLocation rl_src, int scale);
-void GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index, RegLocation rl_dest, int scale);
-void GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index, RegLocation rl_src, int scale);
-void GenLong3Addr(CompilationUnit* cu, OpKind first_op, OpKind second_op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-bool GenShiftOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_shift);
-bool GenArithOpInt(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-bool GenArithOpIntLit(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src, int lit);
-bool GenArithOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-bool GenConversionCall(CompilationUnit* cu, int func_offset, RegLocation rl_dest, RegLocation rl_src);
-bool GenArithOpFloatPortable(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-bool GenArithOpDoublePortable(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-bool GenConversionPortable(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
-void GenSuspendTest(CompilationUnit* cu, int opt_flags);
-void GenSuspendTestAndBranch(CompilationUnit* cu, int opt_flags, LIR* target);
-
-#endif // ART_SRC_COMPILER_CODEGEN_GENCOMMON_H_
diff --git a/src/compiler/codegen/gen_invoke.cc b/src/compiler/codegen/gen_invoke.cc
index 91d3db1..41924e2 100644
--- a/src/compiler/codegen/gen_invoke.cc
+++ b/src/compiler/codegen/gen_invoke.cc
@@ -28,6 +28,206 @@
  */
 
 /*
+ * To save scheduling time, helper calls are broken into two parts: generation of
+ * the helper target address, and the actual call to the helper.  Because x86
+ * has a memory call operation, part 1 is a NOP for x86.  For other targets,
+ * load arguments between the two parts.
+ */
+int Codegen::CallHelperSetup(CompilationUnit* cu, int helper_offset)
+{
+  return (cu->instruction_set == kX86) ? 0 : LoadHelper(cu, helper_offset);
+}
+
+/* NOTE: if r_tgt is a temp, it will be freed following use */
+LIR* Codegen::CallHelper(CompilationUnit* cu, int r_tgt, int helper_offset, bool safepoint_pc)
+{
+  LIR* call_inst;
+  if (cu->instruction_set == kX86) {
+    call_inst = OpThreadMem(cu, kOpBlx, helper_offset);
+  } else {
+    call_inst = OpReg(cu, kOpBlx, r_tgt);
+    FreeTemp(cu, r_tgt);
+  }
+  if (safepoint_pc) {
+    MarkSafepointPC(cu, call_inst);
+  }
+  return call_inst;
+}
+
+void Codegen::CallRuntimeHelperImm(CompilationUnit* cu, int helper_offset, int arg0,
+                                   bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(cu, helper_offset);
+  LoadConstant(cu, TargetReg(kArg0), arg0);
+  ClobberCalleeSave(cu);
+  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperReg(CompilationUnit* cu, int helper_offset, int arg0,
+                                   bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(cu, helper_offset);
+  OpRegCopy(cu, TargetReg(kArg0), arg0);
+  ClobberCalleeSave(cu);
+  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperRegLocation(CompilationUnit* cu, int helper_offset, RegLocation arg0,
+                                          bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(cu, helper_offset);
+  if (arg0.wide == 0) {
+    LoadValueDirectFixed(cu, arg0, TargetReg(kArg0));
+  } else {
+    LoadValueDirectWideFixed(cu, arg0, TargetReg(kArg0), TargetReg(kArg1));
+  }
+  ClobberCalleeSave(cu);
+  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperImmImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+                                      bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(cu, helper_offset);
+  LoadConstant(cu, TargetReg(kArg0), arg0);
+  LoadConstant(cu, TargetReg(kArg1), arg1);
+  ClobberCalleeSave(cu);
+  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperImmRegLocation(CompilationUnit* cu, int helper_offset, int arg0,
+                                              RegLocation arg1, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(cu, helper_offset);
+  if (arg1.wide == 0) {
+    LoadValueDirectFixed(cu, arg1, TargetReg(kArg1));
+  } else {
+    LoadValueDirectWideFixed(cu, arg1, TargetReg(kArg1), TargetReg(kArg2));
+  }
+  LoadConstant(cu, TargetReg(kArg0), arg0);
+  ClobberCalleeSave(cu);
+  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperRegLocationImm(CompilationUnit* cu, int helper_offset,
+                                              RegLocation arg0, int arg1, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(cu, helper_offset);
+  LoadValueDirectFixed(cu, arg0, TargetReg(kArg0));
+  LoadConstant(cu, TargetReg(kArg1), arg1);
+  ClobberCalleeSave(cu);
+  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperImmReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+                                      bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(cu, helper_offset);
+  OpRegCopy(cu, TargetReg(kArg1), arg1);
+  LoadConstant(cu, TargetReg(kArg0), arg0);
+  ClobberCalleeSave(cu);
+  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+                             bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(cu, helper_offset);
+  OpRegCopy(cu, TargetReg(kArg0), arg0);
+  LoadConstant(cu, TargetReg(kArg1), arg1);
+  ClobberCalleeSave(cu);
+  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperImmMethod(CompilationUnit* cu, int helper_offset, int arg0,
+                                         bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(cu, helper_offset);
+  LoadCurrMethodDirect(cu, TargetReg(kArg1));
+  LoadConstant(cu, TargetReg(kArg0), arg0);
+  ClobberCalleeSave(cu);
+  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperRegLocationRegLocation(CompilationUnit* cu, int helper_offset,
+                                                      RegLocation arg0, RegLocation arg1,
+                                                      bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(cu, helper_offset);
+  if (arg0.wide == 0) {
+    LoadValueDirectFixed(cu, arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
+    if (arg1.wide == 0) {
+      if (cu->instruction_set == kMips) {
+        LoadValueDirectFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
+      } else {
+        LoadValueDirectFixed(cu, arg1, TargetReg(kArg1));
+      }
+    } else {
+      if (cu->instruction_set == kMips) {
+        LoadValueDirectWideFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg2));
+      } else {
+        LoadValueDirectWideFixed(cu, arg1, TargetReg(kArg1), TargetReg(kArg2));
+      }
+    }
+  } else {
+    LoadValueDirectWideFixed(cu, arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0), arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
+    if (arg1.wide == 0) {
+      LoadValueDirectFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
+    } else {
+      LoadValueDirectWideFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3));
+    }
+  }
+  ClobberCalleeSave(cu);
+  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperRegReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+                                      bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(cu, helper_offset);
+  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
+  OpRegCopy(cu, TargetReg(kArg0), arg0);
+  OpRegCopy(cu, TargetReg(kArg1), arg1);
+  ClobberCalleeSave(cu);
+  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperRegRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+                                         int arg2, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(cu, helper_offset);
+  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
+  OpRegCopy(cu, TargetReg(kArg0), arg0);
+  OpRegCopy(cu, TargetReg(kArg1), arg1);
+  LoadConstant(cu, TargetReg(kArg2), arg2);
+  ClobberCalleeSave(cu);
+  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperImmMethodRegLocation(CompilationUnit* cu, int helper_offset,
+                                                    int arg0, RegLocation arg2, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(cu, helper_offset);
+  LoadValueDirectFixed(cu, arg2, TargetReg(kArg2));
+  LoadCurrMethodDirect(cu, TargetReg(kArg1));
+  LoadConstant(cu, TargetReg(kArg0), arg0);
+  ClobberCalleeSave(cu);
+  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperImmMethodImm(CompilationUnit* cu, int helper_offset, int arg0,
+                                            int arg2, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(cu, helper_offset);
+  LoadCurrMethodDirect(cu, TargetReg(kArg1));
+  LoadConstant(cu, TargetReg(kArg2), arg2);
+  LoadConstant(cu, TargetReg(kArg0), arg0);
+  ClobberCalleeSave(cu);
+  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperImmRegLocationRegLocation(CompilationUnit* cu, int helper_offset,
+                                                         int arg0, RegLocation arg1,
+                                                         RegLocation arg2, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(cu, helper_offset);
+  LoadValueDirectFixed(cu, arg1, TargetReg(kArg1));
+  if (arg2.wide == 0) {
+    LoadValueDirectFixed(cu, arg2, TargetReg(kArg2));
+  } else {
+    LoadValueDirectWideFixed(cu, arg2, TargetReg(kArg2), TargetReg(kArg3));
+  }
+  LoadConstant(cu, TargetReg(kArg0), arg0);
+  ClobberCalleeSave(cu);
+  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+/*
  * If there are any ins passed in registers that have not been promoted
  * to a callee-save register, flush them to the frame.  Perform intial
  * assignment of promoted arguments.
@@ -35,7 +235,7 @@
  * ArgLocs is an array of location records describing the incoming arguments
  * with one location record per word of argument.
  */
-void FlushIns(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method)
+void Codegen::FlushIns(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method)
 {
   /*
    * Dummy up a RegLocation for the incoming Method*
@@ -119,6 +319,7 @@
                           uintptr_t direct_code, uintptr_t direct_method,
                           InvokeType type)
 {
+  Codegen* cg = cu->cg.get();
   if (cu->instruction_set != kThumb2) {
     // Disable sharpening
     direct_code = 0;
@@ -128,26 +329,26 @@
     switch (state) {
     case 0:  // Get the current Method* [sets kArg0]
       if (direct_code != static_cast<unsigned int>(-1)) {
-        LoadConstant(cu, TargetReg(kInvokeTgt), direct_code);
+        cg->LoadConstant(cu, cg->TargetReg(kInvokeTgt), direct_code);
       } else {
         LIR* data_target = ScanLiteralPool(cu->code_literal_list, dex_idx, 0);
         if (data_target == NULL) {
           data_target = AddWordData(cu, &cu->code_literal_list, dex_idx);
           data_target->operands[1] = type;
         }
-        LIR* load_pc_rel = OpPcRelLoad(cu, TargetReg(kInvokeTgt), data_target);
+        LIR* load_pc_rel = cg->OpPcRelLoad(cu, cg->TargetReg(kInvokeTgt), data_target);
         AppendLIR(cu, load_pc_rel);
         DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
       }
       if (direct_method != static_cast<unsigned int>(-1)) {
-        LoadConstant(cu, TargetReg(kArg0), direct_method);
+        cg->LoadConstant(cu, cg->TargetReg(kArg0), direct_method);
       } else {
         LIR* data_target = ScanLiteralPool(cu->method_literal_list, dex_idx, 0);
         if (data_target == NULL) {
           data_target = AddWordData(cu, &cu->method_literal_list, dex_idx);
           data_target->operands[1] = type;
         }
-        LIR* load_pc_rel = OpPcRelLoad(cu, TargetReg(kArg0), data_target);
+        LIR* load_pc_rel = cg->OpPcRelLoad(cu, cg->TargetReg(kArg0), data_target);
         AppendLIR(cu, load_pc_rel);
         DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
       }
@@ -159,36 +360,37 @@
     switch (state) {
     case 0:  // Get the current Method* [sets kArg0]
       // TUNING: we can save a reg copy if Method* has been promoted.
-      LoadCurrMethodDirect(cu, TargetReg(kArg0));
+      cg->LoadCurrMethodDirect(cu, cg->TargetReg(kArg0));
       break;
     case 1:  // Get method->dex_cache_resolved_methods_
-      LoadWordDisp(cu, TargetReg(kArg0),
-        AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(), TargetReg(kArg0));
+      cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
+        AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(), cg->TargetReg(kArg0));
       // Set up direct code if known.
       if (direct_code != 0) {
         if (direct_code != static_cast<unsigned int>(-1)) {
-          LoadConstant(cu, TargetReg(kInvokeTgt), direct_code);
+          cg->LoadConstant(cu, cg->TargetReg(kInvokeTgt), direct_code);
         } else {
           LIR* data_target = ScanLiteralPool(cu->code_literal_list, dex_idx, 0);
           if (data_target == NULL) {
             data_target = AddWordData(cu, &cu->code_literal_list, dex_idx);
             data_target->operands[1] = type;
           }
-          LIR* load_pc_rel = OpPcRelLoad(cu, TargetReg(kInvokeTgt), data_target);
+          LIR* load_pc_rel = cg->OpPcRelLoad(cu, cg->TargetReg(kInvokeTgt), data_target);
           AppendLIR(cu, load_pc_rel);
           DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
         }
       }
       break;
     case 2:  // Grab target method*
-      LoadWordDisp(cu, TargetReg(kArg0),
-                   Array::DataOffset(sizeof(Object*)).Int32Value() + dex_idx * 4, TargetReg(kArg0));
+      cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
+                       Array::DataOffset(sizeof(Object*)).Int32Value() + dex_idx * 4,
+                       cg-> TargetReg(kArg0));
       break;
     case 3:  // Grab the code from the method*
       if (cu->instruction_set != kX86) {
         if (direct_code == 0) {
-          LoadWordDisp(cu, TargetReg(kArg0), AbstractMethod::GetCodeOffset().Int32Value(),
-                       TargetReg(kInvokeTgt));
+          cg->LoadWordDisp(cu, cg->TargetReg(kArg0), AbstractMethod::GetCodeOffset().Int32Value(),
+                           cg->TargetReg(kInvokeTgt));
         }
         break;
       }
@@ -211,6 +413,7 @@
                          int state, uint32_t dex_idx, uint32_t method_idx,
                          uintptr_t unused, uintptr_t unused2, InvokeType unused3)
 {
+  Codegen* cg = cu->cg.get();
   /*
    * This is the fast path in which the target virtual method is
    * fully resolved at compile time.
@@ -218,27 +421,27 @@
   switch (state) {
     case 0: {  // Get "this" [set kArg1]
       RegLocation  rl_arg = info->args[0];
-      LoadValueDirectFixed(cu, rl_arg, TargetReg(kArg1));
+      cg->LoadValueDirectFixed(cu, rl_arg, cg->TargetReg(kArg1));
       break;
     }
     case 1: // Is "this" null? [use kArg1]
-      GenNullCheck(cu, info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
+      cg->GenNullCheck(cu, info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
       // get this->klass_ [use kArg1, set kInvokeTgt]
-      LoadWordDisp(cu, TargetReg(kArg1), Object::ClassOffset().Int32Value(),
-                   TargetReg(kInvokeTgt));
+      cg->LoadWordDisp(cu, cg->TargetReg(kArg1), Object::ClassOffset().Int32Value(),
+                       cg->TargetReg(kInvokeTgt));
       break;
     case 2: // Get this->klass_->vtable [usr kInvokeTgt, set kInvokeTgt]
-      LoadWordDisp(cu, TargetReg(kInvokeTgt), Class::VTableOffset().Int32Value(),
-                   TargetReg(kInvokeTgt));
+      cg->LoadWordDisp(cu, cg->TargetReg(kInvokeTgt), Class::VTableOffset().Int32Value(),
+                       cg->TargetReg(kInvokeTgt));
       break;
     case 3: // Get target method [use kInvokeTgt, set kArg0]
-      LoadWordDisp(cu, TargetReg(kInvokeTgt), (method_idx * 4) +
-                   Array::DataOffset(sizeof(Object*)).Int32Value(), TargetReg(kArg0));
+      cg->LoadWordDisp(cu, cg->TargetReg(kInvokeTgt), (method_idx * 4) +
+                       Array::DataOffset(sizeof(Object*)).Int32Value(), cg->TargetReg(kArg0));
       break;
     case 4: // Get the compiled code address [uses kArg0, sets kInvokeTgt]
       if (cu->instruction_set != kX86) {
-        LoadWordDisp(cu, TargetReg(kArg0), AbstractMethod::GetCodeOffset().Int32Value(),
-                     TargetReg(kInvokeTgt));
+        cg->LoadWordDisp(cu, cg->TargetReg(kArg0), AbstractMethod::GetCodeOffset().Int32Value(),
+                         cg->TargetReg(kInvokeTgt));
         break;
       }
       // Intentional fallthrough for X86
@@ -256,6 +459,7 @@
                                  uint32_t dex_idx, uint32_t unused, uintptr_t unused2,
                                  uintptr_t direct_method, InvokeType unused4)
 {
+  Codegen* cg = cu->cg.get();
   if (cu->instruction_set != kThumb2) {
     // Disable sharpening
     direct_method = 0;
@@ -267,18 +471,18 @@
     switch (state) {
       case 0:  // Load the trampoline target [sets kInvokeTgt].
         if (cu->instruction_set != kX86) {
-          LoadWordDisp(cu, TargetReg(kSelf), trampoline, TargetReg(kInvokeTgt));
+          cg->LoadWordDisp(cu, cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
         }
         // Get the interface Method* [sets kArg0]
         if (direct_method != static_cast<unsigned int>(-1)) {
-          LoadConstant(cu, TargetReg(kArg0), direct_method);
+          cg->LoadConstant(cu, cg->TargetReg(kArg0), direct_method);
         } else {
           LIR* data_target = ScanLiteralPool(cu->method_literal_list, dex_idx, 0);
           if (data_target == NULL) {
             data_target = AddWordData(cu, &cu->method_literal_list, dex_idx);
             data_target->operands[1] = kInterface;
           }
-          LIR* load_pc_rel = OpPcRelLoad(cu, TargetReg(kArg0), data_target);
+          LIR* load_pc_rel = cg->OpPcRelLoad(cu, cg->TargetReg(kArg0), data_target);
           AppendLIR(cu, load_pc_rel);
           DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
         }
@@ -290,21 +494,21 @@
     switch (state) {
       case 0:
         // Get the current Method* [sets kArg0] - TUNING: remove copy of method if it is promoted.
-        LoadCurrMethodDirect(cu, TargetReg(kArg0));
+        cg->LoadCurrMethodDirect(cu, cg->TargetReg(kArg0));
         // Load the trampoline target [sets kInvokeTgt].
         if (cu->instruction_set != kX86) {
-          LoadWordDisp(cu, TargetReg(kSelf), trampoline, TargetReg(kInvokeTgt));
+          cg->LoadWordDisp(cu, cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
         }
         break;
     case 1:  // Get method->dex_cache_resolved_methods_ [set/use kArg0]
-      LoadWordDisp(cu, TargetReg(kArg0),
-                   AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(),
-                   TargetReg(kArg0));
+      cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
+                       AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(),
+                       cg->TargetReg(kArg0));
       break;
     case 2:  // Grab target method* [set/use kArg0]
-      LoadWordDisp(cu, TargetReg(kArg0),
-                   Array::DataOffset(sizeof(Object*)).Int32Value() + dex_idx * 4,
-                   TargetReg(kArg0));
+      cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
+                       Array::DataOffset(sizeof(Object*)).Int32Value() + dex_idx * 4,
+                       cg->TargetReg(kArg0));
       break;
     default:
       return -1;
@@ -316,6 +520,7 @@
 static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, int trampoline,
                             int state, uint32_t dex_idx, uint32_t method_idx)
 {
+  Codegen* cg = cu->cg.get();
   /*
    * This handles the case in which the base method is not fully
    * resolved at compile time, we bail to a runtime helper.
@@ -323,10 +528,10 @@
   if (state == 0) {
     if (cu->instruction_set != kX86) {
       // Load trampoline target
-      LoadWordDisp(cu, TargetReg(kSelf), trampoline, TargetReg(kInvokeTgt));
+      cg->LoadWordDisp(cu, cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
     }
     // Load kArg0 with method index
-    LoadConstant(cu, TargetReg(kArg0), dex_idx);
+    cg->LoadConstant(cu, cg->TargetReg(kArg0), dex_idx);
     return 1;
   }
   return -1;
@@ -380,8 +585,9 @@
                        uint32_t method_idx, uintptr_t direct_code,
                        uintptr_t direct_method, InvokeType type, bool skip_this)
 {
-  int last_arg_reg = TargetReg(kArg3);
-  int next_reg = TargetReg(kArg1);
+  Codegen* cg = cu->cg.get();
+  int last_arg_reg = cg->TargetReg(kArg3);
+  int next_reg = cg->TargetReg(kArg1);
   int next_arg = 0;
   if (skip_this) {
     next_reg++;
@@ -390,13 +596,13 @@
   for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
     RegLocation rl_arg = info->args[next_arg++];
     rl_arg = UpdateRawLoc(cu, rl_arg);
-    if (rl_arg.wide && (next_reg <= TargetReg(kArg2))) {
-      LoadValueDirectWideFixed(cu, rl_arg, next_reg, next_reg + 1);
+    if (rl_arg.wide && (next_reg <= cg->TargetReg(kArg2))) {
+      cg->LoadValueDirectWideFixed(cu, rl_arg, next_reg, next_reg + 1);
       next_reg++;
       next_arg++;
     } else {
       rl_arg.wide = false;
-      LoadValueDirectFixed(cu, rl_arg, next_reg);
+      cg->LoadValueDirectFixed(cu, rl_arg, next_reg);
     }
     call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
                  direct_code, direct_method, type);
@@ -411,12 +617,10 @@
  * the target method pointer.  Note, this may also be called
  * for "range" variants if the number of arguments is 5 or fewer.
  */
-int GenDalvikArgsNoRange(CompilationUnit* cu, CallInfo* info,
-                         int call_state,
-                         LIR** pcrLabel, NextCallInsn next_call_insn,
-                         uint32_t dex_idx, uint32_t method_idx,
-                         uintptr_t direct_code, uintptr_t direct_method,
-                         InvokeType type, bool skip_this)
+int Codegen::GenDalvikArgsNoRange(CompilationUnit* cu, CallInfo* info,
+                                  int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
+                                  uint32_t dex_idx, uint32_t method_idx, uintptr_t direct_code,
+                                  uintptr_t direct_method, InvokeType type, bool skip_this)
 {
   RegLocation rl_arg;
 
@@ -512,11 +716,10 @@
  *       Pass arg0, arg1 & arg2 in kArg1-kArg3
  *
  */
-int GenDalvikArgsRange(CompilationUnit* cu, CallInfo* info, int call_state,
-                       LIR** pcrLabel, NextCallInsn next_call_insn,
-                       uint32_t dex_idx, uint32_t method_idx,
-                       uintptr_t direct_code, uintptr_t direct_method,
-                       InvokeType type, bool skip_this)
+int Codegen::GenDalvikArgsRange(CompilationUnit* cu, CallInfo* info, int call_state,
+                                LIR** pcrLabel, NextCallInsn next_call_insn, uint32_t dex_idx,
+                                uint32_t method_idx, uintptr_t direct_code, uintptr_t direct_method,
+                                InvokeType type, bool skip_this)
 {
 
   // If we can treat it as non-range (Jumbo ops will use range form)
@@ -574,14 +777,14 @@
       LIR* ld = OpVldm(cu, TargetReg(kArg3), regs_left);
       //TUNING: loosen barrier
       ld->def_mask = ENCODE_ALL;
-      SetMemRefType(ld, true /* is_load */, kDalvikReg);
+      SetMemRefType(cu, ld, true /* is_load */, kDalvikReg);
       call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
                                direct_code, direct_method, type);
       OpRegRegImm(cu, kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
       call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
                                direct_code, direct_method, type);
       LIR* st = OpVstm(cu, TargetReg(kArg3), regs_left);
-      SetMemRefType(st, false /* is_load */, kDalvikReg);
+      SetMemRefType(cu, st, false /* is_load */, kDalvikReg);
       st->def_mask = ENCODE_ALL;
       call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
                                direct_code, direct_method, type);
@@ -601,7 +804,7 @@
   return call_state;
 }
 
-RegLocation InlineTarget(CompilationUnit* cu, CallInfo* info)
+RegLocation Codegen::InlineTarget(CompilationUnit* cu, CallInfo* info)
 {
   RegLocation res;
   if (info->result.location == kLocInvalid) {
@@ -612,7 +815,7 @@
   return res;
 }
 
-RegLocation InlineTargetWide(CompilationUnit* cu, CallInfo* info)
+RegLocation Codegen::InlineTargetWide(CompilationUnit* cu, CallInfo* info)
 {
   RegLocation res;
   if (info->result.location == kLocInvalid) {
@@ -623,7 +826,7 @@
   return res;
 }
 
-bool GenInlinedCharAt(CompilationUnit* cu, CallInfo* info)
+bool Codegen::GenInlinedCharAt(CompilationUnit* cu, CallInfo* info)
 {
   if (cu->instruction_set == kMips) {
     // TODO - add Mips implementation
@@ -702,8 +905,7 @@
 }
 
 // Generates an inlined String.is_empty or String.length.
-bool GenInlinedStringIsEmptyOrLength(CompilationUnit* cu, CallInfo* info,
-                                     bool is_empty)
+bool Codegen::GenInlinedStringIsEmptyOrLength(CompilationUnit* cu, CallInfo* info, bool is_empty)
 {
   if (cu->instruction_set == kMips) {
     // TODO - add Mips implementation
@@ -733,7 +935,7 @@
   return true;
 }
 
-bool GenInlinedAbsInt(CompilationUnit *cu, CallInfo* info)
+bool Codegen::GenInlinedAbsInt(CompilationUnit *cu, CallInfo* info)
 {
   if (cu->instruction_set == kMips) {
     // TODO - add Mips implementation
@@ -752,7 +954,7 @@
   return true;
 }
 
-bool GenInlinedAbsLong(CompilationUnit *cu, CallInfo* info)
+bool Codegen::GenInlinedAbsLong(CompilationUnit *cu, CallInfo* info)
 {
   if (cu->instruction_set == kMips) {
     // TODO - add Mips implementation
@@ -794,7 +996,7 @@
   }
 }
 
-bool GenInlinedFloatCvt(CompilationUnit *cu, CallInfo* info)
+bool Codegen::GenInlinedFloatCvt(CompilationUnit *cu, CallInfo* info)
 {
   if (cu->instruction_set == kMips) {
     // TODO - add Mips implementation
@@ -806,7 +1008,7 @@
   return true;
 }
 
-bool GenInlinedDoubleCvt(CompilationUnit *cu, CallInfo* info)
+bool Codegen::GenInlinedDoubleCvt(CompilationUnit *cu, CallInfo* info)
 {
   if (cu->instruction_set == kMips) {
     // TODO - add Mips implementation
@@ -822,8 +1024,7 @@
  * Fast string.index_of(I) & (II).  Tests for simple case of char <= 0xffff,
  * otherwise bails to standard library code.
  */
-bool GenInlinedIndexOf(CompilationUnit* cu, CallInfo* info,
-                       bool zero_based)
+bool Codegen::GenInlinedIndexOf(CompilationUnit* cu, CallInfo* info, bool zero_based)
 {
   if (cu->instruction_set == kMips) {
     // TODO - add Mips implementation
@@ -867,7 +1068,7 @@
 }
 
 /* Fast string.compareTo(Ljava/lang/string;)I. */
-bool GenInlinedStringCompareTo(CompilationUnit* cu, CallInfo* info)
+bool Codegen::GenInlinedStringCompareTo(CompilationUnit* cu, CallInfo* info)
 {
   if (cu->instruction_set == kMips) {
     // TODO - add Mips implementation
@@ -904,7 +1105,7 @@
   return true;
 }
 
-bool GenIntrinsic(CompilationUnit* cu, CallInfo* info)
+bool Codegen::GenIntrinsic(CompilationUnit* cu, CallInfo* info)
 {
   if (info->opt_flags & MIR_INLINED) {
     return false;
@@ -982,7 +1183,7 @@
   return false;
 }
 
-void GenInvoke(CompilationUnit* cu, CallInfo* info)
+void Codegen::GenInvoke(CompilationUnit* cu, CallInfo* info)
 {
   if (GenIntrinsic(cu, info)) {
     return;
@@ -1109,8 +1310,8 @@
  * high-word loc for wide arguments.  Also pull up any following
  * MOVE_RESULT and incorporate it into the invoke.
  */
-CallInfo* NewMemCallInfo(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
-                         InvokeType type, bool is_range)
+CallInfo* Codegen::NewMemCallInfo(CompilationUnit* cu, BasicBlock* bb, MIR* mir, InvokeType type,
+                                  bool is_range)
 {
   CallInfo* info = static_cast<CallInfo*>(NewMem(cu, sizeof(CallInfo), true, kAllocMisc));
   MIR* move_result_mir = FindMoveResult(cu, bb, mir);
@@ -1134,5 +1335,4 @@
   return info;
 }
 
-
 }  // namespace art
diff --git a/src/compiler/codegen/gen_invoke.h b/src/compiler/codegen/gen_invoke.h
deleted file mode 100644
index c2d32fe..0000000
--- a/src/compiler/codegen/gen_invoke.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_COMPILER_CODEGEN_GENINVOKE_H_
-#define ART_SRC_COMPILER_CODEGEN_GENINVOKE_H_
-
-typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int, uint32_t dex_idx,
-                            uint32_t method_idx, uintptr_t direct_code,
-                            uintptr_t direct_method, InvokeType type);
-
-void FlushIns(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method);
-int GenDalvikArgsNoRange(CompilationUnit* cu, CallInfo* info, int call_state, LIR** pcrLabel, NextCallInsn next_call_insn, uint32_t dex_idx, uint32_t method_idx, uintptr_t direct_code, uintptr_t direct_method, InvokeType type, bool skip_this);
-int GenDalvikArgsRange(CompilationUnit* cu, CallInfo* info, int call_state, LIR** pcrLabel, NextCallInsn next_call_insn, uint32_t dex_idx, uint32_t method_idx, uintptr_t direct_code, uintptr_t direct_method, InvokeType type, bool skip_this);
-RegLocation InlineTarget(CompilationUnit* cu, CallInfo* info);
-RegLocation InlineTargetWide(CompilationUnit* cu, CallInfo* info);
-bool GenInlinedCharAt(CompilationUnit* cu, CallInfo* info);
-bool GenInlinedStringIsEmptyOrLength(CompilationUnit* cu, CallInfo* info, bool is_empty);
-bool GenInlinedAbsInt(CompilationUnit *cu, CallInfo* info);
-bool GenInlinedAbsLong(CompilationUnit *cu, CallInfo* info);
-bool GenInlinedFloatCvt(CompilationUnit *cu, CallInfo* info);
-bool GenInlinedDoubleCvt(CompilationUnit *cu, CallInfo* info);
-bool GenInlinedIndexOf(CompilationUnit* cu, CallInfo* info, bool zero_based);
-bool GenInlinedStringCompareTo(CompilationUnit* cu, CallInfo* info);
-bool GenIntrinsic(CompilationUnit* cu, CallInfo* info);
-void GenInvoke(CompilationUnit* cu, CallInfo* info);
-CallInfo* NewMemCallInfo(CompilationUnit* cu, BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
-
-#endif // ART_SRC_COMPILER_CODEGEN_GENINVOKE_H_
diff --git a/src/compiler/codegen/gen_loadstore.cc b/src/compiler/codegen/gen_loadstore.cc
index 438a16b..7d28e1b 100644
--- a/src/compiler/codegen/gen_loadstore.cc
+++ b/src/compiler/codegen/gen_loadstore.cc
@@ -26,7 +26,7 @@
  * Load an immediate value into a fixed or temp register.  Target
  * register is clobbered, and marked in_use.
  */
-LIR* LoadConstant(CompilationUnit* cu, int r_dest, int value)
+LIR* Codegen::LoadConstant(CompilationUnit* cu, int r_dest, int value)
 {
   if (IsTemp(cu, r_dest)) {
     Clobber(cu, r_dest);
@@ -36,15 +36,13 @@
 }
 
 /* Load a word at base + displacement.  Displacement must be word multiple */
-LIR* LoadWordDisp(CompilationUnit* cu, int rBase, int displacement,
-                  int r_dest)
+LIR* Codegen::LoadWordDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest)
 {
   return LoadBaseDisp(cu, rBase, displacement, r_dest, kWord,
                       INVALID_SREG);
 }
 
-LIR* StoreWordDisp(CompilationUnit* cu, int rBase, int displacement,
-                   int r_src)
+LIR* Codegen::StoreWordDisp(CompilationUnit* cu, int rBase, int displacement, int r_src)
 {
   return StoreBaseDisp(cu, rBase, displacement, r_src, kWord);
 }
@@ -54,7 +52,7 @@
  * using this routine, as it doesn't perform any bookkeeping regarding
  * register liveness.  That is the responsibility of the caller.
  */
-void LoadValueDirect(CompilationUnit* cu, RegLocation rl_src, int r_dest)
+void Codegen::LoadValueDirect(CompilationUnit* cu, RegLocation rl_src, int r_dest)
 {
   rl_src = UpdateLoc(cu, rl_src);
   if (rl_src.location == kLocPhysReg) {
@@ -71,7 +69,7 @@
  * register.  Should be used when loading to a fixed register (for example,
  * loading arguments to an out of line call.
  */
-void LoadValueDirectFixed(CompilationUnit* cu, RegLocation rl_src, int r_dest)
+void Codegen::LoadValueDirectFixed(CompilationUnit* cu, RegLocation rl_src, int r_dest)
 {
   Clobber(cu, r_dest);
   MarkInUse(cu, r_dest);
@@ -83,7 +81,7 @@
  * using this routine, as it doesn't perform any bookkeeping regarding
  * register liveness.  That is the responsibility of the caller.
  */
-void LoadValueDirectWide(CompilationUnit* cu, RegLocation rl_src, int reg_lo,
+void Codegen::LoadValueDirectWide(CompilationUnit* cu, RegLocation rl_src, int reg_lo,
              int reg_hi)
 {
   rl_src = UpdateLocWide(cu, rl_src);
@@ -102,8 +100,8 @@
  * registers.  Should be used when loading to a fixed registers (for example,
  * loading arguments to an out of line call.
  */
-void LoadValueDirectWideFixed(CompilationUnit* cu, RegLocation rl_src,
-                              int reg_lo, int reg_hi)
+void Codegen::LoadValueDirectWideFixed(CompilationUnit* cu, RegLocation rl_src, int reg_lo,
+                                       int reg_hi)
 {
   Clobber(cu, reg_lo);
   Clobber(cu, reg_hi);
@@ -112,8 +110,7 @@
   LoadValueDirectWide(cu, rl_src, reg_lo, reg_hi);
 }
 
-RegLocation LoadValue(CompilationUnit* cu, RegLocation rl_src,
-                      RegisterClass op_kind)
+RegLocation Codegen::LoadValue(CompilationUnit* cu, RegLocation rl_src, RegisterClass op_kind)
 {
   rl_src = EvalLoc(cu, rl_src, op_kind, false);
   if (rl_src.location != kLocPhysReg) {
@@ -126,7 +123,7 @@
   return rl_src;
 }
 
-void StoreValue(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+void Codegen::StoreValue(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
 {
 #ifndef NDEBUG
   /*
@@ -179,8 +176,7 @@
   }
 }
 
-RegLocation LoadValueWide(CompilationUnit* cu, RegLocation rl_src,
-              RegisterClass op_kind)
+RegLocation Codegen::LoadValueWide(CompilationUnit* cu, RegLocation rl_src, RegisterClass op_kind)
 {
   DCHECK(rl_src.wide);
   rl_src = EvalLoc(cu, rl_src, op_kind, false);
@@ -196,8 +192,7 @@
   return rl_src;
 }
 
-void StoreValueWide(CompilationUnit* cu, RegLocation rl_dest,
-          RegLocation rl_src)
+void Codegen::StoreValueWide(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
 {
 #ifndef NDEBUG
   /*
@@ -211,7 +206,7 @@
 #endif
   LIR* def_start;
   LIR* def_end;
-  DCHECK_EQ(FpReg(rl_src.low_reg), FpReg(rl_src.high_reg));
+  DCHECK_EQ(IsFpReg(rl_src.low_reg), IsFpReg(rl_src.high_reg));
   DCHECK(rl_dest.wide);
   DCHECK(rl_src.wide);
   if (rl_src.location == kLocPhysReg) {
@@ -261,12 +256,12 @@
 }
 
 /* Utilities to load the current Method* */
-void LoadCurrMethodDirect(CompilationUnit *cu, int r_tgt)
+void Codegen::LoadCurrMethodDirect(CompilationUnit *cu, int r_tgt)
 {
   LoadValueDirectFixed(cu, cu->method_loc, r_tgt);
 }
 
-RegLocation LoadCurrMethod(CompilationUnit *cu)
+RegLocation Codegen::LoadCurrMethod(CompilationUnit *cu)
 {
   return LoadValue(cu, cu->method_loc, kCoreReg);
 }
diff --git a/src/compiler/codegen/gen_loadstore.h b/src/compiler/codegen/gen_loadstore.h
deleted file mode 100644
index 19c43ba..0000000
--- a/src/compiler/codegen/gen_loadstore.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_COMPILER_CODEGEN_GENLOADSTORE_H_
-#define ART_SRC_COMPILER_CODEGEN_GENLOADSTORE_H_
-
-LIR* LoadConstant(CompilationUnit* cu, int r_dest, int value);
-LIR* LoadWordDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest);
-LIR* StoreWordDisp(CompilationUnit* cu, int rBase, int displacement, int r_src);
-void LoadValueDirect(CompilationUnit* cu, RegLocation rl_src, int r_dest);
-void LoadValueDirectFixed(CompilationUnit* cu, RegLocation rl_src, int r_dest);
-void LoadValueDirectWide(CompilationUnit* cu, RegLocation rl_src, int reg_lo, int reg_hi);
-void LoadValueDirectWideFixed(CompilationUnit* cu, RegLocation rl_src, int reg_lo, int reg_hi);
-RegLocation LoadValue(CompilationUnit* cu, RegLocation rl_src, RegisterClass op_kind);
-void StoreValue(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-RegLocation LoadValueWide(CompilationUnit* cu, RegLocation rl_src, RegisterClass op_kind);
-void StoreValueWide(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-void LoadCurrMethodDirect(CompilationUnit *cu, int r_tgt);
-RegLocation LoadCurrMethod(CompilationUnit *cu);
-bool MethodStarInReg(CompilationUnit* cu);
-
-#endif // ART_SRC_COMPILER_CODEGEN_GENLOADSTORE_H_
diff --git a/src/compiler/codegen/local_optimizations.cc b/src/compiler/codegen/local_optimizations.cc
index cf04b21..ec915f0 100644
--- a/src/compiler/codegen/local_optimizations.cc
+++ b/src/compiler/codegen/local_optimizations.cc
@@ -42,9 +42,10 @@
 /* Convert a more expensive instruction (ie load) into a move */
 static void ConvertMemOpIntoMove(CompilationUnit* cu, LIR* orig_lir, int dest, int src)
 {
+  Codegen* cg = cu->cg.get();
   /* Insert a move to replace the load */
   LIR* move_lir;
-  move_lir = OpRegCopyNoInsert( cu, dest, src);
+  move_lir = cg->OpRegCopyNoInsert( cu, dest, src);
   /*
    * Insert the converted instruction after the original since the
    * optimization is scannng in the top-down order and the new instruction
@@ -74,6 +75,7 @@
  */
 static void ApplyLoadStoreElimination(CompilationUnit* cu, LIR* head_lir, LIR* tail_lir)
 {
+  Codegen* cg = cu->cg.get();
   LIR* this_lir;
 
   if (head_lir == tail_lir) return;
@@ -84,20 +86,20 @@
     /* Skip non-interesting instructions */
     if ((this_lir->flags.is_nop == true) ||
         is_pseudo_opcode(this_lir->opcode) ||
-        (GetTargetInstFlags(this_lir->opcode) & IS_BRANCH) ||
-        !(GetTargetInstFlags(this_lir->opcode) & (IS_LOAD | IS_STORE))) {
+        (cg->GetTargetInstFlags(this_lir->opcode) & IS_BRANCH) ||
+        !(cg->GetTargetInstFlags(this_lir->opcode) & (IS_LOAD | IS_STORE))) {
       continue;
     }
 
     int native_reg_id;
     if (cu->instruction_set == kX86) {
       // If x86, location differs depending on whether memory/reg operation.
-      native_reg_id = (GetTargetInstFlags(this_lir->opcode) & IS_STORE) ? this_lir->operands[2]
+      native_reg_id = (cg->GetTargetInstFlags(this_lir->opcode) & IS_STORE) ? this_lir->operands[2]
           : this_lir->operands[0];
     } else {
       native_reg_id = this_lir->operands[0];
     }
-    bool is_this_lir_load = GetTargetInstFlags(this_lir->opcode) & IS_LOAD;
+    bool is_this_lir_load = cg->GetTargetInstFlags(this_lir->opcode) & IS_LOAD;
     LIR* check_lir;
     /* Use the mem mask to determine the rough memory location */
     uint64_t this_mem_mask = (this_lir->use_mask | this_lir->def_mask) & ENCODE_MEM;
@@ -119,7 +121,7 @@
        * region bits since stop_mask is used to check data/control
        * dependencies.
        */
-        stop_use_reg_mask = (GetPCUseDefEncoding() | this_lir->use_mask) & ~ENCODE_MEM;
+        stop_use_reg_mask = (cg->GetPCUseDefEncoding() | this_lir->use_mask) & ~ENCODE_MEM;
     }
 
     for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) {
@@ -138,16 +140,16 @@
        * Potential aliases seen - check the alias relations
        */
       if (check_mem_mask != ENCODE_MEM && alias_condition != 0) {
-        bool is_check_lir_load = GetTargetInstFlags(check_lir->opcode) & IS_LOAD;
+        bool is_check_lir_load = cg->GetTargetInstFlags(check_lir->opcode) & IS_LOAD;
         if  (alias_condition == ENCODE_LITERAL) {
           /*
            * Should only see literal loads in the instruction
            * stream.
            */
-          DCHECK(!(GetTargetInstFlags(check_lir->opcode) & IS_STORE));
+          DCHECK(!(cg->GetTargetInstFlags(check_lir->opcode) & IS_STORE));
           /* Same value && same register type */
           if (check_lir->alias_info == this_lir->alias_info &&
-              SameRegType(check_lir->operands[0], native_reg_id)) {
+              cg->SameRegType(check_lir->operands[0], native_reg_id)) {
             /*
              * Different destination register - insert
              * a move
@@ -162,7 +164,7 @@
           /* Must alias */
           if (check_lir->alias_info == this_lir->alias_info) {
             /* Only optimize compatible registers */
-            bool reg_compatible = SameRegType(check_lir->operands[0], native_reg_id);
+            bool reg_compatible = cg->SameRegType(check_lir->operands[0], native_reg_id);
             if ((is_this_lir_load && is_check_lir_load) ||
                 (!is_this_lir_load && is_check_lir_load)) {
               /* RAR or RAW */
@@ -227,7 +229,7 @@
         if (cu->instruction_set == kX86) {
           // Prevent stores from being sunk between ops that generate ccodes and
           // ops that use them.
-          uint64_t flags = GetTargetInstFlags(check_lir->opcode);
+          uint64_t flags = cg->GetTargetInstFlags(check_lir->opcode);
           if (sink_distance > 0 && (flags & IS_BRANCH) && (flags & USES_CCODES)) {
             check_lir = PREV_LIR(check_lir);
             sink_distance--;
@@ -260,6 +262,7 @@
  */
 void ApplyLoadHoisting(CompilationUnit* cu, LIR* head_lir, LIR* tail_lir)
 {
+  Codegen* cg = cu->cg.get();
   LIR* this_lir, *check_lir;
   /*
    * Store the list of independent instructions that can be hoisted past.
@@ -276,7 +279,7 @@
     /* Skip non-interesting instructions */
     if ((this_lir->flags.is_nop == true) ||
         is_pseudo_opcode(this_lir->opcode) ||
-        !(GetTargetInstFlags(this_lir->opcode) & IS_LOAD)) {
+        !(cg->GetTargetInstFlags(this_lir->opcode) & IS_LOAD)) {
       continue;
     }
 
@@ -290,7 +293,7 @@
        * conservatively here.
        */
       if (stop_use_all_mask & ENCODE_HEAP_REF) {
-        stop_use_all_mask |= GetPCUseDefEncoding();
+        stop_use_all_mask |= cg->GetPCUseDefEncoding();
       }
     }
 
@@ -374,7 +377,7 @@
       LIR* dep_lir = prev_inst_list[next_slot-1];
       /* If there is ld-ld dependency, wait LDLD_DISTANCE cycles */
       if (!is_pseudo_opcode(dep_lir->opcode) &&
-        (GetTargetInstFlags(dep_lir->opcode) & IS_LOAD)) {
+        (cg->GetTargetInstFlags(dep_lir->opcode) & IS_LOAD)) {
         first_slot -= LDLD_DISTANCE;
       }
       /*
@@ -391,7 +394,7 @@
            * If the first instruction is a load, don't hoist anything
            * above it since it is unlikely to be beneficial.
            */
-          if (GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) continue;
+          if (cg->GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) continue;
           /*
            * If the remaining number of slots is less than LD_LATENCY,
            * insert the hoisted load here.
@@ -411,7 +414,7 @@
          * the remaining instructions are less than LD_LATENCY.
          */
         bool prev_is_load = is_pseudo_opcode(prev_lir->opcode) ? false :
-            (GetTargetInstFlags(prev_lir->opcode) & IS_LOAD);
+            (cg->GetTargetInstFlags(prev_lir->opcode) & IS_LOAD);
         if (((cur_lir->use_mask & prev_lir->def_mask) && prev_is_load) || (slot < LD_LATENCY)) {
           break;
         }
@@ -452,11 +455,12 @@
 void RemoveRedundantBranches(CompilationUnit* cu)
 {
   LIR* this_lir;
+  Codegen* cg = cu->cg.get();
 
   for (this_lir = cu->first_lir_insn; this_lir != cu->last_lir_insn; this_lir = NEXT_LIR(this_lir)) {
 
     /* Branch to the next instruction */
-    if (BranchUnconditional(this_lir)) {
+    if (cg->IsUnconditionalBranch(this_lir)) {
       LIR* next_lir = this_lir;
 
       while (true) {
diff --git a/src/compiler/codegen/method_codegen_driver.h b/src/compiler/codegen/method_codegen_driver.h
deleted file mode 100644
index 4c0ffba..0000000
--- a/src/compiler/codegen/method_codegen_driver.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_COMPILER_CODEGEN_METHODCODEGENDRIVER_H_
-#define ART_SRC_COMPILER_CODEGEN_METHODCODEGENDRIVER_H_
-
-namespace art {
-// TODO: move GenInvoke to gen_invoke.cc
-void GenInvoke(CompilationUnit* cu, CallInfo* info);
-// TODO: move GenInvoke to gen_invoke.cc or utils
-CallInfo* NewMemCallInfo(CompilationUnit* cu, BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
-void SpecialMIR2LIR(CompilationUnit* cu, SpecialCaseHandler special_case);
-void MethodMIR2LIR(CompilationUnit* cu);
-
-
-}  // namespace art
-
-#endif // ART_SRC_COMPILER_CODEGEN_METHODCODEGENDRIVER_H_
diff --git a/src/compiler/codegen/mips/assemble_mips.cc b/src/compiler/codegen/mips/assemble_mips.cc
index 933cb60..4574a42 100644
--- a/src/compiler/codegen/mips/assemble_mips.cc
+++ b/src/compiler/codegen/mips/assemble_mips.cc
@@ -15,6 +15,7 @@
  */
 
 #include "mips_lir.h"
+#include "codegen_mips.h"
 #include "../codegen_util.h"
 
 namespace art {
@@ -80,7 +81,7 @@
  * is expanded to include a nop.  This scheme should be replaced with
  * an assembler pass to fill those slots when possible.
  */
-MipsEncodingMap EncodingMap[kMipsLast] = {
+const MipsEncodingMap MipsCodegen::EncodingMap[kMipsLast] = {
     ENCODING_MAP(kMips32BitData, 0x00000000,
                  kFmtBitBlt, 31, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_UNARY_OP,
@@ -305,7 +306,6 @@
                  kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
                  "xori", "!0r,!1r,0x!2h(!2d)", 4),
-#ifdef __mips_hard_float
     ENCODING_MAP(kMipsFadds, 0x46000000,
                  kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
@@ -394,7 +394,6 @@
                  kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
                  "mtc1", "!0r,!1s", 4),
-#endif
     ENCODING_MAP(kMipsDelta, 0x27e00000,
                  kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, 15, 0,
                  kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | REG_USE_LR |
@@ -514,8 +513,7 @@
  * instruction.  In those cases we will try to substitute a new code
  * sequence or request that the trace be shortened and retried.
  */
-AssemblerStatus AssembleInstructions(CompilationUnit *cu,
-                    uintptr_t start_addr)
+AssemblerStatus MipsCodegen::AssembleInstructions(CompilationUnit *cu, uintptr_t start_addr)
 {
   LIR *lir;
   AssemblerStatus res = kSuccess;  // Assume success
@@ -710,7 +708,7 @@
   return res;
 }
 
-int GetInsnSize(LIR* lir)
+int MipsCodegen::GetInsnSize(LIR* lir)
 {
   return EncodingMap[lir->opcode].size;
 }
@@ -718,7 +716,7 @@
  * Target-dependent offset assignment.
  * independent.
  */
-int AssignInsnOffsets(CompilationUnit* cu)
+int MipsCodegen::AssignInsnOffsets(CompilationUnit* cu)
 {
   LIR* mips_lir;
   int offset = 0;
diff --git a/src/compiler/codegen/mips/call_mips.cc b/src/compiler/codegen/mips/call_mips.cc
index b25b7e6..f14ebab 100644
--- a/src/compiler/codegen/mips/call_mips.cc
+++ b/src/compiler/codegen/mips/call_mips.cc
@@ -18,13 +18,14 @@
 
 #include "oat/runtime/oat_support_entrypoints.h"
 #include "mips_lir.h"
+#include "codegen_mips.h"
 #include "../codegen_util.h"
 #include "../ralloc_util.h"
 
 namespace art {
 
-void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
-                    SpecialCaseHandler special_case)
+void MipsCodegen::GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+                                 SpecialCaseHandler special_case)
 {
     // TODO
 }
@@ -60,8 +61,7 @@
  * done:
  *
  */
-void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
-                     RegLocation rl_src)
+void MipsCodegen::GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   if (cu->verbose) {
@@ -140,8 +140,7 @@
  *   jr    r_RA
  * done:
  */
-void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
-                     RegLocation rl_src)
+void MipsCodegen::GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   if (cu->verbose) {
@@ -224,8 +223,7 @@
  *
  * Total size is 4+(width * size + 1)/2 16-bit code units.
  */
-void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset,
-                      RegLocation rl_src)
+void MipsCodegen::GenFillArrayData(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   // Add the table to the list - we'll process it later
@@ -267,7 +265,7 @@
 /*
  * TODO: implement fast path to short-circuit thin-lock case
  */
-void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+void MipsCodegen::GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
 {
   FlushAllRegs(cu);
   LoadValueDirectFixed(cu, rl_src, rMIPS_ARG0);  // Get obj
@@ -283,7 +281,7 @@
 /*
  * TODO: implement fast path to short-circuit thin-lock case
  */
-void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+void MipsCodegen::GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
 {
   FlushAllRegs(cu);
   LoadValueDirectFixed(cu, rl_src, rMIPS_ARG0);  // Get obj
@@ -299,7 +297,7 @@
 /*
  * Mark garbage collection card. Skip if the value we're storing is null.
  */
-void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
+void MipsCodegen::MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
 {
   int reg_card_base = AllocTemp(cu);
   int reg_card_no = AllocTemp(cu);
@@ -313,8 +311,7 @@
   FreeTemp(cu, reg_card_base);
   FreeTemp(cu, reg_card_no);
 }
-void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
-                      RegLocation rl_method)
+void MipsCodegen::GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method)
 {
   int spill_count = cu->num_core_spills + cu->num_fp_spills;
   /*
@@ -361,7 +358,7 @@
   FreeTemp(cu, rMIPS_ARG3);
 }
 
-void GenExitSequence(CompilationUnit* cu)
+void MipsCodegen::GenExitSequence(CompilationUnit* cu)
 {
   /*
    * In the exit path, rMIPS_RET0/rMIPS_RET1 are live - make sure they aren't
diff --git a/src/compiler/codegen/mips/codegen_mips.h b/src/compiler/codegen/mips/codegen_mips.h
new file mode 100644
index 0000000..b0ecfce
--- /dev/null
+++ b/src/compiler/codegen/mips/codegen_mips.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_CODEGEN_MIPS_CODEGENMIPS_H_
+#define ART_SRC_COMPILER_CODEGEN_MIPS_CODEGENMIPS_H_
+
+#include "../../compiler_internals.h"
+
+namespace art {
+
+class MipsCodegen : public Codegen {
+  public:
+    // Required for target - codegen utilities.
+    virtual bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+                                    RegLocation rl_src, RegLocation rl_dest, int lit);
+    virtual int LoadHelper(CompilationUnit* cu, int offset);
+    virtual LIR* LoadBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest,
+                              OpSize size, int s_reg);
+    virtual LIR* LoadBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_dest_lo,
+                                  int r_dest_hi, int s_reg);
+    virtual LIR* LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest, int scale,
+                                 OpSize size);
+    virtual LIR* LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
+                                     int s_reg);
+    virtual LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value);
+    virtual LIR* LoadConstantValueWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi,
+                                       int val_lo, int val_hi);
+    virtual void LoadPair(CompilationUnit* cu, int base, int low_reg, int high_reg);
+    virtual LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
+                               OpSize size);
+    virtual LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_src_lo,
+                                   int r_src_hi);
+    virtual LIR* StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src, int scale,
+                                 OpSize size);
+    virtual LIR* StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+                                      int displacement, int r_src, int r_src_hi, OpSize size,
+                                      int s_reg);
+    virtual void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg);
+
+    // Required for target - register utilities.
+    virtual bool IsFpReg(int reg);
+    virtual bool SameRegType(int reg1, int reg2);
+    virtual int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class);
+    virtual int AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class);
+    virtual int S2d(int low_reg, int high_reg);
+    virtual int TargetReg(SpecialTargetRegister reg);
+    virtual RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg);
+    virtual RegLocation GetReturnAlt(CompilationUnit* cu);
+    virtual RegLocation GetReturnWideAlt(CompilationUnit* cu);
+    virtual RegLocation LocCReturn();
+    virtual RegLocation LocCReturnDouble();
+    virtual RegLocation LocCReturnFloat();
+    virtual RegLocation LocCReturnWide();
+    virtual uint32_t FpRegMask();
+    virtual uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg);
+    virtual void AdjustSpillMask(CompilationUnit* cu);
+    virtual void ClobberCalleeSave(CompilationUnit *cu);
+    virtual void FlushReg(CompilationUnit* cu, int reg);
+    virtual void FlushRegWide(CompilationUnit* cu, int reg1, int reg2);
+    virtual void FreeCallTemps(CompilationUnit* cu);
+    virtual void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep, RegLocation rl_free);
+    virtual void LockCallTemps(CompilationUnit* cu);
+    virtual void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg);
+    virtual void CompilerInitializeRegAlloc(CompilationUnit* cu);
+
+    // Required for target - miscellaneous.
+    virtual AssemblerStatus AssembleInstructions(CompilationUnit* cu, uintptr_t start_addr);
+    virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
+    virtual void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir);
+    virtual const char* GetTargetInstFmt(int opcode);
+    virtual const char* GetTargetInstName(int opcode);
+    virtual int AssignInsnOffsets(CompilationUnit* cu);
+    virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+    virtual uint64_t GetPCUseDefEncoding();
+    virtual uint64_t GetTargetInstFlags(int opcode);
+    virtual int GetInsnSize(LIR* lir);
+    virtual bool IsUnconditionalBranch(LIR* lir);
+
+    // Required for target - Dalvik-level generators.
+    virtual bool GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2);
+    virtual bool GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2);
+    virtual bool GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode,
+                                  RegLocation rl_dest, RegLocation rl_src1,
+                                  RegLocation rl_src2);
+    virtual bool GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+                                 RegLocation rl_src1, RegLocation rl_src2);
+    virtual bool GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                          RegLocation rl_src1, RegLocation rl_src2);
+    virtual bool GenConversion(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                               RegLocation rl_src);
+    virtual bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier);
+    virtual bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min);
+    virtual bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info);
+    virtual bool GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+    virtual bool GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                           RegLocation rl_src2);
+    virtual bool GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2);
+    virtual bool GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2);
+    virtual LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int base,
+                                int offset, ThrowKind kind);
+    virtual RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int reg_hi,
+                                  bool is_div);
+    virtual RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int lit,
+                                     bool is_div);
+    virtual void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2);
+    virtual void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi);
+    virtual void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
+                                  RegLocation rl_method);
+    virtual void GenExitSequence(CompilationUnit* cu);
+    virtual void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset,
+                                  RegLocation rl_src);
+    virtual void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
+                                     bool is_double);
+    virtual void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
+    virtual void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind);
+    virtual void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
+    virtual void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
+    virtual void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+                                               RegLocation rl_result, int lit, int first_bit,
+                                               int second_bit);
+    virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+    virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+    virtual void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+                                 RegLocation rl_src);
+    virtual void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+                                 RegLocation rl_src);
+    virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+                                SpecialCaseHandler special_case);
+
+    // Required for target - single operation generators.
+    virtual LIR* OpUnconditionalBranch(CompilationUnit* cu, LIR* target);
+    virtual LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
+                             LIR* target);
+    virtual LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg, int check_value,
+                                LIR* target);
+    virtual LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target);
+    virtual LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg,
+                                LIR* target);
+    virtual LIR* OpFpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
+    virtual LIR* OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide);
+    virtual LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp);
+    virtual LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target);
+    virtual LIR* OpReg(CompilationUnit* cu, OpKind op, int r_dest_src);
+    virtual LIR* OpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
+    virtual LIR* OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src);
+    virtual LIR* OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value);
+    virtual LIR* OpRegMem(CompilationUnit* cu, OpKind op, int r_dest, int rBase, int offset);
+    virtual LIR* OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2);
+    virtual LIR* OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int value);
+    virtual LIR* OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
+                             int r_src2);
+    virtual LIR* OpTestSuspend(CompilationUnit* cu, LIR* target);
+    virtual LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset);
+    virtual LIR* OpVldm(CompilationUnit* cu, int rBase, int count);
+    virtual LIR* OpVstm(CompilationUnit* cu, int rBase, int count);
+    virtual void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale,
+                       int offset);
+    virtual void OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo,
+                               int src_hi);
+    virtual void OpTlsCmp(CompilationUnit* cu, int offset, int val);
+
+    LIR* LoadBaseDispBody(CompilationUnit* cu, int rBase, int displacement, int r_dest,
+                          int r_dest_hi, OpSize size, int s_reg);
+    LIR* StoreBaseDispBody(CompilationUnit* cu, int rBase, int displacement, int r_src,
+                           int r_src_hi, OpSize size);
+    void SpillCoreRegs(CompilationUnit* cu);
+    void UnSpillCoreRegs(CompilationUnit* cu);
+    static const MipsEncodingMap EncodingMap[kMipsLast];
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_CODEGEN_MIPS_CODEGENMIPS_H_
diff --git a/src/compiler/codegen/mips/fp_mips.cc b/src/compiler/codegen/mips/fp_mips.cc
index 8f33dfa..efc4f80 100644
--- a/src/compiler/codegen/mips/fp_mips.cc
+++ b/src/compiler/codegen/mips/fp_mips.cc
@@ -16,15 +16,15 @@
 
 #include "oat/runtime/oat_support_entrypoints.h"
 #include "mips_lir.h"
+#include "codegen_mips.h"
 #include "../codegen_util.h"
 #include "../ralloc_util.h"
 
 namespace art {
 
-bool GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
-                     RegLocation rl_src1, RegLocation rl_src2)
+bool MipsCodegen::GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode,
+                                  RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
 {
-#ifdef __mips_hard_float
   int op = kMipsNop;
   RegLocation rl_result;
 
@@ -64,15 +64,11 @@
   StoreValue(cu, rl_dest, rl_result);
 
   return false;
-#else
-  return GenArithOpFloatPortable(cu, opcode, rl_dest, rl_src1, rl_src2);
-#endif
 }
 
-bool GenArithOpDouble(CompilationUnit *cu, Instruction::Code opcode,
-                      RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
+bool MipsCodegen::GenArithOpDouble(CompilationUnit *cu, Instruction::Code opcode,
+                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
 {
-#ifdef __mips_hard_float
   int op = kMipsNop;
   RegLocation rl_result;
 
@@ -112,15 +108,11 @@
           S2d(rl_src2.low_reg, rl_src2.high_reg));
   StoreValueWide(cu, rl_dest, rl_result);
   return false;
-#else
-  return GenArithOpDoublePortable(cu, opcode, rl_dest, rl_src1, rl_src2);
-#endif
 }
 
-bool GenConversion(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
-                   RegLocation rl_src)
+bool MipsCodegen::GenConversion(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+                                RegLocation rl_src)
 {
-#ifdef __mips_hard_float
   int op = kMipsNop;
   int src_reg;
   RegLocation rl_result;
@@ -164,13 +156,10 @@
     StoreValue(cu, rl_dest, rl_result);
   }
   return false;
-#else
-  return GenConversionPortable(cu, opcode, rl_dest, rl_src);
-#endif
 }
 
-bool GenCmpFP(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
-              RegLocation rl_src1, RegLocation rl_src2)
+bool MipsCodegen::GenCmpFP(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+                           RegLocation rl_src1, RegLocation rl_src2)
 {
   bool wide = true;
   int offset;
@@ -210,13 +199,13 @@
   return false;
 }
 
-void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+void MipsCodegen::GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
                                 bool gt_bias, bool is_double)
 {
   UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
 }
 
-void GenNegFloat(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
+void MipsCodegen::GenNegFloat(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
 {
   RegLocation rl_result;
   rl_src = LoadValue(cu, rl_src, kCoreReg);
@@ -225,7 +214,7 @@
   StoreValue(cu, rl_dest, rl_result);
 }
 
-void GenNegDouble(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
+void MipsCodegen::GenNegDouble(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
 {
   RegLocation rl_result;
   rl_src = LoadValueWide(cu, rl_src, kCoreReg);
@@ -235,7 +224,7 @@
   StoreValueWide(cu, rl_dest, rl_result);
 }
 
-bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min)
+bool MipsCodegen::GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min)
 {
   // TODO: need Mips implementation
   return false;
diff --git a/src/compiler/codegen/mips/int_mips.cc b/src/compiler/codegen/mips/int_mips.cc
index 273e4bd..bb36dc1 100644
--- a/src/compiler/codegen/mips/int_mips.cc
+++ b/src/compiler/codegen/mips/int_mips.cc
@@ -18,6 +18,7 @@
 
 #include "oat/runtime/oat_support_entrypoints.h"
 #include "mips_lir.h"
+#include "codegen_mips.h"
 #include "../codegen_util.h"
 #include "../ralloc_util.h"
 
@@ -39,8 +40,8 @@
  * finish:
  *
  */
-void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest,
-        RegLocation rl_src1, RegLocation rl_src2)
+void MipsCodegen::GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                             RegLocation rl_src2)
 {
   rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
   rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
@@ -61,8 +62,8 @@
   StoreValue(cu, rl_dest, rl_result);
 }
 
-LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1,
-         int src2, LIR* target)
+LIR* MipsCodegen::OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
+                              LIR* target)
 {
   LIR* branch;
   MipsOpCode slt_op;
@@ -129,8 +130,8 @@
   return branch;
 }
 
-LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg,
-          int check_value, LIR* target)
+LIR* MipsCodegen::OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg,
+                                 int check_value, LIR* target)
 {
   LIR* branch;
   if (check_value != 0) {
@@ -163,12 +164,10 @@
   return branch;
 }
 
-LIR* OpRegCopyNoInsert(CompilationUnit *cu, int r_dest, int r_src)
+LIR* MipsCodegen::OpRegCopyNoInsert(CompilationUnit *cu, int r_dest, int r_src)
 {
-#ifdef __mips_hard_float
   if (MIPS_FPREG(r_dest) || MIPS_FPREG(r_src))
-    return FpRegCopy(cu, r_dest, r_src);
-#endif
+    return OpFpRegCopy(cu, r_dest, r_src);
   LIR* res = RawLIR(cu, cu->current_dalvik_offset, kMipsMove,
             r_dest, r_src);
   if (!(cu->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
@@ -177,17 +176,16 @@
   return res;
 }
 
-LIR* OpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
+LIR* MipsCodegen::OpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
 {
   LIR *res = OpRegCopyNoInsert(cu, r_dest, r_src);
   AppendLIR(cu, res);
   return res;
 }
 
-void OpRegCopyWide(CompilationUnit *cu, int dest_lo, int dest_hi,
-          int src_lo, int src_hi)
+void MipsCodegen::OpRegCopyWide(CompilationUnit *cu, int dest_lo, int dest_hi, int src_lo,
+                                int src_hi)
 {
-#ifdef __mips_hard_float
   bool dest_fp = MIPS_FPREG(dest_lo) && MIPS_FPREG(dest_hi);
   bool src_fp = MIPS_FPREG(src_lo) && MIPS_FPREG(src_hi);
   assert(MIPS_FPREG(src_lo) == MIPS_FPREG(src_hi));
@@ -215,31 +213,22 @@
       }
     }
   }
-#else
-  // Handle overlap
-  if (src_hi == dest_lo) {
-    OpRegCopy(cu, dest_hi, src_hi);
-    OpRegCopy(cu, dest_lo, src_lo);
-  } else {
-    OpRegCopy(cu, dest_lo, src_lo);
-    OpRegCopy(cu, dest_hi, src_hi);
-  }
-#endif
 }
 
-void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
+void MipsCodegen::GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
 {
   UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
 }
 
-LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code,
+LIR* MipsCodegen::GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code,
                     int reg1, int base, int offset, ThrowKind kind)
 {
   LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
   return NULL;
 }
 
-RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg1, int reg2, bool is_div)
+RegLocation MipsCodegen::GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg1, int reg2,
+                                    bool is_div)
 {
   NewLIR4(cu, kMipsDiv, r_HI, r_LO, reg1, reg2);
   RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
@@ -251,7 +240,8 @@
   return rl_result;
 }
 
-RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg1, int lit, bool is_div)
+RegLocation MipsCodegen::GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg1, int lit,
+                                       bool is_div)
 {
   int t_reg = AllocTemp(cu);
   NewLIR3(cu, kMipsAddiu, t_reg, r_ZERO, lit);
@@ -266,46 +256,46 @@
   return rl_result;
 }
 
-void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale, int offset)
+void MipsCodegen::OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale, int offset)
 {
   LOG(FATAL) << "Unexpected use of OpLea for Arm";
 }
 
-void OpTlsCmp(CompilationUnit* cu, int offset, int val)
+void MipsCodegen::OpTlsCmp(CompilationUnit* cu, int offset, int val)
 {
   LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
 }
 
-bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) {
+bool MipsCodegen::GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) {
   DCHECK_NE(cu->instruction_set, kThumb2);
   return false;
 }
 
-bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) {
+bool MipsCodegen::GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) {
   DCHECK_NE(cu->instruction_set, kThumb2);
   return false;
 }
 
-LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target) {
+LIR* MipsCodegen::OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target) {
   LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips";
   return NULL;
 }
 
-LIR* OpVldm(CompilationUnit* cu, int rBase, int count)
+LIR* MipsCodegen::OpVldm(CompilationUnit* cu, int rBase, int count)
 {
   LOG(FATAL) << "Unexpected use of OpVldm for Mips";
   return NULL;
 }
 
-LIR* OpVstm(CompilationUnit* cu, int rBase, int count)
+LIR* MipsCodegen::OpVstm(CompilationUnit* cu, int rBase, int count)
 {
   LOG(FATAL) << "Unexpected use of OpVstm for Mips";
   return NULL;
 }
 
-void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
-                                   RegLocation rl_result, int lit,
-                                   int first_bit, int second_bit)
+void MipsCodegen::GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+                                                RegLocation rl_result, int lit,
+                                                int first_bit, int second_bit)
 {
   int t_reg = AllocTemp(cu);
   OpRegRegImm(cu, kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
@@ -316,7 +306,7 @@
   }
 }
 
-void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi)
+void MipsCodegen::GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi)
 {
   int t_reg = AllocTemp(cu);
   OpRegRegReg(cu, kOpOr, t_reg, reg_lo, reg_hi);
@@ -325,34 +315,34 @@
 }
 
 // Test suspend flag, return target of taken suspend branch
-LIR* OpTestSuspend(CompilationUnit* cu, LIR* target)
+LIR* MipsCodegen::OpTestSuspend(CompilationUnit* cu, LIR* target)
 {
   OpRegImm(cu, kOpSub, rMIPS_SUSPEND, 1);
   return OpCmpImmBranch(cu, (target == NULL) ? kCondEq : kCondNe, rMIPS_SUSPEND, 0, target);
 }
 
 // Decrement register and branch on condition
-LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target)
+LIR* MipsCodegen::OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target)
 {
   OpRegImm(cu, kOpSub, reg, 1);
   return OpCmpImmBranch(cu, c_code, reg, 0, target);
 }
 
-bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
-                        RegLocation rl_src, RegLocation rl_dest, int lit)
+bool MipsCodegen::SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+                                     RegLocation rl_src, RegLocation rl_dest, int lit)
 {
   LOG(FATAL) << "Unexpected use of smallLiteralDive in Mips";
   return false;
 }
 
-LIR* OpIT(CompilationUnit* cu, ArmConditionCode cond, const char* guide)
+LIR* MipsCodegen::OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide)
 {
   LOG(FATAL) << "Unexpected use of OpIT in Mips";
   return NULL;
 }
 
-bool GenAddLong(CompilationUnit* cu, RegLocation rl_dest,
-                RegLocation rl_src1, RegLocation rl_src2)
+bool MipsCodegen::GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                             RegLocation rl_src2)
 {
   rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
   rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
@@ -375,8 +365,8 @@
   return false;
 }
 
-bool GenSubLong(CompilationUnit* cu, RegLocation rl_dest,
-        RegLocation rl_src1, RegLocation rl_src2)
+bool MipsCodegen::GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                             RegLocation rl_src2)
 {
   rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
   rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
@@ -399,8 +389,7 @@
   return false;
 }
 
-bool GenNegLong(CompilationUnit* cu, RegLocation rl_dest,
-                RegLocation rl_src)
+bool MipsCodegen::GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
 {
   rl_src = LoadValueWide(cu, rl_src, kCoreReg);
   RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
@@ -422,22 +411,22 @@
   return false;
 }
 
-bool GenAndLong(CompilationUnit* cu, RegLocation rl_dest,
-                RegLocation rl_src1, RegLocation rl_src2)
+bool MipsCodegen::GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                             RegLocation rl_src2)
 {
   LOG(FATAL) << "Unexpected use of GenAndLong for Mips";
   return false;
 }
 
-bool GenOrLong(CompilationUnit* cu, RegLocation rl_dest,
-               RegLocation rl_src1, RegLocation rl_src2)
+bool MipsCodegen::GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
 {
   LOG(FATAL) << "Unexpected use of GenOrLong for Mips";
   return false;
 }
 
-bool GenXorLong(CompilationUnit* cu, RegLocation rl_dest,
-               RegLocation rl_src1, RegLocation rl_src2)
+bool MipsCodegen::GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                             RegLocation rl_src2)
 {
   LOG(FATAL) << "Unexpected use of GenXorLong for Mips";
   return false;
diff --git a/src/compiler/codegen/mips/mips_lir.h b/src/compiler/codegen/mips/mips_lir.h
index e3d9b62..cecd4ab 100644
--- a/src/compiler/codegen/mips/mips_lir.h
+++ b/src/compiler/codegen/mips/mips_lir.h
@@ -86,13 +86,13 @@
  * +========================+
  */
 
-/* Offset to distingish FP regs */
+// Offset to distinguish FP regs.
 #define MIPS_FP_REG_OFFSET 32
-/* Offset to distinguish DP FP regs */
+// Offset to distinguish DP FP regs.
 #define MIPS_FP_DOUBLE 64
-/* Offset to distingish the extra regs */
+// Offset to distinguish the extra regs.
 #define MIPS_EXTRA_REG_OFFSET 128
-/* Reg types */
+// Reg types.
 #define MIPS_REGTYPE(x) (x & (MIPS_FP_REG_OFFSET | MIPS_FP_DOUBLE))
 #define MIPS_FPREG(x) ((x & MIPS_FP_REG_OFFSET) == MIPS_FP_REG_OFFSET)
 #define MIPS_EXTRAREG(x) ((x & MIPS_EXTRA_REG_OFFSET) == MIPS_EXTRA_REG_OFFSET)
@@ -106,7 +106,7 @@
  * code that reg locations always describe doubles as a pair of singles.
  */
 #define MIPS_S2D(x,y) ((x) | MIPS_FP_DOUBLE)
-/* Mask to strip off fp flags */
+// Mask to strip off fp flags.
 #define MIPS_FP_REG_MASK (MIPS_FP_REG_OFFSET-1)
 
 #ifdef HAVE_LITTLE_ENDIAN
@@ -129,7 +129,7 @@
 #define r_RESULT1 r_V0
 #endif
 
-/* These are the same for both big and little endian. */
+// These are the same for both big and little endian.
 #define r_FARG0 r_F12
 #define r_FARG1 r_F13
 #define r_FARG2 r_F14
@@ -137,11 +137,11 @@
 #define r_FRESULT0 r_F0
 #define r_FRESULT1 r_F1
 
-/* Regs not used for Mips */
+// Regs not used for Mips.
 #define rMIPS_LR INVALID_REG
 #define rMIPS_PC INVALID_REG
 
-/* RegisterLocation templates return values (r_V0, or r_V0/r_V1) */
+// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
 #define MIPS_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, r_V0, INVALID_REG, \
                            INVALID_SREG, INVALID_SREG}
 #define MIPS_LOC_C_RETURN_FLOAT {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, r_FRESULT0, \
@@ -155,7 +155,7 @@
   kMipsGPReg0   = 0,
   kMipsRegSP    = 29,
   kMipsRegLR    = 31,
-  kMipsFPReg0   = 32, /* only 16 fp regs supported currently */
+  kMipsFPReg0   = 32, // only 16 fp regs supported currently.
   kMipsFPRegEnd   = 48,
   kMipsRegHI    = kMipsFPRegEnd,
   kMipsRegLO,
@@ -168,10 +168,6 @@
 #define ENCODE_MIPS_REG_LR           (1ULL << kMipsRegLR)
 #define ENCODE_MIPS_REG_PC           (1ULL << kMipsRegPC)
 
-/*
- * Annotate special-purpose core registers:
- */
-
 enum MipsNativeRegisterPool {
   r_ZERO = 0,
   r_AT = 1,
@@ -222,7 +218,11 @@
   r_F13,
   r_F14,
   r_F15,
-#if 0 /* only 16 fp regs supported currently */
+#if 0
+  /*
+   * TODO: The shared resource mask doesn't have enough bit positions to describe all
+   * MIPS registers.  Expand it and enable use of fp registers 16 through 31.
+   */
   r_F16,
   r_F17,
   r_F18,
@@ -248,7 +248,7 @@
   r_DF5 = r_F10 + MIPS_FP_DOUBLE,
   r_DF6 = r_F12 + MIPS_FP_DOUBLE,
   r_DF7 = r_F14 + MIPS_FP_DOUBLE,
-#if 0 /* only 16 fp regs supported currently */
+#if 0 // TODO: expand resource mask to enable use of all MIPS fp registers.
   r_DF8 = r_F16 + MIPS_FP_DOUBLE,
   r_DF9 = r_F18 + MIPS_FP_DOUBLE,
   r_DF10 = r_F20 + MIPS_FP_DOUBLE,
@@ -263,10 +263,6 @@
   r_PC,
 };
 
-/*
- * Target-independent aliases
- */
-
 #define rMIPS_SUSPEND r_S0
 #define rMIPS_SELF r_S1
 #define rMIPS_SP r_SP
@@ -283,7 +279,6 @@
 #define rMIPS_INVOKE_TGT r_T9
 #define rMIPS_COUNT INVALID_REG
 
-/* Shift encodings */
 enum MipsShiftEncodings {
   kMipsLsl = 0x0,
   kMipsLsr = 0x1,
@@ -291,7 +286,7 @@
   kMipsRor = 0x3
 };
 
-// MIPS sync kinds (Note: support for kinds other than kSYNC0 may not exist)
+// MIPS sync kinds (Note: support for kinds other than kSYNC0 may not exist).
 #define kSYNC0        0x00
 #define kSYNC_WMB     0x04
 #define kSYNC_MB      0x01
@@ -299,7 +294,7 @@
 #define kSYNC_RELEASE 0x12
 #define kSYNC_RMB     0x13
 
-// TODO: Use smaller hammer when appropriate for target CPU
+// TODO: Use smaller hammer when appropriate for target CPU.
 #define kST kSYNC0
 #define kSY kSYNC0
 
@@ -310,103 +305,99 @@
  */
 enum MipsOpCode {
   kMipsFirst = 0,
-  kMips32BitData = kMipsFirst, /* data [31..0] */
-  kMipsAddiu, /* addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] */
-  kMipsAddu,  /* add d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001] */
-  kMipsAnd,   /* and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100] */
-  kMipsAndi,  /* andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0] */
-  kMipsB,     /* b o   [0001000000000000] o[15..0] */
-  kMipsBal,   /* bal o [0000010000010001] o[15..0] */
-  /* NOTE: the code tests the range kMipsBeq thru kMipsBne, so
-       adding an instruction in this range may require updates */
-  kMipsBeq,   /* beq s,t,o [000100] s[25..21] t[20..16] o[15..0] */
-  kMipsBeqz,  /* beqz s,o [000100] s[25..21] [00000] o[15..0] */
-  kMipsBgez,  /* bgez s,o [000001] s[25..21] [00001] o[15..0] */
-  kMipsBgtz,  /* bgtz s,o [000111] s[25..21] [00000] o[15..0] */
-  kMipsBlez,  /* blez s,o [000110] s[25..21] [00000] o[15..0] */
-  kMipsBltz,  /* bltz s,o [000001] s[25..21] [00000] o[15..0] */
-  kMipsBnez,  /* bnez s,o [000101] s[25..21] [00000] o[15..0] */
-  kMipsBne,   /* bne s,t,o [000101] s[25..21] t[20..16] o[15..0] */
-  kMipsDiv,   /* div s,t [000000] s[25..21] t[20..16] [0000000000011010] */
+  kMips32BitData = kMipsFirst, // data [31..0].
+  kMipsAddiu, // addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
+  kMipsAddu,  // add d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001].
+  kMipsAnd,   // and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100].
+  kMipsAndi,  // andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0].
+  kMipsB,     // b o   [0001000000000000] o[15..0].
+  kMipsBal,   // bal o [0000010000010001] o[15..0].
+  // NOTE: the code tests the range kMipsBeq thru kMipsBne, so adding an instruction in this
+  //       range may require updates.
+  kMipsBeq,   // beq s,t,o [000100] s[25..21] t[20..16] o[15..0].
+  kMipsBeqz,  // beqz s,o [000100] s[25..21] [00000] o[15..0].
+  kMipsBgez,  // bgez s,o [000001] s[25..21] [00001] o[15..0].
+  kMipsBgtz,  // bgtz s,o [000111] s[25..21] [00000] o[15..0].
+  kMipsBlez,  // blez s,o [000110] s[25..21] [00000] o[15..0].
+  kMipsBltz,  // bltz s,o [000001] s[25..21] [00000] o[15..0].
+  kMipsBnez,  // bnez s,o [000101] s[25..21] [00000] o[15..0].
+  kMipsBne,   // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
+  kMipsDiv,   // div s,t [000000] s[25..21] t[20..16] [0000000000011010].
 #if __mips_isa_rev>=2
-  kMipsExt,   /* ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000] */
+  kMipsExt,   // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
 #endif
-  kMipsJal,   /* jal t [000011] t[25..0] */
-  kMipsJalr,  /* jalr d,s [000000] s[25..21] [00000] d[15..11]
-                  hint[10..6] [001001] */
-  kMipsJr,    /* jr s [000000] s[25..21] [0000000000] hint[10..6] [001000] */
-  kMipsLahi,  /* lui t,imm16 [00111100000] t[20..16] imm16[15..0] load addr hi */
-  kMipsLalo,  /* ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] load addr lo */
-  kMipsLui,   /* lui t,imm16 [00111100000] t[20..16] imm16[15..0] */
-  kMipsLb,    /* lb t,o(b) [100000] b[25..21] t[20..16] o[15..0] */
-  kMipsLbu,   /* lbu t,o(b) [100100] b[25..21] t[20..16] o[15..0] */
-  kMipsLh,    /* lh t,o(b) [100001] b[25..21] t[20..16] o[15..0] */
-  kMipsLhu,   /* lhu t,o(b) [100101] b[25..21] t[20..16] o[15..0] */
-  kMipsLw,    /* lw t,o(b) [100011] b[25..21] t[20..16] o[15..0] */
-  kMipsMfhi,  /* mfhi d [0000000000000000] d[15..11] [00000010000] */
-  kMipsMflo,  /* mflo d [0000000000000000] d[15..11] [00000010010] */
-  kMipsMove,  /* move d,s [000000] s[25..21] [00000] d[15..11] [00000100101] */
-  kMipsMovz,  /* movz d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000001010] */
-  kMipsMul,   /* mul d,s,t [011100] s[25..21] t[20..16] d[15..11] [00000000010] */
-  kMipsNop,   /* nop [00000000000000000000000000000000] */
-  kMipsNor,   /* nor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100111] */
-  kMipsOr,    /* or d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100101] */
-  kMipsOri,   /* ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] */
-  kMipsPref,  /* pref h,o(b) [101011] b[25..21] h[20..16] o[15..0] */
-  kMipsSb,    /* sb t,o(b) [101000] b[25..21] t[20..16] o[15..0] */
+  kMipsJal,   // jal t [000011] t[25..0].
+  kMipsJalr,  // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
+  kMipsJr,    // jr s [000000] s[25..21] [0000000000] hint[10..6] [001000].
+  kMipsLahi,  // lui t,imm16 [00111100000] t[20..16] imm16[15..0] load addr hi.
+  kMipsLalo,  // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] load addr lo.
+  kMipsLui,   // lui t,imm16 [00111100000] t[20..16] imm16[15..0].
+  kMipsLb,    // lb t,o(b) [100000] b[25..21] t[20..16] o[15..0].
+  kMipsLbu,   // lbu t,o(b) [100100] b[25..21] t[20..16] o[15..0].
+  kMipsLh,    // lh t,o(b) [100001] b[25..21] t[20..16] o[15..0].
+  kMipsLhu,   // lhu t,o(b) [100101] b[25..21] t[20..16] o[15..0].
+  kMipsLw,    // lw t,o(b) [100011] b[25..21] t[20..16] o[15..0].
+  kMipsMfhi,  // mfhi d [0000000000000000] d[15..11] [00000010000].
+  kMipsMflo,  // mflo d [0000000000000000] d[15..11] [00000010010].
+  kMipsMove,  // move d,s [000000] s[25..21] [00000] d[15..11] [00000100101].
+  kMipsMovz,  // movz d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000001010].
+  kMipsMul,   // mul d,s,t [011100] s[25..21] t[20..16] d[15..11] [00000000010].
+  kMipsNop,   // nop [00000000000000000000000000000000].
+  kMipsNor,   // nor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100111].
+  kMipsOr,    // or d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100101].
+  kMipsOri,   // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
+  kMipsPref,  // pref h,o(b) [101011] b[25..21] h[20..16] o[15..0].
+  kMipsSb,    // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
 #if __mips_isa_rev>=2
-  kMipsSeb,   /* seb d,t [01111100000] t[20..16] d[15..11] [10000100000] */
-  kMipsSeh,   /* seh d,t [01111100000] t[20..16] d[15..11] [11000100000] */
+  kMipsSeb,   // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
+  kMipsSeh,   // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
 #endif
-  kMipsSh,    /* sh t,o(b) [101001] b[25..21] t[20..16] o[15..0] */
-  kMipsSll,   /* sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000] */
-  kMipsSllv,  /* sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100] */
-  kMipsSlt,   /* slt d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101010] */
-  kMipsSlti,  /* slti t,s,imm16 [001010] s[25..21] t[20..16] imm16[15..0] */
-  kMipsSltu,  /* sltu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101011] */
-  kMipsSra,   /* sra d,s,imm5 [00000000000] t[20..16] d[15..11] imm5[10..6] [000011] */
-  kMipsSrav,  /* srav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000111] */
-  kMipsSrl,   /* srl d,t,a [00000000000] t[20..16] d[20..16] a[10..6] [000010] */
-  kMipsSrlv,  /* srlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000110] */
-  kMipsSubu,  /* subu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100011] */
-  kMipsSw,    /* sw t,o(b) [101011] b[25..21] t[20..16] o[15..0] */
-  kMipsXor,   /* xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110] */
-  kMipsXori,  /* xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0] */
-#ifdef __mips_hard_float
-  kMipsFadds, /* add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000] */
-  kMipsFsubs, /* sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001] */
-  kMipsFmuls, /* mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010] */
-  kMipsFdivs, /* div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011] */
-  kMipsFaddd, /* add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000] */
-  kMipsFsubd, /* sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001] */
-  kMipsFmuld, /* mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010] */
-  kMipsFdivd, /* div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011] */
-  kMipsFcvtsd,/* cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000] */
-  kMipsFcvtsw,/* cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000] */
-  kMipsFcvtds,/* cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001] */
-  kMipsFcvtdw,/* cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001] */
-  kMipsFcvtws,/* cvt.w.d d,s [01000110000] [00000] s[15..11] d[10..6] [100100] */
-  kMipsFcvtwd,/* cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100] */
-  kMipsFmovs, /* mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110] */
-  kMipsFmovd, /* mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110] */
-  kMipsFlwc1, /* lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0] */
-  kMipsFldc1, /* ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0] */
-  kMipsFswc1, /* swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0] */
-  kMipsFsdc1, /* sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0] */
-  kMipsMfc1,  /* mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000] */
-  kMipsMtc1,  /* mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000] */
-#endif
-  kMipsDelta, /* Psuedo for ori t, s, <label>-<label> */
-  kMipsDeltaHi, /* Pseudo for lui t, high16(<label>-<label>) */
-  kMipsDeltaLo, /* Pseudo for ori t, s, low16(<label>-<label>) */
-  kMipsCurrPC,  /* jal to .+8 to materialize pc */
-  kMipsSync,  /* sync kind [000000] [0000000000000000] s[10..6] [001111] */
-  kMipsUndefined,  /* undefined [011001xxxxxxxxxxxxxxxx] */
+  kMipsSh,    // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
+  kMipsSll,   // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
+  kMipsSllv,  // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
+  kMipsSlt,   // slt d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101010].
+  kMipsSlti,  // slti t,s,imm16 [001010] s[25..21] t[20..16] imm16[15..0].
+  kMipsSltu,  // sltu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101011].
+  kMipsSra,   // sra d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000011].
+  kMipsSrav,  // srav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000111].
+  kMipsSrl,   // srl d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000010].
+  kMipsSrlv,  // srlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000110].
+  kMipsSubu,  // subu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100011].
+  kMipsSw,    // sw t,o(b) [101011] b[25..21] t[20..16] o[15..0].
+  kMipsXor,   // xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110].
+  kMipsXori,  // xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0].
+  kMipsFadds, // add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000].
+  kMipsFsubs, // sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001].
+  kMipsFmuls, // mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010].
+  kMipsFdivs, // div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011].
+  kMipsFaddd, // add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000].
+  kMipsFsubd, // sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001].
+  kMipsFmuld, // mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010].
+  kMipsFdivd, // div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011].
+  kMipsFcvtsd,// cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000].
+  kMipsFcvtsw,// cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000].
+  kMipsFcvtds,// cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001].
+  kMipsFcvtdw,// cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001].
+  kMipsFcvtws,// cvt.w.s d,s [01000110000] [00000] s[15..11] d[10..6] [100100].
+  kMipsFcvtwd,// cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100].
+  kMipsFmovs, // mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110].
+  kMipsFmovd, // mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110].
+  kMipsFlwc1, // lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0].
+  kMipsFldc1, // ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0].
+  kMipsFswc1, // swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0].
+  kMipsFsdc1, // sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0].
+  kMipsMfc1,  // mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000].
+  kMipsMtc1,  // mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000].
+  kMipsDelta, // Pseudo for ori t, s, <label>-<label>.
+  kMipsDeltaHi, // Pseudo for lui t, high16(<label>-<label>).
+  kMipsDeltaLo, // Pseudo for ori t, s, low16(<label>-<label>).
+  kMipsCurrPC,  // jal to .+8 to materialize pc.
+  kMipsSync,    // sync kind [000000] [0000000000000000] s[10..6] [001111].
+  kMipsUndefined,  // undefined [011001xxxxxxxxxxxxxxxx].
   kMipsLast
 };
 
-/* Bit flags describing the behavior of each native opcode */
-/* Instruction assembly field_loc kind */
+// Instruction assembly field_loc kind.
 enum MipsEncodingKind {
   kFmtUnused,
   kFmtBitBlt,    /* Bit string using end/start */
@@ -415,26 +406,26 @@
   kFmtBlt5_2,    /* Same 5-bit field to 2 locations */
 };
 
-/* Struct used to define the snippet positions for each MIPS opcode */
+// Struct used to define the snippet positions for each MIPS opcode.
 struct MipsEncodingMap {
   uint32_t skeleton;
   struct {
     MipsEncodingKind kind;
-    int end;   /* end for kFmtBitBlt, 1-bit slice end for FP regs */
-    int start; /* start for kFmtBitBlt, 4-bit slice end for FP regs */
+    int end;   // end for kFmtBitBlt, 1-bit slice end for FP regs.
+    int start; // start for kFmtBitBlt, 4-bit slice end for FP regs.
   } field_loc[4];
   MipsOpCode opcode;
   uint64_t flags;
   const char *name;
   const char* fmt;
-  int size;   /* Size in bytes */
+  int size;   // Note: size is in bytes.
 };
 
 extern MipsEncodingMap EncodingMap[kMipsLast];
 
 #define IS_UIMM16(v) ((0 <= (v)) && ((v) <= 65535))
 #define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32766))
-#define IS_SIMM16_2WORD(v) ((-32764 <= (v)) && ((v) <= 32763)) /* 2 offsets must fit */
+#define IS_SIMM16_2WORD(v) ((-32764 <= (v)) && ((v) <= 32763)) // 2 offsets must fit.
 
 }  // namespace art
 
diff --git a/src/compiler/codegen/mips/target_mips.cc b/src/compiler/codegen/mips/target_mips.cc
index b9159ed..ed884b2 100644
--- a/src/compiler/codegen/mips/target_mips.cc
+++ b/src/compiler/codegen/mips/target_mips.cc
@@ -16,6 +16,7 @@
 
 #include "../../compiler_internals.h"
 #include "mips_lir.h"
+#include "codegen_mips.h"
 #include "../ralloc_util.h"
 #include "../codegen_util.h"
 
@@ -31,39 +32,37 @@
                              r_RA};
 static int core_temps[] = {r_V0, r_V1, r_A0, r_A1, r_A2, r_A3, r_T0, r_T1, r_T2,
                            r_T3, r_T4, r_T5, r_T6, r_T7, r_T8};
-#ifdef __mips_hard_float
 static int FpRegs[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7,
                        r_F8, r_F9, r_F10, r_F11, r_F12, r_F13, r_F14, r_F15};
 static int fp_temps[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7,
                          r_F8, r_F9, r_F10, r_F11, r_F12, r_F13, r_F14, r_F15};
-#endif
 
-RegLocation LocCReturn()
+RegLocation MipsCodegen::LocCReturn()
 {
   RegLocation res = MIPS_LOC_C_RETURN;
   return res;
 }
 
-RegLocation LocCReturnWide()
+RegLocation MipsCodegen::LocCReturnWide()
 {
   RegLocation res = MIPS_LOC_C_RETURN_WIDE;
   return res;
 }
 
-RegLocation LocCReturnFloat()
+RegLocation MipsCodegen::LocCReturnFloat()
 {
   RegLocation res = MIPS_LOC_C_RETURN_FLOAT;
   return res;
 }
 
-RegLocation LocCReturnDouble()
+RegLocation MipsCodegen::LocCReturnDouble()
 {
   RegLocation res = MIPS_LOC_C_RETURN_DOUBLE;
   return res;
 }
 
 // Return a target-dependent special register.
-int TargetReg(SpecialTargetRegister reg) {
+int MipsCodegen::TargetReg(SpecialTargetRegister reg) {
   int res = INVALID_REG;
   switch (reg) {
     case kSelf: res = rMIPS_SELF; break;
@@ -88,37 +87,19 @@
 }
 
 // Create a double from a pair of singles.
-int S2d(int low_reg, int high_reg)
+int MipsCodegen::S2d(int low_reg, int high_reg)
 {
   return MIPS_S2D(low_reg, high_reg);
 }
 
-// Is reg a single or double?
-bool FpReg(int reg)
-{
-  return MIPS_FPREG(reg);
-}
-
-// Is reg a single?
-bool SingleReg(int reg)
-{
-  return MIPS_SINGLEREG(reg);
-}
-
-// Is reg a double?
-bool DoubleReg(int reg)
-{
-  return MIPS_DOUBLEREG(reg);
-}
-
 // Return mask to strip off fp reg flags and bias.
-uint32_t FpRegMask()
+uint32_t MipsCodegen::FpRegMask()
 {
   return MIPS_FP_REG_MASK;
 }
 
 // True if both regs single, both core or both double.
-bool SameRegType(int reg1, int reg2)
+bool MipsCodegen::SameRegType(int reg1, int reg2)
 {
   return (MIPS_REGTYPE(reg1) == MIPS_REGTYPE(reg2));
 }
@@ -126,7 +107,7 @@
 /*
  * Decode the register id.
  */
-uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg)
+uint64_t MipsCodegen::GetRegMaskCommon(CompilationUnit* cu, int reg)
 {
   uint64_t seed;
   int shift;
@@ -143,18 +124,18 @@
   return (seed << shift);
 }
 
-uint64_t GetPCUseDefEncoding()
+uint64_t MipsCodegen::GetPCUseDefEncoding()
 {
   return ENCODE_MIPS_REG_PC;
 }
 
 
-void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir)
+void MipsCodegen::SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir)
 {
   DCHECK_EQ(cu->instruction_set, kMips);
 
   // Mips-specific resource map setup here.
-  uint64_t flags = EncodingMap[lir->opcode].flags;
+  uint64_t flags = MipsCodegen::EncodingMap[lir->opcode].flags;
 
   if (flags & REG_DEF_SP) {
     lir->def_mask |= ENCODE_MIPS_REG_SP;
@@ -182,7 +163,7 @@
  * Interpret a format string and build a string no longer than size
  * See format key in Assemble.c.
  */
-std::string BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr)
+std::string MipsCodegen::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr)
 {
   std::string buf;
   int i;
@@ -275,7 +256,7 @@
 }
 
 // FIXME: need to redo resource maps for MIPS - fix this at that time
-void DumpResourceMask(LIR *mips_lir, uint64_t mask, const char *prefix)
+void MipsCodegen::DumpResourceMask(LIR *mips_lir, uint64_t mask, const char *prefix)
 {
   char buf[256];
   buf[0] = 0;
@@ -326,7 +307,7 @@
  * machinery is in place, always spill lr.
  */
 
-void AdjustSpillMask(CompilationUnit* cu)
+void MipsCodegen::AdjustSpillMask(CompilationUnit* cu)
 {
   cu->core_spill_mask |= (1 << r_RA);
   cu->num_core_spills++;
@@ -338,12 +319,12 @@
  * include any holes in the mask.  Associate holes with
  * Dalvik register INVALID_VREG (0xFFFFU).
  */
-void MarkPreservedSingle(CompilationUnit* cu, int s_reg, int reg)
+void MipsCodegen::MarkPreservedSingle(CompilationUnit* cu, int s_reg, int reg)
 {
   LOG(FATAL) << "No support yet for promoted FP regs";
 }
 
-void FlushRegWide(CompilationUnit* cu, int reg1, int reg2)
+void MipsCodegen::FlushRegWide(CompilationUnit* cu, int reg1, int reg2)
 {
   RegisterInfo* info1 = GetRegInfo(cu, reg1);
   RegisterInfo* info2 = GetRegInfo(cu, reg2);
@@ -365,7 +346,7 @@
   }
 }
 
-void FlushReg(CompilationUnit* cu, int reg)
+void MipsCodegen::FlushReg(CompilationUnit* cu, int reg)
 {
   RegisterInfo* info = GetRegInfo(cu, reg);
   if (info->live && info->dirty) {
@@ -376,12 +357,12 @@
 }
 
 /* Give access to the target-dependent FP register encoding to common code */
-bool IsFpReg(int reg) {
+bool MipsCodegen::IsFpReg(int reg) {
   return MIPS_FPREG(reg);
 }
 
 /* Clobber all regs that might be used by an external C call */
-void ClobberCalleeSave(CompilationUnit *cu)
+void MipsCodegen::ClobberCalleeSave(CompilationUnit *cu)
 {
   Clobber(cu, r_ZERO);
   Clobber(cu, r_AT);
@@ -424,28 +405,28 @@
   Clobber(cu, r_F15);
 }
 
-RegLocation GetReturnWideAlt(CompilationUnit* cu)
+RegLocation MipsCodegen::GetReturnWideAlt(CompilationUnit* cu)
 {
   UNIMPLEMENTED(FATAL) << "No GetReturnWideAlt for MIPS";
   RegLocation res = LocCReturnWide();
   return res;
 }
 
-RegLocation GetReturnAlt(CompilationUnit* cu)
+RegLocation MipsCodegen::GetReturnAlt(CompilationUnit* cu)
 {
   UNIMPLEMENTED(FATAL) << "No GetReturnAlt for MIPS";
   RegLocation res = LocCReturn();
   return res;
 }
 
-RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg)
+RegisterInfo* MipsCodegen::GetRegInfo(CompilationUnit* cu, int reg)
 {
   return MIPS_FPREG(reg) ? &cu->reg_pool->FPRegs[reg & MIPS_FP_REG_MASK]
             : &cu->reg_pool->core_regs[reg];
 }
 
 /* To be used when explicitly managing register use */
-void LockCallTemps(CompilationUnit* cu)
+void MipsCodegen::LockCallTemps(CompilationUnit* cu)
 {
   LockTemp(cu, rMIPS_ARG0);
   LockTemp(cu, rMIPS_ARG1);
@@ -454,7 +435,7 @@
 }
 
 /* To be used when explicitly managing register use */
-void FreeCallTemps(CompilationUnit* cu)
+void MipsCodegen::FreeCallTemps(CompilationUnit* cu)
 {
   FreeTemp(cu, rMIPS_ARG0);
   FreeTemp(cu, rMIPS_ARG1);
@@ -462,13 +443,7 @@
   FreeTemp(cu, rMIPS_ARG3);
 }
 
-/* Architecture-specific initializations and checks go here */
-bool ArchVariantInit(void)
-{
-  return true;
-}
-
-void GenMemBarrier(CompilationUnit *cu, MemBarrierKind barrier_kind)
+void MipsCodegen::GenMemBarrier(CompilationUnit *cu, MemBarrierKind barrier_kind)
 {
 #if ANDROID_SMP != 0
   NewLIR1(cu, kMipsSync, 0 /* Only stype currently supported */);
@@ -479,21 +454,19 @@
  * Alloc a pair of core registers, or a double.  Low reg in low byte,
  * high reg in next byte.
  */
-int AllocTypedTempPair(CompilationUnit *cu, bool fp_hint,
+int MipsCodegen::AllocTypedTempPair(CompilationUnit *cu, bool fp_hint,
                   int reg_class)
 {
   int high_reg;
   int low_reg;
   int res = 0;
 
-#ifdef __mips_hard_float
   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
     low_reg = AllocTempDouble(cu);
     high_reg = low_reg + 1;
     res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
     return res;
   }
-#endif
 
   low_reg = AllocTemp(cu);
   high_reg = AllocTemp(cu);
@@ -501,29 +474,22 @@
   return res;
 }
 
-int AllocTypedTemp(CompilationUnit *cu, bool fp_hint, int reg_class)
+int MipsCodegen::AllocTypedTemp(CompilationUnit *cu, bool fp_hint, int reg_class)
 {
-#ifdef __mips_hard_float
   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg))
 {
     return AllocTempFloat(cu);
 }
-#endif
   return AllocTemp(cu);
 }
 
-void CompilerInitializeRegAlloc(CompilationUnit* cu)
+void MipsCodegen::CompilerInitializeRegAlloc(CompilationUnit* cu)
 {
   int num_regs = sizeof(core_regs)/sizeof(*core_regs);
   int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
   int num_temps = sizeof(core_temps)/sizeof(*core_temps);
-#ifdef __mips_hard_float
   int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
   int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
-#else
-  int num_fp_regs = 0;
-  int num_fp_temps = 0;
-#endif
   RegisterPool *pool =
       static_cast<RegisterPool*>(NewMem(cu, sizeof(*pool), true, kAllocRegAlloc));
   cu->reg_pool = pool;
@@ -568,8 +534,7 @@
   }
 }
 
-void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep,
-           RegLocation rl_free)
+void MipsCodegen::FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep, RegLocation rl_free)
 {
   if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
     (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
@@ -584,13 +549,13 @@
  * ensure that all branch instructions can be restarted if
  * there is a trap in the shadow.  Allocate a temp register.
  */
-int LoadHelper(CompilationUnit* cu, int offset)
+int MipsCodegen::LoadHelper(CompilationUnit* cu, int offset)
 {
   LoadWordDisp(cu, rMIPS_SELF, offset, r_T9);
   return r_T9;
 }
 
-void SpillCoreRegs(CompilationUnit* cu)
+void MipsCodegen::SpillCoreRegs(CompilationUnit* cu)
 {
   if (cu->num_core_spills == 0) {
     return;
@@ -606,7 +571,7 @@
   }
 }
 
-void UnSpillCoreRegs(CompilationUnit* cu)
+void MipsCodegen::UnSpillCoreRegs(CompilationUnit* cu)
 {
   if (cu->num_core_spills == 0) {
     return;
@@ -622,39 +587,38 @@
   OpRegImm(cu, kOpAdd, rMIPS_SP, cu->frame_size);
 }
 
-bool BranchUnconditional(LIR* lir)
+bool MipsCodegen::IsUnconditionalBranch(LIR* lir)
 {
   return (lir->opcode == kMipsB);
 }
 
 /* Common initialization routine for an architecture family */
-bool ArchInit()
+bool InitMipsCodegen(CompilationUnit* cu)
 {
-  int i;
-
-  for (i = 0; i < kMipsLast; i++) {
-    if (EncodingMap[i].opcode != i) {
-      LOG(FATAL) << "Encoding order for " << EncodingMap[i].name <<
-         " is wrong: expecting " << i << ", seeing " << static_cast<int>(EncodingMap[i].opcode);
+  cu->cg.reset(new MipsCodegen());
+  for (int i = 0; i < kMipsLast; i++) {
+    if (MipsCodegen::EncodingMap[i].opcode != i) {
+      LOG(FATAL) << "Encoding order for " << MipsCodegen::EncodingMap[i].name
+                 << " is wrong: expecting " << i << ", seeing "
+                 << static_cast<int>(MipsCodegen::EncodingMap[i].opcode);
     }
   }
-
-  return ArchVariantInit();
+  return true;
 }
 
-uint64_t GetTargetInstFlags(int opcode)
+uint64_t MipsCodegen::GetTargetInstFlags(int opcode)
 {
-  return EncodingMap[opcode].flags;
+  return MipsCodegen::EncodingMap[opcode].flags;
 }
 
-const char* GetTargetInstName(int opcode)
+const char* MipsCodegen::GetTargetInstName(int opcode)
 {
-  return EncodingMap[opcode].name;
+  return MipsCodegen::EncodingMap[opcode].name;
 }
 
-const char* GetTargetInstFmt(int opcode)
+const char* MipsCodegen::GetTargetInstFmt(int opcode)
 {
-  return EncodingMap[opcode].fmt;
+  return MipsCodegen::EncodingMap[opcode].fmt;
 }
 
 } // namespace art
diff --git a/src/compiler/codegen/mips/utility_mips.cc b/src/compiler/codegen/mips/utility_mips.cc
index 168b462..44d75d1 100644
--- a/src/compiler/codegen/mips/utility_mips.cc
+++ b/src/compiler/codegen/mips/utility_mips.cc
@@ -15,23 +15,14 @@
  */
 
 #include "mips_lir.h"
+#include "codegen_mips.h"
 #include "../codegen_util.h"
 #include "../ralloc_util.h"
 
 namespace art {
 
 /* This file contains codegen for the MIPS32 ISA. */
-
-void GenBarrier(CompilationUnit *cu);
-void LoadPair(CompilationUnit *cu, int base, int low_reg, int high_reg);
-LIR *LoadWordDisp(CompilationUnit *cu, int rBase, int displacement,
-                      int r_dest);
-LIR *StoreWordDisp(CompilationUnit *cu, int rBase,
-                       int displacement, int r_src);
-LIR *LoadConstant(CompilationUnit *cu, int r_dest, int value);
-
-#ifdef __mips_hard_float
-LIR *FpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
+LIR* MipsCodegen::OpFpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
 {
   int opcode;
   /* must be both DOUBLE or both not DOUBLE */
@@ -60,7 +51,6 @@
   }
   return res;
 }
-#endif
 
 /*
  * Load a immediate using a shortcut if possible; otherwise
@@ -71,18 +61,16 @@
  * 1) r_dest is freshly returned from AllocTemp or
  * 2) The codegen is under fixed register usage
  */
-LIR *LoadConstantNoClobber(CompilationUnit *cu, int r_dest, int value)
+LIR* MipsCodegen::LoadConstantNoClobber(CompilationUnit *cu, int r_dest, int value)
 {
   LIR *res;
 
-#ifdef __mips_hard_float
   int r_dest_save = r_dest;
   int is_fp_reg = MIPS_FPREG(r_dest);
   if (is_fp_reg) {
     DCHECK(MIPS_SINGLEREG(r_dest));
     r_dest = AllocTemp(cu);
   }
-#endif
 
   /* See if the value can be constructed cheaply */
   if (value == 0) {
@@ -97,25 +85,22 @@
       NewLIR3(cu, kMipsOri, r_dest, r_dest, value);
   }
 
-#ifdef __mips_hard_float
   if (is_fp_reg) {
     NewLIR2(cu, kMipsMtc1, r_dest, r_dest_save);
     FreeTemp(cu, r_dest);
   }
-#endif
 
   return res;
 }
 
-LIR *OpBranchUnconditional(CompilationUnit *cu, OpKind op)
+LIR* MipsCodegen::OpUnconditionalBranch(CompilationUnit* cu, LIR* target)
 {
-  DCHECK_EQ(op, kOpUncondBr);
-  return NewLIR1(cu, kMipsB, 0 /* offset to be patched */ );
+  LIR* res = NewLIR1(cu, kMipsB, 0 /* offset to be patched during assembly */ );
+  res->target = target;
+  return res;
 }
 
-LIR *LoadMultiple(CompilationUnit *cu, int rBase, int r_mask);
-
-LIR *OpReg(CompilationUnit *cu, OpKind op, int r_dest_src)
+LIR* MipsCodegen::OpReg(CompilationUnit *cu, OpKind op, int r_dest_src)
 {
   MipsOpCode opcode = kMipsNop;
   switch (op) {
@@ -131,9 +116,7 @@
   return NewLIR2(cu, opcode, r_RA, r_dest_src);
 }
 
-LIR *OpRegRegImm(CompilationUnit *cu, OpKind op, int r_dest,
-           int r_src1, int value);
-LIR *OpRegImm(CompilationUnit *cu, OpKind op, int r_dest_src1,
+LIR* MipsCodegen::OpRegImm(CompilationUnit *cu, OpKind op, int r_dest_src1,
           int value)
 {
   LIR *res;
@@ -165,8 +148,7 @@
   return res;
 }
 
-LIR *OpRegRegReg(CompilationUnit *cu, OpKind op, int r_dest,
-                 int r_src1, int r_src2)
+LIR* MipsCodegen::OpRegRegReg(CompilationUnit *cu, OpKind op, int r_dest, int r_src1, int r_src2)
 {
   MipsOpCode opcode = kMipsNop;
   switch (op) {
@@ -208,8 +190,7 @@
   return NewLIR3(cu, opcode, r_dest, r_src1, r_src2);
 }
 
-LIR *OpRegRegImm(CompilationUnit *cu, OpKind op, int r_dest,
-                 int r_src1, int value)
+LIR* MipsCodegen::OpRegRegImm(CompilationUnit *cu, OpKind op, int r_dest, int r_src1, int value)
 {
   LIR *res;
   MipsOpCode opcode = kMipsNop;
@@ -298,7 +279,7 @@
   return res;
 }
 
-LIR *OpRegReg(CompilationUnit *cu, OpKind op, int r_dest_src1, int r_src2)
+LIR* MipsCodegen::OpRegReg(CompilationUnit *cu, OpKind op, int r_dest_src1, int r_src2)
 {
   MipsOpCode opcode = kMipsNop;
   LIR *res;
@@ -342,8 +323,8 @@
   return NewLIR2(cu, opcode, r_dest_src1, r_src2);
 }
 
-LIR *LoadConstantValueWide(CompilationUnit *cu, int r_dest_lo,
-                           int r_dest_hi, int val_lo, int val_hi)
+LIR* MipsCodegen::LoadConstantValueWide(CompilationUnit *cu, int r_dest_lo, int r_dest_hi,
+                                        int val_lo, int val_hi)
 {
   LIR *res;
   res = LoadConstantNoClobber(cu, r_dest_lo, val_lo);
@@ -352,15 +333,14 @@
 }
 
 /* Load value from base + scaled index. */
-LIR *LoadBaseIndexed(CompilationUnit *cu, int rBase,
-                     int r_index, int r_dest, int scale, OpSize size)
+LIR* MipsCodegen::LoadBaseIndexed(CompilationUnit *cu, int rBase, int r_index, int r_dest,
+                                  int scale, OpSize size)
 {
   LIR *first = NULL;
   LIR *res;
   MipsOpCode opcode = kMipsNop;
   int t_reg = AllocTemp(cu);
 
-#ifdef __mips_hard_float
   if (MIPS_FPREG(r_dest)) {
     DCHECK(MIPS_SINGLEREG(r_dest));
     DCHECK((size == kWord) || (size == kSingle));
@@ -369,7 +349,6 @@
     if (size == kSingle)
       size = kWord;
   }
-#endif
 
   if (!scale) {
     first = NewLIR3(cu, kMipsAddu, t_reg , rBase, r_index);
@@ -379,11 +358,9 @@
   }
 
   switch (size) {
-#ifdef __mips_hard_float
     case kSingle:
       opcode = kMipsFlwc1;
       break;
-#endif
     case kWord:
       opcode = kMipsLw;
       break;
@@ -409,15 +386,14 @@
 }
 
 /* store value base base + scaled index. */
-LIR *StoreBaseIndexed(CompilationUnit *cu, int rBase,
-                      int r_index, int r_src, int scale, OpSize size)
+LIR* MipsCodegen::StoreBaseIndexed(CompilationUnit *cu, int rBase, int r_index, int r_src,
+                                   int scale, OpSize size)
 {
   LIR *first = NULL;
   MipsOpCode opcode = kMipsNop;
   int r_new_index = r_index;
   int t_reg = AllocTemp(cu);
 
-#ifdef __mips_hard_float
   if (MIPS_FPREG(r_src)) {
     DCHECK(MIPS_SINGLEREG(r_src));
     DCHECK((size == kWord) || (size == kSingle));
@@ -426,7 +402,6 @@
     if (size == kSingle)
       size = kWord;
   }
-#endif
 
   if (!scale) {
     first = NewLIR3(cu, kMipsAddu, t_reg , rBase, r_index);
@@ -436,11 +411,9 @@
   }
 
   switch (size) {
-#ifdef __mips_hard_float
     case kSingle:
       opcode = kMipsFswc1;
       break;
-#endif
     case kWord:
       opcode = kMipsSw;
       break;
@@ -460,53 +433,8 @@
   return first;
 }
 
-LIR *LoadMultiple(CompilationUnit *cu, int rBase, int r_mask)
-{
-  int i;
-  int load_cnt = 0;
-  LIR *res = NULL ;
-  GenBarrier(cu);
-
-  for (i = 0; i < 8; i++, r_mask >>= 1) {
-    if (r_mask & 0x1) { /* map r0 to MIPS r_A0 */
-      NewLIR3(cu, kMipsLw, i+r_A0, load_cnt*4, rBase);
-      load_cnt++;
-    }
-  }
-
-  if (load_cnt) {/* increment after */
-    NewLIR3(cu, kMipsAddiu, rBase, rBase, load_cnt*4);
-  }
-
-  GenBarrier(cu);
-  return res; /* NULL always returned which should be ok since no callers use it */
-}
-
-LIR *StoreMultiple(CompilationUnit *cu, int rBase, int r_mask)
-{
-  int i;
-  int store_cnt = 0;
-  LIR *res = NULL ;
-  GenBarrier(cu);
-
-  for (i = 0; i < 8; i++, r_mask >>= 1) {
-    if (r_mask & 0x1) { /* map r0 to MIPS r_A0 */
-      NewLIR3(cu, kMipsSw, i+r_A0, store_cnt*4, rBase);
-      store_cnt++;
-    }
-  }
-
-  if (store_cnt) { /* increment after */
-    NewLIR3(cu, kMipsAddiu, rBase, rBase, store_cnt*4);
-  }
-
-  GenBarrier(cu);
-  return res; /* NULL always returned which should be ok since no callers use it */
-}
-
-LIR *LoadBaseDispBody(CompilationUnit *cu, int rBase,
-                      int displacement, int r_dest, int r_dest_hi,
-                      OpSize size, int s_reg)
+LIR* MipsCodegen::LoadBaseDispBody(CompilationUnit *cu, int rBase, int displacement, int r_dest,
+                                   int r_dest_hi, OpSize size, int s_reg)
 /*
  * Load value from base + displacement.  Optionally perform null check
  * on base (which must have an associated s_reg and MIR).  If not
@@ -528,7 +456,6 @@
     case kDouble:
       pair = true;
       opcode = kMipsLw;
-#ifdef __mips_hard_float
       if (MIPS_FPREG(r_dest)) {
         opcode = kMipsFlwc1;
         if (MIPS_DOUBLEREG(r_dest)) {
@@ -539,19 +466,16 @@
         }
         r_dest_hi = r_dest + 1;
       }
-#endif
       short_form = IS_SIMM16_2WORD(displacement);
       DCHECK_EQ((displacement & 0x3), 0);
       break;
     case kWord:
     case kSingle:
       opcode = kMipsLw;
-#ifdef __mips_hard_float
       if (MIPS_FPREG(r_dest)) {
         opcode = kMipsFlwc1;
         DCHECK(MIPS_SINGLEREG(r_dest));
       }
-#endif
       DCHECK_EQ((displacement & 0x3), 0);
       break;
     case kUnsignedHalf:
@@ -598,33 +522,31 @@
   }
 
   if (rBase == rMIPS_SP) {
-    AnnotateDalvikRegAccess(load,
-                            (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+    AnnotateDalvikRegAccess(cu, load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                             true /* is_load */, pair /* is64bit */);
     if (pair) {
-      AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
+      AnnotateDalvikRegAccess(cu, load2, (displacement + HIWORD_OFFSET) >> 2,
                               true /* is_load */, pair /* is64bit */);
     }
   }
   return load;
 }
 
-LIR *LoadBaseDisp(CompilationUnit *cu, int rBase,
-                  int displacement, int r_dest, OpSize size, int s_reg)
+LIR* MipsCodegen::LoadBaseDisp(CompilationUnit *cu, int rBase, int displacement, int r_dest,
+                               OpSize size, int s_reg)
 {
   return LoadBaseDispBody(cu, rBase, displacement, r_dest, -1,
                           size, s_reg);
 }
 
-LIR *LoadBaseDispWide(CompilationUnit *cu, int rBase,
-                      int displacement, int r_dest_lo, int r_dest_hi, int s_reg)
+LIR* MipsCodegen::LoadBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
+                                   int r_dest_lo, int r_dest_hi, int s_reg)
 {
-  return LoadBaseDispBody(cu, rBase, displacement, r_dest_lo, r_dest_hi,
-                          kLong, s_reg);
+  return LoadBaseDispBody(cu, rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg);
 }
 
-LIR *StoreBaseDispBody(CompilationUnit *cu, int rBase,
-                       int displacement, int r_src, int r_src_hi, OpSize size)
+LIR* MipsCodegen::StoreBaseDispBody(CompilationUnit *cu, int rBase, int displacement,
+                                    int r_src, int r_src_hi, OpSize size)
 {
   LIR *res;
   LIR *store = NULL;
@@ -638,7 +560,6 @@
     case kDouble:
       pair = true;
       opcode = kMipsSw;
-#ifdef __mips_hard_float
       if (MIPS_FPREG(r_src)) {
         opcode = kMipsFswc1;
         if (MIPS_DOUBLEREG(r_src)) {
@@ -649,19 +570,16 @@
         }
         r_src_hi = r_src + 1;
       }
-#endif
       short_form = IS_SIMM16_2WORD(displacement);
       DCHECK_EQ((displacement & 0x3), 0);
       break;
     case kWord:
     case kSingle:
       opcode = kMipsSw;
-#ifdef __mips_hard_float
       if (MIPS_FPREG(r_src)) {
         opcode = kMipsFswc1;
         DCHECK(MIPS_SINGLEREG(r_src));
       }
-#endif
       DCHECK_EQ((displacement & 0x3), 0);
       break;
     case kUnsignedHalf:
@@ -699,10 +617,10 @@
   }
 
   if (rBase == rMIPS_SP) {
-    AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0))
-                            >> 2, false /* is_load */, pair /* is64bit */);
+    AnnotateDalvikRegAccess(cu, store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+                            false /* is_load */, pair /* is64bit */);
     if (pair) {
-      AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
+      AnnotateDalvikRegAccess(cu, store2, (displacement + HIWORD_OFFSET) >> 2,
                               false /* is_load */, pair /* is64bit */);
     }
   }
@@ -710,37 +628,37 @@
   return res;
 }
 
-LIR *StoreBaseDisp(CompilationUnit *cu, int rBase,
-                   int displacement, int r_src, OpSize size)
+LIR* MipsCodegen::StoreBaseDisp(CompilationUnit *cu, int rBase, int displacement, int r_src,
+                                OpSize size)
 {
   return StoreBaseDispBody(cu, rBase, displacement, r_src, -1, size);
 }
 
-LIR *StoreBaseDispWide(CompilationUnit *cu, int rBase,
-                       int displacement, int r_src_lo, int r_src_hi)
+LIR* MipsCodegen::StoreBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
+                                    int r_src_lo, int r_src_hi)
 {
   return StoreBaseDispBody(cu, rBase, displacement, r_src_lo, r_src_hi, kLong);
 }
 
-void LoadPair(CompilationUnit *cu, int base, int low_reg, int high_reg)
+void MipsCodegen::LoadPair(CompilationUnit *cu, int base, int low_reg, int high_reg)
 {
   LoadWordDisp(cu, base, LOWORD_OFFSET , low_reg);
   LoadWordDisp(cu, base, HIWORD_OFFSET , high_reg);
 }
 
-LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset)
+LIR* MipsCodegen::OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset)
 {
   LOG(FATAL) << "Unexpected use of OpThreadMem for MIPS";
   return NULL;
 }
 
-LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp)
+LIR* MipsCodegen::OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp)
 {
   LOG(FATAL) << "Unexpected use of OpMem for MIPS";
   return NULL;
 }
 
-LIR* StoreBaseIndexedDisp(CompilationUnit *cu,
+LIR* MipsCodegen::StoreBaseIndexedDisp(CompilationUnit *cu,
                           int rBase, int r_index, int scale, int displacement,
                           int r_src, int r_src_hi,
                           OpSize size, int s_reg)
@@ -749,14 +667,14 @@
   return NULL;
 }
 
-LIR* OpRegMem(CompilationUnit *cu, OpKind op, int r_dest, int rBase,
+LIR* MipsCodegen::OpRegMem(CompilationUnit *cu, OpKind op, int r_dest, int rBase,
               int offset)
 {
   LOG(FATAL) << "Unexpected use of OpRegMem for MIPS";
   return NULL;
 }
 
-LIR* LoadBaseIndexedDisp(CompilationUnit *cu,
+LIR* MipsCodegen::LoadBaseIndexedDisp(CompilationUnit *cu,
                          int rBase, int r_index, int scale, int displacement,
                          int r_dest, int r_dest_hi,
                          OpSize size, int s_reg)
@@ -765,7 +683,7 @@
   return NULL;
 }
 
-LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target)
+LIR* MipsCodegen::OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target)
 {
   LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS";
   return NULL;
diff --git a/src/compiler/codegen/method_bitcode.cc b/src/compiler/codegen/mir_to_gbc.cc
similarity index 95%
rename from src/compiler/codegen/method_bitcode.cc
rename to src/compiler/codegen/mir_to_gbc.cc
index 7a9446f..b5ad024 100644
--- a/src/compiler/codegen/method_bitcode.cc
+++ b/src/compiler/codegen/mir_to_gbc.cc
@@ -28,7 +28,6 @@
 #include <llvm/Support/InstIterator.h>
 
 #include "../compiler_internals.h"
-#include "method_codegen_driver.h"
 #include "local_optimizations.h"
 #include "codegen_util.h"
 #include "ralloc_util.h"
@@ -39,10 +38,6 @@
 static const char kCatchBlock = 'C';
 
 namespace art {
-// TODO: unify bad_loc
-const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
-                            INVALID_REG, INVALID_REG, INVALID_SREG,
-                            INVALID_SREG};
 static RegLocation GetLoc(CompilationUnit* cu, llvm::Value* val);
 
 static llvm::BasicBlock* GetLLVMBlock(CompilationUnit* cu, int id)
@@ -594,7 +589,8 @@
 static void ConvertInvoke(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
                           InvokeType invoke_type, bool is_range, bool is_filled_new_array)
 {
-  CallInfo* info = NewMemCallInfo(cu, bb, mir, invoke_type, is_range);
+  Codegen* cg = cu->cg.get();
+  CallInfo* info = cg->NewMemCallInfo(cu, bb, mir, invoke_type, is_range);
   llvm::SmallVector<llvm::Value*, 10> args;
   // Insert the invoke_type
   args.push_back(cu->irb->getInt32(static_cast<int>(invoke_type)));
@@ -852,7 +848,7 @@
 {
   bool res = false;   // Assume success
   RegLocation rl_src[3];
-  RegLocation rl_dest = bad_loc;
+  RegLocation rl_dest = GetBadLoc();
   Instruction::Code opcode = mir->dalvikInsn.opcode;
   int op_val = opcode;
   uint32_t vB = mir->dalvikInsn.vB;
@@ -873,7 +869,7 @@
   int next_sreg = 0;
   int next_loc = 0;
   int attrs = oat_data_flow_attributes[opcode];
-  rl_src[0] = rl_src[1] = rl_src[2] = bad_loc;
+  rl_src[0] = rl_src[1] = rl_src[2] = GetBadLoc();
   if (attrs & DF_UA) {
     if (attrs & DF_A_WIDE) {
       rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
@@ -2258,6 +2254,7 @@
 
 static void CvtBinFPOp(CompilationUnit* cu, OpKind op, llvm::Instruction* inst)
 {
+  Codegen* cg = cu->cg.get();
   RegLocation rl_dest = GetLoc(cu, inst);
   /*
    * Normally, we won't ever generate an FP operation with an immediate
@@ -2271,9 +2268,9 @@
   if ((op1C != NULL) && (op == kOpSub)) {
     RegLocation rl_src = GetLoc(cu, inst->getOperand(1));
     if (rl_dest.wide) {
-      GenArithOpDouble(cu, Instruction::NEG_DOUBLE, rl_dest, rl_src, rl_src);
+      cg->GenArithOpDouble(cu, Instruction::NEG_DOUBLE, rl_dest, rl_src, rl_src);
     } else {
-      GenArithOpFloat(cu, Instruction::NEG_FLOAT, rl_dest, rl_src, rl_src);
+      cg->GenArithOpFloat(cu, Instruction::NEG_FLOAT, rl_dest, rl_src, rl_src);
     }
   } else {
     DCHECK(op1C == NULL);
@@ -2281,9 +2278,9 @@
     RegLocation rl_src2 = GetLoc(cu, inst->getOperand(1));
     Instruction::Code dalvik_op = GetDalvikFPOpcode(op, false, rl_dest.wide);
     if (rl_dest.wide) {
-      GenArithOpDouble(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
+      cg->GenArithOpDouble(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
     } else {
-      GenArithOpFloat(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
+      cg->GenArithOpFloat(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
     }
   }
 }
@@ -2291,13 +2288,15 @@
 static void CvtIntNarrowing(CompilationUnit* cu, llvm::Instruction* inst,
                      Instruction::Code opcode)
 {
+  Codegen* cg = cu->cg.get();
   RegLocation rl_dest = GetLoc(cu, inst);
   RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
-  GenIntNarrowing(cu, opcode, rl_dest, rl_src);
+  cg->GenIntNarrowing(cu, opcode, rl_dest, rl_src);
 }
 
 static void CvtIntToFP(CompilationUnit* cu, llvm::Instruction* inst)
 {
+  Codegen* cg = cu->cg.get();
   RegLocation rl_dest = GetLoc(cu, inst);
   RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
   Instruction::Code opcode;
@@ -2314,11 +2313,12 @@
       opcode = Instruction::INT_TO_FLOAT;
     }
   }
-  GenConversion(cu, opcode, rl_dest, rl_src);
+  cg->GenConversion(cu, opcode, rl_dest, rl_src);
 }
 
 static void CvtFPToInt(CompilationUnit* cu, llvm::CallInst* call_inst)
 {
+  Codegen* cg = cu->cg.get();
   RegLocation rl_dest = GetLoc(cu, call_inst);
   RegLocation rl_src = GetLoc(cu, call_inst->getOperand(0));
   Instruction::Code opcode;
@@ -2335,35 +2335,39 @@
       opcode = Instruction::FLOAT_TO_INT;
     }
   }
-  GenConversion(cu, opcode, rl_dest, rl_src);
+  cg->GenConversion(cu, opcode, rl_dest, rl_src);
 }
 
 static void CvtFloatToDouble(CompilationUnit* cu, llvm::Instruction* inst)
 {
+  Codegen* cg = cu->cg.get();
   RegLocation rl_dest = GetLoc(cu, inst);
   RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
-  GenConversion(cu, Instruction::FLOAT_TO_DOUBLE, rl_dest, rl_src);
+  cg->GenConversion(cu, Instruction::FLOAT_TO_DOUBLE, rl_dest, rl_src);
 }
 
 static void CvtTrunc(CompilationUnit* cu, llvm::Instruction* inst)
 {
+  Codegen* cg = cu->cg.get();
   RegLocation rl_dest = GetLoc(cu, inst);
   RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
   rl_src = UpdateLocWide(cu, rl_src);
   rl_src = WideToNarrow(cu, rl_src);
-  StoreValue(cu, rl_dest, rl_src);
+  cg->StoreValue(cu, rl_dest, rl_src);
 }
 
 static void CvtDoubleToFloat(CompilationUnit* cu, llvm::Instruction* inst)
 {
+  Codegen* cg = cu->cg.get();
   RegLocation rl_dest = GetLoc(cu, inst);
   RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
-  GenConversion(cu, Instruction::DOUBLE_TO_FLOAT, rl_dest, rl_src);
+  cg->GenConversion(cu, Instruction::DOUBLE_TO_FLOAT, rl_dest, rl_src);
 }
 
 
 static void CvtIntExt(CompilationUnit* cu, llvm::Instruction* inst, bool is_signed)
 {
+  Codegen* cg = cu->cg.get();
   // TODO: evaluate src/tgt types and add general support for more than int to long
   RegLocation rl_dest = GetLoc(cu, inst);
   RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
@@ -2373,20 +2377,21 @@
   DCHECK(!rl_src.fp);
   RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
   if (rl_src.location == kLocPhysReg) {
-    OpRegCopy(cu, rl_result.low_reg, rl_src.low_reg);
+    cg->OpRegCopy(cu, rl_result.low_reg, rl_src.low_reg);
   } else {
-    LoadValueDirect(cu, rl_src, rl_result.low_reg);
+    cg->LoadValueDirect(cu, rl_src, rl_result.low_reg);
   }
   if (is_signed) {
-    OpRegRegImm(cu, kOpAsr, rl_result.high_reg, rl_result.low_reg, 31);
+    cg->OpRegRegImm(cu, kOpAsr, rl_result.high_reg, rl_result.low_reg, 31);
   } else {
-    LoadConstant(cu, rl_result.high_reg, 0);
+    cg->LoadConstant(cu, rl_result.high_reg, 0);
   }
-  StoreValueWide(cu, rl_dest, rl_result);
+  cg->StoreValueWide(cu, rl_dest, rl_result);
 }
 
 static void CvtBinOp(CompilationUnit* cu, OpKind op, llvm::Instruction* inst)
 {
+  Codegen* cg = cu->cg.get();
   RegLocation rl_dest = GetLoc(cu, inst);
   llvm::Value* lhs = inst->getOperand(0);
   // Special-case RSUB/NEG
@@ -2395,9 +2400,9 @@
     RegLocation rl_src1 = GetLoc(cu, inst->getOperand(1));
     if (rl_src1.wide) {
       DCHECK_EQ(lhs_imm->getSExtValue(), 0);
-      GenArithOpLong(cu, Instruction::NEG_LONG, rl_dest, rl_src1, rl_src1);
+      cg->GenArithOpLong(cu, Instruction::NEG_LONG, rl_dest, rl_src1, rl_src1);
     } else {
-      GenArithOpIntLit(cu, Instruction::RSUB_INT, rl_dest, rl_src1,
+      cg->GenArithOpIntLit(cu, Instruction::RSUB_INT, rl_dest, rl_src1,
                        lhs_imm->getSExtValue());
     }
     return;
@@ -2408,7 +2413,7 @@
   llvm::ConstantInt* const_rhs = llvm::dyn_cast<llvm::ConstantInt>(rhs);
   if (!rl_dest.wide && (const_rhs != NULL)) {
     Instruction::Code dalvik_op = GetDalvikOpcode(op, true, false);
-    GenArithOpIntLit(cu, dalvik_op, rl_dest, rl_src1, const_rhs->getSExtValue());
+    cg->GenArithOpIntLit(cu, dalvik_op, rl_dest, rl_src1, const_rhs->getSExtValue());
   } else {
     Instruction::Code dalvik_op = GetDalvikOpcode(op, false, rl_dest.wide);
     RegLocation rl_src2;
@@ -2422,39 +2427,41 @@
       rl_src2 = GetLoc(cu, rhs);
     }
     if (rl_dest.wide) {
-      GenArithOpLong(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
+      cg->GenArithOpLong(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
     } else {
-      GenArithOpInt(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
+      cg->GenArithOpInt(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
     }
   }
 }
 
 static void CvtShiftOp(CompilationUnit* cu, Instruction::Code opcode, llvm::CallInst* call_inst)
 {
+  Codegen* cg = cu->cg.get();
   DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
   RegLocation rl_dest = GetLoc(cu, call_inst);
   RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(0));
   llvm::Value* rhs = call_inst->getArgOperand(1);
   if (llvm::ConstantInt* src2 = llvm::dyn_cast<llvm::ConstantInt>(rhs)) {
     DCHECK(!rl_dest.wide);
-    GenArithOpIntLit(cu, opcode, rl_dest, rl_src, src2->getSExtValue());
+    cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src, src2->getSExtValue());
   } else {
     RegLocation rl_shift = GetLoc(cu, rhs);
     if (call_inst->getType() == cu->irb->getInt64Ty()) {
-      GenShiftOpLong(cu, opcode, rl_dest, rl_src, rl_shift);
+      cg->GenShiftOpLong(cu, opcode, rl_dest, rl_src, rl_shift);
     } else {
-      GenArithOpInt(cu, opcode, rl_dest, rl_src, rl_shift);
+      cg->GenArithOpInt(cu, opcode, rl_dest, rl_src, rl_shift);
     }
   }
 }
 
 static void CvtBr(CompilationUnit* cu, llvm::Instruction* inst)
 {
+  Codegen* cg = cu->cg.get();
   llvm::BranchInst* br_inst = llvm::dyn_cast<llvm::BranchInst>(inst);
   DCHECK(br_inst != NULL);
   DCHECK(br_inst->isUnconditional());  // May change - but this is all we use now
   llvm::BasicBlock* target_bb = br_inst->getSuccessor(0);
-  OpUnconditionalBranch(cu, cu->block_to_label_map.Get(target_bb));
+  cg->OpUnconditionalBranch(cu, cu->block_to_label_map.Get(target_bb));
 }
 
 static void CvtPhi(CompilationUnit* cu, llvm::Instruction* inst)
@@ -2464,17 +2471,18 @@
 
 static void CvtRet(CompilationUnit* cu, llvm::Instruction* inst)
 {
+  Codegen* cg = cu->cg.get();
   llvm::ReturnInst* ret_inst = llvm::dyn_cast<llvm::ReturnInst>(inst);
   llvm::Value* ret_val = ret_inst->getReturnValue();
   if (ret_val != NULL) {
     RegLocation rl_src = GetLoc(cu, ret_val);
     if (rl_src.wide) {
-      StoreValueWide(cu, GetReturnWide(cu, rl_src.fp), rl_src);
+      cg->StoreValueWide(cu, GetReturnWide(cu, rl_src.fp), rl_src);
     } else {
-      StoreValue(cu, GetReturn(cu, rl_src.fp), rl_src);
+      cg->StoreValue(cu, GetReturn(cu, rl_src.fp), rl_src);
     }
   }
-  GenExitSequence(cu);
+  cg->GenExitSequence(cu);
 }
 
 static ConditionCode GetCond(llvm::ICmpInst::Predicate llvm_cond)
@@ -2494,13 +2502,14 @@
 
 static void CvtICmp(CompilationUnit* cu, llvm::Instruction* inst)
 {
-  // GenCmpLong(cu, rl_dest, rl_src1, rl_src2)
+  // cg->GenCmpLong(cu, rl_dest, rl_src1, rl_src2)
   UNIMPLEMENTED(FATAL);
 }
 
 static void CvtICmpBr(CompilationUnit* cu, llvm::Instruction* inst,
                llvm::BranchInst* br_inst)
 {
+  Codegen* cg = cu->cg.get();
   // Get targets
   llvm::BasicBlock* taken_bb = br_inst->getSuccessor(0);
   LIR* taken = cu->block_to_label_map.Get(taken_bb);
@@ -2513,7 +2522,7 @@
   // Not expecting a constant as 1st operand
   DCHECK(llvm::dyn_cast<llvm::ConstantInt>(lhs) == NULL);
   RegLocation rl_src1 = GetLoc(cu, inst->getOperand(0));
-  rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
+  rl_src1 = cg->LoadValue(cu, rl_src1, kCoreReg);
   llvm::Value* rhs = inst->getOperand(1);
   if (cu->instruction_set == kMips) {
     // Compare and branch in one shot
@@ -2522,36 +2531,38 @@
   //Compare, then branch
   // TODO: handle fused CMP_LONG/IF_xxZ case
   if (llvm::ConstantInt* src2 = llvm::dyn_cast<llvm::ConstantInt>(rhs)) {
-    OpRegImm(cu, kOpCmp, rl_src1.low_reg, src2->getSExtValue());
+    cg->OpRegImm(cu, kOpCmp, rl_src1.low_reg, src2->getSExtValue());
   } else if (llvm::dyn_cast<llvm::ConstantPointerNull>(rhs) != NULL) {
-    OpRegImm(cu, kOpCmp, rl_src1.low_reg, 0);
+    cg->OpRegImm(cu, kOpCmp, rl_src1.low_reg, 0);
   } else {
     RegLocation rl_src2 = GetLoc(cu, rhs);
-    rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
-    OpRegReg(cu, kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+    rl_src2 = cg->LoadValue(cu, rl_src2, kCoreReg);
+    cg->OpRegReg(cu, kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
   }
-  OpCondBranch(cu, cond, taken);
+  cg->OpCondBranch(cu, cond, taken);
   // Fallthrough
-  OpUnconditionalBranch(cu, fall_through);
+  cg->OpUnconditionalBranch(cu, fall_through);
 }
 
 static void CvtCopy(CompilationUnit* cu, llvm::CallInst* call_inst)
 {
+  Codegen* cg = cu->cg.get();
   DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
   RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(0));
   RegLocation rl_dest = GetLoc(cu, call_inst);
   DCHECK_EQ(rl_src.wide, rl_dest.wide);
   DCHECK_EQ(rl_src.fp, rl_dest.fp);
   if (rl_src.wide) {
-    StoreValueWide(cu, rl_dest, rl_src);
+    cg->StoreValueWide(cu, rl_dest, rl_src);
   } else {
-    StoreValue(cu, rl_dest, rl_src);
+    cg->StoreValue(cu, rl_dest, rl_src);
   }
 }
 
 // Note: Immediate arg is a ConstantInt regardless of result type
 static void CvtConst(CompilationUnit* cu, llvm::CallInst* call_inst)
 {
+  Codegen* cg = cu->cg.get();
   DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
   llvm::ConstantInt* src =
       llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
@@ -2559,50 +2570,54 @@
   RegLocation rl_dest = GetLoc(cu, call_inst);
   RegLocation rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
   if (rl_dest.wide) {
-    LoadConstantValueWide(cu, rl_result.low_reg, rl_result.high_reg,
+    cg->LoadConstantValueWide(cu, rl_result.low_reg, rl_result.high_reg,
                           (immval) & 0xffffffff, (immval >> 32) & 0xffffffff);
-    StoreValueWide(cu, rl_dest, rl_result);
+    cg->StoreValueWide(cu, rl_dest, rl_result);
   } else {
-    LoadConstantNoClobber(cu, rl_result.low_reg, immval & 0xffffffff);
-    StoreValue(cu, rl_dest, rl_result);
+    cg->LoadConstantNoClobber(cu, rl_result.low_reg, immval & 0xffffffff);
+    cg->StoreValue(cu, rl_dest, rl_result);
   }
 }
 
 static void CvtConstObject(CompilationUnit* cu, llvm::CallInst* call_inst, bool is_string)
 {
+  Codegen* cg = cu->cg.get();
   DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
   llvm::ConstantInt* idx_val =
       llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
   uint32_t index = idx_val->getZExtValue();
   RegLocation rl_dest = GetLoc(cu, call_inst);
   if (is_string) {
-    GenConstString(cu, index, rl_dest);
+    cg->GenConstString(cu, index, rl_dest);
   } else {
-    GenConstClass(cu, index, rl_dest);
+    cg->GenConstClass(cu, index, rl_dest);
   }
 }
 
 static void CvtFillArrayData(CompilationUnit* cu, llvm::CallInst* call_inst)
 {
+  Codegen* cg = cu->cg.get();
   DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
   llvm::ConstantInt* offset_val =
       llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
   RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(1));
-  GenFillArrayData(cu, offset_val->getSExtValue(), rl_src);
+  cg->GenFillArrayData(cu, offset_val->getSExtValue(), rl_src);
 }
 
 static void CvtNewInstance(CompilationUnit* cu, llvm::CallInst* call_inst)
 {
+  Codegen* cg = cu->cg.get();
   DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
   llvm::ConstantInt* type_idx_val =
       llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
   uint32_t type_idx = type_idx_val->getZExtValue();
   RegLocation rl_dest = GetLoc(cu, call_inst);
-  GenNewInstance(cu, type_idx, rl_dest);
+  cg->GenNewInstance(cu, type_idx, rl_dest);
 }
 
 static void CvtNewArray(CompilationUnit* cu, llvm::CallInst* call_inst)
 {
+  Codegen* cg = cu->cg.get();
   DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
   llvm::ConstantInt* type_idx_val =
       llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
@@ -2610,11 +2625,12 @@
   llvm::Value* len = call_inst->getArgOperand(1);
   RegLocation rl_len = GetLoc(cu, len);
   RegLocation rl_dest = GetLoc(cu, call_inst);
-  GenNewArray(cu, type_idx, rl_dest, rl_len);
+  cg->GenNewArray(cu, type_idx, rl_dest, rl_len);
 }
 
 static void CvtInstanceOf(CompilationUnit* cu, llvm::CallInst* call_inst)
 {
+  Codegen* cg = cu->cg.get();
   DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
   llvm::ConstantInt* type_idx_val =
       llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
@@ -2622,90 +2638,98 @@
   llvm::Value* src = call_inst->getArgOperand(1);
   RegLocation rl_src = GetLoc(cu, src);
   RegLocation rl_dest = GetLoc(cu, call_inst);
-  GenInstanceof(cu, type_idx, rl_dest, rl_src);
+  cg->GenInstanceof(cu, type_idx, rl_dest, rl_src);
 }
 
 static void CvtThrow(CompilationUnit* cu, llvm::CallInst* call_inst)
 {
+  Codegen* cg = cu->cg.get();
   DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
   llvm::Value* src = call_inst->getArgOperand(0);
   RegLocation rl_src = GetLoc(cu, src);
-  GenThrow(cu, rl_src);
+  cg->GenThrow(cu, rl_src);
 }
 
 static void CvtMonitorEnterExit(CompilationUnit* cu, bool is_enter,
                          llvm::CallInst* call_inst)
 {
+  Codegen* cg = cu->cg.get();
   DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
   llvm::ConstantInt* opt_flags =
       llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
   llvm::Value* src = call_inst->getArgOperand(1);
   RegLocation rl_src = GetLoc(cu, src);
   if (is_enter) {
-    GenMonitorEnter(cu, opt_flags->getZExtValue(), rl_src);
+    cg->GenMonitorEnter(cu, opt_flags->getZExtValue(), rl_src);
   } else {
-    GenMonitorExit(cu, opt_flags->getZExtValue(), rl_src);
+    cg->GenMonitorExit(cu, opt_flags->getZExtValue(), rl_src);
   }
 }
 
 static void CvtArrayLength(CompilationUnit* cu, llvm::CallInst* call_inst)
 {
+  Codegen* cg = cu->cg.get();
   DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
   llvm::ConstantInt* opt_flags =
       llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
   llvm::Value* src = call_inst->getArgOperand(1);
   RegLocation rl_src = GetLoc(cu, src);
-  rl_src = LoadValue(cu, rl_src, kCoreReg);
-  GenNullCheck(cu, rl_src.s_reg_low, rl_src.low_reg, opt_flags->getZExtValue());
+  rl_src = cg->LoadValue(cu, rl_src, kCoreReg);
+  cg->GenNullCheck(cu, rl_src.s_reg_low, rl_src.low_reg, opt_flags->getZExtValue());
   RegLocation rl_dest = GetLoc(cu, call_inst);
   RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
   int len_offset = Array::LengthOffset().Int32Value();
-  LoadWordDisp(cu, rl_src.low_reg, len_offset, rl_result.low_reg);
-  StoreValue(cu, rl_dest, rl_result);
+  cg->LoadWordDisp(cu, rl_src.low_reg, len_offset, rl_result.low_reg);
+  cg->StoreValue(cu, rl_dest, rl_result);
 }
 
 static void CvtMoveException(CompilationUnit* cu, llvm::CallInst* call_inst)
 {
+  Codegen* cg = cu->cg.get();
   RegLocation rl_dest = GetLoc(cu, call_inst);
-  GenMoveException(cu, rl_dest);
+  cg->GenMoveException(cu, rl_dest);
 }
 
 static void CvtSget(CompilationUnit* cu, llvm::CallInst* call_inst, bool is_wide, bool is_object)
 {
+  Codegen* cg = cu->cg.get();
   DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
   llvm::ConstantInt* type_idx_val =
       llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
   uint32_t type_idx = type_idx_val->getZExtValue();
   RegLocation rl_dest = GetLoc(cu, call_inst);
-  GenSget(cu, type_idx, rl_dest, is_wide, is_object);
+  cg->GenSget(cu, type_idx, rl_dest, is_wide, is_object);
 }
 
 static void CvtSput(CompilationUnit* cu, llvm::CallInst* call_inst, bool is_wide, bool is_object)
 {
+  Codegen* cg = cu->cg.get();
   DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
   llvm::ConstantInt* type_idx_val =
       llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
   uint32_t type_idx = type_idx_val->getZExtValue();
   llvm::Value* src = call_inst->getArgOperand(1);
   RegLocation rl_src = GetLoc(cu, src);
-  GenSput(cu, type_idx, rl_src, is_wide, is_object);
+  cg->GenSput(cu, type_idx, rl_src, is_wide, is_object);
 }
 
 static void CvtAget(CompilationUnit* cu, llvm::CallInst* call_inst, OpSize size, int scale)
 {
+  Codegen* cg = cu->cg.get();
   DCHECK_EQ(call_inst->getNumArgOperands(), 3U);
   llvm::ConstantInt* opt_flags =
       llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
   RegLocation rl_array = GetLoc(cu, call_inst->getArgOperand(1));
   RegLocation rl_index = GetLoc(cu, call_inst->getArgOperand(2));
   RegLocation rl_dest = GetLoc(cu, call_inst);
-  GenArrayGet(cu, opt_flags->getZExtValue(), size, rl_array, rl_index,
+  cg->GenArrayGet(cu, opt_flags->getZExtValue(), size, rl_array, rl_index,
               rl_dest, scale);
 }
 
 static void CvtAput(CompilationUnit* cu, llvm::CallInst* call_inst, OpSize size,
                     int scale, bool is_object)
 {
+  Codegen* cg = cu->cg.get();
   DCHECK_EQ(call_inst->getNumArgOperands(), 4U);
   llvm::ConstantInt* opt_flags =
       llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
@@ -2713,10 +2737,10 @@
   RegLocation rl_array = GetLoc(cu, call_inst->getArgOperand(2));
   RegLocation rl_index = GetLoc(cu, call_inst->getArgOperand(3));
   if (is_object) {
-    GenArrayObjPut(cu, opt_flags->getZExtValue(), rl_array, rl_index,
+    cg->GenArrayObjPut(cu, opt_flags->getZExtValue(), rl_array, rl_index,
                    rl_src, scale);
   } else {
-    GenArrayPut(cu, opt_flags->getZExtValue(), size, rl_array, rl_index,
+    cg->GenArrayPut(cu, opt_flags->getZExtValue(), size, rl_array, rl_index,
                 rl_src, scale);
   }
 }
@@ -2735,6 +2759,7 @@
 static void CvtIget(CompilationUnit* cu, llvm::CallInst* call_inst, OpSize size,
                     bool is_wide, bool is_obj)
 {
+  Codegen* cg = cu->cg.get();
   DCHECK_EQ(call_inst->getNumArgOperands(), 3U);
   llvm::ConstantInt* opt_flags =
       llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
@@ -2742,13 +2767,14 @@
   llvm::ConstantInt* field_idx =
       llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(2));
   RegLocation rl_dest = GetLoc(cu, call_inst);
-  GenIGet(cu, field_idx->getZExtValue(), opt_flags->getZExtValue(),
+  cg->GenIGet(cu, field_idx->getZExtValue(), opt_flags->getZExtValue(),
           size, rl_dest, rl_obj, is_wide, is_obj);
 }
 
 static void CvtIput(CompilationUnit* cu, llvm::CallInst* call_inst, OpSize size,
                     bool is_wide, bool is_obj)
 {
+  Codegen* cg = cu->cg.get();
   DCHECK_EQ(call_inst->getNumArgOperands(), 4U);
   llvm::ConstantInt* opt_flags =
       llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
@@ -2756,38 +2782,42 @@
   RegLocation rl_obj = GetLoc(cu, call_inst->getArgOperand(2));
   llvm::ConstantInt* field_idx =
       llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(3));
-  GenIPut(cu, field_idx->getZExtValue(), opt_flags->getZExtValue(),
+  cg->GenIPut(cu, field_idx->getZExtValue(), opt_flags->getZExtValue(),
           size, rl_src, rl_obj, is_wide, is_obj);
 }
 
 static void CvtCheckCast(CompilationUnit* cu, llvm::CallInst* call_inst)
 {
+  Codegen* cg = cu->cg.get();
   DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
   llvm::ConstantInt* type_idx =
       llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
   RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(1));
-  GenCheckCast(cu, type_idx->getZExtValue(), rl_src);
+  cg->GenCheckCast(cu, type_idx->getZExtValue(), rl_src);
 }
 
 static void CvtFPCompare(CompilationUnit* cu, llvm::CallInst* call_inst,
                          Instruction::Code opcode)
 {
+  Codegen* cg = cu->cg.get();
   RegLocation rl_src1 = GetLoc(cu, call_inst->getArgOperand(0));
   RegLocation rl_src2 = GetLoc(cu, call_inst->getArgOperand(1));
   RegLocation rl_dest = GetLoc(cu, call_inst);
-  GenCmpFP(cu, opcode, rl_dest, rl_src1, rl_src2);
+  cg->GenCmpFP(cu, opcode, rl_dest, rl_src1, rl_src2);
 }
 
 static void CvtLongCompare(CompilationUnit* cu, llvm::CallInst* call_inst)
 {
+  Codegen* cg = cu->cg.get();
   RegLocation rl_src1 = GetLoc(cu, call_inst->getArgOperand(0));
   RegLocation rl_src2 = GetLoc(cu, call_inst->getArgOperand(1));
   RegLocation rl_dest = GetLoc(cu, call_inst);
-  GenCmpLong(cu, rl_dest, rl_src1, rl_src2);
+  cg->GenCmpLong(cu, rl_dest, rl_src1, rl_src2);
 }
 
 static void CvtSwitch(CompilationUnit* cu, llvm::Instruction* inst)
 {
+  Codegen* cg = cu->cg.get();
   llvm::SwitchInst* sw_inst = llvm::dyn_cast<llvm::SwitchInst>(inst);
   DCHECK(sw_inst != NULL);
   llvm::Value* test_val = sw_inst->getCondition();
@@ -2800,16 +2830,17 @@
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   uint16_t table_magic = *table;
   if (table_magic == 0x100) {
-    GenPackedSwitch(cu, table_offset, rl_src);
+    cg->GenPackedSwitch(cu, table_offset, rl_src);
   } else {
     DCHECK_EQ(table_magic, 0x200);
-    GenSparseSwitch(cu, table_offset, rl_src);
+    cg->GenSparseSwitch(cu, table_offset, rl_src);
   }
 }
 
 static void CvtInvoke(CompilationUnit* cu, llvm::CallInst* call_inst, bool is_void,
                       bool is_filled_new_array)
 {
+  Codegen* cg = cu->cg.get();
   CallInfo* info = static_cast<CallInfo*>(NewMem(cu, sizeof(CallInfo), true, kAllocMisc));
   if (is_void) {
     info->result.location = kLocInvalid;
@@ -2850,9 +2881,9 @@
   info->is_range = (info->num_arg_words > 5);
 
   if (is_filled_new_array) {
-    GenFilledNewArray(cu, info);
+    cg->GenFilledNewArray(cu, info);
   } else {
-    GenInvoke(cu, info);
+    cg->GenInvoke(cu, info);
   }
 }
 
@@ -2866,6 +2897,7 @@
 
 static bool BitcodeBlockCodeGen(CompilationUnit* cu, llvm::BasicBlock* bb)
 {
+  Codegen* cg = cu->cg.get();
   while (cu->llvm_blocks.find(bb) == cu->llvm_blocks.end()) {
     llvm::BasicBlock* next_bb = NULL;
     cu->llvm_blocks.insert(bb);
@@ -2926,7 +2958,7 @@
           i++;
         }
       }
-      GenEntrySequence(cu, ArgLocs, cu->method_loc);
+      cg->GenEntrySequence(cu, ArgLocs, cu->method_loc);
     }
 
     // Visit all of the instructions in the block
@@ -3017,7 +3049,7 @@
                 // Already dealt with - just ignore it here.
                 break;
               case greenland::IntrinsicHelper::CheckSuspend:
-                GenSuspendTest(cu, 0 /* opt_flags already applied */);
+                cg->GenSuspendTest(cu, 0 /* opt_flags already applied */);
                 break;
               case greenland::IntrinsicHelper::HLInvokeObj:
               case greenland::IntrinsicHelper::HLInvokeFloat:
@@ -3266,8 +3298,7 @@
                    */
                    llvm::BasicBlock* target_bb = sw_inst->getDefaultDest();
                    DCHECK(target_bb != NULL);
-                   OpUnconditionalBranch(cu,
-                                         cu->block_to_label_map.Get(target_bb));
+                   cg->OpUnconditionalBranch(cu, cu->block_to_label_map.Get(target_bb));
                    ++it;
                    // Set next bb to default target - improves code layout
                    next_bb = target_bb;
@@ -3378,6 +3409,7 @@
  */
 void MethodBitcode2LIR(CompilationUnit* cu)
 {
+  Codegen* cg = cu->cg.get();
   llvm::Function* func = cu->func;
   int num_basic_blocks = func->getBasicBlockList().size();
   // Allocate a list for LIR basic block labels
@@ -3475,7 +3507,7 @@
       }
     }
   }
-  AdjustSpillMask(cu);
+  cg->AdjustSpillMask(cu);
   cu->frame_size = ComputeFrameSize(cu);
 
   // Create RegLocations for arguments
@@ -3498,11 +3530,11 @@
     BitcodeBlockCodeGen(cu, static_cast<llvm::BasicBlock*>(i));
   }
 
-  HandleSuspendLaunchPads(cu);
+  cg->HandleSuspendLaunchPads(cu);
 
-  HandleThrowLaunchPads(cu);
+  cg->HandleThrowLaunchPads(cu);
 
-  HandleIntrinsicLaunchPads(cu);
+  cg->HandleIntrinsicLaunchPads(cu);
 
   cu->func->eraseFromParent();
   cu->func = NULL;
diff --git a/src/compiler/codegen/method_bitcode.h b/src/compiler/codegen/mir_to_gbc.h
similarity index 82%
rename from src/compiler/codegen/method_bitcode.h
rename to src/compiler/codegen/mir_to_gbc.h
index df4f4d4..a9660de 100644
--- a/src/compiler/codegen/method_bitcode.h
+++ b/src/compiler/codegen/mir_to_gbc.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_SRC_COMPILER_CODEGEN_METHODBITCODE_H_
-#define ART_SRC_COMPILER_CODEGEN_METHODBITCODE_H_
+#ifndef ART_SRC_COMPILER_CODEGEN_MIRTOGBC_H_
+#define ART_SRC_COMPILER_CODEGEN_MIRTOGBC_H_
 
 namespace art {
 
@@ -24,4 +24,4 @@
 
 }  // namespace art
 
-#endif // ART_SRC_COMPILER_CODEGEN_METHODBITCODE_H_
+#endif // ART_SRC_COMPILER_CODEGEN_MIRTOGBC_H_
diff --git a/src/compiler/codegen/method_codegen_driver.cc b/src/compiler/codegen/mir_to_lir.cc
similarity index 65%
rename from src/compiler/codegen/method_codegen_driver.cc
rename to src/compiler/codegen/mir_to_lir.cc
index fe5d522..5a6a5fc 100644
--- a/src/compiler/codegen/method_codegen_driver.cc
+++ b/src/compiler/codegen/mir_to_lir.cc
@@ -23,39 +23,6 @@
 
 namespace art {
 
-// TODO: unify bad_loc
-const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
-                            INVALID_REG, INVALID_REG, INVALID_SREG,
-                            INVALID_SREG};
-
-/* Mark register usage state and return long retloc */
-RegLocation GetReturnWide(CompilationUnit* cu, bool is_double)
-{
-  RegLocation gpr_res = LocCReturnWide();
-  RegLocation fpr_res = LocCReturnDouble();
-  RegLocation res = is_double ? fpr_res : gpr_res;
-  Clobber(cu, res.low_reg);
-  Clobber(cu, res.high_reg);
-  LockTemp(cu, res.low_reg);
-  LockTemp(cu, res.high_reg);
-  MarkPair(cu, res.low_reg, res.high_reg);
-  return res;
-}
-
-RegLocation GetReturn(CompilationUnit* cu, bool is_float)
-{
-  RegLocation gpr_res = LocCReturn();
-  RegLocation fpr_res = LocCReturnFloat();
-  RegLocation res = is_float ? fpr_res : gpr_res;
-  Clobber(cu, res.low_reg);
-  if (cu->instruction_set == kMips) {
-    MarkInUse(cu, res.low_reg);
-  } else {
-    LockTemp(cu, res.low_reg);
-  }
-  return res;
-}
-
 /*
  * Target-independent code generation.  Use only high-level
  * load/store utilities here, or target-dependent genXX() handlers
@@ -64,20 +31,21 @@
 static bool CompileDalvikInstruction(CompilationUnit* cu, MIR* mir, BasicBlock* bb,
                                      LIR* label_list)
 {
+  Codegen* cg = cu->cg.get();
   bool res = false;   // Assume success
   RegLocation rl_src[3];
-  RegLocation rl_dest = bad_loc;
-  RegLocation rl_result = bad_loc;
+  RegLocation rl_dest = GetBadLoc();
+  RegLocation rl_result = GetBadLoc();
   Instruction::Code opcode = mir->dalvikInsn.opcode;
   int opt_flags = mir->optimization_flags;
   uint32_t vB = mir->dalvikInsn.vB;
   uint32_t vC = mir->dalvikInsn.vC;
 
-  /* Prep Src and Dest locations */
+  // Prep Src and Dest locations.
   int next_sreg = 0;
   int next_loc = 0;
   int attrs = oat_data_flow_attributes[opcode];
-  rl_src[0] = rl_src[1] = rl_src[2] = bad_loc;
+  rl_src[0] = rl_src[1] = rl_src[2] = GetBadLoc();
   if (attrs & DF_UA) {
     if (attrs & DF_A_WIDE) {
       rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
@@ -115,41 +83,41 @@
       break;
 
     case Instruction::MOVE_EXCEPTION:
-      GenMoveException(cu, rl_dest);
+      cg->GenMoveException(cu, rl_dest);
       break;
     case Instruction::RETURN_VOID:
       if (!(cu->attrs & METHOD_IS_LEAF)) {
-        GenSuspendTest(cu, opt_flags);
+        cg->GenSuspendTest(cu, opt_flags);
       }
       break;
 
     case Instruction::RETURN:
     case Instruction::RETURN_OBJECT:
       if (!(cu->attrs & METHOD_IS_LEAF)) {
-        GenSuspendTest(cu, opt_flags);
+        cg->GenSuspendTest(cu, opt_flags);
       }
-      StoreValue(cu, GetReturn(cu, cu->shorty[0] == 'F'), rl_src[0]);
+      cg->StoreValue(cu, GetReturn(cu, cu->shorty[0] == 'F'), rl_src[0]);
       break;
 
     case Instruction::RETURN_WIDE:
       if (!(cu->attrs & METHOD_IS_LEAF)) {
-        GenSuspendTest(cu, opt_flags);
+        cg->GenSuspendTest(cu, opt_flags);
       }
-      StoreValueWide(cu, GetReturnWide(cu,
+      cg->StoreValueWide(cu, GetReturnWide(cu,
                        cu->shorty[0] == 'D'), rl_src[0]);
       break;
 
     case Instruction::MOVE_RESULT_WIDE:
       if (opt_flags & MIR_INLINED)
-        break;  // Nop - combined w/ previous invoke
-      StoreValueWide(cu, rl_dest, GetReturnWide(cu, rl_dest.fp));
+        break;  // Nop - combined w/ previous invoke.
+      cg->StoreValueWide(cu, rl_dest, GetReturnWide(cu, rl_dest.fp));
       break;
 
     case Instruction::MOVE_RESULT:
     case Instruction::MOVE_RESULT_OBJECT:
       if (opt_flags & MIR_INLINED)
-        break;  // Nop - combined w/ previous invoke
-      StoreValue(cu, rl_dest, GetReturn(cu, rl_dest.fp));
+        break;  // Nop - combined w/ previous invoke.
+      cg->StoreValue(cu, rl_dest, GetReturn(cu, rl_dest.fp));
       break;
 
     case Instruction::MOVE:
@@ -158,140 +126,140 @@
     case Instruction::MOVE_OBJECT_16:
     case Instruction::MOVE_FROM16:
     case Instruction::MOVE_OBJECT_FROM16:
-      StoreValue(cu, rl_dest, rl_src[0]);
+      cg->StoreValue(cu, rl_dest, rl_src[0]);
       break;
 
     case Instruction::MOVE_WIDE:
     case Instruction::MOVE_WIDE_16:
     case Instruction::MOVE_WIDE_FROM16:
-      StoreValueWide(cu, rl_dest, rl_src[0]);
+      cg->StoreValueWide(cu, rl_dest, rl_src[0]);
       break;
 
     case Instruction::CONST:
     case Instruction::CONST_4:
     case Instruction::CONST_16:
       rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
-      LoadConstantNoClobber(cu, rl_result.low_reg, vB);
-      StoreValue(cu, rl_dest, rl_result);
+      cg->LoadConstantNoClobber(cu, rl_result.low_reg, vB);
+      cg->StoreValue(cu, rl_dest, rl_result);
       break;
 
     case Instruction::CONST_HIGH16:
       rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
-      LoadConstantNoClobber(cu, rl_result.low_reg, vB << 16);
-      StoreValue(cu, rl_dest, rl_result);
+      cg->LoadConstantNoClobber(cu, rl_result.low_reg, vB << 16);
+      cg->StoreValue(cu, rl_dest, rl_result);
       break;
 
     case Instruction::CONST_WIDE_16:
     case Instruction::CONST_WIDE_32:
       rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
-      LoadConstantValueWide(cu, rl_result.low_reg, rl_result.high_reg, vB,
+      cg->LoadConstantValueWide(cu, rl_result.low_reg, rl_result.high_reg, vB,
                             (vB & 0x80000000) ? -1 : 0);
-      StoreValueWide(cu, rl_dest, rl_result);
+      cg->StoreValueWide(cu, rl_dest, rl_result);
       break;
 
     case Instruction::CONST_WIDE:
       rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
-      LoadConstantValueWide(cu, rl_result.low_reg, rl_result.high_reg,
+      cg->LoadConstantValueWide(cu, rl_result.low_reg, rl_result.high_reg,
                             mir->dalvikInsn.vB_wide & 0xffffffff,
                             (mir->dalvikInsn.vB_wide >> 32) & 0xffffffff);
-      StoreValueWide(cu, rl_dest, rl_result);
+      cg->StoreValueWide(cu, rl_dest, rl_result);
       break;
 
     case Instruction::CONST_WIDE_HIGH16:
       rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
-      LoadConstantValueWide(cu, rl_result.low_reg, rl_result.high_reg,
+      cg->LoadConstantValueWide(cu, rl_result.low_reg, rl_result.high_reg,
                             0, vB << 16);
-      StoreValueWide(cu, rl_dest, rl_result);
+      cg->StoreValueWide(cu, rl_dest, rl_result);
       break;
 
     case Instruction::MONITOR_ENTER:
-      GenMonitorEnter(cu, opt_flags, rl_src[0]);
+      cg->GenMonitorEnter(cu, opt_flags, rl_src[0]);
       break;
 
     case Instruction::MONITOR_EXIT:
-      GenMonitorExit(cu, opt_flags, rl_src[0]);
+      cg->GenMonitorExit(cu, opt_flags, rl_src[0]);
       break;
 
     case Instruction::CHECK_CAST:
-      GenCheckCast(cu, vB, rl_src[0]);
+      cg->GenCheckCast(cu, vB, rl_src[0]);
       break;
 
     case Instruction::INSTANCE_OF:
-      GenInstanceof(cu, vC, rl_dest, rl_src[0]);
+      cg->GenInstanceof(cu, vC, rl_dest, rl_src[0]);
       break;
 
     case Instruction::NEW_INSTANCE:
-      GenNewInstance(cu, vB, rl_dest);
+      cg->GenNewInstance(cu, vB, rl_dest);
       break;
 
     case Instruction::THROW:
-      GenThrow(cu, rl_src[0]);
+      cg->GenThrow(cu, rl_src[0]);
       break;
 
     case Instruction::ARRAY_LENGTH:
       int len_offset;
       len_offset = Array::LengthOffset().Int32Value();
-      rl_src[0] = LoadValue(cu, rl_src[0], kCoreReg);
-      GenNullCheck(cu, rl_src[0].s_reg_low, rl_src[0].low_reg, opt_flags);
+      rl_src[0] = cg->LoadValue(cu, rl_src[0], kCoreReg);
+      cg->GenNullCheck(cu, rl_src[0].s_reg_low, rl_src[0].low_reg, opt_flags);
       rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-      LoadWordDisp(cu, rl_src[0].low_reg, len_offset, rl_result.low_reg);
-      StoreValue(cu, rl_dest, rl_result);
+      cg->LoadWordDisp(cu, rl_src[0].low_reg, len_offset, rl_result.low_reg);
+      cg->StoreValue(cu, rl_dest, rl_result);
       break;
 
     case Instruction::CONST_STRING:
     case Instruction::CONST_STRING_JUMBO:
-      GenConstString(cu, vB, rl_dest);
+      cg->GenConstString(cu, vB, rl_dest);
       break;
 
     case Instruction::CONST_CLASS:
-      GenConstClass(cu, vB, rl_dest);
+      cg->GenConstClass(cu, vB, rl_dest);
       break;
 
     case Instruction::FILL_ARRAY_DATA:
-      GenFillArrayData(cu, vB, rl_src[0]);
+      cg->GenFillArrayData(cu, vB, rl_src[0]);
       break;
 
     case Instruction::FILLED_NEW_ARRAY:
-      GenFilledNewArray(cu, NewMemCallInfo(cu, bb, mir, kStatic,
+      cg->GenFilledNewArray(cu, cg->NewMemCallInfo(cu, bb, mir, kStatic,
                         false /* not range */));
       break;
 
     case Instruction::FILLED_NEW_ARRAY_RANGE:
-      GenFilledNewArray(cu, NewMemCallInfo(cu, bb, mir, kStatic,
+      cg->GenFilledNewArray(cu, cg->NewMemCallInfo(cu, bb, mir, kStatic,
                         true /* range */));
       break;
 
     case Instruction::NEW_ARRAY:
-      GenNewArray(cu, vC, rl_dest, rl_src[0]);
+      cg->GenNewArray(cu, vC, rl_dest, rl_src[0]);
       break;
 
     case Instruction::GOTO:
     case Instruction::GOTO_16:
     case Instruction::GOTO_32:
       if (bb->taken->start_offset <= mir->offset) {
-        GenSuspendTestAndBranch(cu, opt_flags, &label_list[bb->taken->id]);
+        cg->GenSuspendTestAndBranch(cu, opt_flags, &label_list[bb->taken->id]);
       } else {
-        OpUnconditionalBranch(cu, &label_list[bb->taken->id]);
+        cg->OpUnconditionalBranch(cu, &label_list[bb->taken->id]);
       }
       break;
 
     case Instruction::PACKED_SWITCH:
-      GenPackedSwitch(cu, vB, rl_src[0]);
+      cg->GenPackedSwitch(cu, vB, rl_src[0]);
       break;
 
     case Instruction::SPARSE_SWITCH:
-      GenSparseSwitch(cu, vB, rl_src[0]);
+      cg->GenSparseSwitch(cu, vB, rl_src[0]);
       break;
 
     case Instruction::CMPL_FLOAT:
     case Instruction::CMPG_FLOAT:
     case Instruction::CMPL_DOUBLE:
     case Instruction::CMPG_DOUBLE:
-      res = GenCmpFP(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+      res = cg->GenCmpFP(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::CMP_LONG:
-      GenCmpLong(cu, rl_dest, rl_src[0], rl_src[1]);
+      cg->GenCmpLong(cu, rl_dest, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::IF_EQ:
@@ -305,9 +273,9 @@
       bool backward_branch;
       backward_branch = (bb->taken->start_offset <= mir->offset);
       if (backward_branch) {
-        GenSuspendTest(cu, opt_flags);
+        cg->GenSuspendTest(cu, opt_flags);
       }
-      GenCompareAndBranch(cu, opcode, rl_src[0], rl_src[1], taken,
+      cg->GenCompareAndBranch(cu, opcode, rl_src[0], rl_src[1], taken,
                           fall_through);
       break;
       }
@@ -323,123 +291,117 @@
       bool backward_branch;
       backward_branch = (bb->taken->start_offset <= mir->offset);
       if (backward_branch) {
-        GenSuspendTest(cu, opt_flags);
+        cg->GenSuspendTest(cu, opt_flags);
       }
-      GenCompareZeroAndBranch(cu, opcode, rl_src[0], taken, fall_through);
+      cg->GenCompareZeroAndBranch(cu, opcode, rl_src[0], taken, fall_through);
       break;
       }
 
     case Instruction::AGET_WIDE:
-      GenArrayGet(cu, opt_flags, kLong, rl_src[0], rl_src[1], rl_dest, 3);
+      cg->GenArrayGet(cu, opt_flags, kLong, rl_src[0], rl_src[1], rl_dest, 3);
       break;
     case Instruction::AGET:
     case Instruction::AGET_OBJECT:
-      GenArrayGet(cu, opt_flags, kWord, rl_src[0], rl_src[1], rl_dest, 2);
+      cg->GenArrayGet(cu, opt_flags, kWord, rl_src[0], rl_src[1], rl_dest, 2);
       break;
     case Instruction::AGET_BOOLEAN:
-      GenArrayGet(cu, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], rl_dest, 0);
+      cg->GenArrayGet(cu, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], rl_dest, 0);
       break;
     case Instruction::AGET_BYTE:
-      GenArrayGet(cu, opt_flags, kSignedByte, rl_src[0], rl_src[1], rl_dest, 0);
+      cg->GenArrayGet(cu, opt_flags, kSignedByte, rl_src[0], rl_src[1], rl_dest, 0);
       break;
     case Instruction::AGET_CHAR:
-      GenArrayGet(cu, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
+      cg->GenArrayGet(cu, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
       break;
     case Instruction::AGET_SHORT:
-      GenArrayGet(cu, opt_flags, kSignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
+      cg->GenArrayGet(cu, opt_flags, kSignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
       break;
     case Instruction::APUT_WIDE:
-      GenArrayPut(cu, opt_flags, kLong, rl_src[1], rl_src[2], rl_src[0], 3);
+      cg->GenArrayPut(cu, opt_flags, kLong, rl_src[1], rl_src[2], rl_src[0], 3);
       break;
     case Instruction::APUT:
-      GenArrayPut(cu, opt_flags, kWord, rl_src[1], rl_src[2], rl_src[0], 2);
+      cg->GenArrayPut(cu, opt_flags, kWord, rl_src[1], rl_src[2], rl_src[0], 2);
       break;
     case Instruction::APUT_OBJECT:
-      GenArrayObjPut(cu, opt_flags, rl_src[1], rl_src[2], rl_src[0], 2);
+      cg->GenArrayObjPut(cu, opt_flags, rl_src[1], rl_src[2], rl_src[0], 2);
       break;
     case Instruction::APUT_SHORT:
     case Instruction::APUT_CHAR:
-      GenArrayPut(cu, opt_flags, kUnsignedHalf, rl_src[1], rl_src[2], rl_src[0], 1);
+      cg->GenArrayPut(cu, opt_flags, kUnsignedHalf, rl_src[1], rl_src[2], rl_src[0], 1);
       break;
     case Instruction::APUT_BYTE:
     case Instruction::APUT_BOOLEAN:
-      GenArrayPut(cu, opt_flags, kUnsignedByte, rl_src[1], rl_src[2],
+      cg->GenArrayPut(cu, opt_flags, kUnsignedByte, rl_src[1], rl_src[2],
             rl_src[0], 0);
       break;
 
     case Instruction::IGET_OBJECT:
-    //case Instruction::IGET_OBJECT_VOLATILE:
-      GenIGet(cu, vC, opt_flags, kWord, rl_dest, rl_src[0], false, true);
+      cg->GenIGet(cu, vC, opt_flags, kWord, rl_dest, rl_src[0], false, true);
       break;
 
     case Instruction::IGET_WIDE:
-    //case Instruction::IGET_WIDE_VOLATILE:
-      GenIGet(cu, vC, opt_flags, kLong, rl_dest, rl_src[0], true, false);
+      cg->GenIGet(cu, vC, opt_flags, kLong, rl_dest, rl_src[0], true, false);
       break;
 
     case Instruction::IGET:
-    //case Instruction::IGET_VOLATILE:
-      GenIGet(cu, vC, opt_flags, kWord, rl_dest, rl_src[0], false, false);
+      cg->GenIGet(cu, vC, opt_flags, kWord, rl_dest, rl_src[0], false, false);
       break;
 
     case Instruction::IGET_CHAR:
-      GenIGet(cu, vC, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
+      cg->GenIGet(cu, vC, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
       break;
 
     case Instruction::IGET_SHORT:
-      GenIGet(cu, vC, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
+      cg->GenIGet(cu, vC, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
       break;
 
     case Instruction::IGET_BOOLEAN:
     case Instruction::IGET_BYTE:
-      GenIGet(cu, vC, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
+      cg->GenIGet(cu, vC, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
       break;
 
     case Instruction::IPUT_WIDE:
-    //case Instruction::IPUT_WIDE_VOLATILE:
-      GenIPut(cu, vC, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
+      cg->GenIPut(cu, vC, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
       break;
 
     case Instruction::IPUT_OBJECT:
-    //case Instruction::IPUT_OBJECT_VOLATILE:
-      GenIPut(cu, vC, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
+      cg->GenIPut(cu, vC, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
       break;
 
     case Instruction::IPUT:
-    //case Instruction::IPUT_VOLATILE:
-      GenIPut(cu, vC, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
+      cg->GenIPut(cu, vC, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
       break;
 
     case Instruction::IPUT_BOOLEAN:
     case Instruction::IPUT_BYTE:
-      GenIPut(cu, vC, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
+      cg->GenIPut(cu, vC, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
       break;
 
     case Instruction::IPUT_CHAR:
-      GenIPut(cu, vC, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
+      cg->GenIPut(cu, vC, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
       break;
 
     case Instruction::IPUT_SHORT:
-      GenIPut(cu, vC, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
+      cg->GenIPut(cu, vC, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
       break;
 
     case Instruction::SGET_OBJECT:
-      GenSget(cu, vB, rl_dest, false, true);
+      cg->GenSget(cu, vB, rl_dest, false, true);
       break;
     case Instruction::SGET:
     case Instruction::SGET_BOOLEAN:
     case Instruction::SGET_BYTE:
     case Instruction::SGET_CHAR:
     case Instruction::SGET_SHORT:
-      GenSget(cu, vB, rl_dest, false, false);
+      cg->GenSget(cu, vB, rl_dest, false, false);
       break;
 
     case Instruction::SGET_WIDE:
-      GenSget(cu, vB, rl_dest, true, false);
+      cg->GenSget(cu, vB, rl_dest, true, false);
       break;
 
     case Instruction::SPUT_OBJECT:
-      GenSput(cu, vB, rl_src[0], false, true);
+      cg->GenSput(cu, vB, rl_src[0], false, true);
       break;
 
     case Instruction::SPUT:
@@ -447,80 +409,80 @@
     case Instruction::SPUT_BYTE:
     case Instruction::SPUT_CHAR:
     case Instruction::SPUT_SHORT:
-      GenSput(cu, vB, rl_src[0], false, false);
+      cg->GenSput(cu, vB, rl_src[0], false, false);
       break;
 
     case Instruction::SPUT_WIDE:
-      GenSput(cu, vB, rl_src[0], true, false);
+      cg->GenSput(cu, vB, rl_src[0], true, false);
       break;
 
     case Instruction::INVOKE_STATIC_RANGE:
-      GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kStatic, true));
+      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kStatic, true));
       break;
     case Instruction::INVOKE_STATIC:
-      GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kStatic, false));
+      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kStatic, false));
       break;
 
     case Instruction::INVOKE_DIRECT:
-      GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kDirect, false));
+      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kDirect, false));
       break;
     case Instruction::INVOKE_DIRECT_RANGE:
-      GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kDirect, true));
+      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kDirect, true));
       break;
 
     case Instruction::INVOKE_VIRTUAL:
-      GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kVirtual, false));
+      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kVirtual, false));
       break;
     case Instruction::INVOKE_VIRTUAL_RANGE:
-      GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kVirtual, true));
+      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kVirtual, true));
       break;
 
     case Instruction::INVOKE_SUPER:
-      GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kSuper, false));
+      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kSuper, false));
       break;
     case Instruction::INVOKE_SUPER_RANGE:
-      GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kSuper, true));
+      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kSuper, true));
       break;
 
     case Instruction::INVOKE_INTERFACE:
-      GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kInterface, false));
+      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kInterface, false));
       break;
     case Instruction::INVOKE_INTERFACE_RANGE:
-      GenInvoke(cu, NewMemCallInfo(cu, bb, mir, kInterface, true));
+      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kInterface, true));
       break;
 
     case Instruction::NEG_INT:
     case Instruction::NOT_INT:
-      res = GenArithOpInt(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
+      res = cg->GenArithOpInt(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
       break;
 
     case Instruction::NEG_LONG:
     case Instruction::NOT_LONG:
-      res = GenArithOpLong(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
+      res = cg->GenArithOpLong(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
       break;
 
     case Instruction::NEG_FLOAT:
-      res = GenArithOpFloat(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
+      res = cg->GenArithOpFloat(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
       break;
 
     case Instruction::NEG_DOUBLE:
-      res = GenArithOpDouble(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
+      res = cg->GenArithOpDouble(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
       break;
 
     case Instruction::INT_TO_LONG:
-      GenIntToLong(cu, rl_dest, rl_src[0]);
+      cg->GenIntToLong(cu, rl_dest, rl_src[0]);
       break;
 
     case Instruction::LONG_TO_INT:
       rl_src[0] = UpdateLocWide(cu, rl_src[0]);
       rl_src[0] = WideToNarrow(cu, rl_src[0]);
-      StoreValue(cu, rl_dest, rl_src[0]);
+      cg->StoreValue(cu, rl_dest, rl_src[0]);
       break;
 
     case Instruction::INT_TO_BYTE:
     case Instruction::INT_TO_SHORT:
     case Instruction::INT_TO_CHAR:
-      GenIntNarrowing(cu, opcode, rl_dest, rl_src[0]);
+      cg->GenIntNarrowing(cu, opcode, rl_dest, rl_src[0]);
       break;
 
     case Instruction::INT_TO_FLOAT:
@@ -533,7 +495,7 @@
     case Instruction::DOUBLE_TO_INT:
     case Instruction::DOUBLE_TO_LONG:
     case Instruction::DOUBLE_TO_FLOAT:
-      GenConversion(cu, opcode, rl_dest, rl_src[0]);
+      cg->GenConversion(cu, opcode, rl_dest, rl_src[0]);
       break;
 
     case Instruction::ADD_INT:
@@ -558,7 +520,7 @@
     case Instruction::SHL_INT_2ADDR:
     case Instruction::SHR_INT_2ADDR:
     case Instruction::USHR_INT_2ADDR:
-      GenArithOpInt(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+      cg->GenArithOpInt(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::ADD_LONG:
@@ -577,7 +539,7 @@
     case Instruction::AND_LONG_2ADDR:
     case Instruction::OR_LONG_2ADDR:
     case Instruction::XOR_LONG_2ADDR:
-      GenArithOpLong(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+      cg->GenArithOpLong(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::SHL_LONG:
@@ -586,7 +548,7 @@
     case Instruction::SHL_LONG_2ADDR:
     case Instruction::SHR_LONG_2ADDR:
     case Instruction::USHR_LONG_2ADDR:
-      GenShiftOpLong(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+      cg->GenShiftOpLong(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::ADD_FLOAT:
@@ -599,7 +561,7 @@
     case Instruction::MUL_FLOAT_2ADDR:
     case Instruction::DIV_FLOAT_2ADDR:
     case Instruction::REM_FLOAT_2ADDR:
-      GenArithOpFloat(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+      cg->GenArithOpFloat(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::ADD_DOUBLE:
@@ -612,7 +574,7 @@
     case Instruction::MUL_DOUBLE_2ADDR:
     case Instruction::DIV_DOUBLE_2ADDR:
     case Instruction::REM_DOUBLE_2ADDR:
-      GenArithOpDouble(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+      cg->GenArithOpDouble(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::RSUB_INT:
@@ -634,7 +596,7 @@
     case Instruction::SHL_INT_LIT8:
     case Instruction::SHR_INT_LIT8:
     case Instruction::USHR_INT_LIT8:
-      GenArithOpIntLit(cu, opcode, rl_dest, rl_src[0], vC);
+      cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[0], vC);
       break;
 
     default:
@@ -643,9 +605,10 @@
   return res;
 }
 
-/* Extended MIR instructions like PHI */
+// Process extended MIR instructions (such as PHI).
 static void HandleExtendedMethodMIR(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
 {
+  Codegen* cg = cu->cg.get();
   int op_offset = mir->dalvikInsn.opcode - kMirOpFirst;
   char* msg = NULL;
   if (cu->verbose) {
@@ -668,33 +631,34 @@
     case kMirOpCopy: {
       RegLocation rl_src = GetSrc(cu, mir, 0);
       RegLocation rl_dest = GetDest(cu, mir);
-      StoreValue(cu, rl_dest, rl_src);
+      cg->StoreValue(cu, rl_dest, rl_src);
       break;
     }
     case kMirOpFusedCmplFloat:
-      GenFusedFPCmpBranch(cu, bb, mir, false /*gt bias*/, false /*double*/);
+      cg->GenFusedFPCmpBranch(cu, bb, mir, false /*gt bias*/, false /*double*/);
       break;
     case kMirOpFusedCmpgFloat:
-      GenFusedFPCmpBranch(cu, bb, mir, true /*gt bias*/, false /*double*/);
+      cg->GenFusedFPCmpBranch(cu, bb, mir, true /*gt bias*/, false /*double*/);
       break;
     case kMirOpFusedCmplDouble:
-      GenFusedFPCmpBranch(cu, bb, mir, false /*gt bias*/, true /*double*/);
+      cg->GenFusedFPCmpBranch(cu, bb, mir, false /*gt bias*/, true /*double*/);
       break;
     case kMirOpFusedCmpgDouble:
-      GenFusedFPCmpBranch(cu, bb, mir, true /*gt bias*/, true /*double*/);
+      cg->GenFusedFPCmpBranch(cu, bb, mir, true /*gt bias*/, true /*double*/);
       break;
     case kMirOpFusedCmpLong:
-      GenFusedLongCmpBranch(cu, bb, mir);
+      cg->GenFusedLongCmpBranch(cu, bb, mir);
       break;
     default:
       break;
   }
 }
 
-/* Handle the content in each basic block */
+// Handle the content in each basic block.
 static bool MethodBlockCodeGen(CompilationUnit* cu, BasicBlock* bb)
 {
   if (bb->block_type == kDead) return false;
+  Codegen* cg = cu->cg.get();
   cu->current_dalvik_offset = bb->start_offset;
   MIR* mir;
   LIR* label_list = cu->block_label_list;
@@ -703,30 +667,29 @@
   cu->cur_block = bb;
   label_list[block_id].operands[0] = bb->start_offset;
 
-  /* Insert the block label */
+  // Insert the block label.
   label_list[block_id].opcode = kPseudoNormalBlockLabel;
   AppendLIR(cu, &label_list[block_id]);
 
   LIR* head_lir = NULL;
 
-  /* If this is a catch block, export the start address */
+  // If this is a catch block, export the start address.
   if (bb->catch_entry) {
     head_lir = NewLIR0(cu, kPseudoExportedPC);
   }
 
-  /* Free temp registers and reset redundant store tracking */
+  // Free temp registers and reset redundant store tracking.
   ResetRegPool(cu);
   ResetDefTracking(cu);
 
   ClobberAllRegs(cu);
 
-
   if (bb->block_type == kEntryBlock) {
     int start_vreg = cu->num_dalvik_registers - cu->num_ins;
-    GenEntrySequence(cu, &cu->reg_location[start_vreg],
-                     cu->reg_location[cu->method_sreg]);
+    cg->GenEntrySequence(cu, &cu->reg_location[start_vreg],
+                         cu->reg_location[cu->method_sreg]);
   } else if (bb->block_type == kExitBlock) {
-    GenExitSequence(cu);
+    cg->GenExitSequence(cu);
   }
 
   for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
@@ -740,7 +703,7 @@
     }
 
 #ifndef NDEBUG
-    /* Reset temp tracking sanity check */
+    // Reset temp tracking sanity check.
     cu->live_sreg = INVALID_SREG;
 #endif
 
@@ -748,18 +711,18 @@
     int opcode = mir->dalvikInsn.opcode;
     LIR* boundary_lir;
 
-    /* Mark the beginning of a Dalvik instruction for line tracking */
+    // Mark the beginning of a Dalvik instruction for line tracking.
     char* inst_str = cu->verbose ?
        GetDalvikDisassembly(cu, mir->dalvikInsn, "") : NULL;
     boundary_lir = MarkBoundary(cu, mir->offset, inst_str);
-    /* Remember the first LIR for this block */
+    // Remember the first LIR for this block.
     if (head_lir == NULL) {
       head_lir = boundary_lir;
-      /* Set the first boundary_lir as a scheduling barrier */
+      // Set the first boundary_lir as a scheduling barrier.
       head_lir->def_mask = ENCODE_ALL;
     }
 
-    /* Don't generate the SSA annotation unless verbose mode is on */
+    // Don't generate the SSA annotation unless verbose mode is on.
     if (cu->verbose && mir->ssa_rep) {
       char* ssa_string = GetSSAString(cu, mir->ssa_rep);
       NewLIR1(cu, kPseudoSSARep, reinterpret_cast<uintptr_t>(ssa_string));
@@ -790,17 +753,12 @@
   }
 
   if (head_lir) {
-    /*
-     * Eliminate redundant loads/stores and delay stores into later
-     * slots
-     */
+    // Eliminate redundant loads/stores and delay stores into later slots.
     ApplyLocalOptimizations(cu, head_lir, cu->last_lir_insn);
 
-    /*
-     * Generate an unconditional branch to the fallthrough block.
-     */
+    // Generate an unconditional branch to the fallthrough block.
     if (bb->fall_through) {
-      OpUnconditionalBranch(cu, &label_list[bb->fall_through->id]);
+      cg->OpUnconditionalBranch(cu, &label_list[bb->fall_through->id]);
     }
   }
   return false;
@@ -808,7 +766,8 @@
 
 void SpecialMIR2LIR(CompilationUnit* cu, SpecialCaseHandler special_case)
 {
-  /* Find the first DalvikByteCode block */
+  Codegen* cg = cu->cg.get();
+  // Find the first DalvikByteCode block.
   int num_reachable_blocks = cu->num_reachable_blocks;
   const GrowableList *block_list = &cu->block_list;
   BasicBlock*bb = NULL;
@@ -825,31 +784,32 @@
   DCHECK_EQ(bb->start_offset, 0);
   DCHECK(bb->first_mir_insn != NULL);
 
-  /* Get the first instruction */
+  // Get the first instruction.
   MIR* mir = bb->first_mir_insn;
 
-  /* Free temp registers and reset redundant store tracking */
+  // Free temp registers and reset redundant store tracking.
   ResetRegPool(cu);
   ResetDefTracking(cu);
   ClobberAllRegs(cu);
 
-  GenSpecialCase(cu, bb, mir, special_case);
+  cg->GenSpecialCase(cu, bb, mir, special_case);
 }
 
 void MethodMIR2LIR(CompilationUnit* cu)
 {
-  /* Used to hold the labels of each block */
+  Codegen* cg = cu->cg.get();
+  // Hold the labels of each block.
   cu->block_label_list =
       static_cast<LIR*>(NewMem(cu, sizeof(LIR) * cu->num_blocks, true, kAllocLIR));
 
   DataFlowAnalysisDispatcher(cu, MethodBlockCodeGen,
                                 kPreOrderDFSTraversal, false /* Iterative */);
 
-  HandleSuspendLaunchPads(cu);
+  cg->HandleSuspendLaunchPads(cu);
 
-  HandleThrowLaunchPads(cu);
+  cg->HandleThrowLaunchPads(cu);
 
-  HandleIntrinsicLaunchPads(cu);
+  cg->HandleIntrinsicLaunchPads(cu);
 
   if (!(cu->disable_opt & (1 << kSafeOptimizations))) {
     RemoveRedundantBranches(cu);
diff --git a/src/compiler/codegen/method_bitcode.h b/src/compiler/codegen/mir_to_lir.h
similarity index 72%
copy from src/compiler/codegen/method_bitcode.h
copy to src/compiler/codegen/mir_to_lir.h
index df4f4d4..084498a 100644
--- a/src/compiler/codegen/method_bitcode.h
+++ b/src/compiler/codegen/mir_to_lir.h
@@ -14,14 +14,14 @@
  * limitations under the License.
  */
 
-#ifndef ART_SRC_COMPILER_CODEGEN_METHODBITCODE_H_
-#define ART_SRC_COMPILER_CODEGEN_METHODBITCODE_H_
+#ifndef ART_SRC_COMPILER_CODEGEN_MIRTOLIR_H_
+#define ART_SRC_COMPILER_CODEGEN_MIRTOLIR_H_
 
 namespace art {
+void SpecialMIR2LIR(CompilationUnit* cu, SpecialCaseHandler special_case);
+void MethodMIR2LIR(CompilationUnit* cu);
 
-void MethodMIR2Bitcode(CompilationUnit* cu);
-void MethodBitcode2LIR(CompilationUnit* cu);
 
 }  // namespace art
 
-#endif // ART_SRC_COMPILER_CODEGEN_METHODBITCODE_H_
+#endif // ART_SRC_COMPILER_CODEGEN_MIRTOLIR_H_
diff --git a/src/compiler/codegen/ralloc_util.cc b/src/compiler/codegen/ralloc_util.cc
index a26e0cd..7cc3fad 100644
--- a/src/compiler/codegen/ralloc_util.cc
+++ b/src/compiler/codegen/ralloc_util.cc
@@ -24,6 +24,10 @@
 
 namespace art {
 
+static const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
+                                    INVALID_REG, INVALID_REG, INVALID_SREG,
+                                    INVALID_SREG};
+
 /*
  * Free all allocated temps in the temp pools.  Note that this does
  * not affect the "liveness" of a temp register, which will stay
@@ -102,7 +106,8 @@
 /* Mark a temp register as dead.  Does not affect allocation state. */
 void Clobber(CompilationUnit* cu, int reg)
 {
-  ClobberBody(cu, GetRegInfo(cu, reg));
+  Codegen* cg = cu->cg.get();
+  ClobberBody(cu, cg->GetRegInfo(cu, reg));
 }
 
 static void ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg)
@@ -158,9 +163,10 @@
 
 void RecordCorePromotion(CompilationUnit* cu, int reg, int s_reg)
 {
+  Codegen* cg = cu->cg.get();
   int p_map_idx = SRegToPMap(cu, s_reg);
   int v_reg = SRegToVReg(cu, s_reg);
-  GetRegInfo(cu, reg)->in_use = true;
+  cg->GetRegInfo(cu, reg)->in_use = true;
   cu->core_spill_mask |= (1 << reg);
   // Include reg for later sort
   cu->core_vmap_table.push_back(reg << VREG_NUM_WIDTH |
@@ -187,10 +193,11 @@
 
 void RecordFpPromotion(CompilationUnit* cu, int reg, int s_reg)
 {
+  Codegen* cg = cu->cg.get();
   int p_map_idx = SRegToPMap(cu, s_reg);
   int v_reg = SRegToVReg(cu, s_reg);
-  GetRegInfo(cu, reg)->in_use = true;
-  MarkPreservedSingle(cu, v_reg, reg);
+  cg->GetRegInfo(cu, reg)->in_use = true;
+  cg->MarkPreservedSingle(cu, v_reg, reg);
   cu->promotion_map[p_map_idx].fp_location = kLocPhysReg;
   cu->promotion_map[p_map_idx].FpReg = reg;
 }
@@ -225,6 +232,7 @@
  */
 static int AllocPreservedDouble(CompilationUnit* cu, int s_reg)
 {
+  Codegen* cg = cu->cg.get();
   int res = -1; // Assume failure
   int v_reg = SRegToVReg(cu, s_reg);
   int p_map_idx = SRegToPMap(cu, s_reg);
@@ -236,7 +244,7 @@
       return res;
     }
     // Is the low reg of the pair free?
-    RegisterInfo* p = GetRegInfo(cu, high_reg-1);
+    RegisterInfo* p = cg->GetRegInfo(cu, high_reg-1);
     if (p->in_use || p->is_temp) {
       // Already allocated or not preserved - fail.
       return res;
@@ -245,7 +253,7 @@
     res = p->reg;
     p->in_use = true;
     DCHECK_EQ((res & 1), 0);
-    MarkPreservedSingle(cu, v_reg, res);
+    cg->MarkPreservedSingle(cu, v_reg, res);
   } else {
     RegisterInfo* FPRegs = cu->reg_pool->FPRegs;
     for (int i = 0; i < cu->reg_pool->num_fp_regs; i++) {
@@ -256,10 +264,10 @@
         (FPRegs[i].reg + 1) == FPRegs[i+1].reg) {
         res = FPRegs[i].reg;
         FPRegs[i].in_use = true;
-        MarkPreservedSingle(cu, v_reg, res);
+        cg->MarkPreservedSingle(cu, v_reg, res);
         FPRegs[i+1].in_use = true;
         DCHECK_EQ(res + 1, FPRegs[i+1].reg);
-        MarkPreservedSingle(cu, v_reg+1, res+1);
+        cg->MarkPreservedSingle(cu, v_reg+1, res+1);
         break;
       }
     }
@@ -498,19 +506,22 @@
 
 RegisterInfo* IsTemp(CompilationUnit* cu, int reg)
 {
-  RegisterInfo* p = GetRegInfo(cu, reg);
+  Codegen* cg = cu->cg.get();
+  RegisterInfo* p = cg->GetRegInfo(cu, reg);
   return (p->is_temp) ? p : NULL;
 }
 
 RegisterInfo* IsPromoted(CompilationUnit* cu, int reg)
 {
-  RegisterInfo* p = GetRegInfo(cu, reg);
+  Codegen* cg = cu->cg.get();
+  RegisterInfo* p = cg->GetRegInfo(cu, reg);
   return (p->is_temp) ? NULL : p;
 }
 
 bool IsDirty(CompilationUnit* cu, int reg)
 {
-  RegisterInfo* p = GetRegInfo(cu, reg);
+  Codegen* cg = cu->cg.get();
+  RegisterInfo* p = cg->GetRegInfo(cu, reg);
   return p->dirty;
 }
 
@@ -553,7 +564,8 @@
 
 void ResetDef(CompilationUnit* cu, int reg)
 {
-  ResetDefBody(GetRegInfo(cu, reg));
+  Codegen* cg = cu->cg.get();
+  ResetDefBody(cg->GetRegInfo(cu, reg));
 }
 
 static void NullifyRange(CompilationUnit* cu, LIR *start, LIR *finish, int s_reg1, int s_reg2)
@@ -580,7 +592,8 @@
   DCHECK(!rl.wide);
   DCHECK(start && start->next);
   DCHECK(finish);
-  RegisterInfo* p = GetRegInfo(cu, rl.low_reg);
+  Codegen* cg = cu->cg.get();
+  RegisterInfo* p = cg->GetRegInfo(cu, rl.low_reg);
   p->def_start = start->next;
   p->def_end = finish;
 }
@@ -596,7 +609,8 @@
   DCHECK(rl.wide);
   DCHECK(start && start->next);
   DCHECK(finish);
-  RegisterInfo* p = GetRegInfo(cu, rl.low_reg);
+  Codegen* cg = cu->cg.get();
+  RegisterInfo* p = cg->GetRegInfo(cu, rl.low_reg);
   ResetDef(cu, rl.high_reg);  // Only track low of pair
   p->def_start = start->next;
   p->def_end = finish;
@@ -605,9 +619,10 @@
 RegLocation WideToNarrow(CompilationUnit* cu, RegLocation rl)
 {
   DCHECK(rl.wide);
+  Codegen* cg = cu->cg.get();
   if (rl.location == kLocPhysReg) {
-    RegisterInfo* info_lo = GetRegInfo(cu, rl.low_reg);
-    RegisterInfo* info_hi = GetRegInfo(cu, rl.high_reg);
+    RegisterInfo* info_lo = cg->GetRegInfo(cu, rl.low_reg);
+    RegisterInfo* info_hi = cg->GetRegInfo(cu, rl.high_reg);
     if (info_lo->is_temp) {
       info_lo->pair = false;
       info_lo->def_start = NULL;
@@ -675,13 +690,14 @@
 // Make sure nothing is live and dirty
 static void FlushAllRegsBody(CompilationUnit* cu, RegisterInfo* info, int num_regs)
 {
+  Codegen* cg = cu->cg.get();
   int i;
   for (i=0; i < num_regs; i++) {
     if (info[i].live && info[i].dirty) {
       if (info[i].pair) {
-        FlushRegWide(cu, info[i].reg, info[i].partner);
+        cg->FlushRegWide(cu, info[i].reg, info[i].partner);
       } else {
-        FlushReg(cu, info[i].reg);
+        cg->FlushReg(cu, info[i].reg);
       }
     }
   }
@@ -698,20 +714,22 @@
 
 
 //TUNING: rewrite all of this reg stuff.  Probably use an attribute table
-static bool RegClassMatches(int reg_class, int reg)
+static bool RegClassMatches(CompilationUnit* cu, int reg_class, int reg)
 {
+  Codegen* cg = cu->cg.get();
   if (reg_class == kAnyReg) {
     return true;
   } else if (reg_class == kCoreReg) {
-    return !IsFpReg(reg);
+    return !cg->IsFpReg(reg);
   } else {
-    return IsFpReg(reg);
+    return cg->IsFpReg(reg);
   }
 }
 
 void MarkLive(CompilationUnit* cu, int reg, int s_reg)
 {
-  RegisterInfo* info = GetRegInfo(cu, reg);
+  Codegen* cg = cu->cg.get();
+  RegisterInfo* info = cg->GetRegInfo(cu, reg);
   if ((info->reg == reg) && (info->s_reg == s_reg) && info->live) {
     return;  /* already live */
   } else if (s_reg != INVALID_SREG) {
@@ -729,20 +747,23 @@
 
 void MarkTemp(CompilationUnit* cu, int reg)
 {
-  RegisterInfo* info = GetRegInfo(cu, reg);
+  Codegen* cg = cu->cg.get();
+  RegisterInfo* info = cg->GetRegInfo(cu, reg);
   info->is_temp = true;
 }
 
 void UnmarkTemp(CompilationUnit* cu, int reg)
 {
-  RegisterInfo* info = GetRegInfo(cu, reg);
+  Codegen* cg = cu->cg.get();
+  RegisterInfo* info = cg->GetRegInfo(cu, reg);
   info->is_temp = false;
 }
 
 void MarkPair(CompilationUnit* cu, int low_reg, int high_reg)
 {
-  RegisterInfo* info_lo = GetRegInfo(cu, low_reg);
-  RegisterInfo* info_hi = GetRegInfo(cu, high_reg);
+  Codegen* cg = cu->cg.get();
+  RegisterInfo* info_lo = cg->GetRegInfo(cu, low_reg);
+  RegisterInfo* info_hi = cg->GetRegInfo(cu, high_reg);
   info_lo->pair = info_hi->pair = true;
   info_lo->partner = high_reg;
   info_hi->partner = low_reg;
@@ -750,10 +771,11 @@
 
 void MarkClean(CompilationUnit* cu, RegLocation loc)
 {
-  RegisterInfo* info = GetRegInfo(cu, loc.low_reg);
+  Codegen* cg = cu->cg.get();
+  RegisterInfo* info = cg->GetRegInfo(cu, loc.low_reg);
   info->dirty = false;
   if (loc.wide) {
-    info = GetRegInfo(cu, loc.high_reg);
+    info = cg->GetRegInfo(cu, loc.high_reg);
     info->dirty = false;
   }
 }
@@ -764,24 +786,27 @@
     // If already home, can't be dirty
     return;
   }
-  RegisterInfo* info = GetRegInfo(cu, loc.low_reg);
+  Codegen* cg = cu->cg.get();
+  RegisterInfo* info = cg->GetRegInfo(cu, loc.low_reg);
   info->dirty = true;
   if (loc.wide) {
-    info = GetRegInfo(cu, loc.high_reg);
+    info = cg->GetRegInfo(cu, loc.high_reg);
     info->dirty = true;
   }
 }
 
 void MarkInUse(CompilationUnit* cu, int reg)
 {
-    RegisterInfo* info = GetRegInfo(cu, reg);
+  Codegen* cg = cu->cg.get();
+    RegisterInfo* info = cg->GetRegInfo(cu, reg);
     info->in_use = true;
 }
 
 static void CopyRegInfo(CompilationUnit* cu, int new_reg, int old_reg)
 {
-  RegisterInfo* new_info = GetRegInfo(cu, new_reg);
-  RegisterInfo* old_info = GetRegInfo(cu, old_reg);
+  Codegen* cg = cu->cg.get();
+  RegisterInfo* new_info = cg->GetRegInfo(cu, new_reg);
+  RegisterInfo* old_info = cg->GetRegInfo(cu, old_reg);
   // Target temp status must not change
   bool is_temp = new_info->is_temp;
   *new_info = *old_info;
@@ -792,12 +817,13 @@
 
 static bool CheckCorePoolSanity(CompilationUnit* cu)
 {
+  Codegen* cg = cu->cg.get();
    for (static int i = 0; i < cu->reg_pool->num_core_regs; i++) {
      if (cu->reg_pool->core_regs[i].pair) {
        static int my_reg = cu->reg_pool->core_regs[i].reg;
        static int my_sreg = cu->reg_pool->core_regs[i].s_reg;
        static int partner_reg = cu->reg_pool->core_regs[i].partner;
-       static RegisterInfo* partner = GetRegInfo(cu, partner_reg);
+       static RegisterInfo* partner = cg->GetRegInfo(cu, partner_reg);
        DCHECK(partner != NULL);
        DCHECK(partner->pair);
        DCHECK_EQ(my_reg, partner->partner);
@@ -855,6 +881,7 @@
 {
   DCHECK(loc.wide);
   DCHECK(CheckCorePoolSanity(cu));
+  Codegen* cg = cu->cg.get();
   if (loc.location != kLocPhysReg) {
     DCHECK((loc.location == kLocDalvikFrame) ||
          (loc.location == kLocCompilerTemp));
@@ -866,9 +893,9 @@
     match = match && (info_lo != NULL);
     match = match && (info_hi != NULL);
     // Are they both core or both FP?
-    match = match && (IsFpReg(info_lo->reg) == IsFpReg(info_hi->reg));
+    match = match && (cg->IsFpReg(info_lo->reg) == cg->IsFpReg(info_hi->reg));
     // If a pair of floating point singles, are they properly aligned?
-    if (match && IsFpReg(info_lo->reg)) {
+    if (match && cg->IsFpReg(info_lo->reg)) {
       match &= ((info_lo->reg & 0x1) == 0);
       match &= ((info_hi->reg - info_lo->reg) == 1);
     }
@@ -884,7 +911,7 @@
       loc.high_reg = info_hi->reg;
       loc.location = kLocPhysReg;
       MarkPair(cu, loc.low_reg, loc.high_reg);
-      DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+      DCHECK(!cg->IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
       return loc;
     }
     // Can't easily reuse - clobber and free any overlaps
@@ -920,20 +947,20 @@
   int new_regs;
   int low_reg;
   int high_reg;
+  Codegen* cg = cu->cg.get();
 
   loc = UpdateLocWide(cu, loc);
 
   /* If already in registers, we can assume proper form.  Right reg class? */
   if (loc.location == kLocPhysReg) {
-    DCHECK_EQ(IsFpReg(loc.low_reg), IsFpReg(loc.high_reg));
-    DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
-    if (!RegClassMatches(reg_class, loc.low_reg)) {
+    DCHECK_EQ(cg->IsFpReg(loc.low_reg), cg->IsFpReg(loc.high_reg));
+    DCHECK(!cg->IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+    if (!RegClassMatches(cu, reg_class, loc.low_reg)) {
       /* Wrong register class.  Reallocate and copy */
-      new_regs = AllocTypedTempPair(cu, loc.fp, reg_class);
+      new_regs = cg->AllocTypedTempPair(cu, loc.fp, reg_class);
       low_reg = new_regs & 0xff;
       high_reg = (new_regs >> 8) & 0xff;
-      OpRegCopyWide(cu, low_reg, high_reg, loc.low_reg,
-                    loc.high_reg);
+      cg->OpRegCopyWide(cu, low_reg, high_reg, loc.low_reg, loc.high_reg);
       CopyRegInfo(cu, low_reg, loc.low_reg);
       CopyRegInfo(cu, high_reg, loc.high_reg);
       Clobber(cu, loc.low_reg);
@@ -941,7 +968,7 @@
       loc.low_reg = low_reg;
       loc.high_reg = high_reg;
       MarkPair(cu, loc.low_reg, loc.high_reg);
-      DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+      DCHECK(!cg->IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
     }
     return loc;
   }
@@ -949,7 +976,7 @@
   DCHECK_NE(loc.s_reg_low, INVALID_SREG);
   DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);
 
-  new_regs = AllocTypedTempPair(cu, loc.fp, reg_class);
+  new_regs = cg->AllocTypedTempPair(cu, loc.fp, reg_class);
   loc.low_reg = new_regs & 0xff;
   loc.high_reg = (new_regs >> 8) & 0xff;
 
@@ -959,7 +986,7 @@
     MarkLive(cu, loc.low_reg, loc.s_reg_low);
     MarkLive(cu, loc.high_reg, GetSRegHi(loc.s_reg_low));
   }
-  DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+  DCHECK(!cg->IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
   return loc;
 }
 
@@ -971,13 +998,14 @@
   if (loc.wide)
     return EvalLocWide(cu, loc, reg_class, update);
 
+  Codegen* cg = cu->cg.get();
   loc = UpdateLoc(cu, loc);
 
   if (loc.location == kLocPhysReg) {
-    if (!RegClassMatches(reg_class, loc.low_reg)) {
+    if (!RegClassMatches(cu, reg_class, loc.low_reg)) {
       /* Wrong register class.  Realloc, copy and transfer ownership */
-      new_reg = AllocTypedTemp(cu, loc.fp, reg_class);
-      OpRegCopy(cu, new_reg, loc.low_reg);
+      new_reg = cg->AllocTypedTemp(cu, loc.fp, reg_class);
+      cg->OpRegCopy(cu, new_reg, loc.low_reg);
       CopyRegInfo(cu, new_reg, loc.low_reg);
       Clobber(cu, loc.low_reg);
       loc.low_reg = new_reg;
@@ -987,7 +1015,7 @@
 
   DCHECK_NE(loc.s_reg_low, INVALID_SREG);
 
-  new_reg = AllocTypedTemp(cu, loc.fp, reg_class);
+  new_reg = cg->AllocTypedTemp(cu, loc.fp, reg_class);
   loc.low_reg = new_reg;
 
   if (update) {
@@ -1092,13 +1120,14 @@
  */
 void DoPromotion(CompilationUnit* cu)
 {
+  Codegen* cg = cu->cg.get();
   int reg_bias = cu->num_compiler_temps + 1;
   int dalvik_regs = cu->num_dalvik_registers;
   int num_regs = dalvik_regs + reg_bias;
   const int promotion_threshold = 2;
 
   // Allow target code to add any special registers
-  AdjustSpillMask(cu);
+  cg->AdjustSpillMask(cu);
 
   /*
    * Simple register promotion. Just do a static count of the uses
@@ -1262,4 +1291,40 @@
   return VRegOffset(cu, SRegToVReg(cu, s_reg));
 }
 
+RegLocation GetBadLoc()
+{
+  RegLocation res = bad_loc;
+  return res;
+}
+
+/* Mark register usage state and return long retloc */
+RegLocation GetReturnWide(CompilationUnit* cu, bool is_double)
+{
+  Codegen* cg = cu->cg.get();
+  RegLocation gpr_res = cg->LocCReturnWide();
+  RegLocation fpr_res = cg->LocCReturnDouble();
+  RegLocation res = is_double ? fpr_res : gpr_res;
+  Clobber(cu, res.low_reg);
+  Clobber(cu, res.high_reg);
+  LockTemp(cu, res.low_reg);
+  LockTemp(cu, res.high_reg);
+  MarkPair(cu, res.low_reg, res.high_reg);
+  return res;
+}
+
+RegLocation GetReturn(CompilationUnit* cu, bool is_float)
+{
+  Codegen* cg = cu->cg.get();
+  RegLocation gpr_res = cg->LocCReturn();
+  RegLocation fpr_res = cg->LocCReturnFloat();
+  RegLocation res = is_float ? fpr_res : gpr_res;
+  Clobber(cu, res.low_reg);
+  if (cu->instruction_set == kMips) {
+    MarkInUse(cu, res.low_reg);
+  } else {
+    LockTemp(cu, res.low_reg);
+  }
+  return res;
+}
+
 }  // namespace art
diff --git a/src/compiler/codegen/ralloc_util.h b/src/compiler/codegen/ralloc_util.h
index 4e897ca..78a623b 100644
--- a/src/compiler/codegen/ralloc_util.h
+++ b/src/compiler/codegen/ralloc_util.h
@@ -21,9 +21,9 @@
  * This file contains target independent register alloction support.
  */
 
-#include "../compiler_utility.h"
 #include "../compiler_ir.h"
 #include "../dataflow.h"
+#include "../compiler_utility.h"
 
 namespace art {
 
@@ -34,7 +34,6 @@
   bool double_start;   // Starting v_reg for a double
 };
 
-
 /*
  * Get the "real" sreg number associated with an s_reg slot.  In general,
  * s_reg values passed through codegen are the SSA names created by
@@ -44,12 +43,10 @@
  * records for operands are first created, we need to ask the locRecord
  * identified by the dataflow pass what it's new name is.
  */
-
 inline int GetSRegHi(int lowSreg) {
   return (lowSreg == INVALID_SREG) ? INVALID_SREG : lowSreg + 1;
 }
 
-
 inline bool oat_live_out(CompilationUnit* cu, int s_reg) {
   //For now.
   return true;
@@ -63,32 +60,23 @@
 void ClobberSReg(CompilationUnit* cu, int s_reg);
 RegLocation EvalLoc(CompilationUnit* cu, RegLocation loc,
                               int reg_class, bool update);
-/* Mark a temp register as dead.  Does not affect allocation state. */
+// Mark a temp register as dead.  Does not affect allocation state.
 void Clobber(CompilationUnit* cu, int reg);
+
 RegLocation UpdateLoc(CompilationUnit* cu, RegLocation loc);
-
-/* see comments for update_loc */
 RegLocation UpdateLocWide(CompilationUnit* cu, RegLocation loc);
-
 RegLocation UpdateRawLoc(CompilationUnit* cu, RegLocation loc);
 
 void MarkLive(CompilationUnit* cu, int reg, int s_reg);
-
 void MarkTemp(CompilationUnit* cu, int reg);
-
 void UnmarkTemp(CompilationUnit* cu, int reg);
-
 void MarkDirty(CompilationUnit* cu, RegLocation loc);
-
 void MarkPair(CompilationUnit* cu, int low_reg, int high_reg);
-
 void MarkClean(CompilationUnit* cu, RegLocation loc);
-
 void ResetDef(CompilationUnit* cu, int reg);
-
 void ResetDefLoc(CompilationUnit* cu, RegLocation rl);
 
-/* Set up temp & preserved register pools specialized by target */
+// Set up temp & preserved register pools specialized by target.
 void CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num);
 
 /*
@@ -96,83 +84,57 @@
  * on entry start points to the LIR prior to the beginning of the
  * sequence.
  */
-void MarkDef(CompilationUnit* cu, RegLocation rl, LIR* start,
-                       LIR* finish);
-/*
- * Mark the beginning and end LIR of a def sequence.  Note that
- * on entry start points to the LIR prior to the beginning of the
- * sequence.
- */
-void MarkDefWide(CompilationUnit* cu, RegLocation rl,
-                           LIR* start, LIR* finish);
+void MarkDef(CompilationUnit* cu, RegLocation rl, LIR* start, LIR* finish);
+void MarkDefWide(CompilationUnit* cu, RegLocation rl, LIR* start, LIR* finish);
+void ResetDefLocWide(CompilationUnit* cu, RegLocation rl);
+void ResetDefTracking(CompilationUnit* cu);
 
 
 // Get the LocRecord associated with an SSA name use.
 RegLocation GetSrc(CompilationUnit* cu, MIR* mir, int num);
 RegLocation GetSrcWide(CompilationUnit* cu, MIR* mir, int low);
-// Non-width checking version
+// Non-width checking version.
 RegLocation GetRawSrc(CompilationUnit* cu, MIR* mir, int num);
 
 // Get the LocRecord associated with an SSA name def.
 RegLocation GetDest(CompilationUnit* cu, MIR* mir);
 RegLocation GetDestWide(CompilationUnit* cu, MIR* mir);
-// Non-width checking version
+// Non-width checking version.
 RegLocation GetRawDest(CompilationUnit* cu, MIR* mir);
 
-RegLocation GetReturnWide(CompilationUnit* cu, bool is_double);
-
-/* Clobber all regs that might be used by an external C call */
+// Clobber all regs that might be used by an external C call.
 void ClobberCalleeSave(CompilationUnit* cu);
 
 RegisterInfo *IsTemp(CompilationUnit* cu, int reg);
-
 RegisterInfo *IsPromoted(CompilationUnit* cu, int reg);
-
+RegisterInfo *IsLive(CompilationUnit* cu, int reg);
 bool IsDirty(CompilationUnit* cu, int reg);
 
 void MarkInUse(CompilationUnit* cu, int reg);
 
 int AllocTemp(CompilationUnit* cu);
-
 int AllocTempFloat(CompilationUnit* cu);
-
-//REDO: too many assumptions.
 int AllocTempDouble(CompilationUnit* cu);
-
 void FreeTemp(CompilationUnit* cu, int reg);
-
-void ResetDefLocWide(CompilationUnit* cu, RegLocation rl);
-
-void ResetDefTracking(CompilationUnit* cu);
-
-RegisterInfo *IsLive(CompilationUnit* cu, int reg);
-
-/* To be used when explicitly managing register use */
-void LockCallTemps(CompilationUnit* cu);
-
-void FreeCallTemps(CompilationUnit* cu);
-
-void FlushAllRegs(CompilationUnit* cu);
-
-RegLocation GetReturnWideAlt(CompilationUnit* cu);
-
-RegLocation GetReturn(CompilationUnit* cu, bool is_float);
-
-RegLocation GetReturnAlt(CompilationUnit* cu);
-
-/* Clobber any temp associated with an s_reg.  Could be in either class */
-
-/* Return a temp if one is available, -1 otherwise */
+// Return a temp if one is available, -1 otherwise.
 int AllocFreeTemp(CompilationUnit* cu);
-
-/* Attempt to allocate a callee-save register */
 /*
+ * Attempt to allocate a callee-save register.
  * Similar to AllocTemp(), but forces the allocation of a specific
  * register.  No check is made to see if the register was previously
  * allocated.  Use with caution.
  */
 void LockTemp(CompilationUnit* cu, int reg);
 
+/* To be used when explicitly managing register use */
+void LockCallTemps(CompilationUnit* cu);
+void FreeCallTemps(CompilationUnit* cu);
+
+void FlushAllRegs(CompilationUnit* cu);
+
+RegLocation GetReturn(CompilationUnit* cu, bool is_float);
+RegLocation GetReturnWide(CompilationUnit* cu, bool is_double);
+RegLocation GetBadLoc();
 RegLocation WideToNarrow(CompilationUnit* cu, RegLocation rl);
 
 /*
@@ -193,21 +155,6 @@
 int SRegOffset(CompilationUnit* cu, int reg);
 void RecordCorePromotion(CompilationUnit* cu, int reg, int s_reg);
 void RecordFpPromotion(CompilationUnit* cu, int reg, int s_reg);
-
-
-/* Architecture-dependent register allocation routines. */
-int AllocTypedTempPair(CompilationUnit* cu,
-                                 bool fp_hint, int reg_class);
-
-int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class);
-
-void oatDumpFPRegPool(CompilationUnit* cUint);
-RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg);
-void NopLIR(LIR* lir);
-bool oatIsFPReg(int reg);
-uint32_t oatFPRegMask(void);
-void AdjustSpillMask(CompilationUnit* cu);
-void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg);
 int ComputeFrameSize(CompilationUnit* cu);
 
 }  // namespace art
diff --git a/src/compiler/codegen/target_list.h b/src/compiler/codegen/target_list.h
deleted file mode 100644
index 0023d90..0000000
--- a/src/compiler/codegen/target_list.h
+++ /dev/null
@@ -1,131 +0,0 @@
-ArmConditionCode ArmConditionEncoding(ConditionCode code);
-AssemblerStatus AssembleInstructions(CompilationUnit* cu, uintptr_t start_addr);
-bool DoubleReg(int reg);
-bool FpReg(int reg);
-bool GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-bool GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-bool GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-bool GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-bool GenArithOpFloat(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-bool GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-bool GenConversion(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
-bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier);
-bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min);
-bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info);
-bool GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-bool GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-bool GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-bool GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-bool ArchInit();
-bool ArchVariantInit(void);
-bool IsFpReg(int reg);
-bool SameRegType(int reg1, int reg2);
-bool SingleReg(int reg);
-bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode, RegLocation rl_src, RegLocation rl_dest, int lit);
-RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg);
-RegLocation GetReturnAlt(CompilationUnit* cu);
-RegLocation GetReturnWideAlt(CompilationUnit* cu);
-void ClobberCalleeSave(CompilationUnit *cu);
-void FreeCallTemps(CompilationUnit* cu);
-void LockCallTemps(CompilationUnit* cu);
-InstructionSet InstructionSet();
-int EncodeShift(int code, int amount);
-int LoadHelper(CompilationUnit* cu, int offset);
-int ModifiedImmediate(uint32_t value);
-int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class);
-int AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class);
-int AssignInsnOffsets(CompilationUnit* cu);
-int GetInsnSize(LIR* lir);
-int S2d(int low_reg, int high_reg);
-int TargetReg(SpecialTargetRegister reg);
-LIR* FpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
-LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int base, int offset, ThrowKind kind);
-LIR* LoadBaseDispBody(CompilationUnit* cu, int rBase, int displacement, int r_dest, int r_dest_hi, OpSize size, int s_reg);
-LIR* LoadBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest, OpSize size, int s_reg);
-LIR* LoadBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_dest_lo, int r_dest_hi, int s_reg);
-LIR* LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest, int scale, OpSize size);
-LIR* LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale, int displacement, int r_dest, int r_dest_hi, OpSize size, int s_reg);
-LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value);
-LIR* LoadConstantValueWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi, int val_lo, int val_hi);
-LIR* LoadMultiple(CompilationUnit *cu, int rBase, int r_mask);
-LIR* OpBranchUnconditional(CompilationUnit* cu, OpKind op);
-LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2, LIR* target);
-LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg, int check_value, LIR* target);
-LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target);
-LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target);
-LIR* OpIT(CompilationUnit* cu, ArmConditionCode cond, const char* guide);
-LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp);
-LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target);
-LIR* OpReg(CompilationUnit* cu, OpKind op, int r_dest_src);
-LIR* OpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
-LIR* OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src);
-LIR* OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value);
-LIR* OpRegMem(CompilationUnit* cu, OpKind op, int r_dest, int rBase, int offset);
-LIR* OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2);
-LIR* OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int value);
-LIR* OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int r_src2);
-LIR* OpRegRegRegShift(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int r_src2, int shift);
-LIR* OpRegRegShift(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2, int shift);
-LIR* OpTestSuspend(CompilationUnit* cu, LIR* target);
-LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset);
-LIR* OpVldm(CompilationUnit* cu, int rBase, int count);
-LIR* OpVstm(CompilationUnit* cu, int rBase, int count);
-LIR* StoreBaseDispBody(CompilationUnit* cu, int rBase, int displacement, int r_src, int r_src_hi, OpSize size);
-LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src, OpSize size);
-LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_src_lo, int r_src_hi);
-LIR* StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src, int scale, OpSize size);
-LIR* StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale, int displacement, int r_src, int r_src_hi, OpSize size, int s_reg);
-LIR* StoreMultiple(CompilationUnit *cu, int rBase, int r_mask);
-RegLocation ArgLoc(CompilationUnit* cu, RegLocation loc);
-RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div);
-RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int lit, bool is_div);
-RegLocation LoadArg(CompilationUnit* cu, RegLocation loc);
-RegLocation LocCReturn();
-RegLocation LocCReturnDouble();
-RegLocation LocCReturnFloat();
-RegLocation LocCReturnWide();
-std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
-uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg);
-uint32_t FpRegMask();
-uint32_t FpRegMask();
-uint64_t GetPCUseDefEncoding();
-void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep, RegLocation rl_free);
-void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi);
-void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method);
-void GenExitSequence(CompilationUnit* cu);
-void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src);
-void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
-void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
-void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
-void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
-void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src, RegLocation rl_result, int lit, int first_bit, int second_bit);
-void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src);
-void GenPrintLabel(CompilationUnit *cu, MIR* mir);
-void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src);
-void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
-void LoadPair(CompilationUnit* cu, int base, int low_reg, int high_reg);
-void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg);
-void AdjustSpillMask(CompilationUnit* cu);
-void ClobberCalleeSave(CompilationUnit *cu);
-void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
-void FlushReg(CompilationUnit* cu, int reg);
-void FlushRegWide(CompilationUnit* cu, int reg1, int reg2);
-void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind);
-void CompilerInitializeRegAlloc(CompilationUnit* cu);
-void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg);
-void NopLIR( LIR* lir);
-void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale, int offset);
-void OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo, int src_hi);
-void OpRegThreadMem(CompilationUnit* cu, OpKind op, int r_dest, int thread_offset);
-void OpTlsCmp(CompilationUnit* cu, int offset, int val);
-bool BranchUnconditional(LIR* lir);
-void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir);
-void SpillCoreRegs(CompilationUnit* cu);
-void UnSpillCoreRegs(CompilationUnit* cu);
-X86ConditionCode X86ConditionEncoding(ConditionCode cond);
-uint64_t GetTargetInstFlags(int opcode);
-const char* GetTargetInstName(int opcode);
-const char* GetTargetInstFmt(int opcode);
diff --git a/src/compiler/codegen/x86/assemble_x86.cc b/src/compiler/codegen/x86/assemble_x86.cc
index 2363c20..1e04e18 100644
--- a/src/compiler/codegen/x86/assemble_x86.cc
+++ b/src/compiler/codegen/x86/assemble_x86.cc
@@ -15,13 +15,14 @@
  */
 
 #include "x86_lir.h"
+#include "codegen_x86.h"
 #include "../codegen_util.h"
 
 namespace art {
 
 #define MAX_ASSEMBLER_RETRIES 50
 
-X86EncodingMap EncodingMap[kX86Last] = {
+const X86EncodingMap X86Codegen::EncodingMap[kX86Last] = {
   { kX8632BitData, kData,    IS_UNARY_OP,            { 0, 0, 0x00, 0, 0, 0, 0, 4 }, "data",  "0x!0d" },
   { kX86Bkpt,      kNullary, NO_OPERAND | IS_BRANCH, { 0, 0, 0xCC, 0, 0, 0, 0, 0 }, "int 3", "" },
   { kX86Nop,       kNop,     IS_UNARY_OP,            { 0, 0, 0x90, 0, 0, 0, 0, 0 }, "nop",   "" },
@@ -329,7 +330,7 @@
   { kX86PcRelAdr,      kPcRel,  IS_LOAD | IS_BINARY_OP | REG_DEF0,     { 0, 0, 0xB8, 0, 0, 0, 0, 4 }, "PcRelAdr",      "!0r,!1d" },
 };
 
-static size_t ComputeSize(X86EncodingMap* entry, int displacement, bool has_sib) {
+static size_t ComputeSize(const X86EncodingMap* entry, int displacement, bool has_sib) {
   size_t size = 0;
   if (entry->skeleton.prefix1 > 0) {
     ++size;
@@ -358,8 +359,8 @@
   return size;
 }
 
-int GetInsnSize(LIR* lir) {
-  X86EncodingMap* entry = &EncodingMap[lir->opcode];
+int X86Codegen::GetInsnSize(LIR* lir) {
+  const X86EncodingMap* entry = &X86Codegen::EncodingMap[lir->opcode];
   switch (entry->kind) {
     case kData:
       return 4;  // 4 bytes of data
@@ -498,7 +499,7 @@
     case kMacro:
       DCHECK_EQ(lir->opcode, static_cast<int>(kX86StartOfMethod));
       return 5 /* call opcode + 4 byte displacement */ + 1 /* pop reg */ +
-          ComputeSize(&EncodingMap[kX86Sub32RI], 0, false) -
+          ComputeSize(&X86Codegen::EncodingMap[kX86Sub32RI], 0, false) -
           (lir->operands[0] == rAX  ? 1 : 0);  // shorter ax encoding
     default:
       break;
@@ -1173,12 +1174,14 @@
   DCHECK_LT(reg, 8);
   cu->code_buffer.push_back(0x58 + reg);  // pop reg
 
-  EmitRegImm(cu, &EncodingMap[kX86Sub32RI], reg, offset + 5 /* size of call +0 */);
+  EmitRegImm(cu, &X86Codegen::EncodingMap[kX86Sub32RI], reg, offset + 5 /* size of call +0 */);
 }
 
 static void EmitUnimplemented(CompilationUnit* cu, const X86EncodingMap* entry, LIR* lir) {
-  UNIMPLEMENTED(WARNING) << "encoding kind for " << entry->name << " " << BuildInsnString(entry->fmt, lir, 0);
-  for (int i = 0; i < GetInsnSize(lir); ++i) {
+  Codegen* cg = cu->cg.get();
+  UNIMPLEMENTED(WARNING) << "encoding kind for " << entry->name << " "
+                         << cg->BuildInsnString(entry->fmt, lir, 0);
+  for (int i = 0; i < cg->GetInsnSize(lir); ++i) {
     cu->code_buffer.push_back(0xCC);  // push breakpoint instruction - int 3
   }
 }
@@ -1189,7 +1192,7 @@
  * instruction.  In those cases we will try to substitute a new code
  * sequence or request that the trace be shortened and retried.
  */
-AssemblerStatus AssembleInstructions(CompilationUnit *cu, uintptr_t start_addr) {
+AssemblerStatus X86Codegen::AssembleInstructions(CompilationUnit *cu, uintptr_t start_addr) {
   LIR *lir;
   AssemblerStatus res = kSuccess;  // Assume success
 
@@ -1305,7 +1308,7 @@
       continue;
     }
     CHECK_EQ(static_cast<size_t>(lir->offset), cu->code_buffer.size());
-    const X86EncodingMap *entry = &EncodingMap[lir->opcode];
+    const X86EncodingMap *entry = &X86Codegen::EncodingMap[lir->opcode];
     size_t starting_cbuf_size = cu->code_buffer.size();
     switch (entry->kind) {
       case kData:  // 4 bytes of data
@@ -1409,7 +1412,7 @@
     }
     CHECK_EQ(static_cast<size_t>(GetInsnSize(lir)),
              cu->code_buffer.size() - starting_cbuf_size)
-        << "Instruction size mismatch for entry: " << EncodingMap[lir->opcode].name;
+        << "Instruction size mismatch for entry: " << X86Codegen::EncodingMap[lir->opcode].name;
   }
   return res;
 }
@@ -1418,7 +1421,7 @@
  * Target-dependent offset assignment.
  * independent.
  */
-int AssignInsnOffsets(CompilationUnit* cu)
+int X86Codegen::AssignInsnOffsets(CompilationUnit* cu)
 {
     LIR* x86_lir;
     int offset = 0;
diff --git a/src/compiler/codegen/x86/call_x86.cc b/src/compiler/codegen/x86/call_x86.cc
index e24831d..80de901 100644
--- a/src/compiler/codegen/x86/call_x86.cc
+++ b/src/compiler/codegen/x86/call_x86.cc
@@ -17,13 +17,14 @@
 /* This file contains codegen for the X86 ISA */
 
 #include "x86_lir.h"
+#include "codegen_x86.h"
 #include "../codegen_util.h"
 #include "../ralloc_util.h"
 
 namespace art {
 
-void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
-                    SpecialCaseHandler special_case)
+void X86Codegen::GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+                                SpecialCaseHandler special_case)
 {
   // TODO
 }
@@ -32,10 +33,7 @@
  * The sparse table in the literal pool is an array of <key,displacement>
  * pairs.
  */
-BasicBlock *FindBlock(CompilationUnit* cu, unsigned int code_offset,
-                      bool split, bool create, BasicBlock** immed_pred_block_p);
-void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
-                     RegLocation rl_src)
+void X86Codegen::GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   if (cu->verbose) {
@@ -47,9 +45,7 @@
   rl_src = LoadValue(cu, rl_src, kCoreReg);
   for (int i = 0; i < entries; i++) {
     int key = keys[i];
-    BasicBlock* case_block = FindBlock(cu,
-                                       cu->current_dalvik_offset + targets[i],
-                                       false, false, NULL);
+    BasicBlock* case_block = FindBlock(cu, cu->current_dalvik_offset + targets[i]);
     LIR* label_list = cu->block_label_list;
     OpCmpImmBranch(cu, kCondEq, rl_src.low_reg, key,
                    &label_list[case_block->id]);
@@ -72,8 +68,7 @@
  * jmp  r_start_of_method
  * done:
  */
-void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
-                     RegLocation rl_src)
+void X86Codegen::GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   if (cu->verbose) {
@@ -122,8 +117,6 @@
   branch_over->target = target;
 }
 
-void CallRuntimeHelperRegReg(CompilationUnit* cu, int helper_offset,
-                             int arg0, int arg1, bool safepoint_pc);
 /*
  * Array data table format:
  *  ushort ident = 0x0300   magic value
@@ -134,8 +127,7 @@
  *
  * Total size is 4+(width * size + 1)/2 16-bit code units.
  */
-void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset,
-                      RegLocation rl_src)
+void X86Codegen::GenFillArrayData(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   // Add the table to the list - we'll process it later
@@ -160,7 +152,7 @@
                           rX86_ARG1, true);
 }
 
-void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+void X86Codegen::GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
 {
   FlushAllRegs(cu);
   LoadValueDirectFixed(cu, rl_src, rCX);  // Get obj
@@ -178,7 +170,7 @@
   branch->target = NewLIR0(cu, kPseudoTargetLabel);
 }
 
-void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+void X86Codegen::GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
 {
   FlushAllRegs(cu);
   LoadValueDirectFixed(cu, rl_src, rAX);  // Get obj
@@ -202,7 +194,7 @@
 /*
  * Mark garbage collection card. Skip if the value we're storing is null.
  */
-void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
+void X86Codegen::MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
 {
   int reg_card_base = AllocTemp(cu);
   int reg_card_no = AllocTemp(cu);
@@ -217,8 +209,7 @@
   FreeTemp(cu, reg_card_no);
 }
 
-void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
-                      RegLocation rl_method)
+void X86Codegen::GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method)
 {
   /*
    * On entry, rX86_ARG0, rX86_ARG1, rX86_ARG2 are live.  Let the register
@@ -261,7 +252,7 @@
   FreeTemp(cu, rX86_ARG2);
 }
 
-void GenExitSequence(CompilationUnit* cu) {
+void X86Codegen::GenExitSequence(CompilationUnit* cu) {
   /*
    * In the exit path, rX86_RET0/rX86_RET1 are live - make sure they aren't
    * allocated by the register utilities as temps.
diff --git a/src/compiler/codegen/x86/codegen_x86.h b/src/compiler/codegen/x86/codegen_x86.h
new file mode 100644
index 0000000..2a01d9a
--- /dev/null
+++ b/src/compiler/codegen/x86/codegen_x86.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_CODEGEN_X86_CODEGENX86_H_
+#define ART_SRC_COMPILER_CODEGEN_X86_CODEGENX86_H_
+
+#include "../../compiler_internals.h"
+
+namespace art {
+
+class X86Codegen : public Codegen {
+  public:
+    // Required for target - codegen helpers.
+    virtual bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+                                    RegLocation rl_src, RegLocation rl_dest, int lit);
+    virtual int LoadHelper(CompilationUnit* cu, int offset);
+    virtual LIR* LoadBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest,
+                              OpSize size, int s_reg);
+    virtual LIR* LoadBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_dest_lo,
+                                  int r_dest_hi, int s_reg);
+    virtual LIR* LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest, int scale,
+                                 OpSize size);
+    virtual LIR* LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
+                                     int s_reg);
+    virtual LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value);
+    virtual LIR* LoadConstantValueWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi,
+                                       int val_lo, int val_hi);
+    virtual void LoadPair(CompilationUnit* cu, int base, int low_reg, int high_reg);
+    virtual LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
+                               OpSize size);
+    virtual LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_src_lo,
+                                   int r_src_hi);
+    virtual LIR* StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src, int scale,
+                                 OpSize size);
+    virtual LIR* StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+                                      int displacement, int r_src, int r_src_hi, OpSize size,
+                                      int s_reg);
+    virtual void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg);
+
+    // Required for target - register utilities.
+    virtual bool IsFpReg(int reg);
+    virtual bool SameRegType(int reg1, int reg2);
+    virtual int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class);
+    virtual int AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class);
+    virtual int S2d(int low_reg, int high_reg);
+    virtual int TargetReg(SpecialTargetRegister reg);
+    virtual RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg);
+    virtual RegLocation GetReturnAlt(CompilationUnit* cu);
+    virtual RegLocation GetReturnWideAlt(CompilationUnit* cu);
+    virtual RegLocation LocCReturn();
+    virtual RegLocation LocCReturnDouble();
+    virtual RegLocation LocCReturnFloat();
+    virtual RegLocation LocCReturnWide();
+    virtual uint32_t FpRegMask();
+    virtual uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg);
+    virtual void AdjustSpillMask(CompilationUnit* cu);
+    virtual void ClobberCalleeSave(CompilationUnit *cu);
+    virtual void FlushReg(CompilationUnit* cu, int reg);
+    virtual void FlushRegWide(CompilationUnit* cu, int reg1, int reg2);
+    virtual void FreeCallTemps(CompilationUnit* cu);
+    virtual void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep, RegLocation rl_free);
+    virtual void LockCallTemps(CompilationUnit* cu);
+    virtual void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg);
+    virtual void CompilerInitializeRegAlloc(CompilationUnit* cu);
+
+    // Required for target - miscellaneous.
+    virtual AssemblerStatus AssembleInstructions(CompilationUnit* cu, uintptr_t start_addr);
+    virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
+    virtual void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir);
+    virtual const char* GetTargetInstFmt(int opcode);
+    virtual const char* GetTargetInstName(int opcode);
+    virtual int AssignInsnOffsets(CompilationUnit* cu);
+    virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+    virtual uint64_t GetPCUseDefEncoding();
+    virtual uint64_t GetTargetInstFlags(int opcode);
+    virtual int GetInsnSize(LIR* lir);
+    virtual bool IsUnconditionalBranch(LIR* lir);
+
+    // Required for target - Dalvik-level generators.
+    virtual bool GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2);
+    virtual bool GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2);
+    virtual bool GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode,
+                                  RegLocation rl_dest, RegLocation rl_src1,
+                                  RegLocation rl_src2);
+    virtual bool GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+                                 RegLocation rl_src1, RegLocation rl_src2);
+    virtual bool GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                          RegLocation rl_src1, RegLocation rl_src2);
+    virtual bool GenConversion(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+                               RegLocation rl_src);
+    virtual bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier);
+    virtual bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min);
+    virtual bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info);
+    virtual bool GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+    virtual bool GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                           RegLocation rl_src2);
+    virtual bool GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2);
+    virtual bool GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2);
+    virtual LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int base,
+                                int offset, ThrowKind kind);
+    virtual RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int reg_hi,
+                                  bool is_div);
+    virtual RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int lit,
+                                     bool is_div);
+    virtual void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2);
+    virtual void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi);
+    virtual void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
+                                  RegLocation rl_method);
+    virtual void GenExitSequence(CompilationUnit* cu);
+    virtual void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset,
+                                  RegLocation rl_src);
+    virtual void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
+                                     bool is_double);
+    virtual void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
+    virtual void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind);
+    virtual void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
+    virtual void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
+    virtual void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+                                               RegLocation rl_result, int lit, int first_bit,
+                                               int second_bit);
+    virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+    virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+    virtual void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+                                 RegLocation rl_src);
+    virtual void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+                                 RegLocation rl_src);
+    virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+                                SpecialCaseHandler special_case);
+
+    // Single operation generators.
+    virtual LIR* OpUnconditionalBranch(CompilationUnit* cu, LIR* target);
+    virtual LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
+                             LIR* target);
+    virtual LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg, int check_value,
+                                LIR* target);
+    virtual LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target);
+    virtual LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg,
+                                LIR* target);
+    virtual LIR* OpFpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
+    virtual LIR* OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide);
+    virtual LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp);
+    virtual LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target);
+    virtual LIR* OpReg(CompilationUnit* cu, OpKind op, int r_dest_src);
+    virtual LIR* OpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
+    virtual LIR* OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src);
+    virtual LIR* OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value);
+    virtual LIR* OpRegMem(CompilationUnit* cu, OpKind op, int r_dest, int rBase, int offset);
+    virtual LIR* OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2);
+    virtual LIR* OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int value);
+    virtual LIR* OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
+                             int r_src2);
+    virtual LIR* OpTestSuspend(CompilationUnit* cu, LIR* target);
+    virtual LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset);
+    virtual LIR* OpVldm(CompilationUnit* cu, int rBase, int count);
+    virtual LIR* OpVstm(CompilationUnit* cu, int rBase, int count);
+    virtual void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale,
+                       int offset);
+    virtual void OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo,
+                               int src_hi);
+    virtual void OpTlsCmp(CompilationUnit* cu, int offset, int val);
+
+    void OpRegThreadMem(CompilationUnit* cu, OpKind op, int r_dest, int thread_offset);
+    void SpillCoreRegs(CompilationUnit* cu);
+    void UnSpillCoreRegs(CompilationUnit* cu);
+    static const X86EncodingMap EncodingMap[kX86Last];
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_CODEGEN_X86_CODEGENX86_H_
diff --git a/src/compiler/codegen/x86/fp_x86.cc b/src/compiler/codegen/x86/fp_x86.cc
index def4896..14f8b92 100644
--- a/src/compiler/codegen/x86/fp_x86.cc
+++ b/src/compiler/codegen/x86/fp_x86.cc
@@ -15,13 +15,14 @@
  */
 
 #include "x86_lir.h"
+#include "codegen_x86.h"
 #include "../codegen_util.h"
 #include "../ralloc_util.h"
 
 namespace art {
 
-bool GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode,
-                     RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+bool X86Codegen::GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode,
+                                 RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
   X86OpCode op = kX86Nop;
   RegLocation rl_result;
 
@@ -70,8 +71,8 @@
   return false;
 }
 
-bool GenArithOpDouble(CompilationUnit *cu, Instruction::Code opcode,
-                      RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+bool X86Codegen::GenArithOpDouble(CompilationUnit *cu, Instruction::Code opcode,
+                                  RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
   X86OpCode op = kX86Nop;
   RegLocation rl_result;
 
@@ -119,8 +120,8 @@
   return false;
 }
 
-bool GenConversion(CompilationUnit *cu, Instruction::Code opcode,
-                   RegLocation rl_dest, RegLocation rl_src) {
+bool X86Codegen::GenConversion(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+                               RegLocation rl_src) {
   RegisterClass rcSrc = kFPReg;
   X86OpCode op = kX86Nop;
   int src_reg;
@@ -212,8 +213,8 @@
   return false;
 }
 
-bool GenCmpFP(CompilationUnit *cu, Instruction::Code code, RegLocation rl_dest,
-              RegLocation rl_src1, RegLocation rl_src2) {
+bool X86Codegen::GenCmpFP(CompilationUnit *cu, Instruction::Code code, RegLocation rl_dest,
+                          RegLocation rl_src1, RegLocation rl_src2) {
   bool single = (code == Instruction::CMPL_FLOAT) || (code == Instruction::CMPG_FLOAT);
   bool unordered_gt = (code == Instruction::CMPG_DOUBLE) || (code == Instruction::CMPG_FLOAT);
   int src_reg1;
@@ -263,8 +264,8 @@
   return false;
 }
 
-void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
-                                bool gt_bias, bool is_double) {
+void X86Codegen::GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
+                                     bool is_double) {
   LIR* label_list = cu->block_label_list;
   LIR* taken = &label_list[bb->taken->id];
   LIR* not_taken = &label_list[bb->fall_through->id];
@@ -333,7 +334,7 @@
   OpCondBranch(cu, ccode, taken);
 }
 
-void GenNegFloat(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
+void X86Codegen::GenNegFloat(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
 {
   RegLocation rl_result;
   rl_src = LoadValue(cu, rl_src, kCoreReg);
@@ -342,7 +343,7 @@
   StoreValue(cu, rl_dest, rl_result);
 }
 
-void GenNegDouble(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
+void X86Codegen::GenNegDouble(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
 {
   RegLocation rl_result;
   rl_src = LoadValueWide(cu, rl_src, kCoreReg);
@@ -352,7 +353,7 @@
   StoreValueWide(cu, rl_dest, rl_result);
 }
 
-bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) {
+bool X86Codegen::GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) {
   DCHECK_NE(cu->instruction_set, kThumb2);
   return false;
 }
diff --git a/src/compiler/codegen/x86/int_x86.cc b/src/compiler/codegen/x86/int_x86.cc
index f6eaaf5..190208b 100644
--- a/src/compiler/codegen/x86/int_x86.cc
+++ b/src/compiler/codegen/x86/int_x86.cc
@@ -17,6 +17,7 @@
 /* This file contains codegen for the X86 ISA */
 
 #include "x86_lir.h"
+#include "codegen_x86.h"
 #include "../codegen_util.h"
 #include "../ralloc_util.h"
 
@@ -25,8 +26,8 @@
 /*
  * Perform register memory operation.
  */
-LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code,
-                    int reg1, int base, int offset, ThrowKind kind)
+LIR* X86Codegen::GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code,
+                                int reg1, int base, int offset, ThrowKind kind)
 {
   LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kind,
                     cu->current_dalvik_offset, reg1, base, offset);
@@ -53,8 +54,8 @@
  * finish:
  *
  */
-void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest,
-                RegLocation rl_src1, RegLocation rl_src2)
+void X86Codegen::GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
 {
   FlushAllRegs(cu);
   LockCallTemps(cu);  // Prepare for explicit register usage
@@ -96,8 +97,8 @@
   return kX86CondO;
 }
 
-LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1,
-                 int src2, LIR* target)
+LIR* X86Codegen::OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
+                             LIR* target)
 {
   NewLIR2(cu, kX86Cmp32RR, src1, src2);
   X86ConditionCode cc = X86ConditionEncoding(cond);
@@ -107,8 +108,8 @@
   return branch;
 }
 
-LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg,
-                    int check_value, LIR* target)
+LIR* X86Codegen::OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg,
+                                int check_value, LIR* target)
 {
   if ((check_value == 0) && (cond == kCondEq || cond == kCondNe)) {
     // TODO: when check_value == 0 and reg is rCX, use the jcxz/nz opcode
@@ -122,10 +123,10 @@
   return branch;
 }
 
-LIR* OpRegCopyNoInsert(CompilationUnit *cu, int r_dest, int r_src)
+LIR* X86Codegen::OpRegCopyNoInsert(CompilationUnit *cu, int r_dest, int r_src)
 {
   if (X86_FPREG(r_dest) || X86_FPREG(r_src))
-    return FpRegCopy(cu, r_dest, r_src);
+    return OpFpRegCopy(cu, r_dest, r_src);
   LIR* res = RawLIR(cu, cu->current_dalvik_offset, kX86Mov32RR,
                     r_dest, r_src);
   if (r_dest == r_src) {
@@ -134,15 +135,15 @@
   return res;
 }
 
-LIR* OpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
+LIR* X86Codegen::OpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
 {
   LIR *res = OpRegCopyNoInsert(cu, r_dest, r_src);
   AppendLIR(cu, res);
   return res;
 }
 
-void OpRegCopyWide(CompilationUnit *cu, int dest_lo, int dest_hi,
-                   int src_lo, int src_hi)
+void X86Codegen::OpRegCopyWide(CompilationUnit *cu, int dest_lo, int dest_hi,
+                               int src_lo, int src_hi)
 {
   bool dest_fp = X86_FPREG(dest_lo) && X86_FPREG(dest_hi);
   bool src_fp = X86_FPREG(src_lo) && X86_FPREG(src_hi);
@@ -177,7 +178,7 @@
   }
 }
 
-void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir) {
+void X86Codegen::GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir) {
   LIR* label_list = cu->block_label_list;
   LIR* taken = &label_list[bb->taken->id];
   RegLocation rl_src1 = GetSrcWide(cu, mir, 0);
@@ -216,19 +217,22 @@
   }
   OpCondBranch(cu, ccode, taken);
 }
-RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int lit, bool is_div)
+
+RegLocation X86Codegen::GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo,
+                                     int lit, bool is_div)
 {
   LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
   return rl_dest;
 }
 
-RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div)
+RegLocation X86Codegen::GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo,
+                                  int reg_hi, bool is_div)
 {
   LOG(FATAL) << "Unexpected use of GenDivRem for x86";
   return rl_dest;
 }
 
-bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min)
+bool X86Codegen::GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min)
 {
   DCHECK_EQ(cu->instruction_set, kX86);
   RegLocation rl_src1 = info->args[0];
@@ -249,41 +253,41 @@
   return true;
 }
 
-void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale, int offset)
+void X86Codegen::OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale, int offset)
 {
   NewLIR5(cu, kX86Lea32RA, rBase, reg1, reg2, scale, offset);
 }
 
-void OpTlsCmp(CompilationUnit* cu, int offset, int val)
+void X86Codegen::OpTlsCmp(CompilationUnit* cu, int offset, int val)
 {
   NewLIR2(cu, kX86Cmp16TI8, offset, val);
 }
 
-bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) {
+bool X86Codegen::GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) {
   DCHECK_NE(cu->instruction_set, kThumb2);
   return false;
 }
 
-LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target) {
+LIR* X86Codegen::OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target) {
   LOG(FATAL) << "Unexpected use of OpPcRelLoad for x86";
   return NULL;
 }
 
-LIR* OpVldm(CompilationUnit* cu, int rBase, int count)
+LIR* X86Codegen::OpVldm(CompilationUnit* cu, int rBase, int count)
 {
   LOG(FATAL) << "Unexpected use of OpVldm for x86";
   return NULL;
 }
 
-LIR* OpVstm(CompilationUnit* cu, int rBase, int count)
+LIR* X86Codegen::OpVstm(CompilationUnit* cu, int rBase, int count)
 {
   LOG(FATAL) << "Unexpected use of OpVstm for x86";
   return NULL;
 }
 
-void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
-                                   RegLocation rl_result, int lit,
-                                   int first_bit, int second_bit)
+void X86Codegen::GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+                                               RegLocation rl_result, int lit,
+                                               int first_bit, int second_bit)
 {
   int t_reg = AllocTemp(cu);
   OpRegRegImm(cu, kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
@@ -294,7 +298,7 @@
   }
 }
 
-void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi)
+void X86Codegen::GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi)
 {
   int t_reg = AllocTemp(cu);
   OpRegRegReg(cu, kOpOr, t_reg, reg_lo, reg_hi);
@@ -303,33 +307,33 @@
 }
 
 // Test suspend flag, return target of taken suspend branch
-LIR* OpTestSuspend(CompilationUnit* cu, LIR* target)
+LIR* X86Codegen::OpTestSuspend(CompilationUnit* cu, LIR* target)
 {
   OpTlsCmp(cu, Thread::ThreadFlagsOffset().Int32Value(), 0);
   return OpCondBranch(cu, (target == NULL) ? kCondNe : kCondEq, target);
 }
 
 // Decrement register and branch on condition
-LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target)
+LIR* X86Codegen::OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target)
 {
   OpRegImm(cu, kOpSub, reg, 1);
   return OpCmpImmBranch(cu, c_code, reg, 0, target);
 }
 
-bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
-                        RegLocation rl_src, RegLocation rl_dest, int lit)
+bool X86Codegen::SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+                                    RegLocation rl_src, RegLocation rl_dest, int lit)
 {
   LOG(FATAL) << "Unexpected use of smallLiteralDive in x86";
   return false;
 }
 
-LIR* OpIT(CompilationUnit* cu, ArmConditionCode cond, const char* guide)
+LIR* X86Codegen::OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide)
 {
   LOG(FATAL) << "Unexpected use of OpIT in x86";
   return NULL;
 }
-bool GenAddLong(CompilationUnit* cu, RegLocation rl_dest,
-                RegLocation rl_src1, RegLocation rl_src2)
+bool X86Codegen::GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                         RegLocation rl_src2)
 {
   FlushAllRegs(cu);
   LockCallTemps(cu);  // Prepare for explicit register usage
@@ -344,8 +348,8 @@
   return false;
 }
 
-bool GenSubLong(CompilationUnit* cu, RegLocation rl_dest,
-                RegLocation rl_src1, RegLocation rl_src2)
+bool X86Codegen::GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
 {
   FlushAllRegs(cu);
   LockCallTemps(cu);  // Prepare for explicit register usage
@@ -360,8 +364,8 @@
   return false;
 }
 
-bool GenAndLong(CompilationUnit* cu, RegLocation rl_dest,
-                RegLocation rl_src1, RegLocation rl_src2)
+bool X86Codegen::GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
 {
   FlushAllRegs(cu);
   LockCallTemps(cu);  // Prepare for explicit register usage
@@ -376,8 +380,8 @@
   return false;
 }
 
-bool GenOrLong(CompilationUnit* cu, RegLocation rl_dest,
-               RegLocation rl_src1, RegLocation rl_src2)
+bool X86Codegen::GenOrLong(CompilationUnit* cu, RegLocation rl_dest,
+                           RegLocation rl_src1, RegLocation rl_src2)
 {
   FlushAllRegs(cu);
   LockCallTemps(cu);  // Prepare for explicit register usage
@@ -392,8 +396,8 @@
   return false;
 }
 
-bool GenXorLong(CompilationUnit* cu, RegLocation rl_dest,
-                RegLocation rl_src1, RegLocation rl_src2)
+bool X86Codegen::GenXorLong(CompilationUnit* cu, RegLocation rl_dest,
+                            RegLocation rl_src1, RegLocation rl_src2)
 {
   FlushAllRegs(cu);
   LockCallTemps(cu);  // Prepare for explicit register usage
@@ -408,8 +412,7 @@
   return false;
 }
 
-bool GenNegLong(CompilationUnit* cu, RegLocation rl_dest,
-                RegLocation rl_src)
+bool X86Codegen::GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
 {
   FlushAllRegs(cu);
   LockCallTemps(cu);  // Prepare for explicit register usage
@@ -424,7 +427,7 @@
   return false;
 }
 
-void OpRegThreadMem(CompilationUnit* cu, OpKind op, int r_dest, int thread_offset) {
+void X86Codegen::OpRegThreadMem(CompilationUnit* cu, OpKind op, int r_dest, int thread_offset) {
   X86OpCode opcode = kX86Bkpt;
   switch (op) {
   case kOpCmp: opcode = kX86Cmp32RT;  break;
diff --git a/src/compiler/codegen/x86/target_x86.cc b/src/compiler/codegen/x86/target_x86.cc
index ee5c215..c3c79f1 100644
--- a/src/compiler/codegen/x86/target_x86.cc
+++ b/src/compiler/codegen/x86/target_x86.cc
@@ -16,6 +16,7 @@
 
 #include "../../compiler_internals.h"
 #include "x86_lir.h"
+#include "codegen_x86.h"
 #include "../ralloc_util.h"
 #include "../codegen_util.h"
 
@@ -45,32 +46,32 @@
 #endif
 };
 
-RegLocation LocCReturn()
+RegLocation X86Codegen::LocCReturn()
 {
   RegLocation res = X86_LOC_C_RETURN;
   return res;
 }
 
-RegLocation LocCReturnWide()
+RegLocation X86Codegen::LocCReturnWide()
 {
   RegLocation res = X86_LOC_C_RETURN_WIDE;
   return res;
 }
 
-RegLocation LocCReturnFloat()
+RegLocation X86Codegen::LocCReturnFloat()
 {
   RegLocation res = X86_LOC_C_RETURN_FLOAT;
   return res;
 }
 
-RegLocation LocCReturnDouble()
+RegLocation X86Codegen::LocCReturnDouble()
 {
   RegLocation res = X86_LOC_C_RETURN_DOUBLE;
   return res;
 }
 
 // Return a target-dependent special register.
-int TargetReg(SpecialTargetRegister reg) {
+int X86Codegen::TargetReg(SpecialTargetRegister reg) {
   int res = INVALID_REG;
   switch (reg) {
     case kSelf: res = rX86_SELF; break;
@@ -95,37 +96,19 @@
 }
 
 // Create a double from a pair of singles.
-int S2d(int low_reg, int high_reg)
+int X86Codegen::S2d(int low_reg, int high_reg)
 {
   return X86_S2D(low_reg, high_reg);
 }
 
-// Is reg a single or double?
-bool FpReg(int reg)
-{
-  return X86_FPREG(reg);
-}
-
-// Is reg a single?
-bool SingleReg(int reg)
-{
-  return X86_SINGLEREG(reg);
-}
-
-// Is reg a double?
-bool DoubleReg(int reg)
-{
-  return X86_DOUBLEREG(reg);
-}
-
 // Return mask to strip off fp reg flags and bias.
-uint32_t FpRegMask()
+uint32_t X86Codegen::FpRegMask()
 {
   return X86_FP_REG_MASK;
 }
 
 // True if both regs single, both core or both double.
-bool SameRegType(int reg1, int reg2)
+bool X86Codegen::SameRegType(int reg1, int reg2)
 {
   return (X86_REGTYPE(reg1) == X86_REGTYPE(reg2));
 }
@@ -133,7 +116,7 @@
 /*
  * Decode the register id.
  */
-uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg)
+uint64_t X86Codegen::GetRegMaskCommon(CompilationUnit* cu, int reg)
 {
   uint64_t seed;
   int shift;
@@ -149,7 +132,7 @@
   return (seed << shift);
 }
 
-uint64_t GetPCUseDefEncoding()
+uint64_t X86Codegen::GetPCUseDefEncoding()
 {
   /*
    * FIXME: might make sense to use a virtual resource encoding bit for pc.  Might be
@@ -159,12 +142,12 @@
   return 0ULL;
 }
 
-void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir)
+void X86Codegen::SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir)
 {
   DCHECK_EQ(cu->instruction_set, kX86);
 
   // X86-specific resource map setup here.
-  uint64_t flags = EncodingMap[lir->opcode].flags;
+  uint64_t flags = X86Codegen::EncodingMap[lir->opcode].flags;
 
   if (flags & REG_USE_SP) {
     lir->use_mask |= ENCODE_X86_REG_SP;
@@ -223,7 +206,7 @@
  * Interpret a format string and build a string no longer than size
  * See format key in Assemble.cc.
  */
-std::string BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
+std::string X86Codegen::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
   std::string buf;
   size_t i = 0;
   size_t fmt_len = strlen(fmt);
@@ -281,7 +264,7 @@
   return buf;
 }
 
-void DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix)
+void X86Codegen::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix)
 {
   char buf[256];
   buf[0] = 0;
@@ -322,7 +305,8 @@
     LOG(INFO) << prefix << ": " <<  buf;
   }
 }
-void AdjustSpillMask(CompilationUnit* cu) {
+
+void X86Codegen::AdjustSpillMask(CompilationUnit* cu) {
   // Adjustment for LR spilling, x86 has no LR so nothing to do here
   cu->core_spill_mask |= (1 << rRET);
   cu->num_core_spills++;
@@ -334,7 +318,7 @@
  * include any holes in the mask.  Associate holes with
  * Dalvik register INVALID_VREG (0xFFFFU).
  */
-void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg)
+void X86Codegen::MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg)
 {
   UNIMPLEMENTED(WARNING) << "MarkPreservedSingle";
 #if 0
@@ -342,7 +326,7 @@
 #endif
 }
 
-void FlushRegWide(CompilationUnit* cu, int reg1, int reg2)
+void X86Codegen::FlushRegWide(CompilationUnit* cu, int reg1, int reg2)
 {
   RegisterInfo* info1 = GetRegInfo(cu, reg1);
   RegisterInfo* info2 = GetRegInfo(cu, reg2);
@@ -364,7 +348,7 @@
   }
 }
 
-void FlushReg(CompilationUnit* cu, int reg)
+void X86Codegen::FlushReg(CompilationUnit* cu, int reg)
 {
   RegisterInfo* info = GetRegInfo(cu, reg);
   if (info->live && info->dirty) {
@@ -375,19 +359,19 @@
 }
 
 /* Give access to the target-dependent FP register encoding to common code */
-bool IsFpReg(int reg) {
+bool X86Codegen::IsFpReg(int reg) {
   return X86_FPREG(reg);
 }
 
 /* Clobber all regs that might be used by an external C call */
-void ClobberCalleeSave(CompilationUnit *cu)
+void X86Codegen::ClobberCalleeSave(CompilationUnit *cu)
 {
   Clobber(cu, rAX);
   Clobber(cu, rCX);
   Clobber(cu, rDX);
 }
 
-RegLocation GetReturnWideAlt(CompilationUnit* cu) {
+RegLocation X86Codegen::GetReturnWideAlt(CompilationUnit* cu) {
   RegLocation res = LocCReturnWide();
   CHECK(res.low_reg == rAX);
   CHECK(res.high_reg == rDX);
@@ -399,7 +383,7 @@
   return res;
 }
 
-RegLocation GetReturnAlt(CompilationUnit* cu)
+RegLocation X86Codegen::GetReturnAlt(CompilationUnit* cu)
 {
   RegLocation res = LocCReturn();
   res.low_reg = rDX;
@@ -408,14 +392,14 @@
   return res;
 }
 
-RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg)
+RegisterInfo* X86Codegen::GetRegInfo(CompilationUnit* cu, int reg)
 {
   return X86_FPREG(reg) ? &cu->reg_pool->FPRegs[reg & X86_FP_REG_MASK]
                     : &cu->reg_pool->core_regs[reg];
 }
 
 /* To be used when explicitly managing register use */
-void LockCallTemps(CompilationUnit* cu)
+void X86Codegen::LockCallTemps(CompilationUnit* cu)
 {
   LockTemp(cu, rX86_ARG0);
   LockTemp(cu, rX86_ARG1);
@@ -424,7 +408,7 @@
 }
 
 /* To be used when explicitly managing register use */
-void FreeCallTemps(CompilationUnit* cu)
+void X86Codegen::FreeCallTemps(CompilationUnit* cu)
 {
   FreeTemp(cu, rX86_ARG0);
   FreeTemp(cu, rX86_ARG1);
@@ -432,13 +416,7 @@
   FreeTemp(cu, rX86_ARG3);
 }
 
-/* Architecture-specific initializations and checks go here */
-bool ArchVariantInit(void)
-{
-  return true;
-}
-
-void GenMemBarrier(CompilationUnit *cu, MemBarrierKind barrier_kind)
+void X86Codegen::GenMemBarrier(CompilationUnit *cu, MemBarrierKind barrier_kind)
 {
 #if ANDROID_SMP != 0
   // TODO: optimize fences
@@ -449,7 +427,7 @@
  * Alloc a pair of core registers, or a double.  Low reg in low byte,
  * high reg in next byte.
  */
-int AllocTypedTempPair(CompilationUnit *cu, bool fp_hint,
+int X86Codegen::AllocTypedTempPair(CompilationUnit *cu, bool fp_hint,
                           int reg_class)
 {
   int high_reg;
@@ -469,14 +447,14 @@
   return res;
 }
 
-int AllocTypedTemp(CompilationUnit *cu, bool fp_hint, int reg_class) {
+int X86Codegen::AllocTypedTemp(CompilationUnit *cu, bool fp_hint, int reg_class) {
   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
     return AllocTempFloat(cu);
   }
   return AllocTemp(cu);
 }
 
-void CompilerInitializeRegAlloc(CompilationUnit* cu) {
+void X86Codegen::CompilerInitializeRegAlloc(CompilationUnit* cu) {
   int num_regs = sizeof(core_regs)/sizeof(*core_regs);
   int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
   int num_temps = sizeof(core_temps)/sizeof(*core_temps);
@@ -524,7 +502,7 @@
   }
 }
 
-void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep,
+void X86Codegen::FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep,
                      RegLocation rl_free)
 {
   if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
@@ -535,7 +513,7 @@
   }
 }
 
-void SpillCoreRegs(CompilationUnit* cu) {
+void X86Codegen::SpillCoreRegs(CompilationUnit* cu) {
   if (cu->num_core_spills == 0) {
     return;
   }
@@ -550,7 +528,7 @@
   }
 }
 
-void UnSpillCoreRegs(CompilationUnit* cu) {
+void X86Codegen::UnSpillCoreRegs(CompilationUnit* cu) {
   if (cu->num_core_spills == 0) {
     return;
   }
@@ -565,46 +543,44 @@
   }
 }
 
-bool BranchUnconditional(LIR* lir)
+bool X86Codegen::IsUnconditionalBranch(LIR* lir)
 {
   return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
 }
 
 /* Common initialization routine for an architecture family */
-bool ArchInit() {
-  int i;
-
-  for (i = 0; i < kX86Last; i++) {
-    if (EncodingMap[i].opcode != i) {
-      LOG(FATAL) << "Encoding order for " << EncodingMap[i].name
+bool InitX86Codegen(CompilationUnit* cu) {
+  cu->cg.reset(new X86Codegen());
+  for (int i = 0; i < kX86Last; i++) {
+    if (X86Codegen::EncodingMap[i].opcode != i) {
+      LOG(FATAL) << "Encoding order for " << X86Codegen::EncodingMap[i].name
                  << " is wrong: expecting " << i << ", seeing "
-                 << static_cast<int>(EncodingMap[i].opcode);
+                 << static_cast<int>(X86Codegen::EncodingMap[i].opcode);
     }
   }
-
-  return ArchVariantInit();
+  return true;
 }
 
 // Not used in x86
-int LoadHelper(CompilationUnit* cu, int offset)
+int X86Codegen::LoadHelper(CompilationUnit* cu, int offset)
 {
   LOG(FATAL) << "Unexpected use of LoadHelper in x86";
   return INVALID_REG;
 }
 
-uint64_t GetTargetInstFlags(int opcode)
+uint64_t X86Codegen::GetTargetInstFlags(int opcode)
 {
-  return EncodingMap[opcode].flags;
+  return X86Codegen::EncodingMap[opcode].flags;
 }
 
-const char* GetTargetInstName(int opcode)
+const char* X86Codegen::GetTargetInstName(int opcode)
 {
-  return EncodingMap[opcode].name;
+  return X86Codegen::EncodingMap[opcode].name;
 }
 
-const char* GetTargetInstFmt(int opcode)
+const char* X86Codegen::GetTargetInstFmt(int opcode)
 {
-  return EncodingMap[opcode].fmt;
+  return X86Codegen::EncodingMap[opcode].fmt;
 }
 
 } // namespace art
diff --git a/src/compiler/codegen/x86/utility_x86.cc b/src/compiler/codegen/x86/utility_x86.cc
index 22037f3..bdbc547 100644
--- a/src/compiler/codegen/x86/utility_x86.cc
+++ b/src/compiler/codegen/x86/utility_x86.cc
@@ -15,6 +15,7 @@
  */
 
 #include "x86_lir.h"
+#include "codegen_x86.h"
 #include "../codegen_util.h"
 #include "../ralloc_util.h"
 
@@ -22,13 +23,7 @@
 
 /* This file contains codegen for the X86 ISA */
 
-void GenBarrier(CompilationUnit *cu);
-void LoadPair(CompilationUnit *cu, int base, int low_reg, int high_reg);
-LIR *LoadWordDisp(CompilationUnit *cu, int rBase, int displacement, int r_dest);
-LIR *StoreWordDisp(CompilationUnit *cu, int rBase, int displacement, int r_src);
-LIR *LoadConstant(CompilationUnit *cu, int r_dest, int value);
-
-LIR *FpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
+LIR* X86Codegen::OpFpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
 {
   int opcode;
   /* must be both DOUBLE or both not DOUBLE */
@@ -64,7 +59,7 @@
  * 1) r_dest is freshly returned from AllocTemp or
  * 2) The codegen is under fixed register usage
  */
-LIR *LoadConstantNoClobber(CompilationUnit *cu, int r_dest, int value)
+LIR* X86Codegen::LoadConstantNoClobber(CompilationUnit *cu, int r_dest, int value)
 {
   int r_dest_save = r_dest;
   if (X86_FPREG(r_dest)) {
@@ -91,16 +86,14 @@
   return res;
 }
 
-LIR* OpBranchUnconditional(CompilationUnit *cu, OpKind op)
+LIR* X86Codegen::OpUnconditionalBranch(CompilationUnit* cu, LIR* target)
 {
-  CHECK_EQ(op, kOpUncondBr);
-  return NewLIR1(cu, kX86Jmp8, 0 /* offset to be patched */ );
+  LIR* res = NewLIR1(cu, kX86Jmp8, 0 /* offset to be patched during assembly */ );
+  res->target = target;
+  return res;
 }
 
-LIR *LoadMultiple(CompilationUnit *cu, int rBase, int r_mask);
-
-X86ConditionCode X86ConditionEncoding(ConditionCode cond);
-LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target)
+LIR* X86Codegen::OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target)
 {
   LIR* branch = NewLIR2(cu, kX86Jcc8, 0 /* offset to be patched */,
                         X86ConditionEncoding(cc));
@@ -108,7 +101,7 @@
   return branch;
 }
 
-LIR *OpReg(CompilationUnit *cu, OpKind op, int r_dest_src)
+LIR* X86Codegen::OpReg(CompilationUnit *cu, OpKind op, int r_dest_src)
 {
   X86OpCode opcode = kX86Bkpt;
   switch (op) {
@@ -121,7 +114,7 @@
   return NewLIR1(cu, opcode, r_dest_src);
 }
 
-LIR *OpRegImm(CompilationUnit *cu, OpKind op, int r_dest_src1, int value)
+LIR* X86Codegen::OpRegImm(CompilationUnit *cu, OpKind op, int r_dest_src1, int value)
 {
   X86OpCode opcode = kX86Bkpt;
   bool byte_imm = IS_SIMM8(value);
@@ -148,7 +141,7 @@
   return NewLIR2(cu, opcode, r_dest_src1, value);
 }
 
-LIR *OpRegReg(CompilationUnit *cu, OpKind op, int r_dest_src1, int r_src2)
+LIR* X86Codegen::OpRegReg(CompilationUnit *cu, OpKind op, int r_dest_src1, int r_src2)
 {
     X86OpCode opcode = kX86Nop;
     bool src2_must_be_cx = false;
@@ -194,7 +187,7 @@
     return NewLIR2(cu, opcode, r_dest_src1, r_src2);
 }
 
-LIR* OpRegMem(CompilationUnit *cu, OpKind op, int r_dest, int rBase,
+LIR* X86Codegen::OpRegMem(CompilationUnit *cu, OpKind op, int r_dest, int rBase,
               int offset)
 {
   X86OpCode opcode = kX86Nop;
@@ -218,7 +211,7 @@
   return NewLIR3(cu, opcode, r_dest, rBase, offset);
 }
 
-LIR* OpRegRegReg(CompilationUnit *cu, OpKind op, int r_dest, int r_src1,
+LIR* X86Codegen::OpRegRegReg(CompilationUnit *cu, OpKind op, int r_dest, int r_src1,
                  int r_src2)
 {
   if (r_dest != r_src1 && r_dest != r_src2) {
@@ -267,7 +260,7 @@
   }
 }
 
-LIR* OpRegRegImm(CompilationUnit *cu, OpKind op, int r_dest, int r_src,
+LIR* X86Codegen::OpRegRegImm(CompilationUnit *cu, OpKind op, int r_dest, int r_src,
                  int value)
 {
   if (op == kOpMul) {
@@ -294,7 +287,7 @@
   return OpRegImm(cu, op, r_dest, value);
 }
 
-LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset)
+LIR* X86Codegen::OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset)
 {
   X86OpCode opcode = kX86Bkpt;
   switch (op) {
@@ -306,7 +299,7 @@
   return NewLIR1(cu, opcode, thread_offset);
 }
 
-LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp)
+LIR* X86Codegen::OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp)
 {
   X86OpCode opcode = kX86Bkpt;
   switch (op) {
@@ -318,8 +311,8 @@
   return NewLIR2(cu, opcode, rBase, disp);
 }
 
-LIR *LoadConstantValueWide(CompilationUnit *cu, int r_dest_lo,
-                           int r_dest_hi, int val_lo, int val_hi)
+LIR* X86Codegen::LoadConstantValueWide(CompilationUnit *cu, int r_dest_lo,
+                                       int r_dest_hi, int val_lo, int val_hi)
 {
     LIR *res;
     if (X86_FPREG(r_dest_lo)) {
@@ -345,22 +338,9 @@
     return res;
 }
 
-LIR *LoadMultiple(CompilationUnit *cu, int rBase, int r_mask)
-{
-  UNIMPLEMENTED(FATAL) << "LoadMultiple";
-  NewLIR0(cu, kX86Bkpt);
-  return NULL;
-}
-
-LIR *StoreMultiple(CompilationUnit *cu, int rBase, int r_mask)
-{
-  UNIMPLEMENTED(FATAL) << "StoreMultiple";
-  NewLIR0(cu, kX86Bkpt);
-  return NULL;
-}
-
-LIR* LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
-                         int displacement, int r_dest, int r_dest_hi, OpSize size, int s_reg) {
+LIR* X86Codegen::LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
+                                     int s_reg) {
   LIR *load = NULL;
   LIR *load2 = NULL;
   bool is_array = r_index != INVALID_REG;
@@ -428,10 +408,10 @@
       }
     }
     if (rBase == rX86_SP) {
-      AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0))
-                              >> 2, true /* is_load */, is64bit);
+      AnnotateDalvikRegAccess(cu, load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+                              true /* is_load */, is64bit);
       if (pair) {
-        AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
+        AnnotateDalvikRegAccess(cu, load2, (displacement + HIWORD_OFFSET) >> 2,
                                 true /* is_load */, is64bit);
       }
     }
@@ -458,26 +438,27 @@
 }
 
 /* Load value from base + scaled index. */
-LIR *LoadBaseIndexed(CompilationUnit *cu, int rBase,
+LIR* X86Codegen::LoadBaseIndexed(CompilationUnit *cu, int rBase,
                      int r_index, int r_dest, int scale, OpSize size) {
   return LoadBaseIndexedDisp(cu, rBase, r_index, scale, 0,
                              r_dest, INVALID_REG, size, INVALID_SREG);
 }
 
-LIR *LoadBaseDisp(CompilationUnit *cu, int rBase, int displacement,
+LIR* X86Codegen::LoadBaseDisp(CompilationUnit *cu, int rBase, int displacement,
                   int r_dest, OpSize size, int s_reg) {
   return LoadBaseIndexedDisp(cu, rBase, INVALID_REG, 0, displacement,
                              r_dest, INVALID_REG, size, s_reg);
 }
 
-LIR *LoadBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
+LIR* X86Codegen::LoadBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
                       int r_dest_lo, int r_dest_hi, int s_reg) {
   return LoadBaseIndexedDisp(cu, rBase, INVALID_REG, 0, displacement,
                              r_dest_lo, r_dest_hi, kLong, s_reg);
 }
 
-LIR* StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
-                          int displacement, int r_src, int r_src_hi, OpSize size, int s_reg) {
+LIR* X86Codegen::StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+                                      int displacement, int r_src, int r_src_hi, OpSize size,
+                                      int s_reg) {
   LIR *store = NULL;
   LIR *store2 = NULL;
   bool is_array = r_index != INVALID_REG;
@@ -533,10 +514,10 @@
       store2 = NewLIR3(cu, opcode, rBase, displacement + HIWORD_OFFSET, r_src_hi);
     }
     if (rBase == rX86_SP) {
-      AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0))
-                              >> 2, false /* is_load */, is64bit);
+      AnnotateDalvikRegAccess(cu, store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+                              false /* is_load */, is64bit);
       if (pair) {
-        AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
+        AnnotateDalvikRegAccess(cu, store2, (displacement + HIWORD_OFFSET) >> 2,
                                 false /* is_load */, is64bit);
       }
     }
@@ -556,29 +537,29 @@
 }
 
 /* store value base base + scaled index. */
-LIR *StoreBaseIndexed(CompilationUnit *cu, int rBase, int r_index, int r_src,
+LIR* X86Codegen::StoreBaseIndexed(CompilationUnit *cu, int rBase, int r_index, int r_src,
                       int scale, OpSize size)
 {
   return StoreBaseIndexedDisp(cu, rBase, r_index, scale, 0,
                               r_src, INVALID_REG, size, INVALID_SREG);
 }
 
-LIR *StoreBaseDisp(CompilationUnit *cu, int rBase, int displacement,
-                   int r_src, OpSize size)
+LIR* X86Codegen::StoreBaseDisp(CompilationUnit *cu, int rBase, int displacement,
+                               int r_src, OpSize size)
 {
     return StoreBaseIndexedDisp(cu, rBase, INVALID_REG, 0,
                                 displacement, r_src, INVALID_REG, size,
                                 INVALID_SREG);
 }
 
-LIR *StoreBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
-                       int r_src_lo, int r_src_hi)
+LIR* X86Codegen::StoreBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
+                                   int r_src_lo, int r_src_hi)
 {
   return StoreBaseIndexedDisp(cu, rBase, INVALID_REG, 0, displacement,
                               r_src_lo, r_src_hi, kLong, INVALID_SREG);
 }
 
-void LoadPair(CompilationUnit *cu, int base, int low_reg, int high_reg)
+void X86Codegen::LoadPair(CompilationUnit *cu, int base, int low_reg, int high_reg)
 {
   LoadBaseDispWide(cu, base, 0, low_reg, high_reg, INVALID_SREG);
 }
diff --git a/src/compiler/codegen/x86/x86_lir.h b/src/compiler/codegen/x86/x86_lir.h
index d58f587..edfcd4d 100644
--- a/src/compiler/codegen/x86/x86_lir.h
+++ b/src/compiler/codegen/x86/x86_lir.h
@@ -103,13 +103,13 @@
  * +========================+
  */
 
-/* Offset to distingish FP regs */
+// Offset to distinguish FP regs.
 #define X86_FP_REG_OFFSET 32
-/* Offset to distinguish DP FP regs */
+// Offset to distinguish DP FP regs.
 #define X86_FP_DOUBLE (X86_FP_REG_OFFSET + 16)
-/* Offset to distingish the extra regs */
+// Offset to distinguish the extra regs.
 #define X86_EXTRA_REG_OFFSET (X86_FP_DOUBLE + 16)
-/* Reg types */
+// Reg types.
 #define X86_REGTYPE(x) (x & (X86_FP_REG_OFFSET | X86_FP_DOUBLE))
 #define X86_FPREG(x) ((x & X86_FP_REG_OFFSET) == X86_FP_REG_OFFSET)
 #define X86_EXTRAREG(x) ((x & X86_EXTRA_REG_OFFSET) == X86_EXTRA_REG_OFFSET)
@@ -127,7 +127,7 @@
 /* Mask to strip off fp flags */
 #define X86_FP_REG_MASK 0xF
 
-/* RegisterLocation templates return values (rAX, rAX/rDX or XMM0) */
+// RegisterLocation templates return values (rAX, rAX/rDX or XMM0).
 //                               location,     wide, defined, const, fp, core, ref, high_word, home, low_reg, high_reg,     s_reg_low
 #define X86_LOC_C_RETURN             {kLocPhysReg, 0,    0,       0,     0,  0,    0,   0,        1,    rAX,    INVALID_REG, INVALID_SREG, INVALID_SREG}
 #define X86_LOC_C_RETURN_WIDE        {kLocPhysReg, 1,    0,       0,     0,  0,    0,   0,        1,    rAX,    rDX,         INVALID_SREG, INVALID_SREG}
@@ -137,7 +137,7 @@
 enum X86ResourceEncodingPos {
   kX86GPReg0   = 0,
   kX86RegSP    = 4,
-  kX86FPReg0   = 16,  // xmm0 .. xmm7/xmm15
+  kX86FPReg0   = 16,  // xmm0 .. xmm7/xmm15.
   kX86FPRegEnd   = 32,
   kX86RegEnd   = kX86FPRegEnd,
 };
@@ -145,10 +145,6 @@
 #define ENCODE_X86_REG_LIST(N)      (static_cast<uint64_t>(N))
 #define ENCODE_X86_REG_SP           (1ULL << kX86RegSP)
 
-/*
- * Annotate special-purpose core registers:
- */
-
 enum X86NativeRegisterPool {
   r0     = 0,
   rAX    = r0,
@@ -169,7 +165,7 @@
   r7     = 7,
   rDI    = r7,
 #ifndef TARGET_REX_SUPPORT
-  rRET   = 8,  // fake return address register for core spill mask
+  rRET   = 8,  // fake return address register for core spill mask.
 #else
   r8     = 8,
   r9     = 9,
@@ -179,7 +175,7 @@
   r13    = 13,
   r14    = 14,
   r15    = 15,
-  rRET   = 16,  // fake return address register for core spill mask
+  rRET   = 16,  // fake return address register for core spill mask.
 #endif
   fr0  =  0 + X86_FP_REG_OFFSET,
   fr1  =  1 + X86_FP_REG_OFFSET,
@@ -199,10 +195,6 @@
   fr15 = 15 + X86_FP_REG_OFFSET,
 };
 
-/*
- * Target-independent aliases
- */
-
 #define rX86_ARG0 rAX
 #define rX86_ARG1 rCX
 #define rX86_ARG2 rDX
@@ -227,7 +219,7 @@
  */
 enum X86OpCode {
   kX86First = 0,
-  kX8632BitData = kX86First, /* data [31..0] */
+  kX8632BitData = kX86First, // data [31..0].
   kX86Bkpt,
   kX86Nop,
   // Define groups of binary operations
@@ -427,22 +419,24 @@
   const char* fmt;
 };
 
-extern X86EncodingMap EncodingMap[kX86Last];
 
 // FIXME: mem barrier type - what do we do for x86?
 #define kSY 0
 #define kST 0
 
-/* Offsets of high and low halves of a 64bit value */
+// Offsets of high and low halves of a 64bit value.
 #define LOWORD_OFFSET 0
 #define HIWORD_OFFSET 4
 
-/* Segment override instruction prefix used for quick TLS access to Thread::Current() */
+// Segment override instruction prefix used for quick TLS access to Thread::Current().
 #define THREAD_PREFIX 0x64
 
 #define IS_SIMM8(v) ((-128 <= (v)) && ((v) <= 127))
 #define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32767))
 
+extern X86EncodingMap EncodingMap[kX86Last];
+extern X86ConditionCode X86ConditionEncoding(ConditionCode cond);
+
 }  // namespace art
 
 #endif  // ART_COMPILER_COMPILER_CODEGEN_X86_X86LIR_H_
diff --git a/src/compiler/compiler_enums.h b/src/compiler/compiler_enums.h
index 65c34d2..6d8b27e 100644
--- a/src/compiler/compiler_enums.h
+++ b/src/compiler/compiler_enums.h
@@ -28,8 +28,8 @@
 };
 
 enum SpecialTargetRegister {
-  kSelf,            // Thread
-  kSuspend,         // Used to reduce suspend checks
+  kSelf,            // Thread pointer.
+  kSuspend,         // Used to reduce suspend checks for some targets.
   kLr,
   kPc,
   kSp,
@@ -70,15 +70,15 @@
 
 enum ResourceEncodingPos {
   kMustNotAlias = 63,
-  kHeapRef = 62,          // Default memory reference type
-  kLiteral = 61,          // Literal pool memory reference
-  kDalvikReg = 60,        // Dalvik v_reg memory reference
+  kHeapRef = 62,          // Default memory reference type.
+  kLiteral = 61,          // Literal pool memory reference.
+  kDalvikReg = 60,        // Dalvik v_reg memory reference.
   kFPStatus = 59,
   kCCode = 58,
   kLowestCommonResource = kCCode
 };
 
-/* Shared pseudo opcodes - must be < 0 */
+// Shared pseudo opcodes - must be < 0.
 enum LIRPseudoOpcode {
   kPseudoExportedPC = -18,
   kPseudoSafepointPC = -17,
@@ -122,15 +122,15 @@
   kMIRNullCheckOnly,
   kMIRIgnoreRangeCheck,
   kMIRRangeCheckOnly,
-  kMIRInlined,                        // Invoke is inlined (ie dead)
-  kMIRInlinedPred,                    // Invoke is inlined via prediction
-  kMIRCallee,                         // Instruction is inlined from callee
+  kMIRInlined,                        // Invoke is inlined (ie dead).
+  kMIRInlinedPred,                    // Invoke is inlined via prediction.
+  kMIRCallee,                         // Instruction is inlined from callee.
   kMIRIgnoreSuspendCheck,
   kMIRDup,
-  kMIRMark,                           // Temporary node mark
+  kMIRMark,                           // Temporary node mark.
 };
 
-/* For successor_block_list */
+// For successor_block_list.
 enum BlockListType {
   kNotUsed = 0,
   kCatch,
@@ -328,17 +328,17 @@
 
 /* Customized node traversal orders for different needs */
 enum DataFlowAnalysisMode {
-  kAllNodes = 0,              // All nodes
-  kReachableNodes,            // All reachable nodes
-  kPreOrderDFSTraversal,      // Depth-First-Search / Pre-Order
-  kPostOrderDFSTraversal,     // Depth-First-Search / Post-Order
-  kPostOrderDOMTraversal,     // Dominator tree / Post-Order
-  kReversePostOrderTraversal, // Depth-First-Search / reverse Post-Order
+  kAllNodes = 0,              // All nodes.
+  kReachableNodes,            // All reachable nodes.
+  kPreOrderDFSTraversal,      // Depth-First-Search / Pre-Order.
+  kPostOrderDFSTraversal,     // Depth-First-Search / Post-Order.
+  kPostOrderDOMTraversal,     // Dominator tree / Post-Order.
+  kReversePostOrderTraversal, // Depth-First-Search / reverse Post-Order.
 };
 
 std::ostream& operator<<(std::ostream& os, const DataFlowAnalysisMode& mode);
 
-// Memory barrier types (see "The JSR-133 Cookbook for Compiler Writers")
+// Memory barrier types (see "The JSR-133 Cookbook for Compiler Writers").
 enum MemBarrierKind {
   kLoadStore,
   kLoadLoad,
@@ -348,6 +348,51 @@
 
 std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind);
 
+enum OpFeatureFlags {
+  kIsBranch = 0,
+  kNoOperand,
+  kIsUnaryOp,
+  kIsBinaryOp,
+  kIsTertiaryOp,
+  kIsQuadOp,
+  kIsQuinOp,
+  kIsSextupleOp,
+  kIsIT,
+  kMemLoad,
+  kMemStore,
+  kPCRelFixup, // x86 FIXME: add NEEDS_FIXUP to instruction attributes.
+  kRegDef0,
+  kRegDef1,
+  kRegDefA,
+  kRegDefD,
+  kRegDefFPCSList0,
+  kRegDefFPCSList2,
+  kRegDefList0,
+  kRegDefList1,
+  kRegDefList2,
+  kRegDefLR,
+  kRegDefSP,
+  kRegUse0,
+  kRegUse1,
+  kRegUse2,
+  kRegUse3,
+  kRegUse4,
+  kRegUseA,
+  kRegUseC,
+  kRegUseD,
+  kRegUseFPCSList0,
+  kRegUseFPCSList2,
+  kRegUseList0,
+  kRegUseList1,
+  kRegUseLR,
+  kRegUsePC,
+  kRegUseSP,
+  kSetsCCodes,
+  kUsesCCodes
+};
+
+std::ostream& operator<<(std::ostream& os, const OpFeatureFlags& flag);
+
 }  // namespace art
 
 #endif // ART_SRC_COMPILER_COMPILERENUMS_H_
diff --git a/src/compiler/compiler_internals.h b/src/compiler/compiler_internals.h
index c62d8c6..9ed3f8d 100644
--- a/src/compiler/compiler_internals.h
+++ b/src/compiler/compiler_internals.h
@@ -36,6 +36,6 @@
 #include "ralloc.h"
 #include "compiler_utility.h"
 #include "compiler_ir.h"
-#include "codegen/compiler_codegen.h"
+#include "codegen/codegen.h"
 
 #endif  // ART_SRC_COMPILER_COMPILER_INTERNAL_H_
diff --git a/src/compiler/compiler_ir.h b/src/compiler/compiler_ir.h
index e7a5e73..2923587 100644
--- a/src/compiler/compiler_ir.h
+++ b/src/compiler/compiler_ir.h
@@ -36,7 +36,7 @@
 #define EXERCISE_SLOWEST_STRING_PATH (cu->enable_debug & \
   (1 << kDebugSlowestStringPath))
 
-// Minimum field size to contain Dalvik v_reg number
+// Minimum field size to contain Dalvik v_reg number.
 #define VREG_NUM_WIDTH 16
 
 struct ArenaBitVector;
@@ -55,17 +55,17 @@
   RegLocationType location:3;
   unsigned wide:1;
   unsigned defined:1;   // Do we know the type?
-  unsigned is_const:1;   // Constant, value in cu->constant_values[]
+  unsigned is_const:1;  // Constant, value in cu->constant_values[].
   unsigned fp:1;        // Floating point?
   unsigned core:1;      // Non-floating point?
-  unsigned ref:1;       // Something GC cares about
-  unsigned high_word:1;  // High word of pair?
+  unsigned ref:1;       // Something GC cares about.
+  unsigned high_word:1; // High word of pair?
   unsigned home:1;      // Does this represent the home location?
-  uint8_t low_reg;            // First physical register
-  uint8_t high_reg;           // 2nd physical register (if wide)
-  int32_t s_reg_low;      // SSA name for low Dalvik word
-  int32_t orig_sreg;     // TODO: remove after Bitcode gen complete
-                        // and consolodate usage w/ s_reg_low
+  uint8_t low_reg;      // First physical register.
+  uint8_t high_reg;     // 2nd physical register (if wide).
+  int32_t s_reg_low;    // SSA name for low Dalvik word.
+  int32_t orig_sreg;    // TODO: remove after Bitcode gen complete
+                        // and consolidate usage w/ s_reg_low.
 };
 
 struct CompilerTemp {
@@ -74,19 +74,19 @@
 };
 
 struct CallInfo {
-  int num_arg_words;      // Note: word count, not arg count
-  RegLocation* args;    // One for each word of arguments
-  RegLocation result;   // Eventual target of MOVE_RESULT
+  int num_arg_words;    // Note: word count, not arg count.
+  RegLocation* args;    // One for each word of arguments.
+  RegLocation result;   // Eventual target of MOVE_RESULT.
   int opt_flags;
   InvokeType type;
   uint32_t dex_idx;
-  uint32_t index;       // Method idx for invokes, type idx for FilledNewArray
+  uint32_t index;       // Method idx for invokes, type idx for FilledNewArray.
   uintptr_t direct_code;
   uintptr_t direct_method;
-  RegLocation target;    // Target of following move_result
+  RegLocation target;    // Target of following move_result.
   bool skip_this;
   bool is_range;
-  int offset;            // Dalvik offset
+  int offset;            // Dalvik offset.
 };
 
  /*
@@ -97,15 +97,15 @@
  */
 struct RegisterInfo {
   int reg;                    // Reg number
-  bool in_use;                 // Has it been allocated?
-  bool is_temp;                // Can allocate as temp?
+  bool in_use;                // Has it been allocated?
+  bool is_temp;               // Can allocate as temp?
   bool pair;                  // Part of a register pair?
-  int partner;                // If pair, other reg of pair
+  int partner;                // If pair, other reg of pair.
   bool live;                  // Is there an associated SSA name?
   bool dirty;                 // If live, is it dirty?
-  int s_reg;                   // Name of live value
-  LIR *def_start;              // Starting inst in last def sequence
-  LIR *def_end;                // Ending inst in last def sequence
+  int s_reg;                  // Name of live value.
+  LIR *def_start;             // Starting inst in last def sequence.
+  LIR *def_end;               // Ending inst in last def sequence.
 };
 
 struct RegisterPool {
@@ -138,21 +138,21 @@
 #define MANY_BLOCKS_INITIALIZER 1000 /* Threshold for switching dataflow off */
 #define MANY_BLOCKS 4000 /* Non-initializer threshold */
 
-/* Utility macros to traverse the LIR list */
+// Utility macros to traverse the LIR list.
 #define NEXT_LIR(lir) (lir->next)
 #define PREV_LIR(lir) (lir->prev)
 
-/* Defines for alias_info (tracks Dalvik register references) */
+// Defines for alias_info (tracks Dalvik register references).
 #define DECODE_ALIAS_INFO_REG(X)        (X & 0xffff)
 #define DECODE_ALIAS_INFO_WIDE_FLAG     (0x80000000)
 #define DECODE_ALIAS_INFO_WIDE(X)       ((X & DECODE_ALIAS_INFO_WIDE_FLAG) ? 1 : 0)
 #define ENCODE_ALIAS_INFO(REG, ISWIDE)  (REG | (ISWIDE ? DECODE_ALIAS_INFO_WIDE_FLAG : 0))
 
-/* Common resource macros */
+// Common resource macros.
 #define ENCODE_CCODE            (1ULL << kCCode)
 #define ENCODE_FP_STATUS        (1ULL << kFPStatus)
 
-/* Abstract memory locations */
+// Abstract memory locations.
 #define ENCODE_DALVIK_REG       (1ULL << kDalvikReg)
 #define ENCODE_LITERAL          (1ULL << kLiteral)
 #define ENCODE_HEAP_REF         (1ULL << kHeapRef)
@@ -165,22 +165,22 @@
 #define is_pseudo_opcode(opcode) (static_cast<int>(opcode) < 0)
 
 struct LIR {
-  int offset;                        // Offset of this instruction
-  int dalvik_offset;                  // Offset of Dalvik opcode
+  int offset;               // Offset of this instruction.
+  int dalvik_offset;        // Offset of Dalvik opcode.
   LIR* next;
   LIR* prev;
   LIR* target;
   int opcode;
-  int operands[5];            // [0..4] = [dest, src1, src2, extra, extra2]
+  int operands[5];          // [0..4] = [dest, src1, src2, extra, extra2].
   struct {
-    bool is_nop:1;           // LIR is optimized away
-    bool pcRelFixup:1;      // May need pc-relative fixup
-    unsigned int size:5;    // in bytes
+    bool is_nop:1;          // LIR is optimized away.
+    bool pcRelFixup:1;      // May need pc-relative fixup.
+    unsigned int size:5;    // Note: size is in bytes.
     unsigned int unused:25;
   } flags;
-  int alias_info;              // For Dalvik register & litpool disambiguation
-  uint64_t use_mask;           // Resource mask for use
-  uint64_t def_mask;           // Resource mask for def
+  int alias_info;           // For Dalvik register & litpool disambiguation.
+  uint64_t use_mask;        // Resource mask for use.
+  uint64_t def_mask;        // Resource mask for def.
 };
 
 extern const char* extended_mir_op_names[kMirOpLast - kMirOpFirst];
@@ -214,9 +214,9 @@
   SSARepresentation* ssa_rep;
   int optimization_flags;
   union {
-    // Used to quickly locate all Phi opcodes
+    // Used to quickly locate all Phi opcodes.
     MIR* phi_next;
-    // Establish link between two halves of throwing instructions
+    // Establish link between two halves of throwing instructions.
     MIR* throw_insn;
   } meta;
 };
@@ -239,14 +239,14 @@
   MIR* last_mir_insn;
   BasicBlock* fall_through;
   BasicBlock* taken;
-  BasicBlock* i_dom;            // Immediate dominator
+  BasicBlock* i_dom;                // Immediate dominator.
   BasicBlockDataFlow* data_flow_info;
   GrowableList* predecessors;
   ArenaBitVector* dominators;
-  ArenaBitVector* i_dominated;         // Set nodes being immediately dominated
-  ArenaBitVector* dom_frontier;        // Dominance frontier
-  struct {                            // For one-to-many successors like
-    BlockListType block_list_type;    // switch and exception handling
+  ArenaBitVector* i_dominated;      // Set nodes being immediately dominated.
+  ArenaBitVector* dom_frontier;     // Dominance frontier.
+  struct {                          // For one-to-many successors like
+    BlockListType block_list_type;  // switch and exception handling.
     GrowableList blocks;
   } successor_block_list;
 };
@@ -266,6 +266,7 @@
 struct RegisterPool;
 struct ArenaMemBlock;
 struct Memstats;
+class Codegen;
 
 #define NOTVISITED (-1)
 
@@ -360,29 +361,30 @@
 #ifndef NDEBUG
       live_sreg(0),
 #endif
-      opcode_count(NULL) {}
+      opcode_count(NULL),
+      cg(NULL) {}
 
   int num_blocks;
   GrowableList block_list;
-  Compiler* compiler;            // Compiler driving this compiler
-  ClassLinker* class_linker;     // Linker to resolve fields and methods
-  const DexFile* dex_file;       // DexFile containing the method being compiled
-  jobject class_loader;          // compiling method's class loader
-  uint32_t method_idx;                // compiling method's index into method_ids of DexFile
-  const DexFile::CodeItem* code_item;  // compiling method's DexFile code_item
-  uint32_t access_flags;              // compiling method's access flags
-  InvokeType invoke_type;             // compiling method's invocation type
-  const char* shorty;                 // compiling method's shorty
+  Compiler* compiler;                  // Compiler driving this compiler.
+  ClassLinker* class_linker;           // Linker to resolve fields and methods.
+  const DexFile* dex_file;             // DexFile containing the method being compiled.
+  jobject class_loader;                // compiling method's class loader.
+  uint32_t method_idx;                 // compiling method's index into method_ids of DexFile.
+  const DexFile::CodeItem* code_item;  // compiling method's DexFile code_item.
+  uint32_t access_flags;               // compiling method's access flags.
+  InvokeType invoke_type;              // compiling method's invocation type.
+  const char* shorty;                  // compiling method's shorty.
   LIR* first_lir_insn;
   LIR* last_lir_insn;
-  LIR* literal_list;                   // Constants
-  LIR* method_literal_list;             // Method literals requiring patching
-  LIR* code_literal_list;               // Code literals requiring patching
-  uint32_t disable_opt;                // opt_control_vector flags
-  uint32_t enable_debug;               // debugControlVector flags
-  int data_offset;                     // starting offset of literal pool
-  int total_size;                      // header + code size
-  AssemblerStatus assembler_status;    // Success or fix and retry
+  LIR* literal_list;                   // Constants.
+  LIR* method_literal_list;            // Method literals requiring patching.
+  LIR* code_literal_list;              // Code literals requiring patching.
+  uint32_t disable_opt;                // opt_control_vector flags.
+  uint32_t enable_debug;               // debugControlVector flags.
+  int data_offset;                     // starting offset of literal pool.
+  int total_size;                      // header + code size.
+  AssemblerStatus assembler_status;    // Success or fix and retry.
   int assembler_retries;
   std::vector<uint8_t> code_buffer;
   /*
@@ -401,46 +403,45 @@
   std::vector<uint32_t> fp_vmap_table;
   std::vector<uint8_t> native_gc_map;
   bool verbose;
-  bool has_loop;                       // Contains a loop
-  bool has_invoke;                     // Contains an invoke instruction
-  bool qd_mode;                        // Compile for code size/compile time
+  bool has_loop;                       // Contains a loop.
+  bool has_invoke;                     // Contains an invoke instruction.
+  bool qd_mode;                        // Compile for code size/compile time.
   RegisterPool* reg_pool;
   InstructionSet instruction_set;
-  /* Number of total regs used in the whole cu after SSA transformation */
+  // Number of total regs used in the whole cu after SSA transformation.
   int num_ssa_regs;
-  /* Map SSA reg i to the base virtual register/subscript */
+  // Map SSA reg i to the base virtual register/subscript.
   GrowableList* ssa_base_vregs;
   GrowableList* ssa_subscripts;
   GrowableList* ssa_strings;
 
-  /* The following are new data structures to support SSA representations */
-  /* Map original Dalvik virtual reg i to the current SSA name */
-  int* vreg_to_ssa_map;                  // length == method->registers_size
-  int* ssa_last_defs;                   // length == method->registers_size
-  ArenaBitVector* is_constant_v;        // length == num_ssa_reg
-  int* constant_values;                // length == num_ssa_reg
-  int* phi_alias_map;                   // length == num_ssa_reg
+  // Map original Dalvik virtual reg i to the current SSA name.
+  int* vreg_to_ssa_map;            // length == method->registers_size
+  int* ssa_last_defs;              // length == method->registers_size
+  ArenaBitVector* is_constant_v;   // length == num_ssa_reg
+  int* constant_values;            // length == num_ssa_reg
+  int* phi_alias_map;              // length == num_ssa_reg
   MIR* phi_list;
 
-  /* Use counts of ssa names */
-  GrowableList use_counts;             // Weighted by nesting depth
-  GrowableList raw_use_counts;          // Not weighted
+  // Use counts of ssa names.
+  GrowableList use_counts;         // Weighted by nesting depth
+  GrowableList raw_use_counts;     // Not weighted
 
-  /* Optimization support */
+  // Optimization support.
   GrowableList loop_headers;
 
-  /* Map SSA names to location */
+  // Map SSA names to location.
   RegLocation* reg_location;
 
-  /* Keep track of Dalvik v_reg to physical register mappings */
+  // Keep track of Dalvik v_reg to physical register mappings.
   PromotionMap* promotion_map;
 
-  /* SSA name for Method* */
+  // SSA name for Method*.
   int method_sreg;
-  RegLocation method_loc;            // Describes location of method*
+  RegLocation method_loc;          // Describes location of method*.
 
   int num_reachable_blocks;
-  int num_dalvik_registers;             // method->registers_size
+  int num_dalvik_registers;        // method->registers_size.
   BasicBlock* entry_block;
   BasicBlock* exit_block;
   BasicBlock* cur_block;
@@ -453,11 +454,11 @@
   GrowableList compiler_temps;
   int* i_dom_list;
   ArenaBitVector* try_block_addr;
-  ArenaBitVector** def_block_matrix;    // num_dalvik_register x num_blocks
+  ArenaBitVector** def_block_matrix;    // num_dalvik_register x num_blocks.
   ArenaBitVector* temp_block_v;
   ArenaBitVector* temp_dalvik_register_v;
-  ArenaBitVector* temp_ssa_register_v;   // num_ssa_regs
-  int* temp_ssa_block_id_v;               // working storage for Phi labels
+  ArenaBitVector* temp_ssa_register_v;  // num_ssa_regs.
+  int* temp_ssa_block_id_v;             // working storage for Phi labels.
   LIR* block_label_list;
   /*
    * Frame layout details.
@@ -467,7 +468,7 @@
    */
   int num_ins;
   int num_outs;
-  int num_regs;            // Unlike num_dalvik_registers, does not include ins
+  int num_regs;            // Unlike num_dalvik_registers, does not include ins.
   int num_core_spills;
   int num_fp_spills;
   int num_compiler_temps;
@@ -476,14 +477,13 @@
   unsigned int fp_spill_mask;
   unsigned int attrs;
   /*
-   * CLEANUP/RESTRUCTURE: The code generation utilities don't have a built-in
+   * TODO: The code generation utilities don't have a built-in
    * mechanism to propagate the original Dalvik opcode address to the
    * associated generated instructions.  For the trace compiler, this wasn't
    * necessary because the interpreter handled all throws and debugging
    * requests.  For now we'll handle this by placing the Dalvik offset
    * in the CompilationUnit struct before codegen for each instruction.
-   * The low-level LIR creation utilites will pull it from here.  Should
-   * be rewritten.
+   * The low-level LIR creation utilities will pull it from here.  Rework this.
    */
   int current_dalvik_offset;
   GrowableList switch_tables;
@@ -491,10 +491,10 @@
   const uint16_t* insns;
   uint32_t insns_size;
   bool disable_dataflow; // Skip dataflow analysis if possible
-  SafeMap<unsigned int, BasicBlock*> block_map; // FindBlock lookup cache
-  SafeMap<unsigned int, unsigned int> block_id_map; // Block collapse lookup cache
-  SafeMap<unsigned int, LIR*> boundary_map; // boundary lookup cache
-  int def_count;         // Used to estimate number of SSA names
+  SafeMap<unsigned int, BasicBlock*> block_map; // FindBlock lookup cache.
+  SafeMap<unsigned int, unsigned int> block_id_map; // Block collapse lookup cache.
+  SafeMap<unsigned int, LIR*> boundary_map; // boundary lookup cache.
+  int def_count;         // Used to estimate number of SSA names.
 
   // If non-empty, apply optimizer/debug flags only to matching methods.
   std::string compiler_method_match;
@@ -518,9 +518,9 @@
   std::string bitcode_filename;
   GrowableList llvm_values;
   int32_t temp_name;
-  SafeMap<llvm::BasicBlock*, LIR*> block_to_label_map; // llvm bb -> LIR label
-  SafeMap<int32_t, llvm::BasicBlock*> id_to_block_map; // block id -> llvm bb
-  SafeMap<llvm::Value*, RegLocation> loc_map; // llvm Value to loc rec
+  SafeMap<llvm::BasicBlock*, LIR*> block_to_label_map; // llvm bb -> LIR label.
+  SafeMap<int32_t, llvm::BasicBlock*> id_to_block_map; // block id -> llvm bb.
+  SafeMap<llvm::Value*, RegLocation> loc_map; // llvm Value to loc rec.
   int num_shadow_frame_entries;
   int* shadow_map;
   std::set<llvm::BasicBlock*> llvm_blocks;
@@ -533,22 +533,23 @@
   int live_sreg;
 #endif
   std::set<uint32_t> catches;
-  int* opcode_count;    // Count Dalvik opcodes for tuning
+  int* opcode_count;    // Count Dalvik opcodes for tuning.
+  UniquePtr<Codegen> cg;
 };
 
 struct SwitchTable {
   int offset;
-  const uint16_t* table;            // Original dex table
-  int vaddr;                  // Dalvik offset of switch opcode
-  LIR* anchor;                // Reference instruction for relative offsets
-  LIR** targets;              // Array of case targets
+  const uint16_t* table;      // Original dex table.
+  int vaddr;                  // Dalvik offset of switch opcode.
+  LIR* anchor;                // Reference instruction for relative offsets.
+  LIR** targets;              // Array of case targets.
 };
 
 struct FillArrayData {
   int offset;
-  const uint16_t* table;           // Original dex table
+  const uint16_t* table;      // Original dex table.
   int size;
-  int vaddr;                 // Dalvik offset of FILL_ARRAY_DATA opcode
+  int vaddr;                  // Dalvik offset of FILL_ARRAY_DATA opcode.
 };
 
 #define MAX_PATTERN_LEN 5
@@ -583,24 +584,6 @@
   {{Instruction::RETURN_WIDE}, kIdentity},
 };
 
-BasicBlock* NewMemBB(CompilationUnit* cu, BBType block_type, int block_id);
-
-void AppendMIR(BasicBlock* bb, MIR* mir);
-
-void PrependMIR(BasicBlock* bb, MIR* mir);
-
-void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir);
-
-void AppendLIR(CompilationUnit* cu, LIR* lir);
-
-void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
-
-void InsertLIRAfter(LIR* current_lir, LIR* new_lir);
-
-MIR* FindMoveResult(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
-/* Debug Utilities */
-void DumpCompilationUnit(CompilationUnit* cu);
-
 }  // namespace art
 
 #endif // ART_SRC_COMPILER_COMPILER_IR_H_
diff --git a/src/compiler/compiler_utility.cc b/src/compiler/compiler_utility.cc
index 757aa7d..d8a5956 100644
--- a/src/compiler/compiler_utility.cc
+++ b/src/compiler/compiler_utility.cc
@@ -684,4 +684,114 @@
   return cu->dex_file->GetShorty(method_id.proto_idx_);
 }
 
+/* Allocate a new basic block */
+BasicBlock* NewMemBB(CompilationUnit* cu, BBType block_type, int block_id)
+{
+  BasicBlock* bb = static_cast<BasicBlock*>(NewMem(cu, sizeof(BasicBlock), true, kAllocBB));
+  bb->block_type = block_type;
+  bb->id = block_id;
+  bb->predecessors = static_cast<GrowableList*>
+      (NewMem(cu, sizeof(GrowableList), false, kAllocPredecessors));
+  CompilerInitGrowableList(cu, bb->predecessors,
+                      (block_type == kExitBlock) ? 2048 : 2,
+                      kListPredecessors);
+  cu->block_id_map.Put(block_id, block_id);
+  return bb;
+}
+
+/* Insert an MIR instruction to the end of a basic block */
+void AppendMIR(BasicBlock* bb, MIR* mir)
+{
+  if (bb->first_mir_insn == NULL) {
+    DCHECK(bb->last_mir_insn == NULL);
+    bb->last_mir_insn = bb->first_mir_insn = mir;
+    mir->prev = mir->next = NULL;
+  } else {
+    bb->last_mir_insn->next = mir;
+    mir->prev = bb->last_mir_insn;
+    mir->next = NULL;
+    bb->last_mir_insn = mir;
+  }
+}
+
+/* Insert an MIR instruction to the head of a basic block */
+void PrependMIR(BasicBlock* bb, MIR* mir)
+{
+  if (bb->first_mir_insn == NULL) {
+    DCHECK(bb->last_mir_insn == NULL);
+    bb->last_mir_insn = bb->first_mir_insn = mir;
+    mir->prev = mir->next = NULL;
+  } else {
+    bb->first_mir_insn->prev = mir;
+    mir->next = bb->first_mir_insn;
+    mir->prev = NULL;
+    bb->first_mir_insn = mir;
+  }
+}
+
+/* Insert a MIR instruction after the specified MIR */
+void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir)
+{
+  new_mir->prev = current_mir;
+  new_mir->next = current_mir->next;
+  current_mir->next = new_mir;
+
+  if (new_mir->next) {
+    /* Is not the last MIR in the block */
+    new_mir->next->prev = new_mir;
+  } else {
+    /* Is the last MIR in the block */
+    bb->last_mir_insn = new_mir;
+  }
+}
+
+/*
+ * Append an LIR instruction to the LIR list maintained by a compilation
+ * unit
+ */
+void AppendLIR(CompilationUnit *cu, LIR* lir)
+{
+  if (cu->first_lir_insn == NULL) {
+    DCHECK(cu->last_lir_insn == NULL);
+    cu->last_lir_insn = cu->first_lir_insn = lir;
+    lir->prev = lir->next = NULL;
+  } else {
+    cu->last_lir_insn->next = lir;
+    lir->prev = cu->last_lir_insn;
+    lir->next = NULL;
+    cu->last_lir_insn = lir;
+  }
+}
+
+/*
+ * Insert an LIR instruction before the current instruction, which cannot be the
+ * first instruction.
+ *
+ * prev_lir <-> new_lir <-> current_lir
+ */
+void InsertLIRBefore(LIR* current_lir, LIR* new_lir)
+{
+  DCHECK(current_lir->prev != NULL);
+  LIR *prev_lir = current_lir->prev;
+
+  prev_lir->next = new_lir;
+  new_lir->prev = prev_lir;
+  new_lir->next = current_lir;
+  current_lir->prev = new_lir;
+}
+
+/*
+ * Insert an LIR instruction after the current instruction, which cannot be the
+ * first instruction.
+ *
+ * current_lir -> new_lir -> old_next
+ */
+void InsertLIRAfter(LIR* current_lir, LIR* new_lir)
+{
+  new_lir->prev = current_lir;
+  new_lir->next = current_lir->next;
+  current_lir->next = new_lir;
+  new_lir->next->prev = new_lir;
+}
+
 }  // namespace art
diff --git a/src/compiler/compiler_utility.h b/src/compiler/compiler_utility.h
index 027204b..87ace78 100644
--- a/src/compiler/compiler_utility.h
+++ b/src/compiler/compiler_utility.h
@@ -19,15 +19,16 @@
 
 #include <stdint.h>
 #include <stddef.h>
+#include "compiler_enums.h"
 
 namespace art {
 
 struct CompilationUnit;
 
-/* Each arena page has some overhead, so take a few bytes off */
+// Each arena page has some overhead, so take a few bytes off.
 #define ARENA_DEFAULT_SIZE ((2 * 1024 * 1024) - 256)
 
-/* Type of allocation for memory tuning */
+// Type of allocation for memory tuning.
 enum oat_alloc_kind {
   kAllocMisc,
   kAllocBB,
@@ -45,7 +46,7 @@
   kNumAllocKinds
 };
 
-/* Type of growable list for memory tuning */
+// Type of growable list for memory tuning.
 enum oat_list_kind {
   kListMisc = 0,
   kListBlockList,
@@ -62,7 +63,7 @@
   kNumListKinds
 };
 
-/* Type of growable bitmap for memory tuning */
+// Type of growable bitmap for memory tuning.
 enum oat_bit_map_kind {
   kBitMapMisc = 0,
   kBitMapUse,
@@ -83,10 +84,10 @@
   kNumBitMapKinds
 };
 
-/* Allocate the initial memory block for arena-based allocation */
+// Allocate the initial memory block for arena-based allocation.
 bool HeapInit(CompilationUnit* cu);
 
-/* Collect memory usage statistics */
+// Uncomment to collect memory usage statistics.
 //#define WITH_MEMSTATS
 
 struct ArenaMemBlock {
@@ -120,20 +121,18 @@
 
 /*
  * Expanding bitmap, used for tracking resources.  Bits are numbered starting
- * from zero.
- *
- * All operations on a BitVector are unsynchronized.
+ * from zero.  All operations on a BitVector are unsynchronized.
  */
 struct ArenaBitVector {
-  bool       expandable;     /* expand bitmap if we run out? */
-  uint32_t   storage_size;    /* current size, in 32-bit words */
+  bool       expandable;   // expand bitmap if we run out?
+  uint32_t   storage_size; // current size, in 32-bit words.
   uint32_t*  storage;
 #ifdef WITH_MEMSTATS
-  oat_bit_map_kind kind;      /* for memory use tuning */
+  oat_bit_map_kind kind;   // for memory use tuning.
 #endif
 };
 
-/* Handy iterator to walk through the bit positions set to 1 */
+// Handy iterator to walk through the bit positions set to 1.
 struct ArenaBitVectorIterator {
   ArenaBitVector* p_bits;
   uint32_t idx;
@@ -144,27 +143,24 @@
 
 #define BLOCK_NAME_LEN 80
 
-/* Forward declarations */
+// Forward declarations.
 struct BasicBlock;
 struct CompilationUnit;
 struct LIR;
 struct RegLocation;
+struct MIR;
+enum BBType;
 
 void CompilerInitGrowableList(CompilationUnit* cu, GrowableList* g_list,
-                         size_t init_length, oat_list_kind kind = kListMisc);
-void InsertGrowableList(CompilationUnit* cu, GrowableList* g_list,
-                           uintptr_t elem);
+                              size_t init_length, oat_list_kind kind = kListMisc);
+void InsertGrowableList(CompilationUnit* cu, GrowableList* g_list, uintptr_t elem);
 void DeleteGrowableList(GrowableList* g_list, uintptr_t elem);
-void GrowableListIteratorInit(GrowableList* g_list,
-                                 GrowableListIterator* iterator);
+void GrowableListIteratorInit(GrowableList* g_list, GrowableListIterator* iterator);
 uintptr_t GrowableListIteratorNext(GrowableListIterator* iterator);
 uintptr_t GrowableListGetElement(const GrowableList* g_list, size_t idx);
-
-ArenaBitVector* AllocBitVector(CompilationUnit* cu,
-                                  unsigned int start_bits, bool expandable,
-                                  oat_bit_map_kind = kBitMapMisc);
-void BitVectorIteratorInit(ArenaBitVector* p_bits,
-                              ArenaBitVectorIterator* iterator);
+ArenaBitVector* AllocBitVector(CompilationUnit* cu, unsigned int start_bits, bool expandable,
+                               oat_bit_map_kind = kBitMapMisc);
+void BitVectorIteratorInit(ArenaBitVector* p_bits, ArenaBitVectorIterator* iterator);
 int BitVectorIteratorNext(ArenaBitVectorIterator* iterator);
 bool SetBit(CompilationUnit *cu, ArenaBitVector* p_bits, unsigned int num);
 bool ClearBit(ArenaBitVector* p_bits, unsigned int num);
@@ -175,21 +171,26 @@
 void SetInitialBits(ArenaBitVector* p_bits, unsigned int num_bits);
 void CopyBitVector(ArenaBitVector* dest, const ArenaBitVector* src);
 bool IntersectBitVectors(ArenaBitVector* dest, const ArenaBitVector* src1,
-                            const ArenaBitVector* src2);
-bool UnifyBitVetors(ArenaBitVector* dest, const ArenaBitVector* src1,
-                        const ArenaBitVector* src2);
-bool CompareBitVectors(const ArenaBitVector* src1,
-                          const ArenaBitVector* src2);
+                         const ArenaBitVector* src2);
+bool UnifyBitVetors(ArenaBitVector* dest, const ArenaBitVector* src1, const ArenaBitVector* src2);
+bool CompareBitVectors(const ArenaBitVector* src1, const ArenaBitVector* src2);
 bool TestBitVectors(const ArenaBitVector* src1, const ArenaBitVector* src2);
 int CountSetBits(const ArenaBitVector* p_bits);
-
 void DumpLIRInsn(CompilationUnit* cu, LIR* lir, unsigned char* base_addr);
 void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
-void DumpBlockBitVector(const GrowableList* blocks, char* msg,
-                           const ArenaBitVector* bv, int length);
+void DumpBlockBitVector(const GrowableList* blocks, char* msg, const ArenaBitVector* bv,
+                        int length);
 void GetBlockName(BasicBlock* bb, char* name);
 const char* GetShortyFromTargetIdx(CompilationUnit*, int);
 void DumpMemStats(CompilationUnit* cu);
+void DumpCompilationUnit(CompilationUnit* cu);
+BasicBlock* NewMemBB(CompilationUnit* cu, BBType block_type, int block_id);
+void AppendMIR(BasicBlock* bb, MIR* mir);
+void PrependMIR(BasicBlock* bb, MIR* mir);
+void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir);
+void AppendLIR(CompilationUnit *cu, LIR* lir);
+void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
+void InsertLIRAfter(LIR* current_lir, LIR* new_lir);
 
 }  // namespace art
 
diff --git a/src/compiler/dataflow.h b/src/compiler/dataflow.h
index 00e6487..28b9a32 100644
--- a/src/compiler/dataflow.h
+++ b/src/compiler/dataflow.h
@@ -33,17 +33,17 @@
   kSetsConst,
   kFormat35c,
   kFormat3rc,
-  kNullCheckSrc0,        // Null check of uses[0]
-  kNullCheckSrc1,        // Null check of uses[1]
-  kNullCheckSrc2,        // Null check of uses[2]
-  kNullCheckOut0,        // Null check out outgoing arg0
-  kDstNonNull,           // May assume dst is non-null
-  kRetNonNull,           // May assume retval is non-null
-  kNullTransferSrc0,     // Object copy src[0] -> dst
-  kNullTransferSrcN,     // Phi null check state transfer
-  kRangeCheckSrc1,       // Range check of uses[1]
-  kRangeCheckSrc2,       // Range check of uses[2]
-  kRangeCheckSrc3,       // Range check of uses[3]
+  kNullCheckSrc0,        // Null check of uses[0].
+  kNullCheckSrc1,        // Null check of uses[1].
+  kNullCheckSrc2,        // Null check of uses[2].
+  kNullCheckOut0,        // Null check out outgoing arg0.
+  kDstNonNull,           // May assume dst is non-null.
+  kRetNonNull,           // May assume retval is non-null.
+  kNullTransferSrc0,     // Object copy src[0] -> dst.
+  kNullTransferSrcN,     // Phi null check state transfer.
+  kRangeCheckSrc1,       // Range check of uses[1].
+  kRangeCheckSrc2,       // Range check of uses[2].
+  kRangeCheckSrc3,       // Range check of uses[3].
   kFPA,
   kFPB,
   kFPC,
@@ -53,7 +53,7 @@
   kRefA,
   kRefB,
   kRefC,
-  kUsesMethodStar,       // Implicit use of Method*
+  kUsesMethodStar,       // Implicit use of Method*.
 };
 
 #define DF_NOP                  0
@@ -147,8 +147,8 @@
 struct ArrayAccessInfo {
   int array_reg;
   int iv_reg;
-  int max_c;                   // For DIV - will affect upper bound checking
-  int min_c;                   // For DIV - will affect lower bound checking
+  int max_c;                   // For DIV - will affect upper bound checking.
+  int min_c;                   // For DIV - will affect lower bound checking.
 };
 
 struct LoopInfo {
@@ -166,7 +166,8 @@
 bool DoConstantPropogation(CompilationUnit* cu, BasicBlock* bb);
 void CompilerInitializeSSAConversion(CompilationUnit* cu);
 bool ClearVisitedFlag(struct CompilationUnit* cu, struct BasicBlock* bb);
-void DataFlowAnalysisDispatcher(CompilationUnit* cu, bool (*func)(CompilationUnit*, BasicBlock*), DataFlowAnalysisMode dfa_mode, bool is_iterative);
+void DataFlowAnalysisDispatcher(CompilationUnit* cu, bool (*func)(CompilationUnit*, BasicBlock*),
+                                DataFlowAnalysisMode dfa_mode, bool is_iterative);
 MIR* FindMoveResult(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
 void NullCheckElimination(CompilationUnit *cu);
 void BasicBlockCombine(CompilationUnit* cu);
diff --git a/src/compiler/frontend.cc b/src/compiler/frontend.cc
index c05130b..b8c9b8e 100644
--- a/src/compiler/frontend.cc
+++ b/src/compiler/frontend.cc
@@ -22,8 +22,8 @@
 #include "object.h"
 #include "runtime.h"
 #include "codegen/codegen_util.h"
-#include "codegen/method_bitcode.h"
-#include "codegen/method_codegen_driver.h"
+#include "codegen/mir_to_gbc.h"
+#include "codegen/mir_to_lir.h"
 
 #include <llvm/Support/Threading.h>
 
@@ -748,8 +748,21 @@
 }
 
 void CompilerInit(CompilationUnit* cu, const Compiler& compiler) {
-  if (!ArchInit()) {
-    LOG(FATAL) << "Failed to initialize oat";
+  bool success = false;
+  switch (compiler.GetInstructionSet()) {
+    case kThumb2:
+      success = InitArmCodegen(cu);
+      break;
+    case kMips:
+      success = InitMipsCodegen(cu);
+      break;
+    case kX86:
+      success = InitX86Codegen(cu);
+      break;
+    default:;
+  }
+  if (!success) {
+    LOG(FATAL) << "Failed to initialize codegen for " << compiler.GetInstructionSet();
   }
   if (!HeapInit(cu)) {
     LOG(FATAL) << "Failed to initialize oat heap";
@@ -1107,7 +1120,7 @@
     DumpCheckStats(cu.get());
   }
 
-  CompilerInitializeRegAlloc(cu.get());  // Needs to happen after SSA naming
+  cu.get()->cg->CompilerInitializeRegAlloc(cu.get());  // Needs to happen after SSA naming
 
   /* Allocate Registers using simple local allocation scheme */
   SimpleRegAlloc(cu.get());
diff --git a/src/compiler/frontend.h b/src/compiler/frontend.h
index f7e76f8..fb32cdd 100644
--- a/src/compiler/frontend.h
+++ b/src/compiler/frontend.h
@@ -49,7 +49,7 @@
  */
 #define MAX_ASSEMBLER_RETRIES 50
 
-/* Suppress optimization if corresponding bit set */
+// Suppress optimization if corresponding bit set.
 enum opt_control_vector {
   kLoadStoreElimination = 0,
   kLoadHoisting,
@@ -64,7 +64,7 @@
   kPromoteCompilerTemps,
 };
 
-/* Force code generation paths for testing */
+// Force code generation paths for testing.
 enum debugControlVector {
   kDebugDisplayMissingTargets,
   kDebugVerbose,
@@ -86,14 +86,14 @@
 };
 
 enum OatMethodAttributes {
-  kIsCallee = 0,      /* Code is part of a callee (invoked by a hot trace) */
-  kIsHot,             /* Code is part of a hot trace */
-  kIsLeaf,            /* Method is leaf */
-  kIsEmpty,           /* Method is empty */
-  kIsThrowFree,       /* Method doesn't throw */
-  kIsGetter,          /* Method fits the getter pattern */
-  kIsSetter,          /* Method fits the setter pattern */
-  kCannotCompile,     /* Method cannot be compiled */
+  kIsCallee = 0,      // Code is part of a callee (invoked by a hot trace).
+  kIsHot,             // Code is part of a hot trace.
+  kIsLeaf,            // Method is leaf.
+  kIsEmpty,           // Method is empty.
+  kIsThrowFree,       // Method doesn't throw.
+  kIsGetter,          // Method fits the getter pattern.
+  kIsSetter,          // Method fits the setter pattern.
+  kCannotCompile,     // Method cannot be compiled.
 };
 
 #define METHOD_IS_CALLEE        (1 << kIsCallee)
@@ -128,7 +128,7 @@
 
   private:
     UniquePtr<llvm::LLVMContext> llvm_context_;
-    llvm::Module* llvm_module_; // Managed by context_
+    llvm::Module* llvm_module_; // Managed by context_.
     UniquePtr<art::greenland::IntrinsicHelper> intrinsic_helper_;
     UniquePtr<art::greenland::IRBuilder> ir_builder_;
 };
diff --git a/src/compiler/intermediate_rep.cc b/src/compiler/intermediate_rep.cc
deleted file mode 100644
index c06693e..0000000
--- a/src/compiler/intermediate_rep.cc
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "compiler_internals.h"
-
-namespace art {
-
-/* Allocate a new basic block */
-BasicBlock* NewMemBB(CompilationUnit* cu, BBType block_type, int block_id)
-{
-  BasicBlock* bb = static_cast<BasicBlock*>(NewMem(cu, sizeof(BasicBlock), true, kAllocBB));
-  bb->block_type = block_type;
-  bb->id = block_id;
-  bb->predecessors = static_cast<GrowableList*>
-      (NewMem(cu, sizeof(GrowableList), false, kAllocPredecessors));
-  CompilerInitGrowableList(cu, bb->predecessors,
-                      (block_type == kExitBlock) ? 2048 : 2,
-                      kListPredecessors);
-  cu->block_id_map.Put(block_id, block_id);
-  return bb;
-}
-
-/* Insert an MIR instruction to the end of a basic block */
-void AppendMIR(BasicBlock* bb, MIR* mir)
-{
-  if (bb->first_mir_insn == NULL) {
-    DCHECK(bb->last_mir_insn == NULL);
-    bb->last_mir_insn = bb->first_mir_insn = mir;
-    mir->prev = mir->next = NULL;
-  } else {
-    bb->last_mir_insn->next = mir;
-    mir->prev = bb->last_mir_insn;
-    mir->next = NULL;
-    bb->last_mir_insn = mir;
-  }
-}
-
-/* Insert an MIR instruction to the head of a basic block */
-void PrependMIR(BasicBlock* bb, MIR* mir)
-{
-  if (bb->first_mir_insn == NULL) {
-    DCHECK(bb->last_mir_insn == NULL);
-    bb->last_mir_insn = bb->first_mir_insn = mir;
-    mir->prev = mir->next = NULL;
-  } else {
-    bb->first_mir_insn->prev = mir;
-    mir->next = bb->first_mir_insn;
-    mir->prev = NULL;
-    bb->first_mir_insn = mir;
-  }
-}
-
-/* Insert a MIR instruction after the specified MIR */
-void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir)
-{
-  new_mir->prev = current_mir;
-  new_mir->next = current_mir->next;
-  current_mir->next = new_mir;
-
-  if (new_mir->next) {
-    /* Is not the last MIR in the block */
-    new_mir->next->prev = new_mir;
-  } else {
-    /* Is the last MIR in the block */
-    bb->last_mir_insn = new_mir;
-  }
-}
-
-/*
- * Append an LIR instruction to the LIR list maintained by a compilation
- * unit
- */
-void AppendLIR(CompilationUnit *cu, LIR* lir)
-{
-  if (cu->first_lir_insn == NULL) {
-    DCHECK(cu->last_lir_insn == NULL);
-     cu->last_lir_insn = cu->first_lir_insn = lir;
-    lir->prev = lir->next = NULL;
-  } else {
-    cu->last_lir_insn->next = lir;
-    lir->prev = cu->last_lir_insn;
-    lir->next = NULL;
-    cu->last_lir_insn = lir;
-  }
-}
-
-/*
- * Insert an LIR instruction before the current instruction, which cannot be the
- * first instruction.
- *
- * prev_lir <-> new_lir <-> current_lir
- */
-void InsertLIRBefore(LIR* current_lir, LIR* new_lir)
-{
-  DCHECK(current_lir->prev != NULL);
-  LIR *prev_lir = current_lir->prev;
-
-  prev_lir->next = new_lir;
-  new_lir->prev = prev_lir;
-  new_lir->next = current_lir;
-  current_lir->prev = new_lir;
-}
-
-/*
- * Insert an LIR instruction after the current instruction, which cannot be the
- * first instruction.
- *
- * current_lir -> new_lir -> old_next
- */
-void InsertLIRAfter(LIR* current_lir, LIR* new_lir)
-{
-  new_lir->prev = current_lir;
-  new_lir->next = current_lir->next;
-  current_lir->next = new_lir;
-  new_lir->next->prev = new_lir;
-}
-
-}  // namespace art
diff --git a/src/compiler/ralloc.cc b/src/compiler/ralloc.cc
index c2e663e..8813193 100644
--- a/src/compiler/ralloc.cc
+++ b/src/compiler/ralloc.cc
@@ -364,18 +364,19 @@
 
 static const char* storage_name[] = {" Frame ", "PhysReg", " Spill "};
 
-static void DumpRegLocTable(RegLocation* table, int count)
+static void DumpRegLocTable(CompilationUnit* cu, RegLocation* table, int count)
 {
+  Codegen* cg = cu->cg.get();
   for (int i = 0; i < count; i++) {
     LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c%d %c%d S%d",
         table[i].orig_sreg, storage_name[table[i].location],
         table[i].wide ? 'W' : 'N', table[i].defined ? 'D' : 'U',
         table[i].fp ? 'F' : table[i].ref ? 'R' :'C',
         table[i].high_word ? 'H' : 'L', table[i].home ? 'h' : 't',
-        IsFpReg(table[i].low_reg) ? 's' : 'r',
-        table[i].low_reg & FpRegMask(),
-        IsFpReg(table[i].high_reg) ? 's' : 'r',
-        table[i].high_reg & FpRegMask(), table[i].s_reg_low);
+        cg->IsFpReg(table[i].low_reg) ? 's' : 'r',
+        table[i].low_reg & cg->FpRegMask(),
+        cg->IsFpReg(table[i].high_reg) ? 's' : 'r',
+        table[i].high_reg & cg->FpRegMask(), table[i].s_reg_low);
   }
 }
 
@@ -515,7 +516,7 @@
 
   if (cu->verbose && !(cu->disable_opt & (1 << kPromoteRegs))) {
     LOG(INFO) << "After Promotion";
-    DumpRegLocTable(cu->reg_location, cu->num_ssa_regs);
+    DumpRegLocTable(cu, cu->reg_location, cu->num_ssa_regs);
   }
 
   /* Set the frame size */
diff --git a/src/compiler_llvm/compiler_llvm.cc b/src/compiler_llvm/compiler_llvm.cc
index a0264fe..3ec5e68 100644
--- a/src/compiler_llvm/compiler_llvm.cc
+++ b/src/compiler_llvm/compiler_llvm.cc
@@ -283,10 +283,10 @@
   return result;
 }
 
-extern "C" art::CompiledInvokeStub* ArtCreateInvokeStub(art::Compiler& compiler,
-                                                        bool is_static,
-                                                        const char* shorty,
-                                                        uint32_t shorty_len) {
+extern "C" art::CompiledInvokeStub* ArtCreateLLVMInvokeStub(art::Compiler& compiler,
+                                                            bool is_static,
+                                                            const char* shorty,
+                                                            uint32_t shorty_len) {
   art::compiler_llvm::CompilerLLVM* compiler_llvm = ContextOf(compiler);
   art::CompiledInvokeStub* result = compiler_llvm->CreateInvokeStub(is_static, shorty);
   return result;
diff --git a/src/oat/jni/arm/jni_internal_arm.cc b/src/oat/jni/arm/jni_internal_arm.cc
index 48d649d..36d436f 100644
--- a/src/oat/jni/arm/jni_internal_arm.cc
+++ b/src/oat/jni/arm/jni_internal_arm.cc
@@ -160,7 +160,7 @@
 }  // namespace arm
 }  // namespace art
 
-extern "C" art::CompiledInvokeStub* ArtCreateInvokeStub(art::Compiler& /*compiler*/, bool is_static,
+extern "C" art::CompiledInvokeStub* ArtCreateArmInvokeStub(art::Compiler& /*compiler*/, bool is_static,
                                                         const char* shorty, uint32_t shorty_len) {
   return art::arm::CreateInvokeStub(is_static, shorty, shorty_len);
 }
diff --git a/src/oat/jni/mips/jni_internal_mips.cc b/src/oat/jni/mips/jni_internal_mips.cc
index dd66be9..4cfeaa9 100644
--- a/src/oat/jni/mips/jni_internal_mips.cc
+++ b/src/oat/jni/mips/jni_internal_mips.cc
@@ -171,7 +171,7 @@
 }  // namespace mips
 }  // namespace art
 
-extern "C" art::CompiledInvokeStub* ArtCreateInvokeStub(art::Compiler& /*compiler*/, bool is_static,
+extern "C" art::CompiledInvokeStub* ArtCreateMipsInvokeStub(art::Compiler& /*compiler*/, bool is_static,
                                                         const char* shorty, uint32_t shorty_len) {
   return art::mips::CreateInvokeStub(is_static, shorty, shorty_len);
 }
diff --git a/src/oat/jni/x86/jni_internal_x86.cc b/src/oat/jni/x86/jni_internal_x86.cc
index ca4a6ab..9d41eda 100644
--- a/src/oat/jni/x86/jni_internal_x86.cc
+++ b/src/oat/jni/x86/jni_internal_x86.cc
@@ -165,7 +165,7 @@
 }  // namespace x86
 }  // namespace art
 
-extern "C" art::CompiledInvokeStub* ArtCreateInvokeStub(art::Compiler& /*compiler*/, bool is_static,
+extern "C" art::CompiledInvokeStub* ArtCreateX86InvokeStub(art::Compiler& /*compiler*/, bool is_static,
                                                         const char* shorty, uint32_t shorty_len) {
   return art::x86::CreateInvokeStub(is_static, shorty, shorty_len);
 }