Quick compiler: allocate doubles as doubles

Significant refactoring of register handling to unify usage across
all targets and the 32-bit/64-bit backends.

Reworked RegStorage encoding to allow expanded use of
x86 xmm registers; removed vector registers as a separate
register type.  Reworked RegisterInfo to describe aliased
physical registers.  Eliminated quite a bit of target-specific code
and generalized common code.
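
A minimal sketch of the new composition, using only names that appear in
the arm_lir.h hunk below (example_dr3 is a stand-in for the dr3 entry there;
the exact bit layout of the shape/type fields lives in RegStorage itself and
is assumed, not introduced by this CL):

  // Register names are now shape | type | number rather than biased ints:
  enum { example_dr3 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 3 };
  constexpr RegStorage rs_example_dr3(RegStorage::kValid | example_dr3);
  // Queries on the RegStorage replace the old per-target macros
  // (ARM_FPREG / ARM_DOUBLEREG / ARM_LOWREG / ARM_S2D):
  //   rs_example_dr3.IsFloat()    -> true
  //   rs_example_dr3.IsDouble()   -> true   (a single k64BitSolo reg, not a pair)
  //   rs_example_dr3.GetRegNum()  -> 3      (d3, aliasing s6/s7 via RegisterInfo)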

Use of RegStorage instead of int for registers is now propagated down
to the NewLIRx() level.  In future CLs, the NewLIRx() routines will be
replaced with versions that are explicit about what kind of operand
they expect (RegStorage, displacement, etc.).  The goal is to
eventually use RegStorage all the way to the assembly phase.
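
The mechanical shape of that change, taken from the call_arm.cc hunk below:

  // Before: raw register enum value passed as a plain int operand.
  NewLIR3(kThumb2Adr, r1, 0, WrapPointer(tab_rec));
  // After: RegStorage is carried down, and the raw operand is extracted only
  // at the NewLIRx() boundary until the RegStorage-typed variants land.
  NewLIR3(kThumb2Adr, rs_r1.GetReg(), 0, WrapPointer(tab_rec));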

TBD: MIPS needs verification.
TBD: Re-enable liveness tracking.

Change-Id: I388c006d5fa9b3ea72db4e37a19ce257f2a15964
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index ba4b5c3..cbb2c30 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -27,6 +27,15 @@
   kAnyReg,
 };
 
+enum BitsUsed {
+  kSize32Bits,
+  kSize64Bits,
+  kSize128Bits,
+  kSize256Bits,
+  kSize512Bits,
+  kSize1024Bits,
+};
+
 enum SpecialTargetRegister {
   kSelf,            // Thread pointer.
   kSuspend,         // Used to reduce suspend checks for some targets.
@@ -56,17 +65,6 @@
   kLocInvalid
 };
 
-/**
- * Support for vector registers.  Initially used for x86 floats.  This will be used
- * to replace the assumption that a double takes up 2 single FP registers
- */
-enum VectorLengthType {
-  kVectorNotUsed = 0,   // This value is NOT in a vector register.
-  kVectorLength4,       // The value occupies 4 bytes in a vector register.
-  kVectorLength8,       // The value occupies 8 bytes in a vector register.
-  kVectorLength16       // The value occupies 16 bytes in a vector register (unused now).
-};
-
 enum BBType {
   kNullBlock,
   kEntryBlock,
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index ed2ecac..1a9379a 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -40,13 +40,13 @@
 
 /* Default optimizer/debug setting for the compiler. */
 static uint32_t kCompilerOptimizerDisableFlags = 0 |  // Disable specific optimizations
-  (1 << kLoadStoreElimination) |
+  (1 << kLoadStoreElimination) |  // TODO: this pass has been broken for a while - fix or delete.
   // (1 << kLoadHoisting) |
   // (1 << kSuppressLoads) |
   // (1 << kNullCheckElimination) |
   // (1 << kClassInitCheckElimination) |
   // (1 << kPromoteRegs) |
-  // (1 << kTrackLiveTemps) |
+  (1 << kTrackLiveTemps) |        // FIXME: disable until liveness issue fixed.
   // (1 << kSafeOptimizations) |
   // (1 << kBBOpt) |
   // (1 << kMatch) |
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index b926503..c728d84 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -368,13 +368,10 @@
   unsigned ref:1;       // Something GC cares about.
   unsigned high_word:1;  // High word of pair?
   unsigned home:1;      // Does this represent the home location?
-  VectorLengthType vec_len:3;  // TODO: remove.  Is this value in a vector register, and how big is it?
   RegStorage reg;       // Encoded physical registers.
   int16_t s_reg_low;    // SSA name for low Dalvik word.
   int16_t orig_sreg;    // TODO: remove after Bitcode gen complete
                         // and consolidate usage w/ s_reg_low.
-
-  bool IsVectorScalar() const { return vec_len == kVectorLength4 || vec_len == kVectorLength8;}
 };
 
 /*
@@ -400,8 +397,8 @@
 };
 
 
-const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, kVectorNotUsed,
-                             RegStorage(RegStorage::kInvalid), INVALID_SREG, INVALID_SREG};
+const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, RegStorage(), INVALID_SREG,
+                             INVALID_SREG};
 
 class MIRGraph {
  public:
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 9f15cd4..5c1bdf4 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -239,7 +239,7 @@
 
 // FIXME - will probably need to revisit all uses of this, as type not defined.
 static const RegLocation temp_loc = {kLocCompilerTemp,
-                                     0, 1 /*defined*/, 0, 0, 0, 0, 0, 1 /*home*/, kVectorNotUsed,
+                                     0, 1 /*defined*/, 0, 0, 0, 0, 0, 1 /*home*/,
                                      RegStorage(), INVALID_SREG, INVALID_SREG};
 
 CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide) {
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
index c9acd66..e384f6b 100644
--- a/compiler/dex/quick/arm/arm_lir.h
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -93,29 +93,8 @@
  * +========================+
  */
 
-// Offset to distingish FP regs.
-#define ARM_FP_REG_OFFSET 32
-// Offset to distinguish DP FP regs.
-#define ARM_FP_DOUBLE 64
 // First FP callee save.
 #define ARM_FP_CALLEE_SAVE_BASE 16
-// Reg types.
-#define ARM_REGTYPE(x) (x & (ARM_FP_REG_OFFSET | ARM_FP_DOUBLE))
-#define ARM_FPREG(x) ((x & ARM_FP_REG_OFFSET) == ARM_FP_REG_OFFSET)
-#define ARM_LOWREG(x) ((x & 0x7) == x)
-#define ARM_DOUBLEREG(x) ((x & ARM_FP_DOUBLE) == ARM_FP_DOUBLE)
-#define ARM_SINGLEREG(x) (ARM_FPREG(x) && !ARM_DOUBLEREG(x))
-
-/*
- * Note: the low register of a floating point pair is sufficient to
- * create the name of a double, but require both names to be passed to
- * allow for asserts to verify that the pair is consecutive if significant
- * rework is done in this area.  Also, it is a good reminder in the calling
- * code that reg locations always describe doubles as a pair of singles.
- */
-#define ARM_S2D(x, y) ((x) | ARM_FP_DOUBLE)
-// Mask to strip off fp flags.
-#define ARM_FP_REG_MASK (ARM_FP_REG_OFFSET-1)
 
 enum ArmResourceEncodingPos {
   kArmGPReg0   = 0,
@@ -134,135 +113,197 @@
 #define ENCODE_ARM_REG_FPCS_LIST(N) (static_cast<uint64_t>(N) << kArmFPReg16)
 
 enum ArmNativeRegisterPool {
-  r0   = 0,
-  r1   = 1,
-  r2   = 2,
-  r3   = 3,
-  rARM_SUSPEND = 4,
-  r5   = 5,
-  r6   = 6,
-  r7   = 7,
-  r8   = 8,
-  rARM_SELF  = 9,
-  r10  = 10,
-  r11  = 11,
-  r12  = 12,
-  r13sp  = 13,
-  rARM_SP  = 13,
-  r14lr  = 14,
-  rARM_LR  = 14,
-  r15pc  = 15,
-  rARM_PC  = 15,
-  fr0  =  0 + ARM_FP_REG_OFFSET,
-  fr1  =  1 + ARM_FP_REG_OFFSET,
-  fr2  =  2 + ARM_FP_REG_OFFSET,
-  fr3  =  3 + ARM_FP_REG_OFFSET,
-  fr4  =  4 + ARM_FP_REG_OFFSET,
-  fr5  =  5 + ARM_FP_REG_OFFSET,
-  fr6  =  6 + ARM_FP_REG_OFFSET,
-  fr7  =  7 + ARM_FP_REG_OFFSET,
-  fr8  =  8 + ARM_FP_REG_OFFSET,
-  fr9  =  9 + ARM_FP_REG_OFFSET,
-  fr10 = 10 + ARM_FP_REG_OFFSET,
-  fr11 = 11 + ARM_FP_REG_OFFSET,
-  fr12 = 12 + ARM_FP_REG_OFFSET,
-  fr13 = 13 + ARM_FP_REG_OFFSET,
-  fr14 = 14 + ARM_FP_REG_OFFSET,
-  fr15 = 15 + ARM_FP_REG_OFFSET,
-  fr16 = 16 + ARM_FP_REG_OFFSET,
-  fr17 = 17 + ARM_FP_REG_OFFSET,
-  fr18 = 18 + ARM_FP_REG_OFFSET,
-  fr19 = 19 + ARM_FP_REG_OFFSET,
-  fr20 = 20 + ARM_FP_REG_OFFSET,
-  fr21 = 21 + ARM_FP_REG_OFFSET,
-  fr22 = 22 + ARM_FP_REG_OFFSET,
-  fr23 = 23 + ARM_FP_REG_OFFSET,
-  fr24 = 24 + ARM_FP_REG_OFFSET,
-  fr25 = 25 + ARM_FP_REG_OFFSET,
-  fr26 = 26 + ARM_FP_REG_OFFSET,
-  fr27 = 27 + ARM_FP_REG_OFFSET,
-  fr28 = 28 + ARM_FP_REG_OFFSET,
-  fr29 = 29 + ARM_FP_REG_OFFSET,
-  fr30 = 30 + ARM_FP_REG_OFFSET,
-  fr31 = 31 + ARM_FP_REG_OFFSET,
-  dr0 = fr0 + ARM_FP_DOUBLE,
-  dr1 = fr2 + ARM_FP_DOUBLE,
-  dr2 = fr4 + ARM_FP_DOUBLE,
-  dr3 = fr6 + ARM_FP_DOUBLE,
-  dr4 = fr8 + ARM_FP_DOUBLE,
-  dr5 = fr10 + ARM_FP_DOUBLE,
-  dr6 = fr12 + ARM_FP_DOUBLE,
-  dr7 = fr14 + ARM_FP_DOUBLE,
-  dr8 = fr16 + ARM_FP_DOUBLE,
-  dr9 = fr18 + ARM_FP_DOUBLE,
-  dr10 = fr20 + ARM_FP_DOUBLE,
-  dr11 = fr22 + ARM_FP_DOUBLE,
-  dr12 = fr24 + ARM_FP_DOUBLE,
-  dr13 = fr26 + ARM_FP_DOUBLE,
-  dr14 = fr28 + ARM_FP_DOUBLE,
-  dr15 = fr30 + ARM_FP_DOUBLE,
+  r0           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  0,
+  r1           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  1,
+  r2           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  2,
+  r3           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  3,
+  rARM_SUSPEND = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  4,
+  r5           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  5,
+  r6           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  6,
+  r7           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  7,
+  r8           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  8,
+  rARM_SELF    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  9,
+  r10          = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
+  r11          = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
+  r12          = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
+  r13sp        = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
+  rARM_SP      = r13sp,
+  r14lr        = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
+  rARM_LR      = r14lr,
+  r15pc        = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
+  rARM_PC      = r15pc,
+
+  fr0          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  0,
+  fr1          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  1,
+  fr2          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  2,
+  fr3          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  3,
+  fr4          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  4,
+  fr5          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  5,
+  fr6          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  6,
+  fr7          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  7,
+  fr8          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  8,
+  fr9          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  9,
+  fr10         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 10,
+  fr11         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 11,
+  fr12         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 12,
+  fr13         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 13,
+  fr14         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 14,
+  fr15         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 15,
+  fr16         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 16,
+  fr17         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 17,
+  fr18         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 18,
+  fr19         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 19,
+  fr20         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 20,
+  fr21         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 21,
+  fr22         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 22,
+  fr23         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 23,
+  fr24         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 24,
+  fr25         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 25,
+  fr26         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 26,
+  fr27         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 27,
+  fr28         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 28,
+  fr29         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 29,
+  fr30         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 30,
+  fr31         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 31,
+
+  dr0          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  0,
+  dr1          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  1,
+  dr2          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  2,
+  dr3          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  3,
+  dr4          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  4,
+  dr5          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  5,
+  dr6          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  6,
+  dr7          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  7,
+  dr8          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  8,
+  dr9          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  9,
+  dr10         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
+  dr11         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
+  dr12         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
+  dr13         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
+  dr14         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
+  dr15         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,
+#if 0
+  // Enable when def/use and runtime able to handle these.
+  dr16         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 16,
+  dr17         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 17,
+  dr18         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 18,
+  dr19         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 19,
+  dr20         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 20,
+  dr21         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 21,
+  dr22         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 22,
+  dr23         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 23,
+  dr24         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 24,
+  dr25         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 25,
+  dr26         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 26,
+  dr27         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 27,
+  dr28         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
+  dr29         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 29,
+  dr30         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
+  dr31         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 31,
+#endif
 };
 
-// TODO: clean this up; reduce use of or eliminate macros
+constexpr RegStorage rs_r0(RegStorage::kValid | r0);
+constexpr RegStorage rs_r1(RegStorage::kValid | r1);
+constexpr RegStorage rs_r2(RegStorage::kValid | r2);
+constexpr RegStorage rs_r3(RegStorage::kValid | r3);
+constexpr RegStorage rs_rARM_SUSPEND(RegStorage::kValid | rARM_SUSPEND);
+constexpr RegStorage rs_r5(RegStorage::kValid | r5);
+constexpr RegStorage rs_r6(RegStorage::kValid | r6);
+constexpr RegStorage rs_r7(RegStorage::kValid | r7);
+constexpr RegStorage rs_r8(RegStorage::kValid | r8);
+constexpr RegStorage rs_rARM_SELF(RegStorage::kValid | rARM_SELF);
+constexpr RegStorage rs_r10(RegStorage::kValid | r10);
+constexpr RegStorage rs_r11(RegStorage::kValid | r11);
+constexpr RegStorage rs_r12(RegStorage::kValid | r12);
+constexpr RegStorage rs_r13sp(RegStorage::kValid | r13sp);
+constexpr RegStorage rs_rARM_SP(RegStorage::kValid | rARM_SP);
+constexpr RegStorage rs_r14lr(RegStorage::kValid | r14lr);
+constexpr RegStorage rs_rARM_LR(RegStorage::kValid | rARM_LR);
+constexpr RegStorage rs_r15pc(RegStorage::kValid | r15pc);
+constexpr RegStorage rs_rARM_PC(RegStorage::kValid | rARM_PC);
+constexpr RegStorage rs_invalid(RegStorage::kInvalid);
 
-const RegStorage rs_r0(RegStorage::k32BitSolo, r0);
-const RegStorage rs_r1(RegStorage::k32BitSolo, r1);
-const RegStorage rs_r2(RegStorage::k32BitSolo, r2);
-const RegStorage rs_r3(RegStorage::k32BitSolo, r3);
-const RegStorage rs_rARM_SUSPEND(RegStorage::k32BitSolo, rARM_SUSPEND);
-const RegStorage rs_r5(RegStorage::k32BitSolo, r5);
-const RegStorage rs_r6(RegStorage::k32BitSolo, r6);
-const RegStorage rs_r7(RegStorage::k32BitSolo, r7);
-const RegStorage rs_r8(RegStorage::k32BitSolo, r8);
-const RegStorage rs_rARM_SELF(RegStorage::k32BitSolo, rARM_SELF);
-const RegStorage rs_r10(RegStorage::k32BitSolo, r10);
-const RegStorage rs_r11(RegStorage::k32BitSolo, r11);
-const RegStorage rs_r12(RegStorage::k32BitSolo, r12);
-const RegStorage rs_r13sp(RegStorage::k32BitSolo, r13sp);
-const RegStorage rs_rARM_SP(RegStorage::k32BitSolo, rARM_SP);
-const RegStorage rs_r14lr(RegStorage::k32BitSolo, r14lr);
-const RegStorage rs_rARM_LR(RegStorage::k32BitSolo, rARM_LR);
-const RegStorage rs_r15pc(RegStorage::k32BitSolo, r15pc);
-const RegStorage rs_rARM_PC(RegStorage::k32BitSolo, rARM_PC);
-const RegStorage rs_invalid(RegStorage::kInvalid);
+constexpr RegStorage rs_fr0(RegStorage::kValid | fr0);
+constexpr RegStorage rs_fr1(RegStorage::kValid | fr1);
+constexpr RegStorage rs_fr2(RegStorage::kValid | fr2);
+constexpr RegStorage rs_fr3(RegStorage::kValid | fr3);
+constexpr RegStorage rs_fr4(RegStorage::kValid | fr4);
+constexpr RegStorage rs_fr5(RegStorage::kValid | fr5);
+constexpr RegStorage rs_fr6(RegStorage::kValid | fr6);
+constexpr RegStorage rs_fr7(RegStorage::kValid | fr7);
+constexpr RegStorage rs_fr8(RegStorage::kValid | fr8);
+constexpr RegStorage rs_fr9(RegStorage::kValid | fr9);
+constexpr RegStorage rs_fr10(RegStorage::kValid | fr10);
+constexpr RegStorage rs_fr11(RegStorage::kValid | fr11);
+constexpr RegStorage rs_fr12(RegStorage::kValid | fr12);
+constexpr RegStorage rs_fr13(RegStorage::kValid | fr13);
+constexpr RegStorage rs_fr14(RegStorage::kValid | fr14);
+constexpr RegStorage rs_fr15(RegStorage::kValid | fr15);
+constexpr RegStorage rs_fr16(RegStorage::kValid | fr16);
+constexpr RegStorage rs_fr17(RegStorage::kValid | fr17);
+constexpr RegStorage rs_fr18(RegStorage::kValid | fr18);
+constexpr RegStorage rs_fr19(RegStorage::kValid | fr19);
+constexpr RegStorage rs_fr20(RegStorage::kValid | fr20);
+constexpr RegStorage rs_fr21(RegStorage::kValid | fr21);
+constexpr RegStorage rs_fr22(RegStorage::kValid | fr22);
+constexpr RegStorage rs_fr23(RegStorage::kValid | fr23);
+constexpr RegStorage rs_fr24(RegStorage::kValid | fr24);
+constexpr RegStorage rs_fr25(RegStorage::kValid | fr25);
+constexpr RegStorage rs_fr26(RegStorage::kValid | fr26);
+constexpr RegStorage rs_fr27(RegStorage::kValid | fr27);
+constexpr RegStorage rs_fr28(RegStorage::kValid | fr28);
+constexpr RegStorage rs_fr29(RegStorage::kValid | fr29);
+constexpr RegStorage rs_fr30(RegStorage::kValid | fr30);
+constexpr RegStorage rs_fr31(RegStorage::kValid | fr31);
 
-// Target-independent aliases.
-#define rARM_ARG0 r0
-#define rs_rARM_ARG0 rs_r0
-#define rARM_ARG1 r1
-#define rs_rARM_ARG1 rs_r1
-#define rARM_ARG2 r2
-#define rs_rARM_ARG2 rs_r2
-#define rARM_ARG3 r3
-#define rs_rARM_ARG3 rs_r3
-#define rARM_FARG0 r0
-#define rs_ARM_FARG0 rs_r0
-#define rARM_FARG1 r1
-#define rs_rARM_FARG1 rs_r1
-#define rARM_FARG2 r2
-#define rs_rARM_FARG2 rs_r2
-#define rARM_FARG3 r3
-#define rs_rARM_FARG3 rs_r3
-#define rARM_RET0 r0
-#define rs_rARM_RET0 rs_r0
-#define rARM_RET1 r1
-#define rs_rARM_RET1 rs_r1
-#define rARM_INVOKE_TGT rARM_LR
-#define rs_rARM_INVOKE_TGT rs_rARM_LR
-#define rARM_COUNT RegStorage::kInvalidRegVal
+constexpr RegStorage rs_dr0(RegStorage::kValid | dr0);
+constexpr RegStorage rs_dr1(RegStorage::kValid | dr1);
+constexpr RegStorage rs_dr2(RegStorage::kValid | dr2);
+constexpr RegStorage rs_dr3(RegStorage::kValid | dr3);
+constexpr RegStorage rs_dr4(RegStorage::kValid | dr4);
+constexpr RegStorage rs_dr5(RegStorage::kValid | dr5);
+constexpr RegStorage rs_dr6(RegStorage::kValid | dr6);
+constexpr RegStorage rs_dr7(RegStorage::kValid | dr7);
+constexpr RegStorage rs_dr8(RegStorage::kValid | dr8);
+constexpr RegStorage rs_dr9(RegStorage::kValid | dr9);
+constexpr RegStorage rs_dr10(RegStorage::kValid | dr10);
+constexpr RegStorage rs_dr11(RegStorage::kValid | dr11);
+constexpr RegStorage rs_dr12(RegStorage::kValid | dr12);
+constexpr RegStorage rs_dr13(RegStorage::kValid | dr13);
+constexpr RegStorage rs_dr14(RegStorage::kValid | dr14);
+constexpr RegStorage rs_dr15(RegStorage::kValid | dr15);
+#if 0
+constexpr RegStorage rs_dr16(RegStorage::kValid | dr16);
+constexpr RegStorage rs_dr17(RegStorage::kValid | dr17);
+constexpr RegStorage rs_dr18(RegStorage::kValid | dr18);
+constexpr RegStorage rs_dr19(RegStorage::kValid | dr19);
+constexpr RegStorage rs_dr20(RegStorage::kValid | dr20);
+constexpr RegStorage rs_dr21(RegStorage::kValid | dr21);
+constexpr RegStorage rs_dr22(RegStorage::kValid | dr22);
+constexpr RegStorage rs_dr23(RegStorage::kValid | dr23);
+constexpr RegStorage rs_dr24(RegStorage::kValid | dr24);
+constexpr RegStorage rs_dr25(RegStorage::kValid | dr25);
+constexpr RegStorage rs_dr26(RegStorage::kValid | dr26);
+constexpr RegStorage rs_dr27(RegStorage::kValid | dr27);
+constexpr RegStorage rs_dr28(RegStorage::kValid | dr28);
+constexpr RegStorage rs_dr29(RegStorage::kValid | dr29);
+constexpr RegStorage rs_dr30(RegStorage::kValid | dr30);
+constexpr RegStorage rs_dr31(RegStorage::kValid | dr31);
+#endif
 
 // RegisterLocation templates return values (r0, or r0/r1).
 const RegLocation arm_loc_c_return
-    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
      RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
 const RegLocation arm_loc_c_return_wide
-    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
      RegStorage(RegStorage::k64BitPair, r0, r1), INVALID_SREG, INVALID_SREG};
 const RegLocation arm_loc_c_return_float
-    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
      RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
 const RegLocation arm_loc_c_return_double
-    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
      RegStorage(RegStorage::k64BitPair, r0, r1), INVALID_SREG, INVALID_SREG};
 
 enum ArmShiftEncodings {
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index f77b0a6..cac766d 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1137,24 +1137,25 @@
                 bits |= value;
                 break;
               case kFmtDfp: {
-                DCHECK(ARM_DOUBLEREG(operand));
-                DCHECK_EQ((operand & 0x1), 0U);
-                uint32_t reg_name = (operand & ARM_FP_REG_MASK) >> 1;
+                DCHECK(RegStorage::IsDouble(operand)) << ", Operand = 0x" << std::hex << operand;
+                uint32_t reg_num = RegStorage::RegNum(operand);
                 /* Snag the 1-bit slice and position it */
-                value = ((reg_name & 0x10) >> 4) << encoder->field_loc[i].end;
+                value = ((reg_num & 0x10) >> 4) << encoder->field_loc[i].end;
                 /* Extract and position the 4-bit slice */
-                value |= (reg_name & 0x0f) << encoder->field_loc[i].start;
+                value |= (reg_num & 0x0f) << encoder->field_loc[i].start;
                 bits |= value;
                 break;
               }
-              case kFmtSfp:
-                DCHECK(ARM_SINGLEREG(operand));
+              case kFmtSfp: {
+                DCHECK(RegStorage::IsSingle(operand)) << ", Operand = 0x" << std::hex << operand;
+                uint32_t reg_num = RegStorage::RegNum(operand);
                 /* Snag the 1-bit slice and position it */
-                value = (operand & 0x1) << encoder->field_loc[i].end;
+                value = (reg_num & 0x1) << encoder->field_loc[i].end;
                 /* Extract and position the 4-bit slice */
-                value |= ((operand & 0x1e) >> 1) << encoder->field_loc[i].start;
+                value |= ((reg_num & 0x1e) >> 1) << encoder->field_loc[i].start;
                 bits |= value;
                 break;
+              }
               case kFmtImm12:
               case kFmtModImm:
                 value = ((operand & 0x800) >> 11) << 26;
@@ -1217,8 +1218,8 @@
   AssignDataOffsets();
 
   /*
-   * Note: generation must be 1 on first pass (to distinguish from initialized state of 0 for non-visited nodes).
-   * Start at zero here, and bit will be flipped to 1 on entry to the loop.
+   * Note: generation must be 1 on first pass (to distinguish from initialized state of 0 for
+   * non-visited nodes).  Start at zero here, and bit will be flipped to 1 on entry to the loop.
    */
   int generation = 0;
   while (true) {
@@ -1244,7 +1245,7 @@
         case kFixupNone:
           break;
         case kFixupVLoad:
-          if (lir->operands[1] != r15pc) {
+          if (lir->operands[1] != rs_r15pc.GetReg()) {
             break;
           }
           // NOTE: intentional fallthrough.
@@ -1285,7 +1286,8 @@
              * happens.
              */
             int base_reg = ((lir->opcode == kThumb2LdrdPcRel8) ||
-                            (lir->opcode == kThumb2LdrPcRel12)) ?  lir->operands[0] : rARM_LR;
+                            (lir->opcode == kThumb2LdrPcRel12)) ?  lir->operands[0] :
+                            rs_rARM_LR.GetReg();
 
             // Add new Adr to generate the address.
             LIR* new_adr = RawLIR(lir->dalvik_offset, kThumb2Adr,
@@ -1500,7 +1502,8 @@
           EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[2]));
           LIR* target = lir->target;
           int32_t target_disp = (tab_rec != NULL) ?  tab_rec->offset + offset_adjustment
-              : target->offset + ((target->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
+              : target->offset + ((target->flags.generation == lir->flags.generation) ? 0 :
+              offset_adjustment);
           int32_t disp = target_disp - ((lir->offset + 4) & ~3);
           if (disp < 4096) {
             lir->operands[1] = disp;
@@ -1533,12 +1536,12 @@
             prev_lir = new_mov16H;  // Now we've got a new prev.
 
             offset_adjustment -= lir->flags.size;
-            if (ARM_LOWREG(lir->operands[0])) {
+            if (RegStorage::RegNum(lir->operands[0]) < 8) {
               lir->opcode = kThumbAddRRLH;
             } else {
               lir->opcode = kThumbAddRRHH;
             }
-            lir->operands[1] = rARM_PC;
+            lir->operands[1] = rs_rARM_PC.GetReg();
             lir->flags.size = EncodingMap[lir->opcode].size;
             offset_adjustment += lir->flags.size;
             // Must stay in fixup list and have offset updated; will be used by LST/HSP pair.
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 9cb56cf..163c0fe 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -54,8 +54,7 @@
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   uint32_t size = table[1];
-  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
-                                                     kArenaAllocLIR));
+  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
   switch_tables_.Insert(tab_rec);
 
   // Get the switch value
@@ -78,7 +77,7 @@
   // Establish loop branch target
   LIR* target = NewLIR0(kPseudoTargetLabel);
   // Load next key/disp
-  NewLIR2(kThumb2LdmiaWB, r_base.GetReg(), (1 << r_key.GetReg()) | (1 << r_disp.GetReg()));
+  NewLIR2(kThumb2LdmiaWB, r_base.GetReg(), (1 << r_key.GetRegNum()) | (1 << r_disp.GetRegNum()));
   OpRegReg(kOpCmp, r_key, rl_src.reg);
   // Go if match. NOTE: No instruction set switch here - must stay Thumb2
   LIR* it = OpIT(kCondEq, "");
@@ -168,7 +167,7 @@
   LoadWordDisp(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pHandleFillArrayData).Int32Value(),
                rs_rARM_LR);
   // Materialize a pointer to the fill data image
-  NewLIR3(kThumb2Adr, r1, 0, WrapPointer(tab_rec));
+  NewLIR3(kThumb2Adr, rs_r1.GetReg(), 0, WrapPointer(tab_rec));
   ClobberCallerSave();
   LIR* call_inst = OpReg(kOpBlx, rs_rARM_LR);
   MarkSafepointPC(call_inst);
@@ -195,10 +194,12 @@
       }
     }
     Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
-    NewLIR3(kThumb2Ldrex, r1, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
+    NewLIR3(kThumb2Ldrex, rs_r1.GetReg(), rs_r0.GetReg(),
+        mirror::Object::MonitorOffset().Int32Value() >> 2);
     MarkPossibleNullPointerException(opt_flags);
     LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r1, 0, NULL);
-    NewLIR4(kThumb2Strex, r1, r2, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
+    NewLIR4(kThumb2Strex, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
+        mirror::Object::MonitorOffset().Int32Value() >> 2);
     LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, NULL);
 
 
@@ -221,16 +222,19 @@
     // Explicit null-check as slow-path is entered using an IT.
     GenNullCheck(rs_r0, opt_flags);
     Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
-    NewLIR3(kThumb2Ldrex, r1, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
+    NewLIR3(kThumb2Ldrex, rs_r1.GetReg(), rs_r0.GetReg(),
+        mirror::Object::MonitorOffset().Int32Value() >> 2);
     MarkPossibleNullPointerException(opt_flags);
     OpRegImm(kOpCmp, rs_r1, 0);
     LIR* it = OpIT(kCondEq, "");
-    NewLIR4(kThumb2Strex/*eq*/, r1, r2, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
+    NewLIR4(kThumb2Strex/*eq*/, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
+        mirror::Object::MonitorOffset().Int32Value() >> 2);
     OpEndIT(it);
     OpRegImm(kOpCmp, rs_r1, 0);
     it = OpIT(kCondNe, "T");
     // Go expensive route - artLockObjectFromCode(self, obj);
-    LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pLockObject).Int32Value(), rs_rARM_LR);
+    LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pLockObject).Int32Value(),
+                       rs_rARM_LR);
     ClobberCallerSave();
     LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rARM_LR);
     OpEndIT(it);
@@ -339,10 +343,10 @@
    * expanding the frame or flushing.  This leaves the utility
    * code with a single temp: r12.  This should be enough.
    */
-  LockTemp(r0);
-  LockTemp(r1);
-  LockTemp(r2);
-  LockTemp(r3);
+  LockTemp(rs_r0);
+  LockTemp(rs_r1);
+  LockTemp(rs_r2);
+  LockTemp(rs_r3);
 
   /*
    * We can safely skip the stack overflow check if we're
@@ -433,10 +437,10 @@
 
   FlushIns(ArgLocs, rl_method);
 
-  FreeTemp(r0);
-  FreeTemp(r1);
-  FreeTemp(r2);
-  FreeTemp(r3);
+  FreeTemp(rs_r0);
+  FreeTemp(rs_r1);
+  FreeTemp(rs_r2);
+  FreeTemp(rs_r3);
 }
 
 void ArmMir2Lir::GenExitSequence() {
@@ -445,8 +449,8 @@
    * In the exit path, r0/r1 are live - make sure they aren't
    * allocated by the register utilities as temps.
    */
-  LockTemp(r0);
-  LockTemp(r1);
+  LockTemp(rs_r0);
+  LockTemp(rs_r1);
 
   NewLIR0(kPseudoMethodExit);
   OpRegImm(kOpAdd, rs_rARM_SP, frame_size_ - (spill_count * 4));
@@ -454,20 +458,20 @@
   if (num_fp_spills_) {
     NewLIR1(kThumb2VPopCS, num_fp_spills_);
   }
-  if (core_spill_mask_ & (1 << rARM_LR)) {
+  if (core_spill_mask_ & (1 << rs_rARM_LR.GetRegNum())) {
     /* Unspill rARM_LR to rARM_PC */
-    core_spill_mask_ &= ~(1 << rARM_LR);
-    core_spill_mask_ |= (1 << rARM_PC);
+    core_spill_mask_ &= ~(1 << rs_rARM_LR.GetRegNum());
+    core_spill_mask_ |= (1 << rs_rARM_PC.GetRegNum());
   }
   NewLIR1(kThumb2Pop, core_spill_mask_);
-  if (!(core_spill_mask_ & (1 << rARM_PC))) {
+  if (!(core_spill_mask_ & (1 << rs_rARM_PC.GetRegNum()))) {
     /* We didn't pop to rARM_PC, so must do a bv rARM_LR */
-    NewLIR1(kThumbBx, rARM_LR);
+    NewLIR1(kThumbBx, rs_rARM_LR.GetReg());
   }
 }
 
 void ArmMir2Lir::GenSpecialExitSequence() {
-  NewLIR1(kThumbBx, rARM_LR);
+  NewLIR1(kThumbBx, rs_rARM_LR.GetReg());
 }
 
 }  // namespace art
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 646859c..9d1723a 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -38,7 +38,7 @@
     LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
                          OpSize size);
     LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                             RegStorage r_dest, RegStorage r_dest_hi, OpSize size, int s_reg);
+                             RegStorage r_dest, OpSize size, int s_reg);
     LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
     LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
     LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
@@ -46,16 +46,12 @@
     LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
                           OpSize size);
     LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                              RegStorage r_src, RegStorage r_src_hi, OpSize size, int s_reg);
+                              RegStorage r_src, OpSize size, int s_reg);
     void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
 
     // Required for target - register utilities.
-    bool IsFpReg(int reg);
-    bool IsFpReg(RegStorage reg);
-    bool SameRegType(int reg1, int reg2);
     RegStorage AllocTypedTemp(bool fp_hint, int reg_class);
     RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
-    int S2d(int low_reg, int high_reg);
     RegStorage TargetReg(SpecialTargetRegister reg);
     RegStorage GetArgMappingToPhysicalReg(int arg_num);
     RegLocation GetReturnAlt();
@@ -64,17 +60,16 @@
     RegLocation LocCReturnDouble();
     RegLocation LocCReturnFloat();
     RegLocation LocCReturnWide();
-    uint32_t FpRegMask();
-    uint64_t GetRegMaskCommon(int reg);
+    uint64_t GetRegMaskCommon(RegStorage reg);
     void AdjustSpillMask();
     void ClobberCallerSave();
-    void FlushReg(RegStorage reg);
-    void FlushRegWide(RegStorage reg);
     void FreeCallTemps();
     void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
     void LockCallTemps();
-    void MarkPreservedSingle(int v_reg, int reg);
+    void MarkPreservedSingle(int v_reg, RegStorage reg);
+    void MarkPreservedDouble(int v_reg, RegStorage reg);
     void CompilerInitializeRegAlloc();
+    RegStorage AllocPreservedDouble(int s_reg);
 
     // Required for target - miscellaneous.
     void AssembleLIR();
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index d72f596..bb02f74 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -111,13 +111,11 @@
   rl_result = EvalLoc(rl_dest, kFPReg, true);
   DCHECK(rl_dest.wide);
   DCHECK(rl_result.wide);
-  NewLIR3(op, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()), S2d(rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg()),
-          S2d(rl_src2.reg.GetLowReg(), rl_src2.reg.GetHighReg()));
+  NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
   StoreValueWide(rl_dest, rl_result);
 }
 
-void ArmMir2Lir::GenConversion(Instruction::Code opcode,
-                               RegLocation rl_dest, RegLocation rl_src) {
+void ArmMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src) {
   int op = kThumbBkpt;
   int src_reg;
   RegLocation rl_result;
@@ -143,19 +141,16 @@
       break;
     case Instruction::LONG_TO_DOUBLE: {
       rl_src = LoadValueWide(rl_src, kFPReg);
-      src_reg = S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg());
+      RegStorage src_low = rl_src.reg.DoubleToLowSingle();
+      RegStorage src_high = rl_src.reg.DoubleToHighSingle();
       rl_result = EvalLoc(rl_dest, kFPReg, true);
-      // TODO: fix AllocTempDouble to return a k64BitSolo double reg and lose the ARM_FP_DOUBLE.
       RegStorage tmp1 = AllocTempDouble();
       RegStorage tmp2 = AllocTempDouble();
 
-      // FIXME: needs 64-bit register cleanup.
-      NewLIR2(kThumb2VcvtF64S32, tmp1.GetLowReg() | ARM_FP_DOUBLE, (src_reg & ~ARM_FP_DOUBLE) + 1);
-      NewLIR2(kThumb2VcvtF64U32, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()),
-              (src_reg & ~ARM_FP_DOUBLE));
+      NewLIR2(kThumb2VcvtF64S32, tmp1.GetReg(), src_high.GetReg());
+      NewLIR2(kThumb2VcvtF64U32, rl_result.reg.GetReg(), src_low.GetReg());
       LoadConstantWide(tmp2, 0x41f0000000000000LL);
-      NewLIR3(kThumb2VmlaF64, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()),
-              tmp1.GetLowReg() | ARM_FP_DOUBLE, tmp2.GetLowReg() | ARM_FP_DOUBLE);
+      NewLIR3(kThumb2VmlaF64, rl_result.reg.GetReg(), tmp1.GetReg(), tmp2.GetReg());
       FreeTemp(tmp1);
       FreeTemp(tmp2);
       StoreValueWide(rl_dest, rl_result);
@@ -166,23 +161,20 @@
       return;
     case Instruction::LONG_TO_FLOAT: {
       rl_src = LoadValueWide(rl_src, kFPReg);
-      src_reg = S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg());
+      RegStorage src_low = rl_src.reg.DoubleToLowSingle();
+      RegStorage src_high = rl_src.reg.DoubleToHighSingle();
       rl_result = EvalLoc(rl_dest, kFPReg, true);
       // Allocate temp registers.
       RegStorage high_val = AllocTempDouble();
       RegStorage low_val = AllocTempDouble();
       RegStorage const_val = AllocTempDouble();
       // Long to double.
-      NewLIR2(kThumb2VcvtF64S32, high_val.GetLowReg() | ARM_FP_DOUBLE,
-              (src_reg & ~ARM_FP_DOUBLE) + 1);
-      NewLIR2(kThumb2VcvtF64U32, low_val.GetLowReg() | ARM_FP_DOUBLE,
-              (src_reg & ~ARM_FP_DOUBLE));
+      NewLIR2(kThumb2VcvtF64S32, high_val.GetReg(), src_high.GetReg());
+      NewLIR2(kThumb2VcvtF64U32, low_val.GetReg(), src_low.GetReg());
       LoadConstantWide(const_val, INT64_C(0x41f0000000000000));
-      NewLIR3(kThumb2VmlaF64, low_val.GetLowReg() | ARM_FP_DOUBLE,
-              high_val.GetLowReg() | ARM_FP_DOUBLE,
-              const_val.GetLowReg() | ARM_FP_DOUBLE);
+      NewLIR3(kThumb2VmlaF64, low_val.GetReg(), high_val.GetReg(), const_val.GetReg());
       // Double to float.
-      NewLIR2(kThumb2VcvtDF, rl_result.reg.GetReg(), low_val.GetLowReg() | ARM_FP_DOUBLE);
+      NewLIR2(kThumb2VcvtDF, rl_result.reg.GetReg(), low_val.GetReg());
       // Free temp registers.
       FreeTemp(high_val);
       FreeTemp(low_val);
@@ -199,14 +191,14 @@
   }
   if (rl_src.wide) {
     rl_src = LoadValueWide(rl_src, kFPReg);
-    src_reg = S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg());
+    src_reg = rl_src.reg.GetReg();
   } else {
     rl_src = LoadValue(rl_src, kFPReg);
     src_reg = rl_src.reg.GetReg();
   }
   if (rl_dest.wide) {
     rl_result = EvalLoc(rl_dest, kFPReg, true);
-    NewLIR2(op, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()), src_reg);
+    NewLIR2(op, rl_result.reg.GetReg(), src_reg);
     StoreValueWide(rl_dest, rl_result);
   } else {
     rl_result = EvalLoc(rl_dest, kFPReg, true);
@@ -225,8 +217,7 @@
     rl_src2 = mir_graph_->GetSrcWide(mir, 2);
     rl_src1 = LoadValueWide(rl_src1, kFPReg);
     rl_src2 = LoadValueWide(rl_src2, kFPReg);
-    NewLIR2(kThumb2Vcmpd, S2d(rl_src1.reg.GetLowReg(), rl_src2.reg.GetHighReg()),
-            S2d(rl_src2.reg.GetLowReg(), rl_src2.reg.GetHighReg()));
+    NewLIR2(kThumb2Vcmpd, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
   } else {
     rl_src1 = mir_graph_->GetSrc(mir, 0);
     rl_src2 = mir_graph_->GetSrc(mir, 1);
@@ -300,8 +291,7 @@
     ClobberSReg(rl_dest.s_reg_low);
     rl_result = EvalLoc(rl_dest, kCoreReg, true);
     LoadConstant(rl_result.reg, default_result);
-    NewLIR2(kThumb2Vcmpd, S2d(rl_src1.reg.GetLowReg(), rl_src2.reg.GetHighReg()),
-            S2d(rl_src2.reg.GetLowReg(), rl_src2.reg.GetHighReg()));
+    NewLIR2(kThumb2Vcmpd, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
   } else {
     rl_src1 = LoadValue(rl_src1, kFPReg);
     rl_src2 = LoadValue(rl_src2, kFPReg);
@@ -311,7 +301,7 @@
     LoadConstant(rl_result.reg, default_result);
     NewLIR2(kThumb2Vcmps, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
   }
-  DCHECK(!ARM_FPREG(rl_result.reg.GetReg()));
+  DCHECK(!rl_result.reg.IsFloat());
   NewLIR0(kThumb2Fmstat);
 
   LIR* it = OpIT((default_result == -1) ? kCondGt : kCondMi, "");
@@ -338,8 +328,7 @@
   RegLocation rl_result;
   rl_src = LoadValueWide(rl_src, kFPReg);
   rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(kThumb2Vnegd, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()),
-          S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg()));
+  NewLIR2(kThumb2Vnegd, rl_result.reg.GetReg(), rl_src.reg.GetReg());
   StoreValueWide(rl_dest, rl_result);
 }
 
@@ -350,19 +339,16 @@
   RegLocation rl_dest = InlineTargetWide(info);  // double place for result
   rl_src = LoadValueWide(rl_src, kFPReg);
   RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
-  // TODO: shouldn't need S2d once 64bitSolo has proper double tag bit.
-  NewLIR2(kThumb2Vsqrtd, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()),
-          S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg()));
-  NewLIR2(kThumb2Vcmpd, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()),
-          S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()));
+  NewLIR2(kThumb2Vsqrtd, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  NewLIR2(kThumb2Vcmpd, rl_result.reg.GetReg(), rl_result.reg.GetReg());
   NewLIR0(kThumb2Fmstat);
   branch = NewLIR2(kThumbBCond, 0, kArmCondEq);
   ClobberCallerSave();
   LockCallTemps();  // Using fixed registers
   RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pSqrt));
-  NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg()));
+  NewLIR3(kThumb2Fmrrd, rs_r0.GetReg(), rs_r1.GetReg(), rl_src.reg.GetReg());
   NewLIR1(kThumbBlxR, r_tgt.GetReg());
-  NewLIR3(kThumb2Fmdrr, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()), r0, r1);
+  NewLIR3(kThumb2Fmdrr, rl_result.reg.GetReg(), rs_r0.GetReg(), rs_r1.GetReg());
   branch->target = NewLIR0(kPseudoTargetLabel);
   StoreValueWide(rl_dest, rl_result);
   return true;
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index a2d6373..8391c03 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -322,7 +322,7 @@
    */
   bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget));
   skip &= ((cu_->code_item->insns_size_in_code_units_ - current_dalvik_offset_) > 64);
-  if (!skip && (ARM_LOWREG(reg.GetReg())) && (check_value == 0) &&
+  if (!skip && reg.Low8() && (check_value == 0) &&
      ((arm_cond == kArmCondEq) || (arm_cond == kArmCondNe))) {
     branch = NewLIR2((arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
                      reg.GetReg(), 0);
@@ -344,13 +344,13 @@
   if (r_src.IsPair()) {
     r_src = r_src.GetLow();
   }
-  if (ARM_FPREG(r_dest.GetReg()) || ARM_FPREG(r_src.GetReg()))
+  if (r_dest.IsFloat() || r_src.IsFloat())
     return OpFpRegCopy(r_dest, r_src);
-  if (ARM_LOWREG(r_dest.GetReg()) && ARM_LOWREG(r_src.GetReg()))
+  if (r_dest.Low8() && r_src.Low8())
     opcode = kThumbMovRR;
-  else if (!ARM_LOWREG(r_dest.GetReg()) && !ARM_LOWREG(r_src.GetReg()))
+  else if (!r_dest.Low8() && !r_src.Low8())
      opcode = kThumbMovRR_H2H;
-  else if (ARM_LOWREG(r_dest.GetReg()))
+  else if (r_dest.Low8())
      opcode = kThumbMovRR_H2L;
   else
      opcode = kThumbMovRR_L2H;
@@ -370,21 +370,19 @@
 
 void ArmMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
   if (r_dest != r_src) {
-    bool dest_fp = ARM_FPREG(r_dest.GetLowReg());
-    bool src_fp = ARM_FPREG(r_src.GetLowReg());
+    bool dest_fp = r_dest.IsFloat();
+    bool src_fp = r_src.IsFloat();
+    DCHECK(r_dest.Is64Bit());
+    DCHECK(r_src.Is64Bit());
     if (dest_fp) {
       if (src_fp) {
-        // FIXME: handle 64-bit solo's here.
-        OpRegCopy(RegStorage::Solo64(S2d(r_dest.GetLowReg(), r_dest.GetHighReg())),
-                  RegStorage::Solo64(S2d(r_src.GetLowReg(), r_src.GetHighReg())));
+        OpRegCopy(r_dest, r_src);
       } else {
-        NewLIR3(kThumb2Fmdrr, S2d(r_dest.GetLowReg(), r_dest.GetHighReg()),
-                r_src.GetLowReg(), r_src.GetHighReg());
+        NewLIR3(kThumb2Fmdrr, r_dest.GetReg(), r_src.GetLowReg(), r_src.GetHighReg());
       }
     } else {
       if (src_fp) {
-        NewLIR3(kThumb2Fmrrd, r_dest.GetLowReg(), r_dest.GetHighReg(), S2d(r_src.GetLowReg(),
-                r_src.GetHighReg()));
+        NewLIR3(kThumb2Fmrrd, r_dest.GetLowReg(), r_dest.GetHighReg(), r_src.GetReg());
       } else {
         // Handle overlap
         if (r_src.GetHighReg() == r_dest.GetLowReg()) {
@@ -747,16 +745,18 @@
   // around the potentially locked temp by using LR for r_ptr, unconditionally.
   // TODO: Pass information about the need for more temps to the stack frame generation
   // code so that we can rely on being able to allocate enough temps.
-  DCHECK(!reg_pool_->core_regs[rARM_LR].is_temp);
-  MarkTemp(rARM_LR);
-  FreeTemp(rARM_LR);
-  LockTemp(rARM_LR);
+  DCHECK(!GetRegInfo(rs_rARM_LR)->IsTemp());
+  MarkTemp(rs_rARM_LR);
+  FreeTemp(rs_rARM_LR);
+  LockTemp(rs_rARM_LR);
   bool load_early = true;
   if (is_long) {
-    int expected_reg = is_long ? rl_src_expected.reg.GetLowReg() : rl_src_expected.reg.GetReg();
-    int new_val_reg = is_long ? rl_src_new_value.reg.GetLowReg() : rl_src_new_value.reg.GetReg();
-    bool expected_is_core_reg = rl_src_expected.location == kLocPhysReg && !IsFpReg(expected_reg);
-    bool new_value_is_core_reg = rl_src_new_value.location == kLocPhysReg && !IsFpReg(new_val_reg);
+    RegStorage expected_reg = rl_src_expected.reg.IsPair() ? rl_src_expected.reg.GetLow() :
+        rl_src_expected.reg;
+    RegStorage new_val_reg = rl_src_new_value.reg.IsPair() ? rl_src_new_value.reg.GetLow() :
+        rl_src_new_value.reg;
+    bool expected_is_core_reg = rl_src_expected.location == kLocPhysReg && !expected_reg.IsFloat();
+    bool new_value_is_core_reg = rl_src_new_value.location == kLocPhysReg && !new_val_reg.IsFloat();
     bool expected_is_good_reg = expected_is_core_reg && !IsTemp(expected_reg);
     bool new_value_is_good_reg = new_value_is_core_reg && !IsTemp(new_val_reg);
 
@@ -802,9 +802,9 @@
 
   // Free now unneeded rl_object and rl_offset to give more temps.
   ClobberSReg(rl_object.s_reg_low);
-  FreeTemp(rl_object.reg.GetReg());
+  FreeTemp(rl_object.reg);
   ClobberSReg(rl_offset.s_reg_low);
-  FreeTemp(rl_offset.reg.GetReg());
+  FreeTemp(rl_offset.reg);
 
   RegLocation rl_expected;
   if (!is_long) {
@@ -813,9 +813,9 @@
     rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
   } else {
     // NOTE: partially defined rl_expected & rl_new_value - but we just want the regs.
-    int low_reg = AllocTemp().GetReg();
-    int high_reg = AllocTemp().GetReg();
-    rl_new_value.reg = RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
+    RegStorage low_reg = AllocTemp();
+    RegStorage high_reg = AllocTemp();
+    rl_new_value.reg = RegStorage::MakeRegPair(low_reg, high_reg);
     rl_expected = rl_new_value;
   }
 
@@ -840,7 +840,7 @@
       LoadValueDirectWide(rl_src_new_value, rl_new_value.reg);
     }
     // Make sure we use ORR that sets the ccode
-    if (ARM_LOWREG(r_tmp.GetReg()) && ARM_LOWREG(r_tmp_high.GetReg())) {
+    if (r_tmp.Low8() && r_tmp_high.Low8()) {
       NewLIR2(kThumbOrr, r_tmp.GetReg(), r_tmp_high.GetReg());
     } else {
       NewLIR4(kThumb2OrrRRRs, r_tmp.GetReg(), r_tmp.GetReg(), r_tmp_high.GetReg(), 0);
@@ -881,8 +881,8 @@
   StoreValue(rl_dest, rl_result);
 
   // Now, restore lr to its non-temp status.
-  Clobber(rARM_LR);
-  UnmarkTemp(rARM_LR);
+  Clobber(rs_rARM_LR);
+  UnmarkTemp(rs_rARM_LR);
   return true;
 }
 
@@ -891,11 +891,11 @@
 }
 
 LIR* ArmMir2Lir::OpVldm(RegStorage r_base, int count) {
-  return NewLIR3(kThumb2Vldms, r_base.GetReg(), fr0, count);
+  return NewLIR3(kThumb2Vldms, r_base.GetReg(), rs_fr0.GetReg(), count);
 }
 
 LIR* ArmMir2Lir::OpVstm(RegStorage r_base, int count) {
-  return NewLIR3(kThumb2Vstms, r_base.GetReg(), fr0, count);
+  return NewLIR3(kThumb2Vstms, r_base.GetReg(), rs_fr0.GetReg(), count);
 }
 
 void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
@@ -918,7 +918,7 @@
 
 // Test suspend flag, return target of taken suspend branch
 LIR* ArmMir2Lir::OpTestSuspend(LIR* target) {
-  NewLIR2(kThumbSubRI8, rARM_SUSPEND, 1);
+  NewLIR2(kThumbSubRI8, rs_rARM_SUSPEND.GetReg(), 1);
   return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
 }
 
@@ -1012,9 +1012,9 @@
     RegStorage res_lo;
     RegStorage res_hi;
     bool dest_promoted = rl_dest.location == kLocPhysReg && rl_dest.reg.Valid() &&
-        !IsTemp(rl_dest.reg.GetLowReg()) && !IsTemp(rl_dest.reg.GetHighReg());
-    bool src1_promoted = !IsTemp(rl_src1.reg.GetLowReg()) && !IsTemp(rl_src1.reg.GetHighReg());
-    bool src2_promoted = !IsTemp(rl_src2.reg.GetLowReg()) && !IsTemp(rl_src2.reg.GetHighReg());
+        !IsTemp(rl_dest.reg.GetLow()) && !IsTemp(rl_dest.reg.GetHigh());
+    bool src1_promoted = !IsTemp(rl_src1.reg.GetLow()) && !IsTemp(rl_src1.reg.GetHigh());
+    bool src2_promoted = !IsTemp(rl_src2.reg.GetLow()) && !IsTemp(rl_src2.reg.GetHigh());
     // Check if rl_dest is *not* either operand and we have enough temp registers.
     if ((rl_dest.s_reg_low != rl_src1.s_reg_low && rl_dest.s_reg_low != rl_src2.s_reg_low) &&
         (dest_promoted || src1_promoted || src2_promoted)) {
@@ -1036,10 +1036,10 @@
     }
 
     // Temporarily add LR to the temp pool, and assign it to tmp1
-    MarkTemp(rARM_LR);
-    FreeTemp(rARM_LR);
+    MarkTemp(rs_rARM_LR);
+    FreeTemp(rs_rARM_LR);
     RegStorage tmp1 = rs_rARM_LR;
-    LockTemp(rARM_LR);
+    LockTemp(rs_rARM_LR);
 
     if (rl_src1.reg == rl_src2.reg) {
       DCHECK(res_hi.Valid());
@@ -1054,7 +1054,7 @@
         DCHECK(!res_hi.Valid());
         DCHECK_NE(rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
         DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
-        FreeTemp(rl_src1.reg.GetHighReg());
+        FreeTemp(rl_src1.reg.GetHigh());
         res_hi = AllocTemp();
       }
       DCHECK(res_hi.Valid());
@@ -1073,8 +1073,8 @@
 
     // Now, restore lr to its non-temp status.
     FreeTemp(tmp1);
-    Clobber(rARM_LR);
-    UnmarkTemp(rARM_LR);
+    Clobber(rs_rARM_LR);
+    UnmarkTemp(rs_rARM_LR);
 
     if (reg_status != 0) {
       // We had manually allocated registers for rl_result.
@@ -1116,7 +1116,7 @@
  */
 void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                              RegLocation rl_index, RegLocation rl_dest, int scale) {
-  RegisterClass reg_class = oat_reg_class_by_size(size);
+  RegisterClass reg_class = RegClassBySize(size);
   int len_offset = mirror::Array::LengthOffset().Int32Value();
   int data_offset;
   RegLocation rl_result;
@@ -1158,7 +1158,7 @@
       // No special indexed operation, lea + load w/ displacement
       reg_ptr = AllocTemp();
       OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kArmLsl, scale));
-      FreeTemp(rl_index.reg.GetReg());
+      FreeTemp(rl_index.reg);
     }
     rl_result = EvalLoc(rl_dest, reg_class, true);
 
@@ -1189,7 +1189,7 @@
     // Offset base, then use indexed load
     RegStorage reg_ptr = AllocTemp();
     OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
-    FreeTemp(rl_array.reg.GetReg());
+    FreeTemp(rl_array.reg);
     rl_result = EvalLoc(rl_dest, reg_class, true);
 
     if (needs_range_check) {
@@ -1209,7 +1209,7 @@
  */
 void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                              RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
-  RegisterClass reg_class = oat_reg_class_by_size(size);
+  RegisterClass reg_class = RegClassBySize(size);
   int len_offset = mirror::Array::LengthOffset().Int32Value();
   bool constant_index = rl_index.is_const;
 
@@ -1234,8 +1234,8 @@
   bool allocated_reg_ptr_temp = false;
   if (constant_index) {
     reg_ptr = rl_array.reg;
-  } else if (IsTemp(rl_array.reg.GetReg()) && !card_mark) {
-    Clobber(rl_array.reg.GetReg());
+  } else if (IsTemp(rl_array.reg) && !card_mark) {
+    Clobber(rl_array.reg);
     reg_ptr = rl_array.reg;
   } else {
     allocated_reg_ptr_temp = true;
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 305e89b..f59720b 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -25,16 +25,41 @@
 
 namespace art {
 
-static int core_regs[] = {r0, r1, r2, r3, rARM_SUSPEND, r5, r6, r7, r8, rARM_SELF, r10,
-                         r11, r12, rARM_SP, rARM_LR, rARM_PC};
-static int ReservedRegs[] = {rARM_SUSPEND, rARM_SELF, rARM_SP, rARM_LR, rARM_PC};
-static int FpRegs[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
-                       fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15,
-                       fr16, fr17, fr18, fr19, fr20, fr21, fr22, fr23,
-                       fr24, fr25, fr26, fr27, fr28, fr29, fr30, fr31};
-static int core_temps[] = {r0, r1, r2, r3, r12};
-static int fp_temps[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
-                        fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15};
+// TODO: rework this when c++11 support allows.
+static const RegStorage core_regs_arr[] =
+    {rs_r0, rs_r1, rs_r2, rs_r3, rs_rARM_SUSPEND, rs_r5, rs_r6, rs_r7, rs_r8, rs_rARM_SELF,
+     rs_r10, rs_r11, rs_r12, rs_rARM_SP, rs_rARM_LR, rs_rARM_PC};
+static const RegStorage sp_regs_arr[] =
+    {rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, rs_fr8, rs_fr9, rs_fr10,
+     rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15, rs_fr16, rs_fr17, rs_fr18, rs_fr19, rs_fr20,
+     rs_fr21, rs_fr22, rs_fr23, rs_fr24, rs_fr25, rs_fr26, rs_fr27, rs_fr28, rs_fr29, rs_fr30,
+     rs_fr31};
+static const RegStorage dp_regs_arr[] =
+    {rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, rs_dr8, rs_dr9, rs_dr10,
+     rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15};
+static const RegStorage reserved_regs_arr[] =
+    {rs_rARM_SUSPEND, rs_rARM_SELF, rs_rARM_SP, rs_rARM_LR, rs_rARM_PC};
+static const RegStorage core_temps_arr[] = {rs_r0, rs_r1, rs_r2, rs_r3, rs_r12};
+static const RegStorage sp_temps_arr[] =
+    {rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, rs_fr8, rs_fr9, rs_fr10,
+     rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15};
+static const RegStorage dp_temps_arr[] =
+    {rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7};
+
+static const std::vector<RegStorage> core_regs(core_regs_arr,
+    core_regs_arr + sizeof(core_regs_arr) / sizeof(core_regs_arr[0]));
+static const std::vector<RegStorage> sp_regs(sp_regs_arr,
+    sp_regs_arr + sizeof(sp_regs_arr) / sizeof(sp_regs_arr[0]));
+static const std::vector<RegStorage> dp_regs(dp_regs_arr,
+    dp_regs_arr + sizeof(dp_regs_arr) / sizeof(dp_regs_arr[0]));
+static const std::vector<RegStorage> reserved_regs(reserved_regs_arr,
+    reserved_regs_arr + sizeof(reserved_regs_arr) / sizeof(reserved_regs_arr[0]));
+static const std::vector<RegStorage> core_temps(core_temps_arr,
+    core_temps_arr + sizeof(core_temps_arr) / sizeof(core_temps_arr[0]));
+static const std::vector<RegStorage> sp_temps(sp_temps_arr,
+    sp_temps_arr + sizeof(sp_temps_arr) / sizeof(sp_temps_arr[0]));
+static const std::vector<RegStorage> dp_temps(dp_temps_arr,
+    dp_temps_arr + sizeof(dp_temps_arr) / sizeof(dp_temps_arr[0]));
 
 RegLocation ArmMir2Lir::LocCReturn() {
   return arm_loc_c_return;
@@ -54,74 +79,61 @@
 
 // Return a target-dependent special register.
 RegStorage ArmMir2Lir::TargetReg(SpecialTargetRegister reg) {
-  int res_reg = RegStorage::kInvalidRegVal;
+  RegStorage res_reg = RegStorage::InvalidReg();
   switch (reg) {
-    case kSelf: res_reg = rARM_SELF; break;
-    case kSuspend: res_reg =  rARM_SUSPEND; break;
-    case kLr: res_reg =  rARM_LR; break;
-    case kPc: res_reg =  rARM_PC; break;
-    case kSp: res_reg =  rARM_SP; break;
-    case kArg0: res_reg = rARM_ARG0; break;
-    case kArg1: res_reg = rARM_ARG1; break;
-    case kArg2: res_reg = rARM_ARG2; break;
-    case kArg3: res_reg = rARM_ARG3; break;
-    case kFArg0: res_reg = rARM_FARG0; break;
-    case kFArg1: res_reg = rARM_FARG1; break;
-    case kFArg2: res_reg = rARM_FARG2; break;
-    case kFArg3: res_reg = rARM_FARG3; break;
-    case kRet0: res_reg = rARM_RET0; break;
-    case kRet1: res_reg = rARM_RET1; break;
-    case kInvokeTgt: res_reg = rARM_INVOKE_TGT; break;
-    case kHiddenArg: res_reg = r12; break;
-    case kHiddenFpArg: res_reg = RegStorage::kInvalidRegVal; break;
-    case kCount: res_reg = rARM_COUNT; break;
+    case kSelf: res_reg = rs_rARM_SELF; break;
+    case kSuspend: res_reg = rs_rARM_SUSPEND; break;
+    case kLr: res_reg = rs_rARM_LR; break;
+    case kPc: res_reg = rs_rARM_PC; break;
+    case kSp: res_reg = rs_rARM_SP; break;
+    case kArg0: res_reg = rs_r0; break;
+    case kArg1: res_reg = rs_r1; break;
+    case kArg2: res_reg = rs_r2; break;
+    case kArg3: res_reg = rs_r3; break;
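+    // With the current soft-float calling convention, FP args alias the core arg regs r0-r3;
+    // revisit when the hard-float convention lands (see the TODOs later in this file).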
+    case kFArg0: res_reg = rs_r0; break;
+    case kFArg1: res_reg = rs_r1; break;
+    case kFArg2: res_reg = rs_r2; break;
+    case kFArg3: res_reg = rs_r3; break;
+    case kRet0: res_reg = rs_r0; break;
+    case kRet1: res_reg = rs_r1; break;
+    case kInvokeTgt: res_reg = rs_rARM_LR; break;
+    case kHiddenArg: res_reg = rs_r12; break;
+    case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
+    case kCount: res_reg = RegStorage::InvalidReg(); break;
   }
-  return RegStorage::Solo32(res_reg);
+  return res_reg;
 }
 
 RegStorage ArmMir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
   // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
   switch (arg_num) {
     case 0:
-      return rs_rARM_ARG1;
+      return rs_r1;
     case 1:
-      return rs_rARM_ARG2;
+      return rs_r2;
     case 2:
-      return rs_rARM_ARG3;
+      return rs_r3;
     default:
       return RegStorage::InvalidReg();
   }
 }
 
-// Create a double from a pair of singles.
-int ArmMir2Lir::S2d(int low_reg, int high_reg) {
-  return ARM_S2D(low_reg, high_reg);
-}
-
-// Return mask to strip off fp reg flags and bias.
-uint32_t ArmMir2Lir::FpRegMask() {
-  return ARM_FP_REG_MASK;
-}
-
-// True if both regs single, both core or both double.
-bool ArmMir2Lir::SameRegType(int reg1, int reg2) {
-  return (ARM_REGTYPE(reg1) == ARM_REGTYPE(reg2));
-}
-
 /*
  * Decode the register id.
  */
-uint64_t ArmMir2Lir::GetRegMaskCommon(int reg) {
+uint64_t ArmMir2Lir::GetRegMaskCommon(RegStorage reg) {
   uint64_t seed;
   int shift;
-  int reg_id;
-
-
-  reg_id = reg & 0x1f;
+  int reg_id = reg.GetRegNum();
   /* Each double register is equal to a pair of single-precision FP registers */
-  seed = ARM_DOUBLEREG(reg) ? 3 : 1;
+  if (reg.IsDouble()) {
+    seed = 0x3;
+    reg_id = reg_id << 1;
+  } else {
+    seed = 1;
+  }
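+  // Worked example (illustrative): d2 has reg_id 2; the double path doubles it to 4 and uses
+  // a two-bit seed, so with the FP bias below the mask is 0x3 << 20 - the bits for the
+  // overlapping singles s4 and s5.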
   /* FP register starts at bit position 16 */
-  shift = ARM_FPREG(reg) ? kArmFPReg0 : 0;
+  shift = reg.IsFloat() ? kArmFPReg0 : 0;
   /* Expand the double register id into single offset */
   shift += reg_id;
   return (seed << shift);
@@ -196,7 +208,7 @@
     }
     /* Fixup for kThumbPush/lr and kThumbPop/pc */
     if (opcode == kThumbPush || opcode == kThumbPop) {
-      uint64_t r8Mask = GetRegMaskCommon(r8);
+      uint64_t r8Mask = GetRegMaskCommon(rs_r8);
       if ((opcode == kThumbPush) && (lir->u.m.use_mask & r8Mask)) {
         lir->u.m.use_mask &= ~r8Mask;
         lir->u.m.use_mask |= ENCODE_ARM_REG_LR;
@@ -274,9 +286,9 @@
     if (vector & 0x1) {
       int reg_id = i;
       if (opcode == kThumbPush && i == 8) {
-        reg_id = r14lr;
+        reg_id = rs_rARM_LR.GetRegNum();
       } else if (opcode == kThumbPop && i == 8) {
-        reg_id = r15pc;
+        reg_id = rs_rARM_PC.GetRegNum();
       }
       if (printed) {
         snprintf(buf + strlen(buf), buf_size - strlen(buf), ", r%d", reg_id);
@@ -391,10 +403,10 @@
              snprintf(tbuf, arraysize(tbuf), "%d [%#x]", operand, operand);
              break;
            case 's':
-             snprintf(tbuf, arraysize(tbuf), "s%d", operand & ARM_FP_REG_MASK);
+             snprintf(tbuf, arraysize(tbuf), "s%d", RegStorage::RegNum(operand));
              break;
            case 'S':
-             snprintf(tbuf, arraysize(tbuf), "d%d", (operand & ARM_FP_REG_MASK) >> 1);
+             snprintf(tbuf, arraysize(tbuf), "d%d", RegStorage::RegNum(operand));
              break;
            case 'h':
              snprintf(tbuf, arraysize(tbuf), "%04x", operand);
@@ -404,6 +416,7 @@
              snprintf(tbuf, arraysize(tbuf), "%d", operand);
              break;
            case 'C':
+             operand = RegStorage::RegNum(operand);
              DCHECK_LT(operand, static_cast<int>(
                  sizeof(core_reg_names)/sizeof(core_reg_names[0])));
              snprintf(tbuf, arraysize(tbuf), "%s", core_reg_names[operand]);
@@ -539,48 +552,46 @@
 
 RegStorage ArmMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg))
-    return AllocTempFloat();
+    return AllocTempSingle();
   return AllocTemp();
 }
 
 void ArmMir2Lir::CompilerInitializeRegAlloc() {
-  int num_regs = sizeof(core_regs)/sizeof(*core_regs);
-  int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
-  int num_temps = sizeof(core_temps)/sizeof(*core_temps);
-  int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
-  int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
-  reg_pool_ = static_cast<RegisterPool*>(arena_->Alloc(sizeof(*reg_pool_),
-                                                       kArenaAllocRegAlloc));
-  reg_pool_->num_core_regs = num_regs;
-  reg_pool_->core_regs = reinterpret_cast<RegisterInfo*>
-      (arena_->Alloc(num_regs * sizeof(*reg_pool_->core_regs), kArenaAllocRegAlloc));
-  reg_pool_->num_fp_regs = num_fp_regs;
-  reg_pool_->FPRegs = static_cast<RegisterInfo*>
-      (arena_->Alloc(num_fp_regs * sizeof(*reg_pool_->FPRegs), kArenaAllocRegAlloc));
-  CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
-  CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
+  reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs, sp_regs, dp_regs, reserved_regs,
+                                        core_temps, sp_temps, dp_temps);
 
-  // Keep special registers from being allocated
-  // Don't reserve the r4 if we are doing implicit suspend checks.
+  // Target-specific adjustments.
+
+  // Alias single precision floats to appropriate half of overlapping double.
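+  // For example (illustrative): s5 gets d2 (5 >> 1) as its master and a storage mask of 0x2
+  // (the high half of the double), while s4 keeps the default mask of 0x1 (the low half).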
+  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
+  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+    int sp_reg_num = info->GetReg().GetRegNum();
+    int dp_reg_num = sp_reg_num >> 1;
+    RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
+    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
+    // Double precision register's master storage should refer to itself.
+    DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
+    // Redirect the single precision register's master to the overlapping double.
+    info->SetMaster(dp_reg_info);
+    // Singles should show a single 32-bit mask bit, at first referring to the low half.
+    DCHECK_EQ(info->StorageMask(), 0x1U);
+    if (sp_reg_num & 1) {
+      // For odd singles, change to use the high word of the backing double.
+      info->SetStorageMask(0x2);
+    }
+  }
+
   // TODO: re-enable this when we can safely save r4 over the suspension code path.
   bool no_suspend = NO_SUSPEND;  // || !Runtime::Current()->ExplicitSuspendChecks();
-  for (int i = 0; i < num_reserved; i++) {
-    if (no_suspend && (ReservedRegs[i] == rARM_SUSPEND)) {
-      // Don't reserve the suspend register.
-      continue;
-    }
-    MarkInUse(ReservedRegs[i]);
-  }
-  // Mark temp regs - all others not in use can be used for promotion
-  for (int i = 0; i < num_temps; i++) {
-    MarkTemp(core_temps[i]);
-  }
-  for (int i = 0; i < num_fp_temps; i++) {
-    MarkTemp(fp_temps[i]);
+  if (no_suspend) {
+    GetRegInfo(rs_rARM_SUSPEND)->MarkFree();
   }
 
-  // Start allocation at r2 in an attempt to avoid clobbering return values
-  reg_pool_->next_core_reg = r2;
+  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
+  // TODO: adjust when we roll to hard float calling convention.
+  reg_pool_->next_core_reg_ = 2;
+  reg_pool_->next_sp_reg_ = 0;
+  reg_pool_->next_dp_reg_ = 0;
 }
 
 void ArmMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) {
@@ -602,7 +613,7 @@
  */
 
 void ArmMir2Lir::AdjustSpillMask() {
-  core_spill_mask_ |= (1 << rARM_LR);
+  core_spill_mask_ |= (1 << rs_rARM_LR.GetRegNum());
   num_core_spills_++;
 }
 
@@ -612,123 +623,99 @@
  * include any holes in the mask.  Associate holes with
  * Dalvik register INVALID_VREG (0xFFFFU).
  */
-void ArmMir2Lir::MarkPreservedSingle(int v_reg, int reg) {
-  DCHECK_GE(reg, ARM_FP_REG_MASK + ARM_FP_CALLEE_SAVE_BASE);
-  reg = (reg & ARM_FP_REG_MASK) - ARM_FP_CALLEE_SAVE_BASE;
+void ArmMir2Lir::MarkPreservedSingle(int v_reg, RegStorage reg) {
+  DCHECK_GE(reg.GetRegNum(), ARM_FP_CALLEE_SAVE_BASE);
+  int adjusted_reg_num = reg.GetRegNum() - ARM_FP_CALLEE_SAVE_BASE;
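+  // E.g. (illustrative, assuming s16 is the first callee-save single): preserving a Dalvik
+  // reg in s18 writes slot 2 of fp_vmap_table_, and the resulting spill mask covers s16..s18.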
   // Ensure fp_vmap_table is large enough
   int table_size = fp_vmap_table_.size();
-  for (int i = table_size; i < (reg + 1); i++) {
+  for (int i = table_size; i < (adjusted_reg_num + 1); i++) {
     fp_vmap_table_.push_back(INVALID_VREG);
   }
   // Add the current mapping
-  fp_vmap_table_[reg] = v_reg;
+  fp_vmap_table_[adjusted_reg_num] = v_reg;
   // Size of fp_vmap_table is high-water mark, use to set mask
   num_fp_spills_ = fp_vmap_table_.size();
   fp_spill_mask_ = ((1 << num_fp_spills_) - 1) << ARM_FP_CALLEE_SAVE_BASE;
 }
 
-void ArmMir2Lir::FlushRegWide(RegStorage reg) {
-  RegisterInfo* info1 = GetRegInfo(reg.GetLowReg());
-  RegisterInfo* info2 = GetRegInfo(reg.GetHighReg());
-  DCHECK(info1 && info2 && info1->pair && info2->pair &&
-       (info1->partner == info2->reg) &&
-       (info2->partner == info1->reg));
-  if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
-    if (!(info1->is_temp && info2->is_temp)) {
-      /* Should not happen.  If it does, there's a problem in eval_loc */
-      LOG(FATAL) << "Long half-temp, half-promoted";
-    }
-
-    info1->dirty = false;
-    info2->dirty = false;
-    if (mir_graph_->SRegToVReg(info2->s_reg) <
-      mir_graph_->SRegToVReg(info1->s_reg))
-      info1 = info2;
-    int v_reg = mir_graph_->SRegToVReg(info1->s_reg);
-    StoreBaseDispWide(rs_rARM_SP, VRegOffset(v_reg),
-                      RegStorage(RegStorage::k64BitPair, info1->reg, info1->partner));
-  }
-}
-
-void ArmMir2Lir::FlushReg(RegStorage reg) {
-  DCHECK(!reg.IsPair());
-  RegisterInfo* info = GetRegInfo(reg.GetReg());
-  if (info->live && info->dirty) {
-    info->dirty = false;
-    int v_reg = mir_graph_->SRegToVReg(info->s_reg);
-    StoreBaseDisp(rs_rARM_SP, VRegOffset(v_reg), reg, k32);
-  }
-}
-
-/* Give access to the target-dependent FP register encoding to common code */
-bool ArmMir2Lir::IsFpReg(int reg) {
-  return ARM_FPREG(reg);
-}
-
-bool ArmMir2Lir::IsFpReg(RegStorage reg) {
-  return IsFpReg(reg.IsPair() ? reg.GetLowReg() : reg.GetReg());
+void ArmMir2Lir::MarkPreservedDouble(int v_reg, RegStorage reg) {
+  // TEMP: perform as 2 singles.
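+  // E.g. (illustrative): a pair promoted to d8 is recorded as v_reg -> s16 and
+  // v_reg + 1 -> s17 in fp_vmap_table_.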
+  int reg_num = reg.GetRegNum() << 1;
+  RegStorage lo = RegStorage::Solo32(RegStorage::kFloatingPoint | reg_num);
+  RegStorage hi = RegStorage::Solo32(RegStorage::kFloatingPoint | reg_num | 1);
+  MarkPreservedSingle(v_reg, lo);
+  MarkPreservedSingle(v_reg + 1, hi);
 }
 
 /* Clobber all regs that might be used by an external C call */
 void ArmMir2Lir::ClobberCallerSave() {
-  Clobber(r0);
-  Clobber(r1);
-  Clobber(r2);
-  Clobber(r3);
-  Clobber(r12);
-  Clobber(r14lr);
-  Clobber(fr0);
-  Clobber(fr1);
-  Clobber(fr2);
-  Clobber(fr3);
-  Clobber(fr4);
-  Clobber(fr5);
-  Clobber(fr6);
-  Clobber(fr7);
-  Clobber(fr8);
-  Clobber(fr9);
-  Clobber(fr10);
-  Clobber(fr11);
-  Clobber(fr12);
-  Clobber(fr13);
-  Clobber(fr14);
-  Clobber(fr15);
+  // TODO: rework this - it's gotten even more ugly.
+  Clobber(rs_r0);
+  Clobber(rs_r1);
+  Clobber(rs_r2);
+  Clobber(rs_r3);
+  Clobber(rs_r12);
+  Clobber(rs_r14lr);
+  Clobber(rs_fr0);
+  Clobber(rs_fr1);
+  Clobber(rs_fr2);
+  Clobber(rs_fr3);
+  Clobber(rs_fr4);
+  Clobber(rs_fr5);
+  Clobber(rs_fr6);
+  Clobber(rs_fr7);
+  Clobber(rs_fr8);
+  Clobber(rs_fr9);
+  Clobber(rs_fr10);
+  Clobber(rs_fr11);
+  Clobber(rs_fr12);
+  Clobber(rs_fr13);
+  Clobber(rs_fr14);
+  Clobber(rs_fr15);
+  Clobber(rs_dr0);
+  Clobber(rs_dr1);
+  Clobber(rs_dr2);
+  Clobber(rs_dr3);
+  Clobber(rs_dr4);
+  Clobber(rs_dr5);
+  Clobber(rs_dr6);
+  Clobber(rs_dr7);
 }
 
 RegLocation ArmMir2Lir::GetReturnWideAlt() {
   RegLocation res = LocCReturnWide();
-  res.reg.SetReg(r2);
-  res.reg.SetHighReg(r3);
-  Clobber(r2);
-  Clobber(r3);
-  MarkInUse(r2);
-  MarkInUse(r3);
-  MarkPair(res.reg.GetLowReg(), res.reg.GetHighReg());
+  res.reg.SetLowReg(rs_r2.GetReg());
+  res.reg.SetHighReg(rs_r3.GetReg());
+  Clobber(rs_r2);
+  Clobber(rs_r3);
+  MarkInUse(rs_r2);
+  MarkInUse(rs_r3);
+  MarkWide(res.reg);
   return res;
 }
 
 RegLocation ArmMir2Lir::GetReturnAlt() {
   RegLocation res = LocCReturn();
-  res.reg.SetReg(r1);
-  Clobber(r1);
-  MarkInUse(r1);
+  res.reg.SetReg(rs_r1.GetReg());
+  Clobber(rs_r1);
+  MarkInUse(rs_r1);
   return res;
 }
 
 /* To be used when explicitly managing register use */
 void ArmMir2Lir::LockCallTemps() {
-  LockTemp(r0);
-  LockTemp(r1);
-  LockTemp(r2);
-  LockTemp(r3);
+  LockTemp(rs_r0);
+  LockTemp(rs_r1);
+  LockTemp(rs_r2);
+  LockTemp(rs_r3);
 }
 
 /* To be used when explicitly managing register use */
 void ArmMir2Lir::FreeCallTemps() {
-  FreeTemp(r0);
-  FreeTemp(r1);
-  FreeTemp(r2);
-  FreeTemp(r3);
+  FreeTemp(rs_r0);
+  FreeTemp(rs_r1);
+  FreeTemp(rs_r2);
+  FreeTemp(rs_r3);
 }
 
 RegStorage ArmMir2Lir::LoadHelper(ThreadOffset<4> offset) {
@@ -758,4 +745,59 @@
   return ArmMir2Lir::EncodingMap[opcode].fmt;
 }
 
+/*
+ * Somewhat messy code here.  We want to allocate a pair of contiguous
+ * physical single-precision floating point registers starting with
+ * an even numbered reg.  It is possible that the paired s_reg (s_reg+1)
+ * has already been allocated - try to fit if possible.  Fail to
+ * allocate if we can't place s_reg in an even-numbered sX and
+ * (s_reg+1) in sX+1.
+ */
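+// For example (illustrative): if s_reg+1 was already promoted to s7, the only usable pair is
+// s6/s7, so the allocation succeeds only if s6 is neither in use nor a temp.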
+// TODO: needs rewrite to support non-backed 64-bit float regs.
+RegStorage ArmMir2Lir::AllocPreservedDouble(int s_reg) {
+  RegStorage res;
+  int v_reg = mir_graph_->SRegToVReg(s_reg);
+  int p_map_idx = SRegToPMap(s_reg);
+  if (promotion_map_[p_map_idx+1].fp_location == kLocPhysReg) {
+    // Upper reg is already allocated.  Can we fit?
+    int high_reg = promotion_map_[p_map_idx+1].FpReg;
+    if ((high_reg & 1) == 0) {
+      // High reg is even - fail.
+      return res;  // Invalid.
+    }
+    // Is the low reg of the pair free?
+    // FIXME: rework.
+    RegisterInfo* p = GetRegInfo(RegStorage::FloatSolo32(high_reg - 1));
+    if (p->InUse() || p->IsTemp()) {
+      // Already allocated or not preserved - fail.
+      return res;  // Invalid.
+    }
+    // OK - good to go.
+    res = RegStorage::FloatSolo64(p->GetReg().GetRegNum() >> 1);
+    p->MarkInUse();
+    MarkPreservedSingle(v_reg, p->GetReg());
+  } else {
+    /*
+     * TODO: until runtime support is in, make sure we avoid promoting the same vreg to
+     * different underlying physical registers.
+     */
+    GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->dp_regs_);
+    for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+      if (!info->IsTemp() && !info->InUse()) {
+        res = info->GetReg();
+        info->MarkInUse();
+        MarkPreservedDouble(v_reg, info->GetReg());
+        break;
+      }
+    }
+  }
+  if (res.Valid()) {
+    promotion_map_[p_map_idx].fp_location = kLocPhysReg;
+    promotion_map_[p_map_idx].FpReg = res.DoubleToLowSingle().GetReg();
+    promotion_map_[p_map_idx+1].fp_location = kLocPhysReg;
+    promotion_map_[p_map_idx+1].FpReg = res.DoubleToHighSingle().GetReg();
+  }
+  return res;
+}
+
 }  // namespace art
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 2e64f74..08acef7 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -69,7 +69,7 @@
 }
 
 LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) {
-  DCHECK(ARM_SINGLEREG(r_dest));
+  DCHECK(RegStorage::IsSingle(r_dest));
   if (value == 0) {
     // TODO: we need better info about the target CPU.  a vector exclusive or
     //       would probably be better here if we could rely on its existence.
@@ -88,7 +88,7 @@
     data_target = AddWordData(&literal_list_, value);
   }
   LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kThumb2Vldrs,
-                          r_dest, r15pc, 0, 0, 0, data_target);
+                          r_dest, rs_r15pc.GetReg(), 0, 0, 0, data_target);
   SetMemRefType(load_pc_rel, true, kLiteral);
   AppendLIR(load_pc_rel);
   return load_pc_rel;
@@ -173,12 +173,12 @@
   LIR* res;
   int mod_imm;
 
-  if (ARM_FPREG(r_dest.GetReg())) {
+  if (r_dest.IsFloat()) {
     return LoadFPConstantValue(r_dest.GetReg(), value);
   }
 
   /* See if the value can be constructed cheaply */
-  if (ARM_LOWREG(r_dest.GetReg()) && (value >= 0) && (value <= 255)) {
+  if (r_dest.Low8() && (value >= 0) && (value <= 255)) {
     return NewLIR2(kThumbMovImm, r_dest.GetReg(), value);
   }
   /* Check Modified immediate special cases */
@@ -204,7 +204,7 @@
 }
 
 LIR* ArmMir2Lir::OpUnconditionalBranch(LIR* target) {
-  LIR* res = NewLIR1(kThumbBUncond, 0 /* offset to be patched  during assembly*/);
+  LIR* res = NewLIR1(kThumbBUncond, 0 /* offset to be patched  during assembly */);
   res->target = target;
   return res;
 }
@@ -237,7 +237,7 @@
 LIR* ArmMir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
                                int shift) {
   bool thumb_form =
-      ((shift == 0) && ARM_LOWREG(r_dest_src1.GetReg()) && ARM_LOWREG(r_src2.GetReg()));
+      ((shift == 0) && r_dest_src1.Low8() && r_src2.Low8());
   ArmOpcode opcode = kThumbBkpt;
   switch (op) {
     case kOpAdc:
@@ -256,9 +256,9 @@
     case kOpCmp:
       if (thumb_form)
         opcode = kThumbCmpRR;
-      else if ((shift == 0) && !ARM_LOWREG(r_dest_src1.GetReg()) && !ARM_LOWREG(r_src2.GetReg()))
+      else if ((shift == 0) && !r_dest_src1.Low8() && !r_src2.Low8())
         opcode = kThumbCmpHH;
-      else if ((shift == 0) && ARM_LOWREG(r_dest_src1.GetReg()))
+      else if ((shift == 0) && r_dest_src1.Low8())
         opcode = kThumbCmpLH;
       else if (shift == 0)
         opcode = kThumbCmpHL;
@@ -270,11 +270,11 @@
       break;
     case kOpMov:
       DCHECK_EQ(shift, 0);
-      if (ARM_LOWREG(r_dest_src1.GetReg()) && ARM_LOWREG(r_src2.GetReg()))
+      if (r_dest_src1.Low8() && r_src2.Low8())
         opcode = kThumbMovRR;
-      else if (!ARM_LOWREG(r_dest_src1.GetReg()) && !ARM_LOWREG(r_src2.GetReg()))
+      else if (!r_dest_src1.Low8() && !r_src2.Low8())
         opcode = kThumbMovRR_H2H;
-      else if (ARM_LOWREG(r_dest_src1.GetReg()))
+      else if (r_dest_src1.Low8())
         opcode = kThumbMovRR_H2L;
       else
         opcode = kThumbMovRR_L2H;
@@ -389,8 +389,7 @@
 LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
                                   RegStorage r_src2, int shift) {
   ArmOpcode opcode = kThumbBkpt;
-  bool thumb_form = (shift == 0) && ARM_LOWREG(r_dest.GetReg()) && ARM_LOWREG(r_src1.GetReg()) &&
-      ARM_LOWREG(r_src2.GetReg());
+  bool thumb_form = (shift == 0) && r_dest.Low8() && r_src1.Low8() && r_src2.Low8();
   switch (op) {
     case kOpAdd:
       opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
@@ -466,7 +465,7 @@
   int32_t abs_value = (neg) ? -value : value;
   ArmOpcode opcode = kThumbBkpt;
   ArmOpcode alt_opcode = kThumbBkpt;
-  bool all_low_regs = (ARM_LOWREG(r_dest.GetReg()) && ARM_LOWREG(r_src1.GetReg()));
+  bool all_low_regs = r_dest.Low8() && r_src1.Low8();
   int32_t mod_imm = ModifiedImmediate(value);
 
   switch (op) {
@@ -488,10 +487,9 @@
     case kOpRor:
       return NewLIR3(kThumb2RorRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
     case kOpAdd:
-      if (ARM_LOWREG(r_dest.GetReg()) && (r_src1 == rs_r13sp) &&
-        (value <= 1020) && ((value & 0x3) == 0)) {
+      if (r_dest.Low8() && (r_src1 == rs_r13sp) && (value <= 1020) && ((value & 0x3) == 0)) {
         return NewLIR3(kThumbAddSpRel, r_dest.GetReg(), r_src1.GetReg(), value >> 2);
-      } else if (ARM_LOWREG(r_dest.GetReg()) && (r_src1 == rs_r15pc) &&
+      } else if (r_dest.Low8() && (r_src1 == rs_r15pc) &&
           (value <= 1020) && ((value & 0x3) == 0)) {
         return NewLIR3(kThumbAddPcRel, r_dest.GetReg(), r_src1.GetReg(), value >> 2);
       }
@@ -601,7 +599,7 @@
 LIR* ArmMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
   bool neg = (value < 0);
   int32_t abs_value = (neg) ? -value : value;
-  bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1.GetReg()));
+  bool short_form = (((abs_value & 0xff) == abs_value) && r_dest_src1.Low8());
   ArmOpcode opcode = kThumbBkpt;
   switch (op) {
     case kOpAdd:
@@ -643,22 +641,24 @@
   LIR* res = NULL;
   int32_t val_lo = Low32Bits(value);
   int32_t val_hi = High32Bits(value);
-  int target_reg = S2d(r_dest.GetLowReg(), r_dest.GetHighReg());
-  if (ARM_FPREG(r_dest.GetLowReg())) {
+  if (r_dest.IsFloat()) {
+    DCHECK(!r_dest.IsPair());
     if ((val_lo == 0) && (val_hi == 0)) {
       // TODO: we need better info about the target CPU.  a vector exclusive or
       //       would probably be better here if we could rely on its existence.
       // Load an immediate +2.0 (which encodes to 0)
-      NewLIR2(kThumb2Vmovd_IMM8, target_reg, 0);
+      NewLIR2(kThumb2Vmovd_IMM8, r_dest.GetReg(), 0);
       // +0.0 = +2.0 - +2.0
-      res = NewLIR3(kThumb2Vsubd, target_reg, target_reg, target_reg);
+      res = NewLIR3(kThumb2Vsubd, r_dest.GetReg(), r_dest.GetReg(), r_dest.GetReg());
     } else {
       int encoded_imm = EncodeImmDouble(value);
       if (encoded_imm >= 0) {
-        res = NewLIR2(kThumb2Vmovd_IMM8, target_reg, encoded_imm);
+        res = NewLIR2(kThumb2Vmovd_IMM8, r_dest.GetReg(), encoded_imm);
       }
     }
   } else {
+    // NOTE: Arm32 assumption here.
+    DCHECK(r_dest.IsPair());
     if ((InexpensiveConstantInt(val_lo) && (InexpensiveConstantInt(val_hi)))) {
       res = LoadConstantNoClobber(r_dest.GetLow(), val_lo);
       LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
@@ -670,13 +670,13 @@
     if (data_target == NULL) {
       data_target = AddWideData(&literal_list_, val_lo, val_hi);
     }
-    if (ARM_FPREG(r_dest.GetLowReg())) {
+    if (r_dest.IsFloat()) {
       res = RawLIR(current_dalvik_offset_, kThumb2Vldrd,
-                   target_reg, r15pc, 0, 0, 0, data_target);
+                   r_dest.GetReg(), rs_r15pc.GetReg(), 0, 0, 0, data_target);
     } else {
       DCHECK(r_dest.IsPair());
       res = RawLIR(current_dalvik_offset_, kThumb2LdrdPcRel8,
-                   r_dest.GetLowReg(), r_dest.GetHighReg(), r15pc, 0, 0, data_target);
+                   r_dest.GetLowReg(), r_dest.GetHighReg(), rs_r15pc.GetReg(), 0, 0, data_target);
     }
     SetMemRefType(res, true, kLiteral);
     AppendLIR(res);
@@ -690,22 +690,20 @@
 
 LIR* ArmMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                  int scale, OpSize size) {
-  bool all_low_regs = ARM_LOWREG(r_base.GetReg()) && ARM_LOWREG(r_index.GetReg()) &&
-      ARM_LOWREG(r_dest.GetReg());
+  bool all_low_regs = r_base.Low8() && r_index.Low8() && r_dest.Low8();
   LIR* load;
   ArmOpcode opcode = kThumbBkpt;
   bool thumb_form = (all_low_regs && (scale == 0));
   RegStorage reg_ptr;
 
-  if (ARM_FPREG(r_dest.GetReg())) {
-    if (ARM_SINGLEREG(r_dest.GetReg())) {
+  if (r_dest.IsFloat()) {
+    if (r_dest.IsSingle()) {
       DCHECK((size == k32) || (size == kSingle) || (size == kReference));
       opcode = kThumb2Vldrs;
       size = kSingle;
     } else {
-      DCHECK(ARM_DOUBLEREG(r_dest.GetReg()));
+      DCHECK(r_dest.IsDouble());
       DCHECK((size == k64) || (size == kDouble));
-      DCHECK_EQ((r_dest.GetReg() & 0x1), 0);
       opcode = kThumb2Vldrd;
       size = kDouble;
     }
@@ -758,20 +756,19 @@
 
 LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                   int scale, OpSize size) {
-  bool all_low_regs = ARM_LOWREG(r_base.GetReg()) && ARM_LOWREG(r_index.GetReg()) &&
-      ARM_LOWREG(r_src.GetReg());
+  bool all_low_regs = r_base.Low8() && r_index.Low8() && r_src.Low8();
   LIR* store = NULL;
   ArmOpcode opcode = kThumbBkpt;
   bool thumb_form = (all_low_regs && (scale == 0));
   RegStorage reg_ptr;
 
-  if (ARM_FPREG(r_src.GetReg())) {
-    if (ARM_SINGLEREG(r_src.GetReg())) {
+  if (r_src.IsFloat()) {
+    if (r_src.IsSingle()) {
       DCHECK((size == k32) || (size == kSingle) || (size == kReference));
       opcode = kThumb2Vstrs;
       size = kSingle;
     } else {
-      DCHECK(ARM_DOUBLEREG(r_src.GetReg()));
+      DCHECK(r_src.IsDouble());
       DCHECK((size == k64) || (size == kDouble));
       DCHECK_EQ((r_src.GetReg() & 0x1), 0);
       opcode = kThumb2Vstrd;
@@ -833,21 +830,16 @@
   ArmOpcode opcode = kThumbBkpt;
   bool short_form = false;
   bool thumb2Form = (displacement < 4092 && displacement >= 0);
-  bool all_low = r_dest.Is32Bit() && ARM_LOWREG(r_base.GetReg() && ARM_LOWREG(r_dest.GetReg()));
+  bool all_low = r_dest.Is32Bit() && r_base.Low8() && r_dest.Low8();
   int encoded_disp = displacement;
   bool already_generated = false;
-  int dest_low_reg = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
   bool null_pointer_safepoint = false;
   switch (size) {
     case kDouble:
     // Intentional fall-through.
     case k64:
-      if (ARM_FPREG(dest_low_reg)) {
-        // Note: following change to avoid using pairs for doubles, replace conversion w/ DCHECK.
-        if (r_dest.IsPair()) {
-          DCHECK(ARM_FPREG(r_dest.GetHighReg()));
-          r_dest = RegStorage::Solo64(S2d(r_dest.GetLowReg(), r_dest.GetHighReg()));
-        }
+      if (r_dest.IsFloat()) {
+        DCHECK(!r_dest.IsPair());
         opcode = kThumb2Vldrd;
         if (displacement <= 1020) {
           short_form = true;
@@ -870,7 +862,7 @@
     case k32:
     // Intentional fall-through.
     case kReference:
-      if (ARM_FPREG(r_dest.GetReg())) {
+      if (r_dest.IsFloat()) {
         opcode = kThumb2Vldrs;
         if (displacement <= 1020) {
           short_form = true;
@@ -878,13 +870,13 @@
         }
         break;
       }
-      if (ARM_LOWREG(r_dest.GetReg()) && (r_base.GetReg() == r15pc) &&
-          (displacement <= 1020) && (displacement >= 0)) {
+      if (r_dest.Low8() && (r_base == rs_rARM_PC) && (displacement <= 1020) &&
+          (displacement >= 0)) {
         short_form = true;
         encoded_disp >>= 2;
         opcode = kThumbLdrPcRel;
-      } else if (ARM_LOWREG(r_dest.GetReg()) && (r_base.GetReg() == r13sp) &&
-          (displacement <= 1020) && (displacement >= 0)) {
+      } else if (r_dest.Low8() && (r_base == rs_rARM_SP) && (displacement <= 1020) &&
+                 (displacement >= 0)) {
         short_form = true;
         encoded_disp >>= 2;
         opcode = kThumbLdrSpRel;
@@ -940,7 +932,7 @@
     } else {
       RegStorage reg_offset = AllocTemp();
       LoadConstant(reg_offset, encoded_disp);
-      if (ARM_FPREG(dest_low_reg)) {
+      if (r_dest.IsFloat()) {
         // No index ops - must use a long sequence.  Turn the offset into a direct pointer.
         OpRegReg(kOpAdd, reg_offset, r_base);
         load = LoadBaseDispBody(reg_offset, 0, r_dest, size, s_reg);
@@ -985,15 +977,14 @@
   ArmOpcode opcode = kThumbBkpt;
   bool short_form = false;
   bool thumb2Form = (displacement < 4092 && displacement >= 0);
-  bool all_low = r_src.Is32Bit() && (ARM_LOWREG(r_base.GetReg()) && ARM_LOWREG(r_src.GetReg()));
+  bool all_low = r_src.Is32Bit() && r_base.Low8() && r_src.Low8();
   int encoded_disp = displacement;
   bool already_generated = false;
-  int src_low_reg = r_src.IsPair() ? r_src.GetLowReg() : r_src.GetReg();
   bool null_pointer_safepoint = false;
   switch (size) {
     case k64:
     case kDouble:
-      if (!ARM_FPREG(src_low_reg)) {
+      if (!r_src.IsFloat()) {
         if (displacement <= 1020) {
           store = NewLIR4(kThumb2StrdI8, r_src.GetLowReg(), r_src.GetHighReg(), r_base.GetReg(),
                           displacement >> 2);
@@ -1004,11 +995,7 @@
         }
         already_generated = true;
       } else {
-        // Note: following change to avoid using pairs for doubles, replace conversion w/ DCHECK.
-        if (r_src.IsPair()) {
-          DCHECK(ARM_FPREG(r_src.GetHighReg()));
-          r_src = RegStorage::Solo64(S2d(r_src.GetLowReg(), r_src.GetHighReg()));
-        }
+        DCHECK(!r_src.IsPair());
         opcode = kThumb2Vstrd;
         if (displacement <= 1020) {
           short_form = true;
@@ -1017,10 +1004,12 @@
       }
       break;
     case kSingle:
+    // Intentional fall-through.
     case k32:
+    // Intentional fall-through.
     case kReference:
-      if (ARM_FPREG(r_src.GetReg())) {
-        DCHECK(ARM_SINGLEREG(r_src.GetReg()));
+      if (r_src.IsFloat()) {
+        DCHECK(r_src.IsSingle());
         opcode = kThumb2Vstrs;
         if (displacement <= 1020) {
           short_form = true;
@@ -1028,8 +1017,7 @@
         }
         break;
       }
-      if (ARM_LOWREG(r_src.GetReg()) && (r_base == rs_r13sp) &&
-          (displacement <= 1020) && (displacement >= 0)) {
+      if (r_src.Low8() && (r_base == rs_r13sp) && (displacement <= 1020) && (displacement >= 0)) {
         short_form = true;
         encoded_disp >>= 2;
         opcode = kThumbStrSpRel;
@@ -1074,7 +1062,7 @@
     } else {
       RegStorage r_scratch = AllocTemp();
       LoadConstant(r_scratch, encoded_disp);
-      if (ARM_FPREG(src_low_reg)) {
+      if (r_src.IsFloat()) {
         // No index ops - must use a long sequence.  Turn the offset into a direct pointer.
         OpRegReg(kOpAdd, r_scratch, r_base);
         store = StoreBaseDispBody(r_scratch, 0, r_src, size);
@@ -1113,14 +1101,14 @@
 
 LIR* ArmMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
   int opcode;
-  DCHECK_EQ(ARM_DOUBLEREG(r_dest.GetReg()), ARM_DOUBLEREG(r_src.GetReg()));
-  if (ARM_DOUBLEREG(r_dest.GetReg())) {
+  DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
+  if (r_dest.IsDouble()) {
     opcode = kThumb2Vmovd;
   } else {
-    if (ARM_SINGLEREG(r_dest.GetReg())) {
-      opcode = ARM_SINGLEREG(r_src.GetReg()) ? kThumb2Vmovs : kThumb2Fmsr;
+    if (r_dest.IsSingle()) {
+      opcode = r_src.IsSingle() ? kThumb2Vmovs : kThumb2Fmsr;
     } else {
-      DCHECK(ARM_SINGLEREG(r_src.GetReg()));
+      DCHECK(r_src.IsSingle());
       opcode = kThumb2Fmrs;
     }
   }
@@ -1142,8 +1130,7 @@
 }
 
 LIR* ArmMir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                      int displacement, RegStorage r_src, RegStorage r_src_hi,
-                                      OpSize size, int s_reg) {
+                                      int displacement, RegStorage r_src, OpSize size, int s_reg) {
   LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm";
   return NULL;
 }
@@ -1154,8 +1141,7 @@
 }
 
 LIR* ArmMir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                     int displacement, RegStorage r_dest, RegStorage r_dest_hi,
-                                     OpSize size, int s_reg) {
+                                     int displacement, RegStorage r_dest, OpSize size, int s_reg) {
   LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm";
   return NULL;
 }
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 0596d4f..3961954 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -254,7 +254,7 @@
     PromotionMap v_reg_map = promotion_map_[i];
     std::string buf;
     if (v_reg_map.fp_location == kLocPhysReg) {
-      StringAppendF(&buf, " : s%d", v_reg_map.FpReg & FpRegMask());
+      StringAppendF(&buf, " : s%d", RegStorage::RegNum(v_reg_map.FpReg));
     }
 
     std::string buf3;
@@ -942,7 +942,7 @@
       switch_tables_(arena, 4, kGrowableArraySwitchTables),
       fill_array_data_(arena, 4, kGrowableArrayFillArrayData),
       tempreg_info_(arena, 20, kGrowableArrayMisc),
-      reginfo_map_(arena, 64, kGrowableArrayMisc),
+      reginfo_map_(arena, RegStorage::kMaxRegs, kGrowableArrayMisc),
       pointer_storage_(arena, 128, kGrowableArrayMisc),
       data_offset_(0),
       total_size_(0),
@@ -1185,8 +1185,19 @@
 
 RegLocation Mir2Lir::NarrowRegLoc(RegLocation loc) {
   loc.wide = false;
-  if (loc.reg.IsPair()) {
-    loc.reg = loc.reg.GetLow();
+  if (loc.location == kLocPhysReg) {
+    if (loc.reg.IsPair()) {
+      loc.reg = loc.reg.GetLow();
+    } else {
+      // FIXME: temp workaround.
+      // Issue: how do we narrow to a 32-bit value held in a 64-bit container?
+      // Narrowing the RegStorage container here is probably the wrong thing to do - that
+      // should be a target decision.  At the RegLocation level we're only modifying the view
+      // of the Dalvik value, which is orthogonal to the storage container size.
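+      // Illustrative: a wide value held in d0 narrows here to a view in s0.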
+      DCHECK(loc.reg.IsDouble());
+      loc.reg = loc.reg.DoubleToLowSingle();
+    }
   }
   return loc;
 }
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 3cc2ba0..2cd17cc 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -701,7 +701,7 @@
   cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
   if (field_info.FastGet() && !SLOW_FIELD_PATH) {
     RegLocation rl_result;
-    RegisterClass reg_class = oat_reg_class_by_size(size);
+    RegisterClass reg_class = RegClassBySize(size);
     DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
     rl_obj = LoadValue(rl_obj, kCoreReg);
     if (is_long_or_double) {
@@ -774,7 +774,7 @@
   const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
   cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
   if (field_info.FastPut() && !SLOW_FIELD_PATH) {
-    RegisterClass reg_class = oat_reg_class_by_size(size);
+    RegisterClass reg_class = RegClassBySize(size);
     DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
     rl_obj = LoadValue(rl_obj, kCoreReg);
     if (is_long_or_double) {
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 93a23a6..4ecfeb9 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -358,7 +358,7 @@
   rl_src.location = kLocPhysReg;
   rl_src.reg = TargetReg(kArg0);
   rl_src.home = false;
-  MarkLive(rl_src.reg, rl_src.s_reg_low);
+  MarkLive(rl_src);
   if (rl_method.wide) {
     StoreValueWide(rl_method, rl_src);
   } else {
@@ -753,7 +753,8 @@
       // Wide spans, we need the 2nd half of uses[2].
       rl_arg = UpdateLocWide(rl_use2);
       if (rl_arg.location == kLocPhysReg) {
-        reg = rl_arg.reg.GetHigh();
+        // NOTE: not correct for 64-bit core regs, but this needs rewriting for hard-float.
+        reg = rl_arg.reg.IsPair() ? rl_arg.reg.GetHigh() : rl_arg.reg.DoubleToHighSingle();
       } else {
         // kArg2 & rArg3 can safely be used here
         reg = TargetReg(kArg3);
@@ -768,34 +769,28 @@
     }
     // Loop through the rest
     while (next_use < info->num_arg_words) {
-      RegStorage low_reg;
-      RegStorage high_reg;
+      RegStorage arg_reg;
       rl_arg = info->args[next_use];
       rl_arg = UpdateRawLoc(rl_arg);
       if (rl_arg.location == kLocPhysReg) {
-        if (rl_arg.wide) {
-          low_reg = rl_arg.reg.GetLow();
-          high_reg = rl_arg.reg.GetHigh();
-        } else {
-          low_reg = rl_arg.reg;
-        }
+        arg_reg = rl_arg.reg;
       } else {
-        low_reg = TargetReg(kArg2);
+        arg_reg = rl_arg.wide ? RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3)) :
+            TargetReg(kArg2);
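+        // Value is not yet in a register: stage it in kArg2 (paired with kArg3 when wide)
+        // before the store to the outs area below.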
         if (rl_arg.wide) {
-          high_reg = TargetReg(kArg3);
-          LoadValueDirectWideFixed(rl_arg, RegStorage::MakeRegPair(low_reg, high_reg));
+          LoadValueDirectWideFixed(rl_arg, arg_reg);
         } else {
-          LoadValueDirectFixed(rl_arg, low_reg);
+          LoadValueDirectFixed(rl_arg, arg_reg);
         }
         call_state = next_call_insn(cu_, info, call_state, target_method,
                                     vtable_idx, direct_code, direct_method, type);
       }
       int outs_offset = (next_use + 1) * 4;
       if (rl_arg.wide) {
-        StoreBaseDispWide(TargetReg(kSp), outs_offset, RegStorage::MakeRegPair(low_reg, high_reg));
+        StoreBaseDispWide(TargetReg(kSp), outs_offset, arg_reg);
         next_use += 2;
       } else {
-        Store32Disp(TargetReg(kSp), outs_offset, low_reg);
+        Store32Disp(TargetReg(kSp), outs_offset, arg_reg);
         next_use++;
       }
       call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
@@ -926,7 +921,7 @@
         // Allocate a free xmm temp. Since we are working through the calling sequence,
         // we expect to have an xmm temporary available.
         RegStorage temp = AllocTempDouble();
-        CHECK_GT(temp.GetLowReg(), 0);
+        DCHECK(temp.Valid());
 
         LIR* ld1 = nullptr;
         LIR* ld2 = nullptr;
@@ -989,9 +984,7 @@
         }
 
         // Free the temporary used for the data movement.
-        // CLEANUP: temp is currently a bogus pair, elmiminate extra free when updated.
-        FreeTemp(temp.GetLow());
-        FreeTemp(temp.GetHigh());
+        FreeTemp(temp);
       } else {
         // Moving 32-bits via general purpose register.
         bytes_to_move = sizeof(uint32_t);
@@ -1136,8 +1129,8 @@
   if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
     LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
   } else {
-    LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg,
-                        RegStorage::InvalidReg(), kUnsignedHalf, INVALID_SREG);
+    LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg, kUnsignedHalf,
+                        INVALID_SREG);
   }
   FreeTemp(reg_off);
   FreeTemp(reg_ptr);
@@ -1409,7 +1402,7 @@
     Load32Disp(TargetReg(kSelf), offset.Int32Value(), rl_result.reg);
   } else {
     CHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
-    reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg.GetReg(), offset);
+    reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg, offset);
   }
   StoreValue(rl_dest, rl_result);
   return true;
@@ -1432,13 +1425,12 @@
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   if (is_long) {
     if (cu_->instruction_set == kX86) {
-      LoadBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_result.reg.GetLow(),
-                          rl_result.reg.GetHigh(), k64, INVALID_SREG);
+      LoadBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_result.reg, k64, INVALID_SREG);
     } else {
       RegStorage rl_temp_offset = AllocTemp();
       OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
       LoadBaseDispWide(rl_temp_offset, 0, rl_result.reg, INVALID_SREG);
-      FreeTemp(rl_temp_offset.GetReg());
+      FreeTemp(rl_temp_offset);
     }
   } else {
     LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k32);
@@ -1480,13 +1472,12 @@
   if (is_long) {
     rl_value = LoadValueWide(rl_src_value, kCoreReg);
     if (cu_->instruction_set == kX86) {
-      StoreBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_value.reg.GetLow(),
-                           rl_value.reg.GetHigh(), k64, INVALID_SREG);
+      StoreBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_value.reg, k64, INVALID_SREG);
     } else {
       RegStorage rl_temp_offset = AllocTemp();
       OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
       StoreBaseDispWide(rl_temp_offset, 0, rl_value.reg);
-      FreeTemp(rl_temp_offset.GetReg());
+      FreeTemp(rl_temp_offset);
     }
   } else {
     rl_value = LoadValue(rl_src_value, kCoreReg);
@@ -1494,7 +1485,7 @@
   }
 
   // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
-  FreeTemp(rl_offset.reg.GetReg());
+  FreeTemp(rl_offset.reg);
 
   if (is_volatile) {
     // A load might follow the volatile store so insert a StoreLoad barrier.
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 9808f7f..e6911cd 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -143,7 +143,7 @@
   if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) {
     LoadValueDirect(rl_src, rl_src.reg);
     rl_src.location = kLocPhysReg;
-    MarkLive(rl_src.reg, rl_src.s_reg_low);
+    MarkLive(rl_src);
   }
   return rl_src;
 }
@@ -184,12 +184,12 @@
   }
 
   // Dest is now live and dirty (until/if we flush it to home location)
-  MarkLive(rl_dest.reg, rl_dest.s_reg_low);
+  MarkLive(rl_dest);
   MarkDirty(rl_dest);
 
 
   ResetDefLoc(rl_dest);
-  if (IsDirty(rl_dest.reg) && oat_live_out(rl_dest.s_reg_low)) {
+  if (IsDirty(rl_dest.reg) && LiveOut(rl_dest.s_reg_low)) {
     def_start = last_lir_insn_;
     Store32Disp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
     MarkClean(rl_dest);
@@ -207,13 +207,7 @@
   if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) {
     LoadValueDirectWide(rl_src, rl_src.reg);
     rl_src.location = kLocPhysReg;
-    MarkLive(rl_src.reg.GetLow(), rl_src.s_reg_low);
-    if (rl_src.reg.GetLowReg() != rl_src.reg.GetHighReg()) {
-      MarkLive(rl_src.reg.GetHigh(), GetSRegHi(rl_src.s_reg_low));
-    } else {
-      // This must be an x86 vector register value.
-      DCHECK(IsFpReg(rl_src.reg) && (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64));
-    }
+    MarkLive(rl_src);
   }
   return rl_src;
 }
@@ -254,24 +248,13 @@
   }
 
   // Dest is now live and dirty (until/if we flush it to home location)
-  MarkLive(rl_dest.reg.GetLow(), rl_dest.s_reg_low);
-
-  // Does this wide value live in two registers (or one vector one)?
-  // FIXME: wide reg update.
-  if (rl_dest.reg.GetLowReg() != rl_dest.reg.GetHighReg()) {
-    MarkLive(rl_dest.reg.GetHigh(), GetSRegHi(rl_dest.s_reg_low));
-    MarkDirty(rl_dest);
-    MarkPair(rl_dest.reg.GetLowReg(), rl_dest.reg.GetHighReg());
-  } else {
-    // This must be an x86 vector register value,
-    DCHECK(IsFpReg(rl_dest.reg) && (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64));
-    MarkDirty(rl_dest);
-  }
-
+  MarkLive(rl_dest);
+  MarkWide(rl_dest.reg);
+  MarkDirty(rl_dest);
 
   ResetDefLocWide(rl_dest);
-  if (IsDirty(rl_dest.reg) && (oat_live_out(rl_dest.s_reg_low) ||
-      oat_live_out(GetSRegHi(rl_dest.s_reg_low)))) {
+  if (IsDirty(rl_dest.reg) && (LiveOut(rl_dest.s_reg_low) ||
+      LiveOut(GetSRegHi(rl_dest.s_reg_low)))) {
     def_start = last_lir_insn_;
     DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
               mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
@@ -295,13 +278,12 @@
   }
 
   // Dest is now live and dirty (until/if we flush it to home location)
-  MarkLive(rl_dest.reg, rl_dest.s_reg_low);
+  MarkLive(rl_dest);
   MarkDirty(rl_dest);
 
 
   ResetDefLoc(rl_dest);
-  if (IsDirty(rl_dest.reg) &&
-      oat_live_out(rl_dest.s_reg_low)) {
+  if (IsDirty(rl_dest.reg) && LiveOut(rl_dest.s_reg_low)) {
     LIR *def_start = last_lir_insn_;
     Store32Disp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
     MarkClean(rl_dest);
@@ -314,7 +296,6 @@
 }
 
 void Mir2Lir::StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src) {
-  DCHECK_EQ(IsFpReg(rl_src.reg.GetLowReg()), IsFpReg(rl_src.reg.GetHighReg()));
   DCHECK(rl_dest.wide);
   DCHECK(rl_src.wide);
   DCHECK_EQ(rl_src.location, kLocPhysReg);
@@ -325,28 +306,17 @@
     // Just re-assign the registers.  Dest gets Src's regs.
     rl_dest.location = kLocPhysReg;
     rl_dest.reg = rl_src.reg;
-    Clobber(rl_src.reg.GetLowReg());
-    Clobber(rl_src.reg.GetHighReg());
+    Clobber(rl_src.reg);
   }
 
   // Dest is now live and dirty (until/if we flush it to home location).
-  MarkLive(rl_dest.reg.GetLow(), rl_dest.s_reg_low);
-
-  // Does this wide value live in two registers (or one vector one)?
-  // FIXME: wide reg.
-  if (rl_dest.reg.GetLowReg() != rl_dest.reg.GetHighReg()) {
-    MarkLive(rl_dest.reg.GetHigh(), GetSRegHi(rl_dest.s_reg_low));
-    MarkDirty(rl_dest);
-    MarkPair(rl_dest.reg.GetLowReg(), rl_dest.reg.GetHighReg());
-  } else {
-    // This must be an x86 vector register value,
-    DCHECK(IsFpReg(rl_dest.reg) && (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64));
-    MarkDirty(rl_dest);
-  }
+  MarkLive(rl_dest);
+  MarkWide(rl_dest.reg);
+  MarkDirty(rl_dest);
 
   ResetDefLocWide(rl_dest);
-  if (IsDirty(rl_dest.reg) && (oat_live_out(rl_dest.s_reg_low) ||
-      oat_live_out(GetSRegHi(rl_dest.s_reg_low)))) {
+  if (IsDirty(rl_dest.reg) && (LiveOut(rl_dest.s_reg_low) ||
+      LiveOut(GetSRegHi(rl_dest.s_reg_low)))) {
     LIR *def_start = last_lir_insn_;
     DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
               mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
@@ -369,7 +339,7 @@
 RegLocation Mir2Lir::ForceTemp(RegLocation loc) {
   DCHECK(!loc.wide);
   DCHECK(loc.location == kLocPhysReg);
-  DCHECK(!IsFpReg(loc.reg));
+  DCHECK(!loc.reg.IsFloat());
   if (IsTemp(loc.reg)) {
     Clobber(loc.reg);
   } else {
@@ -383,21 +353,20 @@
   return loc;
 }
 
-// FIXME: wide regs.
+// FIXME: will need an update for 64-bit core regs.
 RegLocation Mir2Lir::ForceTempWide(RegLocation loc) {
   DCHECK(loc.wide);
   DCHECK(loc.location == kLocPhysReg);
-  DCHECK(!IsFpReg(loc.reg.GetLowReg()));
-  DCHECK(!IsFpReg(loc.reg.GetHighReg()));
-  if (IsTemp(loc.reg.GetLowReg())) {
-    Clobber(loc.reg.GetLowReg());
+  DCHECK(!loc.reg.IsFloat());
+  if (IsTemp(loc.reg.GetLow())) {
+    Clobber(loc.reg.GetLow());
   } else {
     RegStorage temp_low = AllocTemp();
     OpRegCopy(temp_low, loc.reg.GetLow());
     loc.reg.SetLowReg(temp_low.GetReg());
   }
-  if (IsTemp(loc.reg.GetHighReg())) {
-    Clobber(loc.reg.GetHighReg());
+  if (IsTemp(loc.reg.GetHigh())) {
+    Clobber(loc.reg.GetHigh());
   } else {
     RegStorage temp_high = AllocTemp();
     OpRegCopy(temp_high, loc.reg.GetHigh());
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index 4bdc9fa..4a918a1 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -163,7 +163,7 @@
           DCHECK(!(check_flags & IS_STORE));
           /* Same value && same register type */
           if (check_lir->flags.alias_info == this_lir->flags.alias_info &&
-              SameRegType(check_lir->operands[0], native_reg_id)) {
+              RegStorage::SameRegType(check_lir->operands[0], native_reg_id)) {
             /*
              * Different destination register - insert
              * a move
@@ -179,7 +179,7 @@
           /* Must alias */
           if (check_lir->flags.alias_info == this_lir->flags.alias_info) {
             /* Only optimize compatible registers */
-            bool reg_compatible = SameRegType(check_lir->operands[0], native_reg_id);
+            bool reg_compatible = RegStorage::SameRegType(check_lir->operands[0], native_reg_id);
             if ((is_this_lir_load && is_check_lir_load) ||
                 (!is_this_lir_load && is_check_lir_load)) {
               /* RAR or RAW */
diff --git a/compiler/dex/quick/mips/README.mips b/compiler/dex/quick/mips/README.mips
index 061c157..ff561fa 100644
--- a/compiler/dex/quick/mips/README.mips
+++ b/compiler/dex/quick/mips/README.mips
@@ -17,7 +17,7 @@
       code generation for switch tables, fill array data, 64-bit
       data handling and the register usage conventions.
 
-    o The memory model.  Verify that oatGenMemoryBarrier() generates the
+    o The memory model.  Verify that GenMemoryBarrier() generates the
       appropriate flavor of sync.
 
 Register promotion
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index a579254..baae319 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -672,16 +672,17 @@
           bits |= (value << encoder->field_loc[i].end);
           break;
         case kFmtDfp: {
-          DCHECK(MIPS_DOUBLEREG(operand));
+          // TODO: do we need to adjust now that we're using 64BitSolo?
+          DCHECK(RegStorage::IsDouble(operand)) << ", Operand = 0x" << std::hex << operand;
           DCHECK_EQ((operand & 0x1), 0U);
-          value = ((operand & MIPS_FP_REG_MASK) << encoder->field_loc[i].start) &
+          value = (RegStorage::RegNum(operand) << encoder->field_loc[i].start) &
               ((1 << (encoder->field_loc[i].end + 1)) - 1);
           bits |= value;
           break;
         }
         case kFmtSfp:
-          DCHECK(MIPS_SINGLEREG(operand));
-          value = ((operand & MIPS_FP_REG_MASK) << encoder->field_loc[i].start) &
+          DCHECK(RegStorage::IsSingle(operand)) << ", Operand = 0x" << std::hex << operand;
+          value = (RegStorage::RegNum(operand) << encoder->field_loc[i].start) &
               ((1 << (encoder->field_loc[i].end + 1)) - 1);
           bits |= value;
           break;
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index df13882..3af3715 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -295,10 +295,10 @@
    * expanding the frame or flushing.  This leaves the utility
    * code with a single temp: r12.  This should be enough.
    */
-  LockTemp(rMIPS_ARG0);
-  LockTemp(rMIPS_ARG1);
-  LockTemp(rMIPS_ARG2);
-  LockTemp(rMIPS_ARG3);
+  LockTemp(rs_rMIPS_ARG0);
+  LockTemp(rs_rMIPS_ARG1);
+  LockTemp(rs_rMIPS_ARG2);
+  LockTemp(rs_rMIPS_ARG3);
 
   /*
    * We can safely skip the stack overflow check if we're
@@ -351,10 +351,10 @@
 
   FlushIns(ArgLocs, rl_method);
 
-  FreeTemp(rMIPS_ARG0);
-  FreeTemp(rMIPS_ARG1);
-  FreeTemp(rMIPS_ARG2);
-  FreeTemp(rMIPS_ARG3);
+  FreeTemp(rs_rMIPS_ARG0);
+  FreeTemp(rs_rMIPS_ARG1);
+  FreeTemp(rs_rMIPS_ARG2);
+  FreeTemp(rs_rMIPS_ARG3);
 }
 
 void MipsMir2Lir::GenExitSequence() {
@@ -362,8 +362,8 @@
    * In the exit path, rMIPS_RET0/rMIPS_RET1 are live - make sure they aren't
    * allocated by the register utilities as temps.
    */
-  LockTemp(rMIPS_RET0);
-  LockTemp(rMIPS_RET1);
+  LockTemp(rs_rMIPS_RET0);
+  LockTemp(rs_rMIPS_RET1);
 
   NewLIR0(kPseudoMethodExit);
   UnSpillCoreRegs();
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 81d6782..7a8376e 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -39,7 +39,7 @@
     LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
                          OpSize size);
     LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                             RegStorage r_dest, RegStorage r_dest_hi, OpSize size, int s_reg);
+                             RegStorage r_dest, OpSize size, int s_reg);
     LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
     LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
     LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
@@ -47,16 +47,12 @@
     LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
                           OpSize size);
     LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                              RegStorage r_src, RegStorage r_src_hi, OpSize size, int s_reg);
+                              RegStorage r_src, OpSize size, int s_reg);
     void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
 
     // Required for target - register utilities.
-    bool IsFpReg(int reg);
-    bool IsFpReg(RegStorage reg);
-    bool SameRegType(int reg1, int reg2);
     RegStorage AllocTypedTemp(bool fp_hint, int reg_class);
     RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
-    int S2d(int low_reg, int high_reg);
     RegStorage TargetReg(SpecialTargetRegister reg);
     RegStorage GetArgMappingToPhysicalReg(int arg_num);
     RegLocation GetReturnAlt();
@@ -65,16 +61,14 @@
     RegLocation LocCReturnDouble();
     RegLocation LocCReturnFloat();
     RegLocation LocCReturnWide();
-    uint32_t FpRegMask();
-    uint64_t GetRegMaskCommon(int reg);
+    uint64_t GetRegMaskCommon(RegStorage reg);
     void AdjustSpillMask();
     void ClobberCallerSave();
-    void FlushReg(RegStorage reg);
-    void FlushRegWide(RegStorage reg);
     void FreeCallTemps();
     void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
     void LockCallTemps();
-    void MarkPreservedSingle(int v_reg, int reg);
+    void MarkPreservedSingle(int v_reg, RegStorage reg);
+    void MarkPreservedDouble(int v_reg, RegStorage reg);
     void CompilerInitializeRegAlloc();
 
     // Required for target - miscellaneous.
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index a479dc7..9fffb2f 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -111,15 +111,13 @@
   rl_result = EvalLoc(rl_dest, kFPReg, true);
   DCHECK(rl_dest.wide);
   DCHECK(rl_result.wide);
-  NewLIR3(op, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()), S2d(rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg()),
-          S2d(rl_src2.reg.GetLowReg(), rl_src2.reg.GetHighReg()));
+  NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
   StoreValueWide(rl_dest, rl_result);
 }
 
 void MipsMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src) {
   int op = kMipsNop;
-  int src_reg;
   RegLocation rl_result;
   switch (opcode) {
     case Instruction::INT_TO_FLOAT:
@@ -157,18 +155,14 @@
   }
   if (rl_src.wide) {
     rl_src = LoadValueWide(rl_src, kFPReg);
-    src_reg = S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg());
   } else {
     rl_src = LoadValue(rl_src, kFPReg);
-    src_reg = rl_src.reg.GetReg();
   }
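+  // Doubles now live in 64-bit solo registers, so one two-operand form below covers both the
+  // wide and narrow cases.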
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR2(op, rl_result.reg.GetReg(), rl_src.reg.GetReg());
   if (rl_dest.wide) {
-    rl_result = EvalLoc(rl_dest, kFPReg, true);
-    NewLIR2(op, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()), src_reg);
     StoreValueWide(rl_dest, rl_result);
   } else {
-    rl_result = EvalLoc(rl_dest, kFPReg, true);
-    NewLIR2(op, rl_result.reg.GetReg(), src_reg);
     StoreValue(rl_dest, rl_result);
   }
 }
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 7c0becd..1410e14 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -44,16 +44,16 @@
                              RegLocation rl_src2) {
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
   rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-  int t0 = AllocTemp().GetReg();
-  int t1 = AllocTemp().GetReg();
+  RegStorage t0 = AllocTemp();
+  RegStorage t1 = AllocTemp();
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  NewLIR3(kMipsSlt, t0, rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
-  NewLIR3(kMipsSlt, t1, rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
-  NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1, t0);
+  NewLIR3(kMipsSlt, t0.GetReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+  NewLIR3(kMipsSlt, t1.GetReg(), rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
+  NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
   LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, NULL);
-  NewLIR3(kMipsSltu, t0, rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
-  NewLIR3(kMipsSltu, t1, rl_src2.reg.GetLowReg(), rl_src1.reg.GetLowReg());
-  NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1, t0);
+  NewLIR3(kMipsSltu, t0.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
+  NewLIR3(kMipsSltu, t1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetLowReg());
+  NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
   FreeTemp(t0);
   FreeTemp(t1);
   LIR* target = NewLIR0(kPseudoTargetLabel);
@@ -114,13 +114,13 @@
   if (cmp_zero) {
     branch = NewLIR2(br_op, src1.GetReg(), src2.GetReg());
   } else {
-    int t_reg = AllocTemp().GetReg();
+    RegStorage t_reg = AllocTemp();
     if (swapped) {
-      NewLIR3(slt_op, t_reg, src2.GetReg(), src1.GetReg());
+      NewLIR3(slt_op, t_reg.GetReg(), src2.GetReg(), src1.GetReg());
     } else {
-      NewLIR3(slt_op, t_reg, src1.GetReg(), src2.GetReg());
+      NewLIR3(slt_op, t_reg.GetReg(), src1.GetReg(), src2.GetReg());
     }
-    branch = NewLIR1(br_op, t_reg);
+    branch = NewLIR1(br_op, t_reg.GetReg());
     FreeTemp(t_reg);
   }
   branch->target = target;
@@ -167,7 +167,7 @@
   if (r_src.IsPair()) {
     r_src = r_src.GetLow();
   }
-  if (MIPS_FPREG(r_dest.GetReg()) || MIPS_FPREG(r_src.GetReg()))
+  if (r_dest.IsFloat() || r_src.IsFloat())
     return OpFpRegCopy(r_dest, r_src);
   LIR* res = RawLIR(current_dalvik_offset_, kMipsMove,
             r_dest.GetReg(), r_src.GetReg());
@@ -186,17 +186,15 @@
 
 void MipsMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
   if (r_dest != r_src) {
-    bool dest_fp = MIPS_FPREG(r_dest.GetLowReg());
-    bool src_fp = MIPS_FPREG(r_src.GetLowReg());
+    bool dest_fp = r_dest.IsFloat();
+    bool src_fp = r_src.IsFloat();
     if (dest_fp) {
       if (src_fp) {
-        // FIXME: handle this here - reserve OpRegCopy for 32-bit copies.
-        OpRegCopy(RegStorage::Solo64(S2d(r_dest.GetLowReg(), r_dest.GetHighReg())),
-                  RegStorage::Solo64(S2d(r_src.GetLowReg(), r_src.GetHighReg())));
-        } else {
-          /* note the operands are swapped for the mtc1 instr */
-          NewLIR2(kMipsMtc1, r_src.GetLowReg(), r_dest.GetLowReg());
-          NewLIR2(kMipsMtc1, r_src.GetHighReg(), r_dest.GetHighReg());
+        OpRegCopy(r_dest, r_src);
+      } else {
+        /* note the operands are swapped for the mtc1 instr */
+        NewLIR2(kMipsMtc1, r_src.GetLowReg(), r_dest.GetLowReg());
+        NewLIR2(kMipsMtc1, r_src.GetHighReg(), r_dest.GetHighReg());
       }
     } else {
       if (src_fp) {
@@ -238,9 +236,9 @@
 
 RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit,
                                        bool is_div) {
-  int t_reg = AllocTemp().GetReg();
-  NewLIR3(kMipsAddiu, t_reg, rZERO, lit);
-  NewLIR2(kMipsDiv, reg1.GetReg(), t_reg);
+  RegStorage t_reg = AllocTemp();
+  NewLIR3(kMipsAddiu, t_reg.GetReg(), rZERO, lit);
+  NewLIR2(kMipsDiv, reg1.GetReg(), t_reg.GetReg());
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   if (is_div) {
     NewLIR1(kMipsMflo, rl_result.reg.GetReg());
@@ -470,7 +468,7 @@
  */
 void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                           RegLocation rl_index, RegLocation rl_dest, int scale) {
-  RegisterClass reg_class = oat_reg_class_by_size(size);
+  RegisterClass reg_class = RegClassBySize(size);
   int len_offset = mirror::Array::LengthOffset().Int32Value();
   int data_offset;
   RegLocation rl_result;
@@ -496,7 +494,7 @@
   }
   /* reg_ptr -> array data */
   OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
-  FreeTemp(rl_array.reg.GetReg());
+  FreeTemp(rl_array.reg);
   if ((size == k64) || (size == kDouble)) {
     if (scale) {
       RegStorage r_new_index = AllocTemp();
@@ -537,7 +535,7 @@
  */
 void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                           RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
-  RegisterClass reg_class = oat_reg_class_by_size(size);
+  RegisterClass reg_class = RegClassBySize(size);
   int len_offset = mirror::Array::LengthOffset().Int32Value();
   int data_offset;
 
@@ -551,8 +549,8 @@
   rl_index = LoadValue(rl_index, kCoreReg);
   RegStorage reg_ptr;
   bool allocated_reg_ptr_temp = false;
-  if (IsTemp(rl_array.reg.GetReg()) && !card_mark) {
-    Clobber(rl_array.reg.GetReg());
+  if (IsTemp(rl_array.reg) && !card_mark) {
+    Clobber(rl_array.reg);
     reg_ptr = rl_array.reg;
   } else {
     reg_ptr = AllocTemp();
diff --git a/compiler/dex/quick/mips/mips_lir.h b/compiler/dex/quick/mips/mips_lir.h
index c5150ee..5b2cb9d 100644
--- a/compiler/dex/quick/mips/mips_lir.h
+++ b/compiler/dex/quick/mips/mips_lir.h
@@ -86,26 +86,6 @@
  * +========================+
  */
 
-// Offset to distingish FP regs.
-#define MIPS_FP_REG_OFFSET 32
-// Offset to distinguish DP FP regs.
-#define MIPS_FP_DOUBLE 64
-// Reg types.
-#define MIPS_REGTYPE(x) (x & (MIPS_FP_REG_OFFSET | MIPS_FP_DOUBLE))
-#define MIPS_FPREG(x) ((x & MIPS_FP_REG_OFFSET) == MIPS_FP_REG_OFFSET)
-#define MIPS_DOUBLEREG(x) ((x & MIPS_FP_DOUBLE) == MIPS_FP_DOUBLE)
-#define MIPS_SINGLEREG(x) (MIPS_FPREG(x) && !MIPS_DOUBLEREG(x))
-// FIXME: out of date comment.
-/*
- * Note: the low register of a floating point pair is sufficient to
- * create the name of a double, but require both names to be passed to
- * allow for asserts to verify that the pair is consecutive if significant
- * rework is done in this area.  Also, it is a good reminder in the calling
- * code that reg locations always describe doubles as a pair of singles.
- */
-#define MIPS_S2D(x, y) ((x) | MIPS_FP_DOUBLE)
-// Mask to strip off fp flags.
-#define MIPS_FP_REG_MASK (MIPS_FP_REG_OFFSET-1)
 
 #define LOWORD_OFFSET 0
 #define HIWORD_OFFSET 4
@@ -159,135 +139,159 @@
 #define ENCODE_MIPS_REG_LO           (1ULL << kMipsRegLO)
 
 enum MipsNativeRegisterPool {
-  rZERO = 0,
-  rAT = 1,
-  rV0 = 2,
-  rV1 = 3,
-  rA0 = 4,
-  rA1 = 5,
-  rA2 = 6,
-  rA3 = 7,
-  rT0 = 8,
-  rT1 = 9,
-  rT2 = 10,
-  rT3 = 11,
-  rT4 = 12,
-  rT5 = 13,
-  rT6 = 14,
-  rT7 = 15,
-  rS0 = 16,
-  rS1 = 17,
-  rS2 = 18,
-  rS3 = 19,
-  rS4 = 20,
-  rS5 = 21,
-  rS6 = 22,
-  rS7 = 23,
-  rT8 = 24,
-  rT9 = 25,
-  rK0 = 26,
-  rK1 = 27,
-  rGP = 28,
-  rSP = 29,
-  rFP = 30,
-  rRA = 31,
+  rZERO = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  0,
+  rAT   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  1,
+  rV0   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  2,
+  rV1   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  3,
+  rA0   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  4,
+  rA1   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  5,
+  rA2   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  6,
+  rA3   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  7,
+  rT0   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  8,
+  rT1   = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  9,
+  rT2   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
+  rT3   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
+  rT4   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
+  rT5   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
+  rT6   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
+  rT7   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
+  rS0   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 16,
+  rS1   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 17,
+  rS2   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 18,
+  rS3   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 19,
+  rS4   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 20,
+  rS5   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 21,
+  rS6   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 22,
+  rS7   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 23,
+  rT8   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 24,
+  rT9   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 25,
+  rK0   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 26,
+  rK1   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 27,
+  rGP   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 28,
+  rSP   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 29,
+  rFP   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 30,
+  rRA   = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 31,
 
-  rF0 = 0 + MIPS_FP_REG_OFFSET,
-  rF1,
-  rF2,
-  rF3,
-  rF4,
-  rF5,
-  rF6,
-  rF7,
-  rF8,
-  rF9,
-  rF10,
-  rF11,
-  rF12,
-  rF13,
-  rF14,
-  rF15,
+  rF0  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  0,
+  rF1  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  1,
+  rF2  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  2,
+  rF3  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  3,
+  rF4  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  4,
+  rF5  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  5,
+  rF6  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  6,
+  rF7  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  7,
+  rF8  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  8,
+  rF9  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  9,
+  rF10 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 10,
+  rF11 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 11,
+  rF12 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 12,
+  rF13 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 13,
+  rF14 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 14,
+  rF15 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 15,
 #if 0
   /*
    * TODO: The shared resource mask doesn't have enough bit positions to describe all
    * MIPS registers.  Expand it and enable use of fp registers 16 through 31.
    */
-  rF16,
-  rF17,
-  rF18,
-  rF19,
-  rF20,
-  rF21,
-  rF22,
-  rF23,
-  rF24,
-  rF25,
-  rF26,
-  rF27,
-  rF28,
-  rF29,
-  rF30,
-  rF31,
+  rF16 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 16,
+  rF17 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 17,
+  rF18 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 18,
+  rF19 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 19,
+  rF20 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 20,
+  rF21 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 21,
+  rF22 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 22,
+  rF23 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 23,
+  rF24 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 24,
+  rF25 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 25,
+  rF26 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 26,
+  rF27 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 27,
+  rF28 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 28,
+  rF29 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 29,
+  rF30 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 30,
+  rF31 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 31,
 #endif
-  rDF0 = rF0 + MIPS_FP_DOUBLE,
-  rDF1 = rF2 + MIPS_FP_DOUBLE,
-  rDF2 = rF4 + MIPS_FP_DOUBLE,
-  rDF3 = rF6 + MIPS_FP_DOUBLE,
-  rDF4 = rF8 + MIPS_FP_DOUBLE,
-  rDF5 = rF10 + MIPS_FP_DOUBLE,
-  rDF6 = rF12 + MIPS_FP_DOUBLE,
-  rDF7 = rF14 + MIPS_FP_DOUBLE,
+  rD0  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  0,
+  rD1  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  1,
+  rD2  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  2,
+  rD3  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  3,
+  rD4  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  4,
+  rD5  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  5,
+  rD6  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  6,
+  rD7  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  7,
 #if 0  // TODO: expand resource mask to enable use of all MIPS fp registers.
-  rDF8 = rF16 + MIPS_FP_DOUBLE,
-  rDF9 = rF18 + MIPS_FP_DOUBLE,
-  rDF10 = rF20 + MIPS_FP_DOUBLE,
-  rDF11 = rF22 + MIPS_FP_DOUBLE,
-  rDF12 = rF24 + MIPS_FP_DOUBLE,
-  rDF13 = rF26 + MIPS_FP_DOUBLE,
-  rDF14 = rF28 + MIPS_FP_DOUBLE,
-  rDF15 = rF30 + MIPS_FP_DOUBLE,
+  rD8  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  8,
+  rD9  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  9,
+  rD10 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
+  rD11 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
+  rD12 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
+  rD13 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
+  rD14 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
+  rD15 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,
 #endif
 };
 
-const RegStorage rs_rZERO(RegStorage::k32BitSolo, rZERO);
-const RegStorage rs_rAT(RegStorage::k32BitSolo, rAT);
-const RegStorage rs_rV0(RegStorage::k32BitSolo, rV0);
-const RegStorage rs_rV1(RegStorage::k32BitSolo, rV1);
-const RegStorage rs_rA0(RegStorage::k32BitSolo, rA0);
-const RegStorage rs_rA1(RegStorage::k32BitSolo, rA1);
-const RegStorage rs_rA2(RegStorage::k32BitSolo, rA2);
-const RegStorage rs_rA3(RegStorage::k32BitSolo, rA3);
-const RegStorage rs_rT0(RegStorage::k32BitSolo, rT0);
-const RegStorage rs_rT1(RegStorage::k32BitSolo, rT1);
-const RegStorage rs_rT2(RegStorage::k32BitSolo, rT2);
-const RegStorage rs_rT3(RegStorage::k32BitSolo, rT3);
-const RegStorage rs_rT4(RegStorage::k32BitSolo, rT4);
-const RegStorage rs_rT5(RegStorage::k32BitSolo, rT5);
-const RegStorage rs_rT6(RegStorage::k32BitSolo, rT6);
-const RegStorage rs_rT7(RegStorage::k32BitSolo, rT7);
-const RegStorage rs_rS0(RegStorage::k32BitSolo, rS0);
-const RegStorage rs_rS1(RegStorage::k32BitSolo, rS1);
-const RegStorage rs_rS2(RegStorage::k32BitSolo, rS2);
-const RegStorage rs_rS3(RegStorage::k32BitSolo, rS3);
-const RegStorage rs_rS4(RegStorage::k32BitSolo, rS4);
-const RegStorage rs_rS5(RegStorage::k32BitSolo, rS5);
-const RegStorage rs_rS6(RegStorage::k32BitSolo, rS6);
-const RegStorage rs_rS7(RegStorage::k32BitSolo, rS7);
-const RegStorage rs_rT8(RegStorage::k32BitSolo, rT8);
-const RegStorage rs_rT9(RegStorage::k32BitSolo, rT9);
-const RegStorage rs_rK0(RegStorage::k32BitSolo, rK0);
-const RegStorage rs_rK1(RegStorage::k32BitSolo, rK1);
-const RegStorage rs_rGP(RegStorage::k32BitSolo, rGP);
-const RegStorage rs_rSP(RegStorage::k32BitSolo, rSP);
-const RegStorage rs_rFP(RegStorage::k32BitSolo, rFP);
-const RegStorage rs_rRA(RegStorage::k32BitSolo, rRA);
-const RegStorage rs_rF12(RegStorage::k32BitSolo, rF12);
-const RegStorage rs_rF13(RegStorage::k32BitSolo, rF13);
-const RegStorage rs_rF14(RegStorage::k32BitSolo, rF14);
-const RegStorage rs_rF15(RegStorage::k32BitSolo, rF15);
-const RegStorage rs_rF0(RegStorage::k32BitSolo, rF0);
-const RegStorage rs_rF1(RegStorage::k32BitSolo, rF1);
+constexpr RegStorage rs_rZERO(RegStorage::kValid | rZERO);
+constexpr RegStorage rs_rAT(RegStorage::kValid | rAT);
+constexpr RegStorage rs_rV0(RegStorage::kValid | rV0);
+constexpr RegStorage rs_rV1(RegStorage::kValid | rV1);
+constexpr RegStorage rs_rA0(RegStorage::kValid | rA0);
+constexpr RegStorage rs_rA1(RegStorage::kValid | rA1);
+constexpr RegStorage rs_rA2(RegStorage::kValid | rA2);
+constexpr RegStorage rs_rA3(RegStorage::kValid | rA3);
+constexpr RegStorage rs_rT0(RegStorage::kValid | rT0);
+constexpr RegStorage rs_rT1(RegStorage::kValid | rT1);
+constexpr RegStorage rs_rT2(RegStorage::kValid | rT2);
+constexpr RegStorage rs_rT3(RegStorage::kValid | rT3);
+constexpr RegStorage rs_rT4(RegStorage::kValid | rT4);
+constexpr RegStorage rs_rT5(RegStorage::kValid | rT5);
+constexpr RegStorage rs_rT6(RegStorage::kValid | rT6);
+constexpr RegStorage rs_rT7(RegStorage::kValid | rT7);
+constexpr RegStorage rs_rS0(RegStorage::kValid | rS0);
+constexpr RegStorage rs_rS1(RegStorage::kValid | rS1);
+constexpr RegStorage rs_rS2(RegStorage::kValid | rS2);
+constexpr RegStorage rs_rS3(RegStorage::kValid | rS3);
+constexpr RegStorage rs_rS4(RegStorage::kValid | rS4);
+constexpr RegStorage rs_rS5(RegStorage::kValid | rS5);
+constexpr RegStorage rs_rS6(RegStorage::kValid | rS6);
+constexpr RegStorage rs_rS7(RegStorage::kValid | rS7);
+constexpr RegStorage rs_rT8(RegStorage::kValid | rT8);
+constexpr RegStorage rs_rT9(RegStorage::kValid | rT9);
+constexpr RegStorage rs_rK0(RegStorage::kValid | rK0);
+constexpr RegStorage rs_rK1(RegStorage::kValid | rK1);
+constexpr RegStorage rs_rGP(RegStorage::kValid | rGP);
+constexpr RegStorage rs_rSP(RegStorage::kValid | rSP);
+constexpr RegStorage rs_rFP(RegStorage::kValid | rFP);
+constexpr RegStorage rs_rRA(RegStorage::kValid | rRA);
+
+constexpr RegStorage rs_rMIPS_LR(RegStorage::kInvalid);     // Not used for MIPS.
+constexpr RegStorage rs_rMIPS_PC(RegStorage::kInvalid);     // Not used for MIPS.
+constexpr RegStorage rs_rMIPS_COUNT(RegStorage::kInvalid);  // Not used for MIPS.
+
+constexpr RegStorage rs_rF0(RegStorage::kValid | rF0);
+constexpr RegStorage rs_rF1(RegStorage::kValid | rF1);
+constexpr RegStorage rs_rF2(RegStorage::kValid | rF2);
+constexpr RegStorage rs_rF3(RegStorage::kValid | rF3);
+constexpr RegStorage rs_rF4(RegStorage::kValid | rF4);
+constexpr RegStorage rs_rF5(RegStorage::kValid | rF5);
+constexpr RegStorage rs_rF6(RegStorage::kValid | rF6);
+constexpr RegStorage rs_rF7(RegStorage::kValid | rF7);
+constexpr RegStorage rs_rF8(RegStorage::kValid | rF8);
+constexpr RegStorage rs_rF9(RegStorage::kValid | rF9);
+constexpr RegStorage rs_rF10(RegStorage::kValid | rF10);
+constexpr RegStorage rs_rF11(RegStorage::kValid | rF11);
+constexpr RegStorage rs_rF12(RegStorage::kValid | rF12);
+constexpr RegStorage rs_rF13(RegStorage::kValid | rF13);
+constexpr RegStorage rs_rF14(RegStorage::kValid | rF14);
+constexpr RegStorage rs_rF15(RegStorage::kValid | rF15);
+
+constexpr RegStorage rs_rD0(RegStorage::kValid | rD0);
+constexpr RegStorage rs_rD1(RegStorage::kValid | rD1);
+constexpr RegStorage rs_rD2(RegStorage::kValid | rD2);
+constexpr RegStorage rs_rD3(RegStorage::kValid | rD3);
+constexpr RegStorage rs_rD4(RegStorage::kValid | rD4);
+constexpr RegStorage rs_rD5(RegStorage::kValid | rD5);
+constexpr RegStorage rs_rD6(RegStorage::kValid | rD6);
+constexpr RegStorage rs_rD7(RegStorage::kValid | rD7);
 
 // TODO: reduce/eliminate use of these.
 #define rMIPS_SUSPEND rS0
@@ -311,9 +315,9 @@
 #define rMIPS_FARG2 rFARG2
 #define rs_rMIPS_FARG2 rs_rFARG2
 #define rMIPS_FARG3 rFARG3
-#define rs_MIPS_FARG3 rs_rFARG3
+#define rs_rMIPS_FARG3 rs_rFARG3
 #define rMIPS_RET0 rRESULT0
-#define rs_MIPS_RET0 rs_rRESULT0
+#define rs_rMIPS_RET0 rs_rRESULT0
 #define rMIPS_RET1 rRESULT1
 #define rs_rMIPS_RET1 rs_rRESULT1
 #define rMIPS_INVOKE_TGT rT9
@@ -322,16 +326,17 @@
 
 // RegisterLocation templates return values (r_V0, or r_V0/r_V1).
 const RegLocation mips_loc_c_return
-    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
      RegStorage(RegStorage::k32BitSolo, rV0), INVALID_SREG, INVALID_SREG};
 const RegLocation mips_loc_c_return_wide
-    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
      RegStorage(RegStorage::k64BitPair, rV0, rV1), INVALID_SREG, INVALID_SREG};
 const RegLocation mips_loc_c_return_float
-    {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, kVectorNotUsed,
+    {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1,
      RegStorage(RegStorage::k32BitSolo, rF0), INVALID_SREG, INVALID_SREG};
+// FIXME: move MIPS to k64BitSolo for doubles.
 const RegLocation mips_loc_c_return_double
-    {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, kVectorNotUsed,
+    {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
      RegStorage(RegStorage::k64BitPair, rF0, rF1), INVALID_SREG, INVALID_SREG};
 
 enum MipsShiftEncodings {
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 7f4cd5e..8d91aba 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -26,18 +26,40 @@
 
 namespace art {
 
-static int core_regs[] = {rZERO, rAT, rV0, rV1, rA0, rA1, rA2, rA3,
-                          rT0, rT1, rT2, rT3, rT4, rT5, rT6, rT7,
-                          rS0, rS1, rS2, rS3, rS4, rS5, rS6, rS7, rT8,
-                          rT9, rK0, rK1, rGP, rSP, rFP, rRA};
-static int ReservedRegs[] = {rZERO, rAT, rS0, rS1, rK0, rK1, rGP, rSP,
-                             rRA};
-static int core_temps[] = {rV0, rV1, rA0, rA1, rA2, rA3, rT0, rT1, rT2,
-                           rT3, rT4, rT5, rT6, rT7, rT8};
-static int FpRegs[] = {rF0, rF1, rF2, rF3, rF4, rF5, rF6, rF7,
-                       rF8, rF9, rF10, rF11, rF12, rF13, rF14, rF15};
-static int fp_temps[] = {rF0, rF1, rF2, rF3, rF4, rF5, rF6, rF7,
-                         rF8, rF9, rF10, rF11, rF12, rF13, rF14, rF15};
+static const RegStorage core_regs_arr[] =
+    {rs_rZERO, rs_rAT, rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0, rs_rT1, rs_rT2,
+     rs_rT3, rs_rT4, rs_rT5, rs_rT6, rs_rT7, rs_rS0, rs_rS1, rs_rS2, rs_rS3, rs_rS4, rs_rS5,
+     rs_rS6, rs_rS7, rs_rT8, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rFP, rs_rRA};
+static RegStorage sp_regs_arr[] =
+    {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+     rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15};
+static RegStorage dp_regs_arr[] =
+    {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7};
+static const RegStorage reserved_regs_arr[] =
+    {rs_rZERO, rs_rAT, rs_rS0, rs_rS1, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rRA};
+static RegStorage core_temps_arr[] =
+    {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0, rs_rT1, rs_rT2, rs_rT3, rs_rT4,
+     rs_rT5, rs_rT6, rs_rT7, rs_rT8};
+static RegStorage sp_temps_arr[] =
+    {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+     rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15};
+static RegStorage dp_temps_arr[] =
+    {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7};
+
+static const std::vector<RegStorage> core_regs(core_regs_arr,
+    core_regs_arr + sizeof(core_regs_arr) / sizeof(core_regs_arr[0]));
+static const std::vector<RegStorage> sp_regs(sp_regs_arr,
+    sp_regs_arr + sizeof(sp_regs_arr) / sizeof(sp_regs_arr[0]));
+static const std::vector<RegStorage> dp_regs(dp_regs_arr,
+    dp_regs_arr + sizeof(dp_regs_arr) / sizeof(dp_regs_arr[0]));
+static const std::vector<RegStorage> reserved_regs(reserved_regs_arr,
+    reserved_regs_arr + sizeof(reserved_regs_arr) / sizeof(reserved_regs_arr[0]));
+static const std::vector<RegStorage> core_temps(core_temps_arr,
+    core_temps_arr + sizeof(core_temps_arr) / sizeof(core_temps_arr[0]));
+static const std::vector<RegStorage> sp_temps(sp_temps_arr,
+    sp_temps_arr + sizeof(sp_temps_arr) / sizeof(sp_temps_arr[0]));
+static const std::vector<RegStorage> dp_temps(dp_temps_arr,
+    dp_temps_arr + sizeof(dp_temps_arr) / sizeof(dp_temps_arr[0]));
 
 RegLocation MipsMir2Lir::LocCReturn() {
   return mips_loc_c_return;
@@ -57,29 +79,29 @@
 
 // Return a target-dependent special register.
 RegStorage MipsMir2Lir::TargetReg(SpecialTargetRegister reg) {
-  int res_reg = RegStorage::kInvalidRegVal;
+  RegStorage res_reg;
   switch (reg) {
-    case kSelf: res_reg = rMIPS_SELF; break;
-    case kSuspend: res_reg =  rMIPS_SUSPEND; break;
-    case kLr: res_reg =  rMIPS_LR; break;
-    case kPc: res_reg =  rMIPS_PC; break;
-    case kSp: res_reg =  rMIPS_SP; break;
-    case kArg0: res_reg = rMIPS_ARG0; break;
-    case kArg1: res_reg = rMIPS_ARG1; break;
-    case kArg2: res_reg = rMIPS_ARG2; break;
-    case kArg3: res_reg = rMIPS_ARG3; break;
-    case kFArg0: res_reg = rMIPS_FARG0; break;
-    case kFArg1: res_reg = rMIPS_FARG1; break;
-    case kFArg2: res_reg = rMIPS_FARG2; break;
-    case kFArg3: res_reg = rMIPS_FARG3; break;
-    case kRet0: res_reg = rMIPS_RET0; break;
-    case kRet1: res_reg = rMIPS_RET1; break;
-    case kInvokeTgt: res_reg = rMIPS_INVOKE_TGT; break;
-    case kHiddenArg: res_reg = rT0; break;
-    case kHiddenFpArg: res_reg = RegStorage::kInvalidRegVal; break;
-    case kCount: res_reg = rMIPS_COUNT; break;
+    case kSelf: res_reg = rs_rMIPS_SELF; break;
+    case kSuspend: res_reg =  rs_rMIPS_SUSPEND; break;
+    case kLr: res_reg =  rs_rMIPS_LR; break;
+    case kPc: res_reg =  rs_rMIPS_PC; break;
+    case kSp: res_reg =  rs_rMIPS_SP; break;
+    case kArg0: res_reg = rs_rMIPS_ARG0; break;
+    case kArg1: res_reg = rs_rMIPS_ARG1; break;
+    case kArg2: res_reg = rs_rMIPS_ARG2; break;
+    case kArg3: res_reg = rs_rMIPS_ARG3; break;
+    case kFArg0: res_reg = rs_rMIPS_FARG0; break;
+    case kFArg1: res_reg = rs_rMIPS_FARG1; break;
+    case kFArg2: res_reg = rs_rMIPS_FARG2; break;
+    case kFArg3: res_reg = rs_rMIPS_FARG3; break;
+    case kRet0: res_reg = rs_rMIPS_RET0; break;
+    case kRet1: res_reg = rs_rMIPS_RET1; break;
+    case kInvokeTgt: res_reg = rs_rMIPS_INVOKE_TGT; break;
+    case kHiddenArg: res_reg = rs_rT0; break;
+    case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
+    case kCount: res_reg = rs_rMIPS_COUNT; break;
   }
-  return RegStorage::Solo32(res_reg);
+  return res_reg;
 }
 
 RegStorage MipsMir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
@@ -96,35 +118,22 @@
   }
 }
 
-// Create a double from a pair of singles.
-int MipsMir2Lir::S2d(int low_reg, int high_reg) {
-  return MIPS_S2D(low_reg, high_reg);
-}
-
-// Return mask to strip off fp reg flags and bias.
-uint32_t MipsMir2Lir::FpRegMask() {
-  return MIPS_FP_REG_MASK;
-}
-
-// True if both regs single, both core or both double.
-bool MipsMir2Lir::SameRegType(int reg1, int reg2) {
-  return (MIPS_REGTYPE(reg1) == MIPS_REGTYPE(reg2));
-}
-
 /*
  * Decode the register id.
  */
-uint64_t MipsMir2Lir::GetRegMaskCommon(int reg) {
+uint64_t MipsMir2Lir::GetRegMaskCommon(RegStorage reg) {
   uint64_t seed;
   int shift;
-  int reg_id;
-
-
-  reg_id = reg & 0x1f;
+  int reg_id = reg.GetRegNum();
   /* Each double register is equal to a pair of single-precision FP registers */
-  seed = MIPS_DOUBLEREG(reg) ? 3 : 1;
-  /* FP register starts at bit position 16 */
-  shift = MIPS_FPREG(reg) ? kMipsFPReg0 : 0;
+  if (reg.IsDouble()) {
+    seed = 0x3;
+    reg_id = reg_id << 1;
+  } else {
+    seed = 1;
+  }
+  /* FP register starts at bit position 32 */
+  shift = reg.IsFloat() ? kMipsFPReg0 : 0;
   /* Expand the double register id into single offset */
   shift += reg_id;
   return (seed << shift);
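Worked example (illustration only, not applied by this patch): with the encoding above, a MIPS
double such as rD2 decodes to a two-bit resource mask covering both of its single-precision halves:

  // rD2: IsFloat() && IsDouble(), GetRegNum() == 2
  //   seed   = 0x3                    // a double occupies two single-precision slots
  //   reg_id = 2 << 1 = 4
  //   shift  = kMipsFPReg0 + 4
  //   mask   = 0x3ULL << shift        // the resource bits otherwise used by F4 and F5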
@@ -209,11 +218,11 @@
              }
              break;
            case 's':
-             snprintf(tbuf, arraysize(tbuf), "$f%d", operand & MIPS_FP_REG_MASK);
+             snprintf(tbuf, arraysize(tbuf), "$f%d", RegStorage::RegNum(operand));
              break;
            case 'S':
-             DCHECK_EQ(((operand & MIPS_FP_REG_MASK) & 1), 0);
-             snprintf(tbuf, arraysize(tbuf), "$f%d", operand & MIPS_FP_REG_MASK);
+             DCHECK_EQ(RegStorage::RegNum(operand) & 1, 0);
+             snprintf(tbuf, arraysize(tbuf), "$f%d", RegStorage::RegNum(operand));
              break;
            case 'h':
              snprintf(tbuf, arraysize(tbuf), "%04x", operand);
@@ -327,7 +336,7 @@
  */
 
 void MipsMir2Lir::AdjustSpillMask() {
-  core_spill_mask_ |= (1 << rRA);
+  core_spill_mask_ |= (1 << rs_rRA.GetRegNum());
   num_core_spills_++;
 }
 
@@ -337,92 +346,63 @@
  * include any holes in the mask.  Associate holes with
  * Dalvik register INVALID_VREG (0xFFFFU).
  */
-void MipsMir2Lir::MarkPreservedSingle(int s_reg, int reg) {
+void MipsMir2Lir::MarkPreservedSingle(int s_reg, RegStorage reg) {
   LOG(FATAL) << "No support yet for promoted FP regs";
 }
 
-void MipsMir2Lir::FlushRegWide(RegStorage reg) {
-  RegisterInfo* info1 = GetRegInfo(reg.GetLowReg());
-  RegisterInfo* info2 = GetRegInfo(reg.GetHighReg());
-  DCHECK(info1 && info2 && info1->pair && info2->pair &&
-         (info1->partner == info2->reg) &&
-         (info2->partner == info1->reg));
-  if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
-    if (!(info1->is_temp && info2->is_temp)) {
-      /* Should not happen.  If it does, there's a problem in eval_loc */
-      LOG(FATAL) << "Long half-temp, half-promoted";
-    }
-
-    info1->dirty = false;
-    info2->dirty = false;
-    if (mir_graph_->SRegToVReg(info2->s_reg) < mir_graph_->SRegToVReg(info1->s_reg))
-      info1 = info2;
-    int v_reg = mir_graph_->SRegToVReg(info1->s_reg);
-    StoreBaseDispWide(rs_rMIPS_SP, VRegOffset(v_reg),
-                      RegStorage(RegStorage::k64BitPair, info1->reg, info1->partner));
-  }
-}
-
-void MipsMir2Lir::FlushReg(RegStorage reg) {
-  DCHECK(!reg.IsPair());
-  RegisterInfo* info = GetRegInfo(reg.GetReg());
-  if (info->live && info->dirty) {
-    info->dirty = false;
-    int v_reg = mir_graph_->SRegToVReg(info->s_reg);
-    Store32Disp(rs_rMIPS_SP, VRegOffset(v_reg), reg);
-  }
-}
-
-/* Give access to the target-dependent FP register encoding to common code */
-bool MipsMir2Lir::IsFpReg(int reg) {
-  return MIPS_FPREG(reg);
-}
-
-bool MipsMir2Lir::IsFpReg(RegStorage reg) {
-  return IsFpReg(reg.IsPair() ? reg.GetLowReg() : reg.GetReg());
+void MipsMir2Lir::MarkPreservedDouble(int s_reg, RegStorage reg) {
+  LOG(FATAL) << "No support yet for promoted FP regs";
 }
 
 /* Clobber all regs that might be used by an external C call */
 void MipsMir2Lir::ClobberCallerSave() {
-  Clobber(rZERO);
-  Clobber(rAT);
-  Clobber(rV0);
-  Clobber(rV1);
-  Clobber(rA0);
-  Clobber(rA1);
-  Clobber(rA2);
-  Clobber(rA3);
-  Clobber(rT0);
-  Clobber(rT1);
-  Clobber(rT2);
-  Clobber(rT3);
-  Clobber(rT4);
-  Clobber(rT5);
-  Clobber(rT6);
-  Clobber(rT7);
-  Clobber(rT8);
-  Clobber(rT9);
-  Clobber(rK0);
-  Clobber(rK1);
-  Clobber(rGP);
-  Clobber(rFP);
-  Clobber(rRA);
-  Clobber(rF0);
-  Clobber(rF1);
-  Clobber(rF2);
-  Clobber(rF3);
-  Clobber(rF4);
-  Clobber(rF5);
-  Clobber(rF6);
-  Clobber(rF7);
-  Clobber(rF8);
-  Clobber(rF9);
-  Clobber(rF10);
-  Clobber(rF11);
-  Clobber(rF12);
-  Clobber(rF13);
-  Clobber(rF14);
-  Clobber(rF15);
+  Clobber(rs_rZERO);
+  Clobber(rs_rAT);
+  Clobber(rs_rV0);
+  Clobber(rs_rV1);
+  Clobber(rs_rA0);
+  Clobber(rs_rA1);
+  Clobber(rs_rA2);
+  Clobber(rs_rA3);
+  Clobber(rs_rT0);
+  Clobber(rs_rT1);
+  Clobber(rs_rT2);
+  Clobber(rs_rT3);
+  Clobber(rs_rT4);
+  Clobber(rs_rT5);
+  Clobber(rs_rT6);
+  Clobber(rs_rT7);
+  Clobber(rs_rT8);
+  Clobber(rs_rT9);
+  Clobber(rs_rK0);
+  Clobber(rs_rK1);
+  Clobber(rs_rGP);
+  Clobber(rs_rFP);
+  Clobber(rs_rRA);
+  Clobber(rs_rF0);
+  Clobber(rs_rF1);
+  Clobber(rs_rF2);
+  Clobber(rs_rF3);
+  Clobber(rs_rF4);
+  Clobber(rs_rF5);
+  Clobber(rs_rF6);
+  Clobber(rs_rF7);
+  Clobber(rs_rF8);
+  Clobber(rs_rF9);
+  Clobber(rs_rF10);
+  Clobber(rs_rF11);
+  Clobber(rs_rF12);
+  Clobber(rs_rF13);
+  Clobber(rs_rF14);
+  Clobber(rs_rF15);
+  Clobber(rs_rD0);
+  Clobber(rs_rD1);
+  Clobber(rs_rD2);
+  Clobber(rs_rD3);
+  Clobber(rs_rD4);
+  Clobber(rs_rD5);
+  Clobber(rs_rD6);
+  Clobber(rs_rD7);
 }
 
 RegLocation MipsMir2Lir::GetReturnWideAlt() {
@@ -439,18 +419,18 @@
 
 /* To be used when explicitly managing register use */
 void MipsMir2Lir::LockCallTemps() {
-  LockTemp(rMIPS_ARG0);
-  LockTemp(rMIPS_ARG1);
-  LockTemp(rMIPS_ARG2);
-  LockTemp(rMIPS_ARG3);
+  LockTemp(rs_rMIPS_ARG0);
+  LockTemp(rs_rMIPS_ARG1);
+  LockTemp(rs_rMIPS_ARG2);
+  LockTemp(rs_rMIPS_ARG3);
 }
 
 /* To be used when explicitly managing register use */
 void MipsMir2Lir::FreeCallTemps() {
-  FreeTemp(rMIPS_ARG0);
-  FreeTemp(rMIPS_ARG1);
-  FreeTemp(rMIPS_ARG2);
-  FreeTemp(rMIPS_ARG3);
+  FreeTemp(rs_rMIPS_ARG0);
+  FreeTemp(rs_rMIPS_ARG1);
+  FreeTemp(rs_rMIPS_ARG2);
+  FreeTemp(rs_rMIPS_ARG3);
 }
 
 void MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
@@ -461,56 +441,52 @@
 
 // Alloc a pair of core registers, or a double.
 RegStorage MipsMir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
-  int high_reg;
-  int low_reg;
-
   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
     return AllocTempDouble();
   }
 
-  low_reg = AllocTemp().GetReg();
-  high_reg = AllocTemp().GetReg();
-  return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
+  RegStorage low_reg = AllocTemp();
+  RegStorage high_reg = AllocTemp();
+  return RegStorage::MakeRegPair(low_reg, high_reg);
 }
 
 RegStorage MipsMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
-    return AllocTempFloat();
-}
+    return AllocTempSingle();
+  }
   return AllocTemp();
 }
 
 void MipsMir2Lir::CompilerInitializeRegAlloc() {
-  int num_regs = sizeof(core_regs)/sizeof(*core_regs);
-  int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
-  int num_temps = sizeof(core_temps)/sizeof(*core_temps);
-  int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
-  int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
-  reg_pool_ = static_cast<RegisterPool*>(arena_->Alloc(sizeof(*reg_pool_),
-                                                       kArenaAllocRegAlloc));
-  reg_pool_->num_core_regs = num_regs;
-  reg_pool_->core_regs = static_cast<RegisterInfo*>
-     (arena_->Alloc(num_regs * sizeof(*reg_pool_->core_regs), kArenaAllocRegAlloc));
-  reg_pool_->num_fp_regs = num_fp_regs;
-  reg_pool_->FPRegs = static_cast<RegisterInfo*>
-      (arena_->Alloc(num_fp_regs * sizeof(*reg_pool_->FPRegs), kArenaAllocRegAlloc));
-  CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
-  CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
-  // Keep special registers from being allocated
-  for (int i = 0; i < num_reserved; i++) {
-    if (NO_SUSPEND && (ReservedRegs[i] == rMIPS_SUSPEND)) {
-      // To measure cost of suspend check
-      continue;
+  reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs, sp_regs, dp_regs, reserved_regs,
+                                        core_temps, sp_temps, dp_temps);
+
+  // Target-specific adjustments.
+
+  // Alias single precision floats to the appropriate half of the overlapping double.
+  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
+  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+    int sp_reg_num = info->GetReg().GetRegNum();
+    int dp_reg_num = sp_reg_num >> 1;
+    RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
+    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
+    // Double precision register's master storage should refer to itself.
+    DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
+    // Redirect the single precision register's master to the overlapping double.
+    info->SetMaster(dp_reg_info);
+    // Singles should show a single 32-bit mask bit, at first referring to the low half.
+    DCHECK_EQ(info->StorageMask(), 0x1U);
+    if (sp_reg_num & 1) {
+      // For odd singles, change to use the high word of the backing double.
+      info->SetStorageMask(0x2);
     }
-    MarkInUse(ReservedRegs[i]);
   }
-  // Mark temp regs - all others not in use can be used for promotion
-  for (int i = 0; i < num_temps; i++) {
-    MarkTemp(core_temps[i]);
-  }
-  for (int i = 0; i < num_fp_temps; i++) {
-    MarkTemp(fp_temps[i]);
-  }
+
+  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
+  // TODO: adjust when we roll to hard float calling convention.
+  reg_pool_->next_core_reg_ = 2;
+  reg_pool_->next_sp_reg_ = 2;
+  reg_pool_->next_dp_reg_ = 1;
 }
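Sketch of the aliasing this loop establishes (illustration only; register numbers assume the MIPS
pool defined in mips_lir.h above):

  // rF5: sp_reg_num = 5, dp_reg_num = 5 >> 1 = 2
  //   Master(rF5)       -> RegisterInfo of rD2
  //   StorageMask(rF5)  == 0x2   // odd single: high half of the backing double
  //   StorageMask(rF4)  == 0x1   // even single: low half of the backing double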
 
 void MipsMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) {
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index a865430..50b945a 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -24,12 +24,12 @@
 LIR* MipsMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
   int opcode;
   /* must be both DOUBLE or both not DOUBLE */
-  DCHECK_EQ(MIPS_DOUBLEREG(r_dest.GetReg()), MIPS_DOUBLEREG(r_src.GetReg()));
-  if (MIPS_DOUBLEREG(r_dest.GetReg())) {
+  DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
+  if (r_dest.IsDouble()) {
     opcode = kMipsFmovd;
   } else {
-    if (MIPS_SINGLEREG(r_dest.GetReg())) {
-      if (MIPS_SINGLEREG(r_src.GetReg())) {
+    if (r_dest.IsSingle()) {
+      if (r_src.IsSingle()) {
         opcode = kMipsFmovs;
       } else {
         /* note the operands are swapped for the mtc1 instr */
@@ -39,7 +39,7 @@
         opcode = kMipsMtc1;
       }
     } else {
-      DCHECK(MIPS_SINGLEREG(r_src.GetReg()));
+      DCHECK(r_src.IsSingle());
       opcode = kMipsMfc1;
     }
   }
@@ -79,9 +79,9 @@
   LIR *res;
 
   RegStorage r_dest_save = r_dest;
-  int is_fp_reg = MIPS_FPREG(r_dest.GetReg());
+  int is_fp_reg = r_dest.IsFloat();
   if (is_fp_reg) {
-    DCHECK(MIPS_SINGLEREG(r_dest.GetReg()));
+    DCHECK(r_dest.IsSingle());
     r_dest = AllocTemp();
   }
 
@@ -355,8 +355,8 @@
   MipsOpCode opcode = kMipsNop;
   RegStorage t_reg = AllocTemp();
 
-  if (MIPS_FPREG(r_dest.GetReg())) {
-    DCHECK(MIPS_SINGLEREG(r_dest.GetReg()));
+  if (r_dest.IsFloat()) {
+    DCHECK(r_dest.IsSingle());
     DCHECK((size == k32) || (size == kSingle) || (size == kReference));
     size = kSingle;
   } else {
@@ -407,8 +407,8 @@
   MipsOpCode opcode = kMipsNop;
   RegStorage t_reg = AllocTemp();
 
-  if (MIPS_FPREG(r_src.GetReg())) {
-    DCHECK(MIPS_SINGLEREG(r_src.GetReg()));
+  if (r_src.IsFloat()) {
+    DCHECK(r_src.IsSingle());
     DCHECK((size == k32) || (size == kSingle) || (size == kReference));
     size = kSingle;
   } else {
@@ -469,16 +469,16 @@
     case kDouble:
       pair = true;
       opcode = kMipsLw;
-      if (MIPS_FPREG(r_dest.GetReg())) {
+      if (r_dest.IsFloat()) {
         opcode = kMipsFlwc1;
-        if (MIPS_DOUBLEREG(r_dest.GetReg())) {
-          // TODO: rework to use k64BitSolo
-          r_dest.SetReg(r_dest.GetReg() - MIPS_FP_DOUBLE);
+        if (r_dest.IsDouble()) {
+          int reg_num = (r_dest.GetRegNum() << 1) | RegStorage::kFloatingPoint;
+          r_dest = RegStorage(RegStorage::k64BitSolo, reg_num, reg_num + 1);
         } else {
-          DCHECK(MIPS_FPREG(r_dest_hi.GetReg()));
+          DCHECK(r_dest_hi.IsFloat());
           DCHECK_EQ(r_dest.GetReg(), r_dest_hi.GetReg() - 1);
+          r_dest_hi.SetReg(r_dest.GetReg() + 1);
         }
-        r_dest_hi.SetReg(r_dest.GetReg() + 1);
       }
       short_form = IS_SIMM16_2WORD(displacement);
       DCHECK_EQ((displacement & 0x3), 0);
@@ -487,9 +487,9 @@
     case kSingle:
     case kReference:
       opcode = kMipsLw;
-      if (MIPS_FPREG(r_dest.GetReg())) {
+      if (r_dest.IsFloat()) {
         opcode = kMipsFlwc1;
-        DCHECK(MIPS_SINGLEREG(r_dest.GetReg()));
+        DCHECK(r_dest.IsSingle());
       }
       DCHECK_EQ((displacement & 0x3), 0);
       break;
@@ -567,22 +567,22 @@
   LIR *store2 = NULL;
   MipsOpCode opcode = kMipsNop;
   bool short_form = IS_SIMM16(displacement);
-  bool pair = false;
+  bool pair = r_src.IsPair();
 
   switch (size) {
     case k64:
     case kDouble:
-      pair = true;
       opcode = kMipsSw;
-      if (MIPS_FPREG(r_src.GetReg())) {
+      if (r_src.IsFloat()) {
         opcode = kMipsFswc1;
-        if (MIPS_DOUBLEREG(r_src.GetReg())) {
-          r_src.SetReg(r_src.GetReg() - MIPS_FP_DOUBLE);
+        if (r_src.IsDouble()) {
+          int reg_num = (r_src.GetRegNum() << 1) | RegStorage::kFloatingPoint;
+          r_src = RegStorage(RegStorage::k64BitPair, reg_num, reg_num + 1);
         } else {
-          DCHECK(MIPS_FPREG(r_src_hi.GetReg()));
+          DCHECK(r_src_hi.IsFloat());
           DCHECK_EQ(r_src.GetReg(), (r_src_hi.GetReg() - 1));
+          r_src_hi.SetReg(r_src.GetReg() + 1);
         }
-        r_src_hi.SetReg(r_src.GetReg() + 1);
       }
       short_form = IS_SIMM16_2WORD(displacement);
       DCHECK_EQ((displacement & 0x3), 0);
@@ -591,9 +591,9 @@
     case kSingle:
     case kReference:
       opcode = kMipsSw;
-      if (MIPS_FPREG(r_src.GetReg())) {
+      if (r_src.IsFloat()) {
         opcode = kMipsFswc1;
-        DCHECK(MIPS_SINGLEREG(r_src.GetReg()));
+        DCHECK(r_src.IsSingle());
       }
       DCHECK_EQ((displacement & 0x3), 0);
       break;
@@ -665,8 +665,7 @@
 }
 
 LIR* MipsMir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                       int displacement, RegStorage r_src, RegStorage r_src_hi,
-                                       OpSize size, int s_reg) {
+                                       int displacement, RegStorage r_src, OpSize size, int s_reg) {
   LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for MIPS";
   return NULL;
 }
@@ -677,8 +676,7 @@
 }
 
 LIR* MipsMir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                      int displacement, RegStorage r_dest, RegStorage r_dest_hi,
-                                      OpSize size, int s_reg) {
+                                      int displacement, RegStorage r_dest, OpSize size, int s_reg) {
   LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for MIPS";
   return NULL;
 }
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index b2362fc..f5d71c4 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -25,20 +25,21 @@
 
 /* Mark a temp register as dead.  Does not affect allocation state. */
 inline void Mir2Lir::ClobberBody(RegisterInfo* p) {
-  if (p->is_temp) {
-    DCHECK(!(p->live && p->dirty))  << "Live & dirty temp in clobber";
-    p->live = false;
-    p->s_reg = INVALID_SREG;
-    p->def_start = NULL;
-    p->def_end = NULL;
-    if (p->pair) {
-      p->pair = false;
-      p = GetRegInfo(p->partner);
-      p->pair = false;
-      p->live = false;
-      p->s_reg = INVALID_SREG;
-      p->def_start = NULL;
-      p->def_end = NULL;
+  if (p->IsTemp()) {
+    DCHECK(!(p->IsLive() && p->IsDirty()))  << "Live & dirty temp in clobber";
+    p->SetIsLive(false);
+    p->SetSReg(INVALID_SREG);
+    p->ResetDefBody();
+    if (p->IsWide()) {
+      p->SetIsWide(false);
+      if (p->GetReg() != p->Partner()) {
+        // Register pair - deal with the other half.
+        p = GetRegInfo(p->Partner());
+        p->SetIsWide(false);
+        p->SetIsLive(false);
+        p->SetSReg(INVALID_SREG);
+        p->ResetDefBody();
+      }
     }
   }
 }
@@ -143,7 +144,9 @@
  * Mark the corresponding bit(s).
  */
 inline void Mir2Lir::SetupRegMask(uint64_t* mask, int reg) {
-  *mask |= GetRegMaskCommon(reg);
+  DCHECK_EQ((reg & ~RegStorage::kRegValMask), 0);
+  DCHECK(reginfo_map_.Get(reg) != nullptr) << "No info for " << reg;
+  *mask |= reginfo_map_.Get(reg)->DefUseMask();
 }
 
 /*
@@ -228,9 +231,11 @@
   SetupTargetResourceMasks(lir, flags);
 }
 
-inline art::Mir2Lir::RegisterInfo* Mir2Lir::GetRegInfo(int reg) {
-  DCHECK(reginfo_map_.Get(reg) != NULL);
-  return reginfo_map_.Get(reg);
+inline art::Mir2Lir::RegisterInfo* Mir2Lir::GetRegInfo(RegStorage reg) {
+  RegisterInfo* res = reg.IsPair() ? reginfo_map_.Get(reg.GetLowReg()) :
+      reginfo_map_.Get(reg.GetReg());
+  DCHECK(res != nullptr);
+  return res;
 }
 
 }  // namespace art
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 107987e..c9e1950 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -991,8 +991,7 @@
     if (cu_->disable_opt & (1 << kTrackLiveTemps)) {
       ClobberAllRegs();
       // Reset temp allocation to minimize differences when A/B testing.
-      reg_pool_->next_core_reg = 0;
-      reg_pool_->next_fp_reg = 0;
+      reg_pool_->ResetNextTemp();
     }
 
     if (cu_->disable_opt & (1 << kSuppressLoads)) {
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 9283a29..cb4396f 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -241,32 +241,151 @@
     };
 
     /*
-     * Data structure tracking the mapping between a Dalvik register (pair) and a
-     * native register (pair). The idea is to reuse the previously loaded value
-     * if possible, otherwise to keep the value in a native register as long as
-     * possible.
+     * Data structure tracking the mapping between a Dalvik value (32 or 64 bits)
+     * and native register storage.  The primary purpose is to reuse previously
+     * loaded values, if possible, and otherwise to keep the value in register
+     * storage as long as possible.
+     *
+     * NOTE 1: wide_value refers to the width of the Dalvik value contained in
+     * this register (or pair).  For example, a 64-bit register containing a 32-bit
+     * Dalvik value would have wide_value==false even though the storage container itself
+     * is wide.  Similarly, a 32-bit register containing half of a 64-bit Dalvik value
+     * would have wide_value==true (and additionally would have its partner field set to the
+     * other half, whose wide_value field would also be true).
+     *
+     * NOTE 2: In the case of a register pair, you can determine which of the partners
+     * is the low half by looking at the s_reg names.  The high s_reg will equal low_sreg + 1.
+     *
+     * NOTE 3: In the case of a 64-bit register holding a Dalvik wide value, wide_value
+     * will be true and partner==self.  s_reg refers to the low-order word of the Dalvik
+     * value, and the s_reg of the high word is implied (s_reg + 1).
+     *
+     * NOTE 4: The reg and is_temp fields should always be correct.  If is_temp is false no
+     * other fields have meaning. [perhaps not true, wide should work for promoted regs?]
+     * If is_temp==true and live==false, no other fields have
+     * meaning.  If is_temp==true and live==true, wide_value, partner, dirty, s_reg, def_start
+     * and def_end describe the relationship between the temp register/register pair and
+     * the Dalvik value[s] described by s_reg/s_reg+1.
+     *
+     * The fields used_storage, master_storage and storage_mask are used to track allocation
+     * in light of potential aliasing.  For example, consider Arm's d2, which overlaps s4 & s5.
+     * d2's storage mask would be 0x00000003, the two low-order bits denoting 64 bits of
+     * storage use.  For s4, it would be 0x00000001; for s5, 0x00000002.  These values should not
+     * change once initialized.  The "used_storage" field tracks current allocation status.
+     * Although each record contains this field, only the field from the largest member of
+     * an aliased group is used.  In our case, it would be d2's.  The master_storage pointers
+     * of d2, s4 and s5 would all point to d2's used_storage field.  Each bit in used_storage
+     * represents 32 bits of storage.  d2's used_storage would be initialized to 0xfffffffc.
+     * Then, if we wanted to determine whether s4 could be allocated, we would "and"
+     * s4's storage_mask with s4's *master_storage.  If the result is zero, s4 is free; to
+     * allocate it: *master_storage |= storage_mask.  To free: *master_storage &= ~storage_mask.
+     *
+     * For an X86 vector register example, storage_mask would be:
+     *    0x00000001 for 32-bit view of xmm1
+     *    0x00000003 for 64-bit view of xmm1
+     *    0x0000000f for 128-bit view of xmm1
+     *    0x000000ff for 256-bit view of ymm1   // future expansion, if needed
+     *    0x0000ffff for 512-bit view of ymm1   // future expansion, if needed
+     *    0xffffffff for 1024-bit view of ymm1  // future expansion, if needed
+     *
+     * NOTE: the x86 usage is still somewhat in flux.  There are competing notions of how
+     * to treat xmm registers:
+     *     1. Treat them all as 128 bits wide, but denote how much data is used via the bytes field.
+     *         o This more closely matches reality, but means you'd need to be able to get
+     *           to the associated RegisterInfo struct to figure out how it's being used.
+     *         o This is how 64-bit core registers will be used - always 64 bits, but the
+     *           "bytes" field will be 4 for 32-bit usage and 8 for 64-bit usage.
+     *     2. View the xmm registers based on contents.
+     *         o A single in an xmm2 register would be k32BitVector, while a double in xmm2 would
+     *           be a k64BitVector.
+     *         o Note that the two uses above would be considered distinct registers (but with
+     *           the aliasing mechanism, we could detect interference).
+     *         o This is how aliased double and single float registers will be handled on
+     *           Arm and MIPS.
+     * Working plan is, for all targets, to follow mechanism 1 for 64-bit core registers, and
+     * mechanism 2 for aliased float registers and x86 vector registers.
      */
-    struct RegisterInfo {
-      int reg;                    // Reg number
-      bool in_use;                // Has it been allocated?
-      bool is_temp;               // Can allocate as temp?
-      bool pair;                  // Part of a register pair?
-      int partner;                // If pair, other reg of pair.
-      bool live;                  // Is there an associated SSA name?
-      bool dirty;                 // If live, is it dirty?
-      int s_reg;                  // Name of live value.
-      LIR *def_start;             // Starting inst in last def sequence.
-      LIR *def_end;               // Ending inst in last def sequence.
+    class RegisterInfo {
+     public:
+      RegisterInfo(RegStorage r, uint64_t mask = ENCODE_ALL);
+      ~RegisterInfo() {}
+      static void* operator new(size_t size, ArenaAllocator* arena) {
+        return arena->Alloc(size, kArenaAllocRegAlloc);
+      }
+
+      bool InUse() { return (storage_mask_ & master_->used_storage_) != 0; }
+      void MarkInUse() { master_->used_storage_ |= storage_mask_; }
+      void MarkFree() { master_->used_storage_ &= ~storage_mask_; }
+      RegStorage GetReg() { return reg_; }
+      void SetReg(RegStorage reg) { reg_ = reg; }
+      bool IsTemp() { return is_temp_; }
+      void SetIsTemp(bool val) { is_temp_ = val; }
+      bool IsWide() { return wide_value_; }
+      void SetIsWide(bool val) { wide_value_ = val; }
+      bool IsLive() { return live_; }
+      void SetIsLive(bool val) { live_ = val; }
+      bool IsDirty() { return dirty_; }
+      void SetIsDirty(bool val) { dirty_ = val; }
+      RegStorage Partner() { return partner_; }
+      void SetPartner(RegStorage partner) { partner_ = partner; }
+      int SReg() { return s_reg_; }
+      void SetSReg(int s_reg) { s_reg_ = s_reg; }
+      uint64_t DefUseMask() { return def_use_mask_; }
+      void SetDefUseMask(uint64_t def_use_mask) { def_use_mask_ = def_use_mask; }
+      RegisterInfo* Master() { return master_; }
+      void SetMaster(RegisterInfo* master) { master_ = master; }
+      uint32_t StorageMask() { return storage_mask_; }
+      void SetStorageMask(uint32_t storage_mask) { storage_mask_ = storage_mask; }
+      LIR* DefStart() { return def_start_; }
+      void SetDefStart(LIR* def_start) { def_start_ = def_start; }
+      LIR* DefEnd() { return def_end_; }
+      void SetDefEnd(LIR* def_end) { def_end_ = def_end; }
+      void ResetDefBody() { def_start_ = def_end_ = nullptr; }
+
+     private:
+      RegStorage reg_;
+      bool is_temp_;               // Can allocate as temp?
+      bool wide_value_;            // Holds a Dalvik wide value (either itself, or part of a pair).
+      bool live_;                  // Is there an associated SSA name?
+      bool dirty_;                 // If live, is it dirty?
+      RegStorage partner_;         // If wide_value, other reg of pair or self if 64-bit register.
+      int s_reg_;                  // Name of live value.
+      uint64_t def_use_mask_;      // Resources for this element.
+      uint32_t used_storage_;      // 1 bit per 4 bytes of storage. Unused by aliases.
+      RegisterInfo* master_;       // Pointer to controlling storage mask.
+      uint32_t storage_mask_;      // Track allocation of sub-units.
+      LIR *def_start_;             // Starting inst in last def sequence.
+      LIR *def_end_;               // Ending inst in last def sequence.
     };
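A minimal sketch of the allocate/free protocol described in the block comment above, written in
terms of this class's accessors (illustration only; the single chosen is arbitrary):

      RegisterInfo* s4 = GetRegInfo(rs_rF4);  // a single aliased to the low half of a double
      if (!s4->InUse()) {                     // free iff (storage_mask_ & master_->used_storage_) == 0
        s4->MarkInUse();                      // *master_storage |= 0x00000001
      }
      s4->MarkFree();                         // *master_storage &= ~0x00000001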
 
-    struct RegisterPool {
-       int num_core_regs;
-       RegisterInfo *core_regs;
-       int next_core_reg;
-       int num_fp_regs;
-       RegisterInfo *FPRegs;
-       int next_fp_reg;
-     };
+    class RegisterPool {
+     public:
+      RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena, const std::vector<RegStorage>& core_regs,
+                   const std::vector<RegStorage>& sp_regs, const std::vector<RegStorage>& dp_regs,
+                   const std::vector<RegStorage>& reserved_regs,
+                   const std::vector<RegStorage>& core_temps,
+                   const std::vector<RegStorage>& sp_temps,
+                   const std::vector<RegStorage>& dp_temps);
+      ~RegisterPool() {}
+      static void* operator new(size_t size, ArenaAllocator* arena) {
+        return arena->Alloc(size, kArenaAllocRegAlloc);
+      }
+      void ResetNextTemp() {
+        next_core_reg_ = 0;
+        next_sp_reg_ = 0;
+        next_dp_reg_ = 0;
+      }
+      GrowableArray<RegisterInfo*> core_regs_;
+      int next_core_reg_;
+      GrowableArray<RegisterInfo*> sp_regs_;    // Single precision float.
+      int next_sp_reg_;
+      GrowableArray<RegisterInfo*> dp_regs_;    // Double precision float.
+      int next_dp_reg_;
+
+     private:
+      Mir2Lir* const m2l_;
+    };
 
     struct PromotionMap {
       RegLocationType core_location:3;
@@ -339,7 +458,14 @@
       return *reinterpret_cast<const int32_t*>(switch_data);
     }
 
-    RegisterClass oat_reg_class_by_size(OpSize size) {
+    /*
+     * TODO: this is a trace JIT vestige, and its use should be reconsidered.  At the time
+     * it was introduced, it was intended to be a quick best guess of type without having to
+     * take the time to do type analysis.  Currently, though, we have a much better idea of
+     * the types of Dalvik virtual registers.  Instead of using this for a best guess, why not
+     * just use our knowledge of type to select the most appropriate register class?
+     */
+    RegisterClass RegClassBySize(OpSize size) {
       return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte ||
               size == kSignedByte) ? kCoreReg : kAnyReg;
     }
@@ -459,75 +585,63 @@
 
     // Shared by all targets - implemented in ralloc_util.cc
     int GetSRegHi(int lowSreg);
-    bool oat_live_out(int s_reg);
-    int oatSSASrc(MIR* mir, int num);
+    bool LiveOut(int s_reg);
     void SimpleRegAlloc();
     void ResetRegPool();
-    void CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num);
-    void DumpRegPool(RegisterInfo* p, int num_regs);
+    void CompilerInitPool(RegisterInfo* info, RegStorage* regs, int num);
+    void DumpRegPool(GrowableArray<RegisterInfo*>* regs);
     void DumpCoreRegPool();
     void DumpFpRegPool();
+    void DumpRegPools();
     /* Mark a temp register as dead.  Does not affect allocation state. */
-    void Clobber(int reg) {
-      ClobberBody(GetRegInfo(reg));
-    }
     void Clobber(RegStorage reg);
-    void ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg);
+    void ClobberSRegBody(GrowableArray<RegisterInfo*>* regs, int s_reg);
     void ClobberSReg(int s_reg);
     int SRegToPMap(int s_reg);
     void RecordCorePromotion(RegStorage reg, int s_reg);
     RegStorage AllocPreservedCoreReg(int s_reg);
-    void RecordFpPromotion(RegStorage reg, int s_reg);
+    void RecordSinglePromotion(RegStorage reg, int s_reg);
+    void RecordDoublePromotion(RegStorage reg, int s_reg);
     RegStorage AllocPreservedSingle(int s_reg);
-    RegStorage AllocPreservedDouble(int s_reg);
-    RegStorage AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp, bool required);
-    virtual RegStorage AllocTempDouble();
+    virtual RegStorage AllocPreservedDouble(int s_reg);
+    RegStorage AllocTempBody(GrowableArray<RegisterInfo*> &regs, int* next_temp, bool required);
     RegStorage AllocFreeTemp();
     RegStorage AllocTemp();
-    RegStorage AllocTempFloat();
-    RegisterInfo* AllocLiveBody(RegisterInfo* p, int num_regs, int s_reg);
-    RegisterInfo* AllocLive(int s_reg, int reg_class);
-    void FreeTemp(int reg);
+    RegStorage AllocTempSingle();
+    RegStorage AllocTempDouble();
+    void FlushReg(RegStorage reg);
+    void FlushRegWide(RegStorage reg);
+    RegStorage AllocLiveReg(int s_reg, int reg_class, bool wide);
+    RegStorage FindLiveReg(GrowableArray<RegisterInfo*> &regs, int s_reg);
     void FreeTemp(RegStorage reg);
-    RegisterInfo* IsLive(int reg);
     bool IsLive(RegStorage reg);
-    RegisterInfo* IsTemp(int reg);
     bool IsTemp(RegStorage reg);
-    RegisterInfo* IsPromoted(int reg);
     bool IsPromoted(RegStorage reg);
-    bool IsDirty(int reg);
     bool IsDirty(RegStorage reg);
-    void LockTemp(int reg);
     void LockTemp(RegStorage reg);
-    void ResetDef(int reg);
     void ResetDef(RegStorage reg);
-    void NullifyRange(LIR *start, LIR *finish, int s_reg1, int s_reg2);
+    void NullifyRange(RegStorage reg, int s_reg);
     void MarkDef(RegLocation rl, LIR *start, LIR *finish);
     void MarkDefWide(RegLocation rl, LIR *start, LIR *finish);
     RegLocation WideToNarrow(RegLocation rl);
     void ResetDefLoc(RegLocation rl);
-    virtual void ResetDefLocWide(RegLocation rl);
+    void ResetDefLocWide(RegLocation rl);
     void ResetDefTracking();
     void ClobberAllRegs();
     void FlushSpecificReg(RegisterInfo* info);
-    void FlushAllRegsBody(RegisterInfo* info, int num_regs);
     void FlushAllRegs();
     bool RegClassMatches(int reg_class, RegStorage reg);
-    void MarkLive(RegStorage reg, int s_reg);
-    void MarkTemp(int reg);
+    void MarkLive(RegLocation loc);
+    void MarkLiveReg(RegStorage reg, int s_reg);
     void MarkTemp(RegStorage reg);
-    void UnmarkTemp(int reg);
     void UnmarkTemp(RegStorage reg);
-    void MarkPair(int low_reg, int high_reg);
+    void MarkWide(RegStorage reg);
     void MarkClean(RegLocation loc);
     void MarkDirty(RegLocation loc);
-    void MarkInUse(int reg);
     void MarkInUse(RegStorage reg);
-    void CopyRegInfo(int new_reg, int old_reg);
-    void CopyRegInfo(RegStorage new_reg, RegStorage old_reg);
     bool CheckCorePoolSanity();
     RegLocation UpdateLoc(RegLocation loc);
-    virtual RegLocation UpdateLocWide(RegLocation loc);
+    RegLocation UpdateLocWide(RegLocation loc);
     RegLocation UpdateRawLoc(RegLocation loc);
 
     /**
@@ -538,7 +652,7 @@
      * @param update Whether the liveness information should be updated.
      * @return Returns the properly typed temporary in physical register pairs.
      */
-    virtual RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update);
+    RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update);
 
     /**
      * @brief Used to load register location into a typed temporary.
@@ -547,7 +661,7 @@
      * @param update Whether the liveness information should be updated.
      * @return Returns the properly typed temporary in physical register.
      */
-    virtual RegLocation EvalLoc(RegLocation loc, int reg_class, bool update);
+    RegLocation EvalLoc(RegLocation loc, int reg_class, bool update);
 
     void CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs);
     void DumpCounts(const RefCounts* arr, int size, const char* msg);
@@ -556,7 +670,7 @@
     int SRegOffset(int s_reg);
     RegLocation GetReturnWide(bool is_double);
     RegLocation GetReturn(bool is_float);
-    RegisterInfo* GetRegInfo(int reg);
+    RegisterInfo* GetRegInfo(RegStorage reg);
 
     // Shared by all targets - implemented in gen_common.cc.
     void AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume = nullptr);
@@ -868,8 +982,8 @@
     virtual LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                  int scale, OpSize size) = 0;
     virtual LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                     int displacement, RegStorage r_dest, RegStorage r_dest_hi,
-                                     OpSize size, int s_reg) = 0;
+                                     int displacement, RegStorage r_dest, OpSize size,
+                                     int s_reg) = 0;
     virtual LIR* LoadConstantNoClobber(RegStorage r_dest, int value) = 0;
     virtual LIR* LoadConstantWide(RegStorage r_dest, int64_t value) = 0;
     virtual LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
@@ -878,18 +992,13 @@
     virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                   int scale, OpSize size) = 0;
     virtual LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                      int displacement, RegStorage r_src, RegStorage r_src_hi,
-                                      OpSize size, int s_reg) = 0;
+                                      int displacement, RegStorage r_src, OpSize size,
+                                      int s_reg) = 0;
     virtual void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) = 0;
 
     // Required for target - register utilities.
-    virtual bool IsFpReg(int reg) = 0;
-    virtual bool IsFpReg(RegStorage reg) = 0;
-    virtual bool SameRegType(int reg1, int reg2) = 0;
     virtual RegStorage AllocTypedTemp(bool fp_hint, int reg_class) = 0;
     virtual RegStorage AllocTypedTempWide(bool fp_hint, int reg_class) = 0;
-    // TODO: elminate S2d.
-    virtual int S2d(int low_reg, int high_reg) = 0;
     virtual RegStorage TargetReg(SpecialTargetRegister reg) = 0;
     virtual RegStorage GetArgMappingToPhysicalReg(int arg_num) = 0;
     virtual RegLocation GetReturnAlt() = 0;
@@ -898,17 +1007,14 @@
     virtual RegLocation LocCReturnDouble() = 0;
     virtual RegLocation LocCReturnFloat() = 0;
     virtual RegLocation LocCReturnWide() = 0;
-    // TODO: use to reduce/eliminate xx_FPREG() macro use.
-    virtual uint32_t FpRegMask() = 0;
-    virtual uint64_t GetRegMaskCommon(int reg) = 0;
+    virtual uint64_t GetRegMaskCommon(RegStorage reg) = 0;
     virtual void AdjustSpillMask() = 0;
     virtual void ClobberCallerSave() = 0;
-    virtual void FlushReg(RegStorage reg) = 0;
-    virtual void FlushRegWide(RegStorage reg) = 0;
     virtual void FreeCallTemps() = 0;
     virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) = 0;
     virtual void LockCallTemps() = 0;
-    virtual void MarkPreservedSingle(int v_reg, int reg) = 0;
+    virtual void MarkPreservedSingle(int v_reg, RegStorage reg) = 0;
+    virtual void MarkPreservedDouble(int v_reg, RegStorage reg) = 0;
     virtual void CompilerInitializeRegAlloc() = 0;
 
     // Required for target - miscellaneous.
@@ -1199,11 +1305,6 @@
 
   private:
     void ClobberBody(RegisterInfo* p);
-    void ResetDefBody(RegisterInfo* p) {
-      p->def_start = NULL;
-      p->def_end = NULL;
-    }
-
     void SetCurrentDexPc(DexOffset dexpc) {
       current_dalvik_offset_ = dexpc;
     }
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 6455572..a39611e 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -30,7 +30,7 @@
 void Mir2Lir::ResetRegPool() {
   GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
   for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
-    info->in_use = false;
+    info->MarkFree();
   }
   // Reset temp tracking sanity check.
   if (kIsDebugBuild) {
@@ -38,66 +38,124 @@
   }
 }
 
- /*
-  * Set up temp & preserved register pools specialized by target.
-  * Note: num_regs may be zero.
-  */
-void Mir2Lir::CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num) {
-  for (int i = 0; i < num; i++) {
-    uint32_t reg_number = reg_nums[i];
-    regs[i].reg = reg_number;
-    regs[i].in_use = false;
-    regs[i].is_temp = false;
-    regs[i].pair = false;
-    regs[i].live = false;
-    regs[i].dirty = false;
-    regs[i].s_reg = INVALID_SREG;
-    size_t map_size = reginfo_map_.Size();
-    if (reg_number >= map_size) {
-      for (uint32_t i = 0; i < ((reg_number - map_size) + 1); i++) {
-        reginfo_map_.Insert(NULL);
-      }
-    }
-    reginfo_map_.Put(reg_number, &regs[i]);
+Mir2Lir::RegisterInfo::RegisterInfo(RegStorage r, uint64_t mask)
+  : reg_(r), is_temp_(false), wide_value_(false), live_(false),
+    dirty_(false), partner_(r), s_reg_(INVALID_SREG), def_use_mask_(mask), master_(this) {
+  switch (r.StorageSize()) {
+    case 0: storage_mask_ = 0xffffffff; break;
+    case 4: storage_mask_ = 0x00000001; break;
+    case 8: storage_mask_ = 0x00000003; break;
+    case 16: storage_mask_ = 0x0000000f; break;
+    case 32: storage_mask_ = 0x000000ff; break;
+    case 64: storage_mask_ = 0x0000ffff; break;
+    case 128: storage_mask_ = 0xffffffff; break;
   }
+  used_storage_ = r.Valid() ? ~storage_mask_ : storage_mask_;
 }
 
-void Mir2Lir::DumpRegPool(RegisterInfo* p, int num_regs) {
+Mir2Lir::RegisterPool::RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
+                                    const std::vector<RegStorage>& core_regs,
+                                    const std::vector<RegStorage>& sp_regs,
+                                    const std::vector<RegStorage>& dp_regs,
+                                    const std::vector<RegStorage>& reserved_regs,
+                                    const std::vector<RegStorage>& core_temps,
+                                    const std::vector<RegStorage>& sp_temps,
+                                    const std::vector<RegStorage>& dp_temps) :
+    core_regs_(arena, core_regs.size()), next_core_reg_(0), sp_regs_(arena, sp_regs.size()),
+    next_sp_reg_(0), dp_regs_(arena, dp_regs.size()), next_dp_reg_(0), m2l_(m2l)  {
+  // Initialize the fast lookup map.
+  m2l_->reginfo_map_.Reset();
+  m2l_->reginfo_map_.Resize(RegStorage::kMaxRegs);
+  for (unsigned i = 0; i < RegStorage::kMaxRegs; i++) {
+    m2l_->reginfo_map_.Insert(nullptr);
+  }
+
+  // Construct the register pool.
+  for (RegStorage reg : core_regs) {
+    RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
+    m2l_->reginfo_map_.Put(reg.GetReg(), info);
+    core_regs_.Insert(info);
+  }
+  for (RegStorage reg : sp_regs) {
+    RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
+    m2l_->reginfo_map_.Put(reg.GetReg(), info);
+    sp_regs_.Insert(info);
+  }
+  for (RegStorage reg : dp_regs) {
+    RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
+    m2l_->reginfo_map_.Put(reg.GetReg(), info);
+    dp_regs_.Insert(info);
+  }
+
+  // Keep special registers from being allocated.
+  for (RegStorage reg : reserved_regs) {
+    m2l_->MarkInUse(reg);
+  }
+
+  // Mark temp regs - all others not in use can be used for promotion
+  for (RegStorage reg : core_temps) {
+    m2l_->MarkTemp(reg);
+  }
+  for (RegStorage reg : sp_temps) {
+    m2l_->MarkTemp(reg);
+  }
+  for (RegStorage reg : dp_temps) {
+    m2l_->MarkTemp(reg);
+  }
+
+  // Add an entry for InvalidReg with zero'd mask.
+  RegisterInfo* invalid_reg = new (arena) RegisterInfo(RegStorage::InvalidReg(), 0);
+  m2l_->reginfo_map_.Put(RegStorage::InvalidReg().GetReg(), invalid_reg);
+}
+
+void Mir2Lir::DumpRegPool(GrowableArray<RegisterInfo*>* regs) {
   LOG(INFO) << "================================================";
-  for (int i = 0; i < num_regs; i++) {
+  GrowableArray<RegisterInfo*>::Iterator it(regs);
+  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
     LOG(INFO) << StringPrintf(
-        "R[%d]: T:%d, U:%d, P:%d, p:%d, LV:%d, D:%d, SR:%d",
-        p[i].reg, p[i].is_temp, p[i].in_use, p[i].pair, p[i].partner,
-        p[i].live, p[i].dirty, p[i].s_reg);
+        "R[%d:%d:%c]: T:%d, U:%d, W:%d, p:%d, LV:%d, D:%d, SR:%d, DEF:%d",
+        info->GetReg().GetReg(), info->GetReg().GetRegNum(), info->GetReg().IsFloat() ?  'f' : 'c',
+        info->IsTemp(), info->InUse(), info->IsWide(), info->Partner().GetReg(), info->IsLive(),
+        info->IsDirty(), info->SReg(), info->DefStart() != nullptr);
   }
   LOG(INFO) << "================================================";
 }
 
 void Mir2Lir::DumpCoreRegPool() {
-  DumpRegPool(reg_pool_->core_regs, reg_pool_->num_core_regs);
+  DumpRegPool(&reg_pool_->core_regs_);
 }
 
 void Mir2Lir::DumpFpRegPool() {
-  DumpRegPool(reg_pool_->FPRegs, reg_pool_->num_fp_regs);
+  DumpRegPool(&reg_pool_->sp_regs_);
+  DumpRegPool(&reg_pool_->dp_regs_);
+}
+
+void Mir2Lir::DumpRegPools() {
+  LOG(INFO) << "Core registers";
+  DumpCoreRegPool();
+  LOG(INFO) << "FP registers";
+  DumpFpRegPool();
 }
 
 void Mir2Lir::Clobber(RegStorage reg) {
   if (reg.IsPair()) {
-    ClobberBody(GetRegInfo(reg.GetLowReg()));
-    ClobberBody(GetRegInfo(reg.GetHighReg()));
+    ClobberBody(GetRegInfo(reg.GetLow()));
+    ClobberBody(GetRegInfo(reg.GetHigh()));
   } else {
-    ClobberBody(GetRegInfo(reg.GetReg()));
+    ClobberBody(GetRegInfo(reg));
   }
 }
 
-void Mir2Lir::ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg) {
-  for (int i = 0; i< num_regs; i++) {
-    if (p[i].s_reg == s_reg) {
-      if (p[i].is_temp) {
-        p[i].live = false;
+void Mir2Lir::ClobberSRegBody(GrowableArray<RegisterInfo*>* regs, int s_reg) {
+  GrowableArray<RegisterInfo*>::Iterator it(regs);
+  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+    if ((info->SReg() == s_reg)  ||
+        (info->IsWide() && (GetRegInfo(info->Partner())->SReg() == s_reg))) {
+      // NOTE: a single s_reg may appear multiple times, so we can't short-circuit.
+      if (info->IsTemp()) {
+        info->SetIsLive(false);
       }
-      p[i].def_start = NULL;
-      p[i].def_end = NULL;
+      info->ResetDefBody();
     }
   }
 }
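
For context, a self-contained sketch of the aliasing idea behind storage_mask_ and used_storage_ in the RegisterInfo constructor above: each view of a physical register claims one bit per 32-bit unit of the widest register it aliases, so checking whether a 64-bit double conflicts with one of the 32-bit singles it overlays reduces to a mask intersection. UnitMask, RegView and Overlaps are made-up names for illustration, not part of the patch:

#include <cassert>
#include <cstdint>

// One bit per 32-bit unit, mirroring the switch on StorageSize() above.
static uint32_t UnitMask(int size_in_bytes) {
  switch (size_in_bytes) {
    case 4:  return 0x00000001;  // a single claims one unit
    case 8:  return 0x00000003;  // a double claims two units
    case 16: return 0x0000000f;
    default: return 0xffffffff;
  }
}

// A register view: where it starts inside its widest alias, and its mask.
struct RegView {
  int start_unit;
  uint32_t mask;
};

static bool Overlaps(const RegView& a, const RegView& b) {
  return ((a.mask << a.start_unit) & (b.mask << b.start_unit)) != 0;
}

int main() {
  RegView d0 = {0, UnitMask(8)};  // 64-bit double
  RegView s0 = {0, UnitMask(4)};  // low single it overlays
  RegView s1 = {1, UnitMask(4)};  // high single it overlays
  RegView s2 = {2, UnitMask(4)};  // unrelated single
  assert(Overlaps(d0, s0));
  assert(Overlaps(d0, s1));
  assert(!Overlaps(d0, s2));
  return 0;
}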
@@ -114,14 +172,17 @@
  * addressed.
  */
 void Mir2Lir::ClobberSReg(int s_reg) {
-  /* Reset live temp tracking sanity checker */
-  if (kIsDebugBuild) {
-    if (s_reg == live_sreg_) {
-      live_sreg_ = INVALID_SREG;
+  if (s_reg != INVALID_SREG) {
+    /* Reset live temp tracking sanity checker */
+    if (kIsDebugBuild) {
+      if (s_reg == live_sreg_) {
+        live_sreg_ = INVALID_SREG;
+      }
     }
+    ClobberSRegBody(&reg_pool_->core_regs_, s_reg);
+    ClobberSRegBody(&reg_pool_->sp_regs_, s_reg);
+    ClobberSRegBody(&reg_pool_->dp_regs_, s_reg);
   }
-  ClobberSRegBody(reg_pool_->core_regs, reg_pool_->num_core_regs, s_reg);
-  ClobberSRegBody(reg_pool_->FPRegs, reg_pool_->num_fp_regs, s_reg);
 }
 
 /*
@@ -153,11 +214,12 @@
   }
 }
 
+// TODO: refactor following Alloc/Record routines - much commonality.
 void Mir2Lir::RecordCorePromotion(RegStorage reg, int s_reg) {
   int p_map_idx = SRegToPMap(s_reg);
   int v_reg = mir_graph_->SRegToVReg(s_reg);
-  int reg_num = reg.GetReg();
-  GetRegInfo(reg_num)->in_use = true;
+  int reg_num = reg.GetRegNum();
+  GetRegInfo(reg)->MarkInUse();
   core_spill_mask_ |= (1 << reg_num);
   // Include reg for later sort
   core_vmap_table_.push_back(reg_num << VREG_NUM_WIDTH | (v_reg & ((1 << VREG_NUM_WIDTH) - 1)));
@@ -166,13 +228,13 @@
   promotion_map_[p_map_idx].core_reg = reg_num;
 }
 
-/* Reserve a callee-save register.  Return -1 if none available */
+/* Reserve a callee-save register.  Return InvalidReg if none available */
 RegStorage Mir2Lir::AllocPreservedCoreReg(int s_reg) {
   RegStorage res;
-  RegisterInfo* core_regs = reg_pool_->core_regs;
-  for (int i = 0; i < reg_pool_->num_core_regs; i++) {
-    if (!core_regs[i].is_temp && !core_regs[i].in_use) {
-      res = RegStorage::Solo32(core_regs[i].reg);
+  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->core_regs_);
+  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+    if (!info->IsTemp() && !info->InUse()) {
+      res = info->GetReg();
       RecordCorePromotion(res, s_reg);
       break;
     }
@@ -180,100 +242,66 @@
   return res;
 }
 
-void Mir2Lir::RecordFpPromotion(RegStorage reg, int s_reg) {
+void Mir2Lir::RecordSinglePromotion(RegStorage reg, int s_reg) {
   int p_map_idx = SRegToPMap(s_reg);
   int v_reg = mir_graph_->SRegToVReg(s_reg);
-  int reg_num = reg.GetReg();
-  GetRegInfo(reg_num)->in_use = true;
-  MarkPreservedSingle(v_reg, reg_num);
+  GetRegInfo(reg)->MarkInUse();
+  MarkPreservedSingle(v_reg, reg);
   promotion_map_[p_map_idx].fp_location = kLocPhysReg;
-  promotion_map_[p_map_idx].FpReg = reg_num;
+  promotion_map_[p_map_idx].FpReg = reg.GetReg();
 }
 
-// Reserve a callee-save fp single register.
+// Reserve a callee-save sp single register.
 RegStorage Mir2Lir::AllocPreservedSingle(int s_reg) {
   RegStorage res;
-  RegisterInfo* FPRegs = reg_pool_->FPRegs;
-  for (int i = 0; i < reg_pool_->num_fp_regs; i++) {
-    if (!FPRegs[i].is_temp && !FPRegs[i].in_use) {
-      res = RegStorage::Solo32(FPRegs[i].reg);
-      RecordFpPromotion(res, s_reg);
+  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
+  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+    if (!info->IsTemp() && !info->InUse()) {
+      res = info->GetReg();
+      RecordSinglePromotion(res, s_reg);
       break;
     }
   }
   return res;
 }
 
-/*
- * Somewhat messy code here.  We want to allocate a pair of contiguous
- * physical single-precision floating point registers starting with
- * an even numbered reg.  It is possible that the paired s_reg (s_reg+1)
- * has already been allocated - try to fit if possible.  Fail to
- * allocate if we can't meet the requirements for the pair of
- * s_reg<=sX[even] & (s_reg+1)<= sX+1.
- */
-// TODO: needs rewrite to support non-backed 64-bit float regs.
+void Mir2Lir::RecordDoublePromotion(RegStorage reg, int s_reg) {
+  int p_map_idx = SRegToPMap(s_reg);
+  int v_reg = mir_graph_->SRegToVReg(s_reg);
+  GetRegInfo(reg)->MarkInUse();
+  MarkPreservedDouble(v_reg, reg);
+  promotion_map_[p_map_idx].fp_location = kLocPhysReg;
+  promotion_map_[p_map_idx].FpReg = reg.GetReg();
+}
+
+// Reserve a callee-save dp solo register.
 RegStorage Mir2Lir::AllocPreservedDouble(int s_reg) {
   RegStorage res;
-  int v_reg = mir_graph_->SRegToVReg(s_reg);
-  int p_map_idx = SRegToPMap(s_reg);
-  if (promotion_map_[p_map_idx+1].fp_location == kLocPhysReg) {
-    // Upper reg is already allocated.  Can we fit?
-    int high_reg = promotion_map_[p_map_idx+1].FpReg;
-    if ((high_reg & 1) == 0) {
-      // High reg is even - fail.
-      return res;  // Invalid.
+  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->dp_regs_);
+  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+    if (!info->IsTemp() && !info->InUse()) {
+      res = info->GetReg();
+      RecordDoublePromotion(res, s_reg);
+      break;
     }
-    // Is the low reg of the pair free?
-    RegisterInfo* p = GetRegInfo(high_reg-1);
-    if (p->in_use || p->is_temp) {
-      // Already allocated or not preserved - fail.
-      return res;  // Invalid.
-    }
-    // OK - good to go.
-    res = RegStorage(RegStorage::k64BitPair, p->reg, p->reg + 1);
-    p->in_use = true;
-    DCHECK_EQ((res.GetReg() & 1), 0);
-    MarkPreservedSingle(v_reg, res.GetReg());
-  } else {
-    RegisterInfo* FPRegs = reg_pool_->FPRegs;
-    for (int i = 0; i < reg_pool_->num_fp_regs; i++) {
-      if (!FPRegs[i].is_temp && !FPRegs[i].in_use &&
-        ((FPRegs[i].reg & 0x1) == 0x0) &&
-        !FPRegs[i+1].is_temp && !FPRegs[i+1].in_use &&
-        ((FPRegs[i+1].reg & 0x1) == 0x1) &&
-        (FPRegs[i].reg + 1) == FPRegs[i+1].reg) {
-        res = RegStorage(RegStorage::k64BitPair, FPRegs[i].reg, FPRegs[i].reg+1);
-        FPRegs[i].in_use = true;
-        MarkPreservedSingle(v_reg, res.GetLowReg());
-        FPRegs[i+1].in_use = true;
-        DCHECK_EQ(res.GetLowReg() + 1, FPRegs[i+1].reg);
-        MarkPreservedSingle(v_reg+1, res.GetLowReg() + 1);
-        break;
-      }
-    }
-  }
-  if (res.Valid()) {
-    promotion_map_[p_map_idx].fp_location = kLocPhysReg;
-    promotion_map_[p_map_idx].FpReg = res.GetLowReg();
-    promotion_map_[p_map_idx+1].fp_location = kLocPhysReg;
-    promotion_map_[p_map_idx+1].FpReg = res.GetLowReg() + 1;
   }
   return res;
 }
 
-RegStorage Mir2Lir::AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp,
-                                  bool required) {
+
+RegStorage Mir2Lir::AllocTempBody(GrowableArray<RegisterInfo*> &regs, int* next_temp, bool required) {
+  int num_regs = regs.Size();
   int next = *next_temp;
   for (int i = 0; i< num_regs; i++) {
     if (next >= num_regs)
       next = 0;
-    if (p[next].is_temp && !p[next].in_use && !p[next].live) {
-      Clobber(p[next].reg);
-      p[next].in_use = true;
-      p[next].pair = false;
+    RegisterInfo* info = regs.Get(next);
+    if (info->IsTemp() && !info->InUse() && !info->IsLive()) {
+      Clobber(info->GetReg());
+      info->MarkInUse();
+      info->SetIsWide(false);
       *next_temp = next + 1;
-      return RegStorage::Solo32(p[next].reg);
+      return info->GetReg();
     }
     next++;
   }
@@ -281,201 +309,166 @@
   for (int i = 0; i< num_regs; i++) {
     if (next >= num_regs)
       next = 0;
-    if (p[next].is_temp && !p[next].in_use) {
-      Clobber(p[next].reg);
-      p[next].in_use = true;
-      p[next].pair = false;
+    RegisterInfo* info = regs.Get(next);
+    if (info->IsTemp() && !info->InUse()) {
+      Clobber(info->GetReg());
+      info->MarkInUse();
+      info->SetIsWide(false);
       *next_temp = next + 1;
-      return RegStorage::Solo32(p[next].reg);
+      return info->GetReg();
     }
     next++;
   }
   if (required) {
     CodegenDump();
-    DumpRegPool(reg_pool_->core_regs,
-          reg_pool_->num_core_regs);
+    DumpRegPools();
     LOG(FATAL) << "No free temp registers";
   }
   return RegStorage::InvalidReg();  // No register available
 }
 
-// REDO: too many assumptions.
-// Virtualize - this is target dependent.
-RegStorage Mir2Lir::AllocTempDouble() {
-  RegisterInfo* p = reg_pool_->FPRegs;
-  int num_regs = reg_pool_->num_fp_regs;
-  /* Start looking at an even reg */
-  int next = reg_pool_->next_fp_reg & ~0x1;
-
-  // First try to avoid allocating live registers
-  for (int i = 0; i < num_regs; i+=2) {
-    if (next >= num_regs)
-      next = 0;
-    if ((p[next].is_temp && !p[next].in_use && !p[next].live) &&
-      (p[next+1].is_temp && !p[next+1].in_use && !p[next+1].live)) {
-      Clobber(p[next].reg);
-      Clobber(p[next+1].reg);
-      p[next].in_use = true;
-      p[next+1].in_use = true;
-      DCHECK_EQ((p[next].reg+1), p[next+1].reg);
-      DCHECK_EQ((p[next].reg & 0x1), 0);
-      reg_pool_->next_fp_reg = next + 2;
-      if (reg_pool_->next_fp_reg >= num_regs) {
-        reg_pool_->next_fp_reg = 0;
-      }
-      // FIXME: should return k64BitSolo.
-      return RegStorage(RegStorage::k64BitPair, p[next].reg, p[next+1].reg);
-    }
-    next += 2;
-  }
-  next = reg_pool_->next_fp_reg & ~0x1;
-
-  // No choice - find a pair and kill it.
-  for (int i = 0; i < num_regs; i+=2) {
-    if (next >= num_regs)
-      next = 0;
-    if (p[next].is_temp && !p[next].in_use && p[next+1].is_temp &&
-      !p[next+1].in_use) {
-      Clobber(p[next].reg);
-      Clobber(p[next+1].reg);
-      p[next].in_use = true;
-      p[next+1].in_use = true;
-      DCHECK_EQ((p[next].reg+1), p[next+1].reg);
-      DCHECK_EQ((p[next].reg & 0x1), 0);
-      reg_pool_->next_fp_reg = next + 2;
-      if (reg_pool_->next_fp_reg >= num_regs) {
-        reg_pool_->next_fp_reg = 0;
-      }
-      return RegStorage(RegStorage::k64BitPair, p[next].reg, p[next+1].reg);
-    }
-    next += 2;
-  }
-  LOG(FATAL) << "No free temp registers (pair)";
-  return RegStorage::InvalidReg();
-}
-
 /* Return a temp if one is available, -1 otherwise */
 RegStorage Mir2Lir::AllocFreeTemp() {
-  return AllocTempBody(reg_pool_->core_regs,
-             reg_pool_->num_core_regs,
-             &reg_pool_->next_core_reg, false);
+  return AllocTempBody(reg_pool_->core_regs_, &reg_pool_->next_core_reg_, false);
 }
 
 RegStorage Mir2Lir::AllocTemp() {
-  return AllocTempBody(reg_pool_->core_regs,
-             reg_pool_->num_core_regs,
-             &reg_pool_->next_core_reg, true);
+  return AllocTempBody(reg_pool_->core_regs_, &reg_pool_->next_core_reg_, true);
 }
 
-RegStorage Mir2Lir::AllocTempFloat() {
-  return AllocTempBody(reg_pool_->FPRegs,
-             reg_pool_->num_fp_regs,
-             &reg_pool_->next_fp_reg, true);
+RegStorage Mir2Lir::AllocTempSingle() {
+  RegStorage res = AllocTempBody(reg_pool_->sp_regs_, &reg_pool_->next_sp_reg_, true);
+  DCHECK(res.IsSingle()) << "Reg: 0x" << std::hex << res.GetRawBits();
+  return res;
 }
 
-Mir2Lir::RegisterInfo* Mir2Lir::AllocLiveBody(RegisterInfo* p, int num_regs, int s_reg) {
-  if (s_reg == -1)
-    return NULL;
-  for (int i = 0; i < num_regs; i++) {
-    if ((p[i].s_reg == s_reg) && p[i].live) {
-      if (p[i].is_temp)
-        p[i].in_use = true;
-      return &p[i];
+RegStorage Mir2Lir::AllocTempDouble() {
+  RegStorage res = AllocTempBody(reg_pool_->dp_regs_, &reg_pool_->next_dp_reg_, true);
+  DCHECK(res.IsDouble()) << "Reg: 0x" << std::hex << res.GetRawBits();
+  return res;
+}
+
+RegStorage Mir2Lir::FindLiveReg(GrowableArray<RegisterInfo*> &regs, int s_reg) {
+  RegStorage res;
+  GrowableArray<RegisterInfo*>::Iterator it(&regs);
+  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+    if ((info->SReg() == s_reg) && info->IsLive()) {
+      res = info->GetReg();
+      break;
     }
   }
-  return NULL;
-}
-
-Mir2Lir::RegisterInfo* Mir2Lir::AllocLive(int s_reg, int reg_class) {
-  RegisterInfo* res = NULL;
-  switch (reg_class) {
-    case kAnyReg:
-      res = AllocLiveBody(reg_pool_->FPRegs,
-                reg_pool_->num_fp_regs, s_reg);
-      if (res)
-        break;
-      /* Intentional fallthrough */
-    case kCoreReg:
-      res = AllocLiveBody(reg_pool_->core_regs,
-                reg_pool_->num_core_regs, s_reg);
-      break;
-    case kFPReg:
-      res = AllocLiveBody(reg_pool_->FPRegs,
-                reg_pool_->num_fp_regs, s_reg);
-      break;
-    default:
-      LOG(FATAL) << "Invalid register type";
-  }
   return res;
 }
 
-void Mir2Lir::FreeTemp(int reg) {
-  RegisterInfo* p = GetRegInfo(reg);
-  if (p->is_temp) {
-    p->in_use = false;
+RegStorage Mir2Lir::AllocLiveReg(int s_reg, int reg_class, bool wide) {
+  RegStorage reg;
+  // TODO: might be worth a sanity check here to verify at most 1 live reg per s_reg.
+  if ((reg_class == kAnyReg) || (reg_class == kFPReg)) {
+    reg = FindLiveReg(wide ? reg_pool_->dp_regs_ : reg_pool_->sp_regs_, s_reg);
   }
-  p->pair = false;
+  if (!reg.Valid() && (reg_class != kFPReg)) {
+    reg = FindLiveReg(reg_pool_->core_regs_, s_reg);
+  }
+  if (reg.Valid()) {
+    if (wide && reg.Is32Bit() && !reg.IsFloat()) {
+      // Only allow reg pairs for Core.
+      RegStorage high_reg = FindLiveReg(reg_pool_->core_regs_, s_reg + 1);
+      if (high_reg.Valid()) {
+        RegisterInfo* info_lo = GetRegInfo(reg);
+        RegisterInfo* info_hi = GetRegInfo(high_reg);
+        if (info_lo->IsTemp()) {
+          info_lo->MarkInUse();
+        }
+        if (info_hi->IsTemp()) {
+          info_hi->MarkInUse();
+        }
+        reg = RegStorage::MakeRegPair(reg, high_reg);
+        MarkWide(reg);
+      } else {
+        // Only half available - clobber.
+        Clobber(reg);
+        reg = RegStorage::InvalidReg();
+      }
+    }
+    if (reg.Valid() && !reg.IsPair()) {
+      RegisterInfo* info = GetRegInfo(reg);
+      if (info->IsTemp()) {
+        info->MarkInUse();
+      }
+    }
+    if (reg.Valid() && (wide != GetRegInfo(reg)->IsWide())) {
+      // Width mismatch - don't try to reuse.
+      Clobber(reg);
+      reg = RegStorage::InvalidReg();
+    }
+  }
+  return reg;
 }
 
 void Mir2Lir::FreeTemp(RegStorage reg) {
   if (reg.IsPair()) {
-    FreeTemp(reg.GetLowReg());
-    FreeTemp(reg.GetHighReg());
+    FreeTemp(reg.GetLow());
+    FreeTemp(reg.GetHigh());
   } else {
-    FreeTemp(reg.GetReg());
+    RegisterInfo* p = GetRegInfo(reg);
+    if (p->IsTemp()) {
+      p->MarkFree();
+      p->SetIsWide(false);
+      p->SetPartner(reg);
+    }
   }
 }
 
-Mir2Lir::RegisterInfo* Mir2Lir::IsLive(int reg) {
-  RegisterInfo* p = GetRegInfo(reg);
-  return p->live ? p : NULL;
-}
-
 bool Mir2Lir::IsLive(RegStorage reg) {
+  bool res;
   if (reg.IsPair()) {
-    return IsLive(reg.GetLowReg()) || IsLive(reg.GetHighReg());
+    RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
+    RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
+    res = p_lo->IsLive() || p_hi->IsLive();
   } else {
-    return IsLive(reg.GetReg());
+    RegisterInfo* p = GetRegInfo(reg);
+    res = p->IsLive();
   }
-}
-
-Mir2Lir::RegisterInfo* Mir2Lir::IsTemp(int reg) {
-  RegisterInfo* p = GetRegInfo(reg);
-  return (p->is_temp) ? p : NULL;
+  return res;
 }
 
 bool Mir2Lir::IsTemp(RegStorage reg) {
+  bool res;
   if (reg.IsPair()) {
-    return IsTemp(reg.GetLowReg()) || IsTemp(reg.GetHighReg());
+    RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
+    RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
+    res = p_lo->IsTemp() || p_hi->IsTemp();
   } else {
-    return IsTemp(reg.GetReg());
+    RegisterInfo* p = GetRegInfo(reg);
+    res = p->IsTemp();
   }
-}
-
-Mir2Lir::RegisterInfo* Mir2Lir::IsPromoted(int reg) {
-  RegisterInfo* p = GetRegInfo(reg);
-  return (p->is_temp) ? NULL : p;
+  return res;
 }
 
 bool Mir2Lir::IsPromoted(RegStorage reg) {
+  bool res;
   if (reg.IsPair()) {
-    return IsPromoted(reg.GetLowReg()) || IsPromoted(reg.GetHighReg());
+    RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
+    RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
+    res = !p_lo->IsTemp() || !p_hi->IsTemp();
   } else {
-    return IsPromoted(reg.GetReg());
+    RegisterInfo* p = GetRegInfo(reg);
+    res = !p->IsTemp();
   }
-}
-
-bool Mir2Lir::IsDirty(int reg) {
-  RegisterInfo* p = GetRegInfo(reg);
-  return p->dirty;
+  return res;
 }
 
 bool Mir2Lir::IsDirty(RegStorage reg) {
+  bool res;
   if (reg.IsPair()) {
-    return IsDirty(reg.GetLowReg()) || IsDirty(reg.GetHighReg());
+    RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
+    RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
+    res = p_lo->IsDirty() || p_hi->IsDirty();
   } else {
-    return IsDirty(reg.GetReg());
+    RegisterInfo* p = GetRegInfo(reg);
+    res = p->IsDirty();
   }
+  return res;
 }
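
A self-contained sketch of the two-pass round-robin search AllocTempBody performs above: first prefer a temp that is free and not caching a live value, then fall back to any free temp and sacrifice whatever it was holding. ToyTemp and AllocRoundRobin are illustrative stand-ins, not the compiler's types:

#include <cassert>
#include <vector>

struct ToyTemp {
  bool is_temp;
  bool in_use;
  bool live;
};

// Two passes over the pool, resuming at *next_temp each call: pass 0 only
// takes temps that are free and hold no live value; pass 1 takes any free
// temp, clobbering whatever value it was caching.
static int AllocRoundRobin(std::vector<ToyTemp>& regs, int* next_temp) {
  const int num_regs = static_cast<int>(regs.size());
  for (int pass = 0; pass < 2; ++pass) {
    int next = *next_temp;
    for (int i = 0; i < num_regs; ++i, ++next) {
      if (next >= num_regs) next = 0;
      ToyTemp& t = regs[next];
      if (t.is_temp && !t.in_use && (pass == 1 || !t.live)) {
        t.in_use = true;
        t.live = false;
        *next_temp = next + 1;
        return next;
      }
    }
  }
  return -1;  // caller decides whether this is fatal
}

int main() {
  std::vector<ToyTemp> pool = {{true, true, false},    // busy
                               {true, false, true},    // free but live
                               {true, false, false}};  // free and dead
  int next = 0;
  int first = AllocRoundRobin(pool, &next);
  int second = AllocRoundRobin(pool, &next);
  assert(first == 2);   // the dead temp is preferred
  assert(second == 1);  // then the live one gets clobbered
  return 0;
}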
 
 /*
@@ -483,35 +476,44 @@
  * register.  No check is made to see if the register was previously
  * allocated.  Use with caution.
  */
-void Mir2Lir::LockTemp(int reg) {
-  RegisterInfo* p = GetRegInfo(reg);
-  DCHECK(p->is_temp);
-  p->in_use = true;
-  p->live = false;
-}
-
 void Mir2Lir::LockTemp(RegStorage reg) {
-  DCHECK(!reg.IsPair());
-  LockTemp(reg.GetReg());
-}
-
-void Mir2Lir::ResetDef(int reg) {
-  ResetDefBody(GetRegInfo(reg));
+  DCHECK(IsTemp(reg));
+  if (reg.IsPair()) {
+    RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
+    RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
+    p_lo->MarkInUse();
+    p_lo->SetIsLive(false);
+    p_hi->MarkInUse();
+    p_hi->SetIsLive(false);
+  } else {
+    RegisterInfo* p = GetRegInfo(reg);
+    p->MarkInUse();
+    p->SetIsLive(false);
+  }
 }
 
 void Mir2Lir::ResetDef(RegStorage reg) {
-  DCHECK(!reg.IsPair());  // Is this done?  If so, do on both low and high.
-  ResetDef(reg.GetReg());
+  if (reg.IsPair()) {
+    GetRegInfo(reg.GetLow())->ResetDefBody();
+    GetRegInfo(reg.GetHigh())->ResetDefBody();
+  } else {
+    GetRegInfo(reg)->ResetDefBody();
+  }
 }
 
-void Mir2Lir::NullifyRange(LIR *start, LIR *finish, int s_reg1, int s_reg2) {
-  if (start && finish) {
-    LIR *p;
-    DCHECK_EQ(s_reg1, s_reg2);
-    for (p = start; ; p = p->next) {
+void Mir2Lir::NullifyRange(RegStorage reg, int s_reg) {
+  RegisterInfo* info = nullptr;
+  RegStorage rs = reg.IsPair() ? reg.GetLow() : reg;
+  if (IsTemp(rs)) {
+    info = GetRegInfo(reg);
+  }
+  if ((info != nullptr) && (info->DefStart() != nullptr) && (info->DefEnd() != nullptr)) {
+    DCHECK_EQ(info->SReg(), s_reg);  // Make sure we're on the same page.
+    for (LIR* p = info->DefStart();; p = p->next) {
       NopLIR(p);
-      if (p == finish)
+      if (p == info->DefEnd()) {
         break;
+      }
     }
   }
 }
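
A toy illustration of what NullifyRange does above: when a tracked definition range is discarded, every LIR from DefStart through DefEnd is rewritten into a no-op so the now-redundant defining sequence drops out of the generated code. ToyLIR is a stripped-down stand-in for the real LIR node:

#include <cassert>

struct ToyLIR {
  bool is_nop = false;
  ToyLIR* next = nullptr;
};

// Nop every node from start through finish, inclusive.
static void Nullify(ToyLIR* start, ToyLIR* finish) {
  if (start == nullptr || finish == nullptr) {
    return;
  }
  for (ToyLIR* p = start; ; p = p->next) {
    p->is_nop = true;
    if (p == finish) {
      break;
    }
  }
}

int main() {
  ToyLIR a, b, c;
  a.next = &b;
  b.next = &c;
  Nullify(&a, &b);
  assert(a.is_nop && b.is_nop && !c.is_nop);
  return 0;
}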
@@ -525,9 +527,9 @@
   DCHECK(!rl.wide);
   DCHECK(start && start->next);
   DCHECK(finish);
-  RegisterInfo* p = GetRegInfo(rl.reg.GetReg());
-  p->def_start = start->next;
-  p->def_end = finish;
+  RegisterInfo* p = GetRegInfo(rl.reg);
+  p->SetDefStart(start->next);
+  p->SetDefEnd(finish);
 }
 
 /*
@@ -539,28 +541,33 @@
   DCHECK(rl.wide);
   DCHECK(start && start->next);
   DCHECK(finish);
-  RegisterInfo* p = GetRegInfo(rl.reg.GetLowReg());
-  ResetDef(rl.reg.GetHighReg());  // Only track low of pair
-  p->def_start = start->next;
-  p->def_end = finish;
+  RegisterInfo* p;
+  if (rl.reg.IsPair()) {
+    p = GetRegInfo(rl.reg.GetLow());
+    ResetDef(rl.reg.GetHigh());  // Only track low of pair
+  } else {
+    p = GetRegInfo(rl.reg);
+  }
+  p->SetDefStart(start->next);
+  p->SetDefEnd(finish);
 }
 
 RegLocation Mir2Lir::WideToNarrow(RegLocation rl) {
   DCHECK(rl.wide);
   if (rl.location == kLocPhysReg) {
-    RegisterInfo* info_lo = GetRegInfo(rl.reg.GetLowReg());
-    RegisterInfo* info_hi = GetRegInfo(rl.reg.GetHighReg());
-    if (info_lo->is_temp) {
-      info_lo->pair = false;
-      info_lo->def_start = NULL;
-      info_lo->def_end = NULL;
+    if (rl.reg.IsPair()) {
+      RegisterInfo* info_lo = GetRegInfo(rl.reg.GetLow());
+      RegisterInfo* info_hi = GetRegInfo(rl.reg.GetHigh());
+      if (info_lo->IsTemp()) {
+        info_lo->SetIsWide(false);
+        info_lo->ResetDefBody();
+      }
+      if (info_hi->IsTemp()) {
+        info_hi->SetIsWide(false);
+        info_hi->ResetDefBody();
+      }
+      rl.reg = rl.reg.GetLow();
     }
-    if (info_hi->is_temp) {
-      info_hi->pair = false;
-      info_hi->def_start = NULL;
-      info_hi->def_end = NULL;
-    }
-    rl.reg = RegStorage::Solo32(rl.reg.GetLowReg());
   }
   rl.wide = false;
   return rl;
@@ -568,220 +575,244 @@
 
 void Mir2Lir::ResetDefLoc(RegLocation rl) {
   DCHECK(!rl.wide);
-  RegisterInfo* p = IsTemp(rl.reg.GetReg());
-  if (p && !(cu_->disable_opt & (1 << kSuppressLoads))) {
-    DCHECK(!p->pair);
-    NullifyRange(p->def_start, p->def_end, p->s_reg, rl.s_reg_low);
+  if (IsTemp(rl.reg) && !(cu_->disable_opt & (1 << kSuppressLoads))) {
+    NullifyRange(rl.reg, rl.s_reg_low);
   }
-  ResetDef(rl.reg.GetReg());
+  ResetDef(rl.reg);
 }
 
 void Mir2Lir::ResetDefLocWide(RegLocation rl) {
   DCHECK(rl.wide);
-  RegisterInfo* p_low = IsTemp(rl.reg.GetLowReg());
-  RegisterInfo* p_high = IsTemp(rl.reg.GetHighReg());
-  if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
-    DCHECK(p_low->pair);
-    NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
+  // If pair, only track low reg of pair.
+  RegStorage rs = rl.reg.IsPair() ? rl.reg.GetLow() : rl.reg;
+  if (IsTemp(rs) && !(cu_->disable_opt & (1 << kSuppressLoads))) {
+    NullifyRange(rs, rl.s_reg_low);
   }
-  if (p_high && !(cu_->disable_opt & (1 << kSuppressLoads))) {
-    DCHECK(p_high->pair);
-  }
-  ResetDef(rl.reg.GetLowReg());
-  ResetDef(rl.reg.GetHighReg());
+  ResetDef(rs);
 }
 
 void Mir2Lir::ResetDefTracking() {
-  for (int i = 0; i< reg_pool_->num_core_regs; i++) {
-    ResetDefBody(&reg_pool_->core_regs[i]);
+  GrowableArray<RegisterInfo*>::Iterator core_it(&reg_pool_->core_regs_);
+  for (RegisterInfo* info = core_it.Next(); info != nullptr; info = core_it.Next()) {
+    info->ResetDefBody();
   }
-  for (int i = 0; i< reg_pool_->num_fp_regs; i++) {
-    ResetDefBody(&reg_pool_->FPRegs[i]);
+  GrowableArray<RegisterInfo*>::Iterator sp_it(&reg_pool_->sp_regs_);
+  for (RegisterInfo* info = sp_it.Next(); info != nullptr; info = sp_it.Next()) {
+    info->ResetDefBody();
+  }
+  GrowableArray<RegisterInfo*>::Iterator dp_it(&reg_pool_->dp_regs_);
+  for (RegisterInfo* info = dp_it.Next(); info != nullptr; info = dp_it.Next()) {
+    info->ResetDefBody();
   }
 }
 
 void Mir2Lir::ClobberAllRegs() {
   GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
   for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
-    info->live = false;
-    info->s_reg = INVALID_SREG;
-    info->def_start = NULL;
-    info->def_end = NULL;
-    info->pair = false;
+    info->SetIsLive(false);
+    info->SetSReg(INVALID_SREG);
+    info->ResetDefBody();
+    info->SetIsWide(false);
+  }
+}
+
+void Mir2Lir::FlushRegWide(RegStorage reg) {
+  if (reg.IsPair()) {
+    RegisterInfo* info1 = GetRegInfo(reg.GetLow());
+    RegisterInfo* info2 = GetRegInfo(reg.GetHigh());
+    DCHECK(info1 && info2 && info1->IsWide() && info2->IsWide() &&
+         (info1->Partner() == info2->GetReg()) && (info2->Partner() == info1->GetReg()));
+    if ((info1->IsLive() && info1->IsDirty()) || (info2->IsLive() && info2->IsDirty())) {
+      if (!(info1->IsTemp() && info2->IsTemp())) {
+        /* Should not happen.  If it does, there's a problem in eval_loc */
+        LOG(FATAL) << "Long half-temp, half-promoted";
+      }
+
+      info1->SetIsDirty(false);
+      info2->SetIsDirty(false);
+      if (mir_graph_->SRegToVReg(info2->SReg()) < mir_graph_->SRegToVReg(info1->SReg())) {
+        info1 = info2;
+      }
+      int v_reg = mir_graph_->SRegToVReg(info1->SReg());
+      StoreBaseDispWide(TargetReg(kSp), VRegOffset(v_reg), reg);
+    }
+  } else {
+    RegisterInfo* info = GetRegInfo(reg);
+    if (info->IsLive() && info->IsDirty()) {
+      info->SetIsDirty(false);
+      int v_reg = mir_graph_->SRegToVReg(info->SReg());
+      StoreBaseDispWide(TargetReg(kSp), VRegOffset(v_reg), reg);
+    }
+  }
+}
+
+void Mir2Lir::FlushReg(RegStorage reg) {
+  DCHECK(!reg.IsPair());
+  RegisterInfo* info = GetRegInfo(reg);
+  if (info->IsLive() && info->IsDirty()) {
+    info->SetIsDirty(false);
+    int v_reg = mir_graph_->SRegToVReg(info->SReg());
+    StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, kWord);
   }
 }
 
 void Mir2Lir::FlushSpecificReg(RegisterInfo* info) {
-  if (info->pair) {
-    FlushRegWide(RegStorage(RegStorage::k64BitPair, info->reg, info->partner));
+  if (info->IsWide()) {
+    FlushRegWide(info->GetReg());
   } else {
-    FlushReg(RegStorage::Solo32(info->reg));
-  }
-}
-
-// Make sure nothing is live and dirty
-void Mir2Lir::FlushAllRegsBody(RegisterInfo* info, int num_regs) {
-  for (int i = 0; i < num_regs; i++) {
-    if (info[i].live && info[i].dirty) {
-      FlushSpecificReg(&info[i]);
-    }
+    FlushReg(info->GetReg());
   }
 }
 
 void Mir2Lir::FlushAllRegs() {
-  FlushAllRegsBody(reg_pool_->core_regs,
-           reg_pool_->num_core_regs);
-  FlushAllRegsBody(reg_pool_->FPRegs,
-           reg_pool_->num_fp_regs);
-  ClobberAllRegs();
+  GrowableArray<RegisterInfo*>::Iterator it(&tempreg_info_);
+  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+    if (info->IsLive() && info->IsDirty()) {
+      FlushSpecificReg(info);
+    }
+    DCHECK(info->IsTemp());
+    info->SetIsLive(false);
+    info->SetSReg(INVALID_SREG);
+    info->ResetDefBody();
+    info->SetIsWide(false);
+  }
 }
 
 
-// TUNING: rewrite all of this reg stuff.  Probably use an attribute table
 bool Mir2Lir::RegClassMatches(int reg_class, RegStorage reg) {
-  int reg_num = reg.IsPair() ? reg.GetLowReg() : reg.GetReg();
   if (reg_class == kAnyReg) {
     return true;
   } else if (reg_class == kCoreReg) {
-    return !IsFpReg(reg_num);
+    return !reg.IsFloat();
   } else {
-    return IsFpReg(reg_num);
+    return reg.IsFloat();
   }
 }
 
-void Mir2Lir::MarkLive(RegStorage reg, int s_reg) {
-  DCHECK(!reg.IsPair());   // Could be done - but would that be meaningful?
-  RegisterInfo* info = GetRegInfo(reg.GetReg());
-  if ((info->s_reg == s_reg) && info->live) {
-    return;  /* already live */
-  } else if (s_reg != INVALID_SREG) {
+void Mir2Lir::MarkLiveReg(RegStorage reg, int s_reg) {
+  RegisterInfo* info = GetRegInfo(reg);
+  if ((info->SReg() == s_reg) && info->IsLive()) {
+    return;  // Already live.
+  }
+  if (s_reg != INVALID_SREG) {
     ClobberSReg(s_reg);
-    if (info->is_temp) {
-      info->live = true;
+    if (info->IsTemp()) {
+      info->SetIsLive(true);
     }
   } else {
-    /* Can't be live if no associated s_reg */
-    DCHECK(info->is_temp);
-    info->live = false;
+    // Can't be live if no associated s_reg.
+    DCHECK(info->IsTemp());
+    info->SetIsLive(false);
   }
-  info->s_reg = s_reg;
+  info->SetSReg(s_reg);
 }
 
-void Mir2Lir::MarkTemp(int reg) {
-  RegisterInfo* info = GetRegInfo(reg);
-  tempreg_info_.Insert(info);
-  info->is_temp = true;
+void Mir2Lir::MarkLive(RegLocation loc) {
+  RegStorage reg = loc.reg;
+  int s_reg = loc.s_reg_low;
+  if (reg.IsPair()) {
+    MarkLiveReg(reg.GetLow(), s_reg);
+    MarkLiveReg(reg.GetHigh(), s_reg+1);
+  } else {
+    if (loc.wide) {
+      ClobberSReg(s_reg + 1);
+    }
+    MarkLiveReg(reg, s_reg);
+  }
 }
 
 void Mir2Lir::MarkTemp(RegStorage reg) {
   DCHECK(!reg.IsPair());
-  MarkTemp(reg.GetReg());
-}
-
-void Mir2Lir::UnmarkTemp(int reg) {
   RegisterInfo* info = GetRegInfo(reg);
-  tempreg_info_.Delete(info);
-  info->is_temp = false;
+  tempreg_info_.Insert(info);
+  info->SetIsTemp(true);
 }
 
 void Mir2Lir::UnmarkTemp(RegStorage reg) {
   DCHECK(!reg.IsPair());
-  UnmarkTemp(reg.GetReg());
+  RegisterInfo* info = GetRegInfo(reg);
+  tempreg_info_.Delete(info);
+  info->SetIsTemp(false);
 }
 
-void Mir2Lir::MarkPair(int low_reg, int high_reg) {
-  DCHECK_NE(low_reg, high_reg);
-  RegisterInfo* info_lo = GetRegInfo(low_reg);
-  RegisterInfo* info_hi = GetRegInfo(high_reg);
-  info_lo->pair = info_hi->pair = true;
-  info_lo->partner = high_reg;
-  info_hi->partner = low_reg;
-}
-
-void Mir2Lir::MarkClean(RegLocation loc) {
-  if (loc.wide) {
-    RegisterInfo* info = GetRegInfo(loc.reg.GetLowReg());
-    info->dirty = false;
-    info = GetRegInfo(loc.reg.GetHighReg());
-    info->dirty = false;
+void Mir2Lir::MarkWide(RegStorage reg) {
+  if (reg.IsPair()) {
+    RegisterInfo* info_lo = GetRegInfo(reg.GetLow());
+    RegisterInfo* info_hi = GetRegInfo(reg.GetHigh());
+    info_lo->SetIsWide(true);
+    info_hi->SetIsWide(true);
+    info_lo->SetPartner(reg.GetHigh());
+    info_hi->SetPartner(reg.GetLow());
   } else {
-    RegisterInfo* info = GetRegInfo(loc.reg.GetReg());
-    info->dirty = false;
+    RegisterInfo* info = GetRegInfo(reg);
+    info->SetIsWide(true);
+    info->SetPartner(reg);
   }
 }
 
+void Mir2Lir::MarkClean(RegLocation loc) {
+  if (loc.reg.IsPair()) {
+    RegisterInfo* info = GetRegInfo(loc.reg.GetLow());
+    info->SetIsDirty(false);
+    info = GetRegInfo(loc.reg.GetHigh());
+    info->SetIsDirty(false);
+  } else {
+    RegisterInfo* info = GetRegInfo(loc.reg);
+    info->SetIsDirty(false);
+  }
+}
+
+// FIXME: need to verify rules/assumptions about how wide values are treated in 64BitSolos.
 void Mir2Lir::MarkDirty(RegLocation loc) {
   if (loc.home) {
     // If already home, can't be dirty
     return;
   }
-  if (loc.wide) {
-    RegisterInfo* info = GetRegInfo(loc.reg.GetLowReg());
-    info->dirty = true;
-    info = GetRegInfo(loc.reg.GetHighReg());
-    info->dirty = true;
+  if (loc.reg.IsPair()) {
+    RegisterInfo* info = GetRegInfo(loc.reg.GetLow());
+    info->SetIsDirty(true);
+    info = GetRegInfo(loc.reg.GetHigh());
+    info->SetIsDirty(true);
   } else {
-    RegisterInfo* info = GetRegInfo(loc.reg.GetReg());
-    info->dirty = true;
+    RegisterInfo* info = GetRegInfo(loc.reg);
+    info->SetIsDirty(true);
   }
 }
 
-void Mir2Lir::MarkInUse(int reg) {
-    RegisterInfo* info = GetRegInfo(reg);
-    info->in_use = true;
-}
-
 void Mir2Lir::MarkInUse(RegStorage reg) {
   if (reg.IsPair()) {
-    MarkInUse(reg.GetLowReg());
-    MarkInUse(reg.GetHighReg());
+    GetRegInfo(reg.GetLow())->MarkInUse();
+    GetRegInfo(reg.GetHigh())->MarkInUse();
   } else {
-    MarkInUse(reg.GetReg());
+    GetRegInfo(reg)->MarkInUse();
   }
 }
 
-void Mir2Lir::CopyRegInfo(int new_reg, int old_reg) {
-  RegisterInfo* new_info = GetRegInfo(new_reg);
-  RegisterInfo* old_info = GetRegInfo(old_reg);
-  // Target temp, live, dirty status must not change
-  bool is_temp = new_info->is_temp;
-  bool live = new_info->live;
-  bool dirty = new_info->dirty;
-  *new_info = *old_info;
-  // Restore target's temp, live, dirty status
-  new_info->is_temp = is_temp;
-  new_info->live = live;
-  new_info->dirty = dirty;
-  new_info->reg = new_reg;
-}
-
-void Mir2Lir::CopyRegInfo(RegStorage new_reg, RegStorage old_reg) {
-  DCHECK(!new_reg.IsPair());
-  DCHECK(!old_reg.IsPair());
-  CopyRegInfo(new_reg.GetReg(), old_reg.GetReg());
-}
-
 bool Mir2Lir::CheckCorePoolSanity() {
-  for (static int i = 0; i < reg_pool_->num_core_regs; i++) {
-    if (reg_pool_->core_regs[i].pair) {
-      static int my_reg = reg_pool_->core_regs[i].reg;
-      static int my_sreg = reg_pool_->core_regs[i].s_reg;
-      static int partner_reg = reg_pool_->core_regs[i].partner;
-      static RegisterInfo* partner = GetRegInfo(partner_reg);
+  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->core_regs_);
+  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+    RegStorage my_reg = info->GetReg();
+    if (info->IsWide() && my_reg.IsPair()) {
+      int my_sreg = info->SReg();
+      RegStorage partner_reg = info->Partner();
+      RegisterInfo* partner = GetRegInfo(partner_reg);
       DCHECK(partner != NULL);
-      DCHECK(partner->pair);
-      DCHECK_EQ(my_reg, partner->partner);
-      static int partner_sreg = partner->s_reg;
+      DCHECK(partner->IsWide());
+      DCHECK_EQ(my_reg.GetReg(), partner->Partner().GetReg());
+      int partner_sreg = partner->SReg();
       if (my_sreg == INVALID_SREG) {
         DCHECK_EQ(partner_sreg, INVALID_SREG);
       } else {
         int diff = my_sreg - partner_sreg;
-        DCHECK((diff == -1) || (diff == 1));
+        DCHECK((diff == 0) || (diff == -1) || (diff == 1));
       }
+    } else {
+      // TODO: add whatever sanity checks might be useful for 64BitSolo regs here.
+      // TODO: sanity checks for floating point pools?
     }
-    if (!reg_pool_->core_regs[i].live) {
-      DCHECK(reg_pool_->core_regs[i].def_start == NULL);
-      DCHECK(reg_pool_->core_regs[i].def_end == NULL);
+    if (!info->IsLive()) {
+      DCHECK(info->DefStart() == NULL);
+      DCHECK(info->DefEnd() == NULL);
     }
   }
   return true;
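
A small self-contained sketch of the pairing bookkeeping MarkWide establishes and CheckCorePoolSanity verifies above: for a wide core pair each half records the other as its partner, and the two halves hold s_regs that differ by at most one. ToyRegInfo and the helper names are made up for illustration:

#include <cassert>
#include <cstdlib>

struct ToyRegInfo {
  int reg;
  int partner;
  int s_reg;
  bool wide;
};

// Mirror of MarkWide for a pair: both halves become wide, partners cross-link.
static void MarkWidePair(ToyRegInfo& lo, ToyRegInfo& hi) {
  lo.wide = true;
  hi.wide = true;
  lo.partner = hi.reg;
  hi.partner = lo.reg;
}

// Mirror of the pair checks in CheckCorePoolSanity.
static bool PairSane(const ToyRegInfo& lo, const ToyRegInfo& hi) {
  return lo.wide && hi.wide &&
         lo.partner == hi.reg && hi.partner == lo.reg &&
         std::abs(lo.s_reg - hi.s_reg) <= 1;
}

int main() {
  ToyRegInfo r2 = {2, 2, 10, false};
  ToyRegInfo r3 = {3, 3, 11, false};
  MarkWidePair(r2, r3);
  assert(PairSane(r2, r3));
  return 0;
}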
@@ -796,80 +827,64 @@
  * is a bit complex when dealing with FP regs.  Examine code to see
  * if it's worthwhile trying to be more clever here.
  */
-
 RegLocation Mir2Lir::UpdateLoc(RegLocation loc) {
   DCHECK(!loc.wide);
   DCHECK(CheckCorePoolSanity());
   if (loc.location != kLocPhysReg) {
     DCHECK((loc.location == kLocDalvikFrame) ||
          (loc.location == kLocCompilerTemp));
-    RegisterInfo* info_lo = AllocLive(loc.s_reg_low, kAnyReg);
-    if (info_lo) {
-      if (info_lo->pair) {
-        Clobber(info_lo->reg);
-        Clobber(info_lo->partner);
-        FreeTemp(info_lo->reg);
-      } else {
-        loc.reg = RegStorage::Solo32(info_lo->reg);
+    RegStorage reg = AllocLiveReg(loc.s_reg_low, kAnyReg, false);
+    if (reg.Valid()) {
+      bool match = true;
+      RegisterInfo* info = GetRegInfo(reg);
+      match &= !reg.IsPair();
+      match &= !info->IsWide();
+      if (match) {
         loc.location = kLocPhysReg;
+        loc.reg = reg;
+      } else {
+        Clobber(reg);
+        FreeTemp(reg);
       }
     }
   }
   return loc;
 }
 
-/* see comments for update_loc */
 RegLocation Mir2Lir::UpdateLocWide(RegLocation loc) {
   DCHECK(loc.wide);
   DCHECK(CheckCorePoolSanity());
   if (loc.location != kLocPhysReg) {
     DCHECK((loc.location == kLocDalvikFrame) ||
          (loc.location == kLocCompilerTemp));
-    // Are the dalvik regs already live in physical registers?
-    RegisterInfo* info_lo = AllocLive(loc.s_reg_low, kAnyReg);
-    RegisterInfo* info_hi = AllocLive(GetSRegHi(loc.s_reg_low), kAnyReg);
-    bool match = true;
-    match = match && (info_lo != NULL);
-    match = match && (info_hi != NULL);
-    // Are they both core or both FP?
-    match = match && (IsFpReg(info_lo->reg) == IsFpReg(info_hi->reg));
-    // If a pair of floating point singles, are they properly aligned?
-    if (match && IsFpReg(info_lo->reg)) {
-      match &= ((info_lo->reg & 0x1) == 0);
-      match &= ((info_hi->reg - info_lo->reg) == 1);
-    }
-    // If previously used as a pair, it is the same pair?
-    if (match && (info_lo->pair || info_hi->pair)) {
-      match = (info_lo->pair == info_hi->pair);
-      match &= ((info_lo->reg == info_hi->partner) &&
-            (info_hi->reg == info_lo->partner));
-    }
-    if (match) {
-      // Can reuse - update the register usage info
-      loc.location = kLocPhysReg;
-      loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_hi->reg);
-      MarkPair(loc.reg.GetLowReg(), loc.reg.GetHighReg());
-      DCHECK(!IsFpReg(loc.reg.GetLowReg()) || ((loc.reg.GetLowReg() & 0x1) == 0));
-      return loc;
-    }
-    // Can't easily reuse - clobber and free any overlaps
-    if (info_lo) {
-      Clobber(info_lo->reg);
-      FreeTemp(info_lo->reg);
-      if (info_lo->pair)
-        Clobber(info_lo->partner);
-    }
-    if (info_hi) {
-      Clobber(info_hi->reg);
-      FreeTemp(info_hi->reg);
-      if (info_hi->pair)
-        Clobber(info_hi->partner);
+    RegStorage reg = AllocLiveReg(loc.s_reg_low, kAnyReg, true);
+    if (reg.Valid()) {
+      bool match = true;
+      if (reg.IsPair()) {
+        // If we've got a register pair, make sure that it was last used as the same pair.
+        RegisterInfo* info_lo = GetRegInfo(reg.GetLow());
+        RegisterInfo* info_hi = GetRegInfo(reg.GetHigh());
+        match &= info_lo->IsWide();
+        match &= info_hi->IsWide();
+        match &= (info_lo->Partner() == info_hi->GetReg());
+        match &= (info_hi->Partner() == info_lo->GetReg());
+      } else {
+        RegisterInfo* info = GetRegInfo(reg);
+        match &= info->IsWide();
+        match &= (info->GetReg() == info->Partner());
+      }
+      if (match) {
+        loc.location = kLocPhysReg;
+        loc.reg = reg;
+      } else {
+        Clobber(reg);
+        FreeTemp(reg);
+      }
     }
   }
   return loc;
 }
 
-
 /* For use in cases we don't know (or care) width */
 RegLocation Mir2Lir::UpdateRawLoc(RegLocation loc) {
   if (loc.wide)
@@ -885,18 +900,15 @@
 
   /* If already in registers, we can assume proper form.  Right reg class? */
   if (loc.location == kLocPhysReg) {
-    DCHECK_EQ(IsFpReg(loc.reg.GetLowReg()), IsFpReg(loc.reg.GetHighReg()));
-    DCHECK(!IsFpReg(loc.reg.GetLowReg()) || ((loc.reg.GetLowReg() & 0x1) == 0));
     if (!RegClassMatches(reg_class, loc.reg)) {
       /* Wrong register class.  Reallocate and copy */
       RegStorage new_regs = AllocTypedTempWide(loc.fp, reg_class);
       OpRegCopyWide(new_regs, loc.reg);
-      CopyRegInfo(new_regs.GetLowReg(), loc.reg.GetLowReg());
-      CopyRegInfo(new_regs.GetHighReg(), loc.reg.GetHighReg());
+      // Associate the old sreg with the new register and clobber the old register.
+      GetRegInfo(new_regs)->SetSReg(GetRegInfo(loc.reg)->SReg());
       Clobber(loc.reg);
       loc.reg = new_regs;
-      MarkPair(loc.reg.GetLowReg(), loc.reg.GetHighReg());
-      DCHECK(!IsFpReg(loc.reg.GetLowReg()) || ((loc.reg.GetLowReg() & 0x1) == 0));
+      MarkWide(loc.reg);
     }
     return loc;
   }
@@ -905,23 +917,19 @@
   DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);
 
   loc.reg = AllocTypedTempWide(loc.fp, reg_class);
+  MarkWide(loc.reg);
 
-  MarkPair(loc.reg.GetLowReg(), loc.reg.GetHighReg());
   if (update) {
     loc.location = kLocPhysReg;
-    MarkLive(loc.reg.GetLow(), loc.s_reg_low);
-    // Does this wide value live in two registers or one vector register?
-    if (loc.reg.GetLowReg() != loc.reg.GetHighReg()) {
-      MarkLive(loc.reg.GetHigh(), GetSRegHi(loc.s_reg_low));
-    }
+    MarkLive(loc);
   }
-  DCHECK(!IsFpReg(loc.reg.GetLowReg()) || ((loc.reg.GetLowReg() & 0x1) == 0));
   return loc;
 }
 
 RegLocation Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
-  if (loc.wide)
+  if (loc.wide) {
     return EvalLocWide(loc, reg_class, update);
+  }
 
   loc = UpdateLoc(loc);
 
@@ -930,7 +938,8 @@
       /* Wrong register class.  Realloc, copy and transfer ownership */
       RegStorage new_reg = AllocTypedTemp(loc.fp, reg_class);
       OpRegCopy(new_reg, loc.reg);
-      CopyRegInfo(new_reg, loc.reg);
+      // Associate the old sreg with the new register and clobber the old register.
+      GetRegInfo(new_reg)->SetSReg(GetRegInfo(loc.reg)->SReg());
       Clobber(loc.reg);
       loc.reg = new_reg;
     }
@@ -943,7 +952,7 @@
 
   if (update) {
     loc.location = kLocPhysReg;
-    MarkLive(loc.reg, loc.s_reg_low);
+    MarkLive(loc);
   }
   return loc;
 }
@@ -1115,9 +1124,14 @@
           int low_reg = promotion_map_[p_map_idx].FpReg;
           int high_reg = promotion_map_[p_map_idx+1].FpReg;
           // Doubles require pair of singles starting at even reg
+          // TODO: move target-specific restrictions out of here.
           if (((low_reg & 0x1) == 0) && ((low_reg + 1) == high_reg)) {
             curr->location = kLocPhysReg;
-            curr->reg = RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
+            if (cu_->instruction_set == kThumb2) {
+              curr->reg = RegStorage::FloatSolo64(RegStorage::RegNum(low_reg) >> 1);
+            } else {
+              curr->reg = RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
+            }
             curr->home = true;
           }
         }
@@ -1155,13 +1169,18 @@
   RegLocation gpr_res = LocCReturnWide();
   RegLocation fpr_res = LocCReturnDouble();
   RegLocation res = is_double ? fpr_res : gpr_res;
-  Clobber(res.reg.GetLowReg());
-  Clobber(res.reg.GetHighReg());
-  LockTemp(res.reg.GetLowReg());
-  LockTemp(res.reg.GetHighReg());
-  // Does this wide value live in two registers or one vector register?
-  if (res.reg.GetLowReg() != res.reg.GetHighReg()) {
-    MarkPair(res.reg.GetLowReg(), res.reg.GetHighReg());
+  if (res.reg.IsPair()) {
+    Clobber(res.reg);
+    LockTemp(res.reg);
+    // Does this wide value live in two registers or one vector register?
+    if (res.reg.GetLowReg() != res.reg.GetHighReg()) {
+      // FIXME: I think we want to mark these as wide as well.
+      MarkWide(res.reg);
+    }
+  } else {
+    Clobber(res.reg);
+    LockTemp(res.reg);
+    MarkWide(res.reg);
   }
   return res;
 }
@@ -1170,11 +1189,11 @@
   RegLocation gpr_res = LocCReturn();
   RegLocation fpr_res = LocCReturnFloat();
   RegLocation res = is_float ? fpr_res : gpr_res;
-  Clobber(res.reg.GetReg());
+  Clobber(res.reg);
   if (cu_->instruction_set == kMips) {
-    MarkInUse(res.reg.GetReg());
+    MarkInUse(res.reg);
   } else {
-    LockTemp(res.reg.GetReg());
+    LockTemp(res.reg);
   }
   return res;
 }
@@ -1204,14 +1223,9 @@
   return (lowSreg == INVALID_SREG) ? INVALID_SREG : lowSreg + 1;
 }
 
-bool Mir2Lir::oat_live_out(int s_reg) {
+bool Mir2Lir::LiveOut(int s_reg) {
   // For now.
   return true;
 }
 
-int Mir2Lir::oatSSASrc(MIR* mir, int num) {
-  DCHECK_GT(mir->ssa_rep->num_uses, num);
-  return mir->ssa_rep->uses[num];
-}
-
 }  // namespace art
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index e7a1a69..58e2f42 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -175,7 +175,7 @@
   { kX86Mov32AI, kArrayImm,  IS_STORE | IS_QUIN_OP     | REG_USE01,      { 0,             0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32AI", "[!0r+!1r<<!2d+!3d],!4d" },
   { kX86Mov32TI, kThreadImm, IS_STORE | IS_BINARY_OP,                    { THREAD_PREFIX, 0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32TI", "fs:[!0d],!1d" },
 
-  { kX86Lea32RM, kRegMem, IS_TERTIARY_OP | IS_LOAD | REG_DEF0_USE12, { 0, 0, 0x8D, 0, 0, 0, 0, 0 }, "Lea32RM", "!0r,[!1r+!2d]" },
+  { kX86Lea32RM, kRegMem, IS_TERTIARY_OP | IS_LOAD | REG_DEF0_USE1, { 0, 0, 0x8D, 0, 0, 0, 0, 0 }, "Lea32RM", "!0r,[!1r+!2d]" },
 
   { kX86Lea32RA, kRegArray, IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8D, 0, 0, 0, 0, 0 }, "Lea32RA", "!0r,[!1r+!2r<<!3d+!4d]" },
 
@@ -388,11 +388,11 @@
     }
   }
   ++size;  // modrm
-  if (has_sib || base == rX86_SP) {
+  if (has_sib || RegStorage::RegNum(base) == rs_rX86_SP.GetRegNum()) {
     // SP requires a SIB byte.
     ++size;
   }
-  if (displacement != 0 || base == rBP) {
+  if (displacement != 0 || RegStorage::RegNum(base) == rs_rBP.GetRegNum()) {
     // BP requires an explicit displacement, even when it's 0.
     if (entry->opcode != kX86Lea32RA) {
       DCHECK_NE(entry->flags & (IS_LOAD | IS_STORE), 0ULL) << entry->name;
@@ -446,7 +446,7 @@
       } else {
         // AX opcodes don't require the modrm byte.
         int reg = lir->operands[0];
-        return size - (reg == rAX ? 1 : 0);
+        return size - (RegStorage::RegNum(reg) == rs_rAX.GetRegNum() ? 1 : 0);
       }
     }
     case kMemImm:  // lir operands - 0: base, 1: disp, 2: immediate
@@ -533,7 +533,7 @@
       DCHECK_EQ(lir->opcode, static_cast<int>(kX86StartOfMethod));
       return 5 /* call opcode + 4 byte displacement */ + 1 /* pop reg */ +
           ComputeSize(&X86Mir2Lir::EncodingMap[kX86Sub32RI], 0, 0, false) -
-          (lir->operands[0] == rAX  ? 1 : 0);  // shorter ax encoding
+          (RegStorage::RegNum(lir->operands[0]) == rs_rAX.GetRegNum()  ? 1 : 0);  // shorter ax encoding
     default:
       break;
   }
@@ -574,7 +574,7 @@
 
 static uint8_t ModrmForDisp(int base, int disp) {
   // BP requires an explicit disp, so do not omit it in the 0 case
-  if (disp == 0 && base != rBP) {
+  if (disp == 0 && RegStorage::RegNum(base) != rs_rBP.GetRegNum()) {
     return 0;
   } else if (IS_SIMM8(disp)) {
     return 1;
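
For reference, a standalone sketch of the ModR/M 'mod' selection ModrmForDisp implements above: a base of EBP cannot be encoded with mod == 00, so a zero displacement is still emitted as an 8-bit displacement. The register number and helper name here are illustrative, not the compiler's definitions:

#include <cassert>

constexpr int kBP = 5;  // x86 base-register number that always needs a displacement

// 0 -> no displacement byte, 1 -> disp8, 2 -> disp32.
static int ModForDisp(int base, int disp) {
  if (disp == 0 && base != kBP) {
    return 0;
  } else if (disp >= -128 && disp <= 127) {
    return 1;
  }
  return 2;
}

int main() {
  assert(ModForDisp(/*eax*/ 0, 0) == 0);    // [eax]
  assert(ModForDisp(kBP, 0) == 1);          // [ebp] must be encoded as [ebp+0]
  assert(ModForDisp(/*ecx*/ 1, 300) == 2);  // large displacement -> disp32
  return 0;
}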
@@ -585,7 +585,7 @@
 
 void X86Mir2Lir::EmitDisp(uint8_t base, int disp) {
   // BP requires an explicit disp, so do not omit it in the 0 case
-  if (disp == 0 && base != rBP) {
+  if (disp == 0 && RegStorage::RegNum(base) != rs_rBP.GetRegNum()) {
     return;
   } else if (IS_SIMM8(disp)) {
     code_buffer_.push_back(disp & 0xFF);
@@ -598,26 +598,28 @@
 }
 
 void X86Mir2Lir::EmitModrmDisp(uint8_t reg_or_opcode, uint8_t base, int disp) {
-  DCHECK_LT(reg_or_opcode, 8);
-  DCHECK_LT(base, 8);
-  uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg_or_opcode << 3) | base;
+  DCHECK_LT(RegStorage::RegNum(reg_or_opcode), 8);
+  DCHECK_LT(RegStorage::RegNum(base), 8);
+  uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (RegStorage::RegNum(reg_or_opcode) << 3) |
+     RegStorage::RegNum(base);
   code_buffer_.push_back(modrm);
-  if (base == rX86_SP) {
+  if (RegStorage::RegNum(base) == rs_rX86_SP.GetRegNum()) {
     // Special SIB for SP base
-    code_buffer_.push_back(0 << 6 | (rX86_SP << 3) | rX86_SP);
+    code_buffer_.push_back(0 << 6 | rs_rX86_SP.GetRegNum() << 3 | rs_rX86_SP.GetRegNum());
   }
   EmitDisp(base, disp);
 }
 
 void X86Mir2Lir::EmitModrmSibDisp(uint8_t reg_or_opcode, uint8_t base, uint8_t index,
                                   int scale, int disp) {
-  DCHECK_LT(reg_or_opcode, 8);
-  uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg_or_opcode << 3) | rX86_SP;
+  DCHECK_LT(RegStorage::RegNum(reg_or_opcode), 8);
+  uint8_t modrm = (ModrmForDisp(base, disp) << 6) | RegStorage::RegNum(reg_or_opcode) << 3 |
+      rs_rX86_SP.GetRegNum();
   code_buffer_.push_back(modrm);
   DCHECK_LT(scale, 4);
-  DCHECK_LT(index, 8);
-  DCHECK_LT(base, 8);
-  uint8_t sib = (scale << 6) | (index << 3) | base;
+  DCHECK_LT(RegStorage::RegNum(index), 8);
+  DCHECK_LT(RegStorage::RegNum(base), 8);
+  uint8_t sib = (scale << 6) | (RegStorage::RegNum(index) << 3) | RegStorage::RegNum(base);
   code_buffer_.push_back(sib);
   EmitDisp(base, disp);
 }
@@ -651,24 +653,22 @@
   // There's no 3-byte instruction with +rd
   DCHECK(entry->skeleton.opcode != 0x0F ||
          (entry->skeleton.extra_opcode1 != 0x38 && entry->skeleton.extra_opcode1 != 0x3A));
-  DCHECK(!X86_FPREG(reg));
-  DCHECK_LT(reg, 8);
-  code_buffer_.back() += reg;
+  DCHECK(!RegStorage::IsFloat(reg));
+  DCHECK_LT(RegStorage::RegNum(reg), 8);
+  code_buffer_.back() += RegStorage::RegNum(reg);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   DCHECK_EQ(0, entry->skeleton.immediate_bytes);
 }
 
 void X86Mir2Lir::EmitOpReg(const X86EncodingMap* entry, uint8_t reg) {
   EmitPrefixAndOpcode(entry);
-  if (X86_FPREG(reg)) {
-    reg = reg & X86_FP_REG_MASK;
-  }
-  if (reg >= 4) {
-    DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
+  if (RegStorage::RegNum(reg) >= 4) {
+    DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " "
+        << static_cast<int>(RegStorage::RegNum(reg))
         << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
   }
-  DCHECK_LT(reg, 8);
-  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+  DCHECK_LT(RegStorage::RegNum(reg), 8);
+  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | RegStorage::RegNum(reg);
   code_buffer_.push_back(modrm);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   DCHECK_EQ(0, entry->skeleton.immediate_bytes);
@@ -696,13 +696,10 @@
 void X86Mir2Lir::EmitMemReg(const X86EncodingMap* entry,
                        uint8_t base, int disp, uint8_t reg) {
   EmitPrefixAndOpcode(entry);
-  if (X86_FPREG(reg)) {
-    reg = reg & X86_FP_REG_MASK;
-  }
-  if (reg >= 4) {
+  if (RegStorage::RegNum(reg) >= 4) {
     DCHECK(strchr(entry->name, '8') == NULL ||
            entry->opcode == kX86Movzx8RM || entry->opcode == kX86Movsx8RM)
-        << entry->name << " " << static_cast<int>(reg)
+        << entry->name << " " << static_cast<int>(RegStorage::RegNum(reg))
         << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
   }
   EmitModrmDisp(reg, base, disp);
@@ -720,17 +717,14 @@
 void X86Mir2Lir::EmitRegArray(const X86EncodingMap* entry, uint8_t reg, uint8_t base, uint8_t index,
                               int scale, int disp) {
   EmitPrefixAndOpcode(entry);
-  if (X86_FPREG(reg)) {
-    reg = reg & X86_FP_REG_MASK;
-  }
   EmitModrmSibDisp(reg, base, index, scale, disp);
   DCHECK_EQ(0, entry->skeleton.modrm_opcode);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   DCHECK_EQ(0, entry->skeleton.immediate_bytes);
 }
 
-void X86Mir2Lir::EmitArrayReg(const X86EncodingMap* entry, uint8_t base, uint8_t index, int scale, int disp,
-                  uint8_t reg) {
+void X86Mir2Lir::EmitArrayReg(const X86EncodingMap* entry, uint8_t base, uint8_t index, int scale,
+                              int disp, uint8_t reg) {
   // Opcode will flip operands.
   EmitRegArray(entry, reg, base, index, scale, disp);
 }
@@ -738,15 +732,13 @@
 void X86Mir2Lir::EmitRegThread(const X86EncodingMap* entry, uint8_t reg, int disp) {
   DCHECK_NE(entry->skeleton.prefix1, 0);
   EmitPrefixAndOpcode(entry);
-  if (X86_FPREG(reg)) {
-    reg = reg & X86_FP_REG_MASK;
-  }
-  if (reg >= 4) {
-    DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
+  if (RegStorage::RegNum(reg) >= 4) {
+    DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " "
+        << static_cast<int>(RegStorage::RegNum(reg))
         << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
   }
-  DCHECK_LT(reg, 8);
-  uint8_t modrm = (0 << 6) | (reg << 3) | rBP;
+  DCHECK_LT(RegStorage::RegNum(reg), 8);
+  uint8_t modrm = (0 << 6) | (RegStorage::RegNum(reg) << 3) | rs_rBP.GetRegNum();
   code_buffer_.push_back(modrm);
   code_buffer_.push_back(disp & 0xFF);
   code_buffer_.push_back((disp >> 8) & 0xFF);
@@ -759,15 +751,9 @@
 
 void X86Mir2Lir::EmitRegReg(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2) {
   EmitPrefixAndOpcode(entry);
-  if (X86_FPREG(reg1)) {
-    reg1 = reg1 & X86_FP_REG_MASK;
-  }
-  if (X86_FPREG(reg2)) {
-    reg2 = reg2 & X86_FP_REG_MASK;
-  }
-  DCHECK_LT(reg1, 8);
-  DCHECK_LT(reg2, 8);
-  uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2;
+  DCHECK_LT(RegStorage::RegNum(reg1), 8);
+  DCHECK_LT(RegStorage::RegNum(reg2), 8);
+  uint8_t modrm = (3 << 6) | (RegStorage::RegNum(reg1) << 3) | RegStorage::RegNum(reg2);
   code_buffer_.push_back(modrm);
   DCHECK_EQ(0, entry->skeleton.modrm_opcode);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
@@ -777,15 +763,9 @@
 void X86Mir2Lir::EmitRegRegImm(const X86EncodingMap* entry,
                           uint8_t reg1, uint8_t reg2, int32_t imm) {
   EmitPrefixAndOpcode(entry);
-  if (X86_FPREG(reg1)) {
-    reg1 = reg1 & X86_FP_REG_MASK;
-  }
-  if (X86_FPREG(reg2)) {
-    reg2 = reg2 & X86_FP_REG_MASK;
-  }
-  DCHECK_LT(reg1, 8);
-  DCHECK_LT(reg2, 8);
-  uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2;
+  DCHECK_LT(RegStorage::RegNum(reg1), 8);
+  DCHECK_LT(RegStorage::RegNum(reg2), 8);
+  uint8_t modrm = (3 << 6) | (RegStorage::RegNum(reg1) << 3) | RegStorage::RegNum(reg2);
   code_buffer_.push_back(modrm);
   DCHECK_EQ(0, entry->skeleton.modrm_opcode);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
@@ -800,8 +780,8 @@
 void X86Mir2Lir::EmitRegMemImm(const X86EncodingMap* entry,
                                uint8_t reg, uint8_t base, int disp, int32_t imm) {
   EmitPrefixAndOpcode(entry);
-  DCHECK(!X86_FPREG(reg));
-  DCHECK_LT(reg, 8);
+  DCHECK(!RegStorage::IsFloat(reg));
+  DCHECK_LT(RegStorage::RegNum(reg), 8);
   EmitModrmDisp(reg, base, disp);
   DCHECK_EQ(0, entry->skeleton.modrm_opcode);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
@@ -817,14 +797,11 @@
   } else {
     DCHECK_EQ(0, entry->skeleton.prefix2);
   }
-  if (reg == rAX && entry->skeleton.ax_opcode != 0) {
+  if (RegStorage::RegNum(reg) == rs_rAX.GetRegNum() && entry->skeleton.ax_opcode != 0) {
     code_buffer_.push_back(entry->skeleton.ax_opcode);
   } else {
     EmitOpcode(entry);
-    if (X86_FPREG(reg)) {
-      reg = reg & X86_FP_REG_MASK;
-    }
-    uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+    uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | RegStorage::RegNum(reg);
     code_buffer_.push_back(modrm);
   }
   EmitImm(entry, imm);
@@ -839,7 +816,7 @@
 
 void X86Mir2Lir::EmitThreadImm(const X86EncodingMap* entry, int disp, int imm) {
   EmitPrefixAndOpcode(entry);
-  uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rBP;
+  uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rs_rBP.GetRegNum();
   code_buffer_.push_back(modrm);
   code_buffer_.push_back(disp & 0xFF);
   code_buffer_.push_back((disp >> 8) & 0xFF);
@@ -850,8 +827,8 @@
 }
 
 void X86Mir2Lir::EmitMovRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) {
-  DCHECK_LT(reg, 8);
-  code_buffer_.push_back(0xB8 + reg);
+  DCHECK_LT(RegStorage::RegNum(reg), 8);
+  code_buffer_.push_back(0xB8 + RegStorage::RegNum(reg));
   code_buffer_.push_back(imm & 0xFF);
   code_buffer_.push_back((imm >> 8) & 0xFF);
   code_buffer_.push_back((imm >> 16) & 0xFF);
@@ -869,12 +846,13 @@
   DCHECK_NE(0x0F, entry->skeleton.opcode);
   DCHECK_EQ(0, entry->skeleton.extra_opcode1);
   DCHECK_EQ(0, entry->skeleton.extra_opcode2);
-  if (reg >= 4) {
-    DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
+  if (RegStorage::RegNum(reg) >= 4) {
+    DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " "
+        << static_cast<int>(RegStorage::RegNum(reg))
         << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
   }
-  DCHECK_LT(reg, 8);
-  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+  DCHECK_LT(RegStorage::RegNum(reg), 8);
+  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | RegStorage::RegNum(reg);
   code_buffer_.push_back(modrm);
   if (imm != 1) {
     DCHECK_EQ(entry->skeleton.immediate_bytes, 1);
@@ -884,14 +862,14 @@
 }
 
 void X86Mir2Lir::EmitShiftRegCl(const X86EncodingMap* entry, uint8_t reg, uint8_t cl) {
-  DCHECK_EQ(cl, static_cast<uint8_t>(rCX));
+  DCHECK_EQ(cl, static_cast<uint8_t>(rs_rCX.GetReg()));
   EmitPrefix(entry);
   code_buffer_.push_back(entry->skeleton.opcode);
   DCHECK_NE(0x0F, entry->skeleton.opcode);
   DCHECK_EQ(0, entry->skeleton.extra_opcode1);
   DCHECK_EQ(0, entry->skeleton.extra_opcode2);
-  DCHECK_LT(reg, 8);
-  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+  DCHECK_LT(RegStorage::RegNum(reg), 8);
+  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | RegStorage::RegNum(reg);
   code_buffer_.push_back(modrm);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   DCHECK_EQ(0, entry->skeleton.immediate_bytes);
@@ -899,13 +877,13 @@
 
 void X86Mir2Lir::EmitShiftMemCl(const X86EncodingMap* entry, uint8_t base,
                                 int displacement, uint8_t cl) {
-  DCHECK_EQ(cl, static_cast<uint8_t>(rCX));
+  DCHECK_EQ(cl, static_cast<uint8_t>(rs_rCX.GetReg()));
   EmitPrefix(entry);
   code_buffer_.push_back(entry->skeleton.opcode);
   DCHECK_NE(0x0F, entry->skeleton.opcode);
   DCHECK_EQ(0, entry->skeleton.extra_opcode1);
   DCHECK_EQ(0, entry->skeleton.extra_opcode2);
-  DCHECK_LT(base, 8);
+  DCHECK_LT(RegStorage::RegNum(base), 8);
   EmitModrmDisp(entry->skeleton.modrm_opcode, base, displacement);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   DCHECK_EQ(0, entry->skeleton.immediate_bytes);
@@ -926,13 +904,14 @@
   DCHECK_EQ(0x90, entry->skeleton.extra_opcode1);
   code_buffer_.push_back(0x90 | condition);
   DCHECK_EQ(0, entry->skeleton.extra_opcode2);
-  DCHECK_LT(reg, 8);
-  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+  DCHECK_LT(RegStorage::RegNum(reg), 8);
+  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | RegStorage::RegNum(reg);
   code_buffer_.push_back(modrm);
   DCHECK_EQ(entry->skeleton.immediate_bytes, 0);
 }
 
-void X86Mir2Lir::EmitRegRegCond(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2, uint8_t condition) {
+void X86Mir2Lir::EmitRegRegCond(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2,
+                                uint8_t condition) {
   // Generate prefix and opcode without the condition
   EmitPrefixAndOpcode(entry);
 
@@ -945,14 +924,14 @@
   DCHECK_EQ(0, entry->skeleton.modrm_opcode);
 
   // Check that registers requested for encoding are sane.
-  DCHECK_LT(reg1, 8);
-  DCHECK_LT(reg2, 8);
+  DCHECK_LT(RegStorage::RegNum(reg1), 8);
+  DCHECK_LT(RegStorage::RegNum(reg2), 8);
 
   // For register to register encoding, the mod is 3.
   const uint8_t mod = (3 << 6);
 
   // Encode the ModR/M byte now.
-  const uint8_t modrm = mod | (reg1 << 3) | reg2;
+  const uint8_t modrm = mod | (RegStorage::RegNum(reg1) << 3) | RegStorage::RegNum(reg2);
   code_buffer_.push_back(modrm);
 }
 
@@ -975,8 +954,8 @@
     DCHECK(entry->opcode == kX86JmpR);
     code_buffer_.push_back(entry->skeleton.opcode);
     uint8_t reg = static_cast<uint8_t>(rel);
-    DCHECK_LT(reg, 8);
-    uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+    DCHECK_LT(RegStorage::RegNum(reg), 8);
+    uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | RegStorage::RegNum(reg);
     code_buffer_.push_back(modrm);
   }
 }
@@ -1018,7 +997,7 @@
 void X86Mir2Lir::EmitCallThread(const X86EncodingMap* entry, int disp) {
   DCHECK_NE(entry->skeleton.prefix1, 0);
   EmitPrefixAndOpcode(entry);
-  uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rBP;
+  uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rs_rBP.GetRegNum();
   code_buffer_.push_back(modrm);
   code_buffer_.push_back(disp & 0xFF);
   code_buffer_.push_back((disp >> 8) & 0xFF);
@@ -1042,26 +1021,23 @@
     disp = tab_rec->offset;
   }
   EmitPrefix(entry);
-  if (X86_FPREG(reg)) {
-    reg = reg & X86_FP_REG_MASK;
-  }
-  DCHECK_LT(reg, 8);
+  DCHECK_LT(RegStorage::RegNum(reg), 8);
   if (entry->opcode == kX86PcRelLoadRA) {
     code_buffer_.push_back(entry->skeleton.opcode);
     DCHECK_NE(0x0F, entry->skeleton.opcode);
     DCHECK_EQ(0, entry->skeleton.extra_opcode1);
     DCHECK_EQ(0, entry->skeleton.extra_opcode2);
-    uint8_t modrm = (2 << 6) | (reg << 3) | rX86_SP;
+    uint8_t modrm = (2 << 6) | (RegStorage::RegNum(reg) << 3) | rs_rX86_SP.GetRegNum();
     code_buffer_.push_back(modrm);
     DCHECK_LT(scale, 4);
-    DCHECK_LT(index, 8);
-    DCHECK_LT(base_or_table, 8);
+    DCHECK_LT(RegStorage::RegNum(index), 8);
+    DCHECK_LT(RegStorage::RegNum(base_or_table), 8);
     uint8_t base = static_cast<uint8_t>(base_or_table);
-    uint8_t sib = (scale << 6) | (index << 3) | base;
+    uint8_t sib = (scale << 6) | (RegStorage::RegNum(index) << 3) | RegStorage::RegNum(base);
     code_buffer_.push_back(sib);
     DCHECK_EQ(0, entry->skeleton.immediate_bytes);
   } else {
-    code_buffer_.push_back(entry->skeleton.opcode + reg);
+    code_buffer_.push_back(entry->skeleton.opcode + RegStorage::RegNum(reg));
   }
   code_buffer_.push_back(disp & 0xFF);
   code_buffer_.push_back((disp >> 8) & 0xFF);
@@ -1079,10 +1055,11 @@
   code_buffer_.push_back(0);
   code_buffer_.push_back(0);
 
-  DCHECK_LT(reg, 8);
-  code_buffer_.push_back(0x58 + reg);  // pop reg
+  DCHECK_LT(RegStorage::RegNum(reg), 8);
+  code_buffer_.push_back(0x58 + RegStorage::RegNum(reg));  // pop reg
 
-  EmitRegImm(&X86Mir2Lir::EncodingMap[kX86Sub32RI], reg, offset + 5 /* size of call +0 */);
+  EmitRegImm(&X86Mir2Lir::EncodingMap[kX86Sub32RI], RegStorage::RegNum(reg),
+             offset + 5 /* size of call +0 */);
 }
 
 void X86Mir2Lir::EmitUnimplemented(const X86EncodingMap* entry, LIR* lir) {
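Note (illustrative sketch, not code from this change): the EmitModrm*/EmitDisp helpers above pack the standard x86 ModR/M and SIB bytes. The special cases follow from the encoding itself: rm=100 (ESP) means "a SIB byte follows", and mod=00 with rm=101 (EBP) means "disp32, no base", which is why [EBP] always needs an explicit displacement. Roughly, using the low 3-bit register numbers that RegStorage::RegNum() yields:

    #include <cstdint>

    // mod field selection, mirroring ModrmForDisp(): EBP (number 5) cannot use
    // the no-displacement form because mod=00, rm=101 is reserved for disp32.
    uint8_t ModForDisp(uint8_t base, int32_t disp) {
      if (disp == 0 && base != 5) return 0;        // no displacement
      if (disp >= -128 && disp <= 127) return 1;   // 8-bit displacement
      return 2;                                    // 32-bit displacement
    }

    uint8_t PackModRM(uint8_t mod, uint8_t reg, uint8_t rm) {
      return static_cast<uint8_t>((mod << 6) | (reg << 3) | rm);
    }

    // [ESP+disp] needs a SIB byte with index=100 (none) and base=100 (ESP);
    // scale=0, index=4, base=4 is the 0x24 byte pushed for the SP base case.
    uint8_t PackSIB(uint8_t scale, uint8_t index, uint8_t base) {
      return static_cast<uint8_t>((scale << 6) | (index << 3) | base);
    }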
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 06cc861..f701a1f 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -152,10 +152,10 @@
     LoadValueDirect(rl_method, rs_rX86_ARG2);
     store_method_addr_used_ = true;
   } else {
-    NewLIR1(kX86StartOfMethod, rX86_ARG2);
+    NewLIR1(kX86StartOfMethod, rs_rX86_ARG2.GetReg());
   }
-  NewLIR2(kX86PcRelAdr, rX86_ARG1, WrapPointer(tab_rec));
-  NewLIR2(kX86Add32RR, rX86_ARG1, rX86_ARG2);
+  NewLIR2(kX86PcRelAdr, rs_rX86_ARG1.GetReg(), WrapPointer(tab_rec));
+  NewLIR2(kX86Add32RR, rs_rX86_ARG1.GetReg(), rs_rX86_ARG2.GetReg());
   CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pHandleFillArrayData), rs_rX86_ARG0,
                           rs_rX86_ARG1, true);
 }
@@ -191,9 +191,9 @@
    * expanding the frame or flushing.  This leaves the utility
    * code with no spare temps.
    */
-  LockTemp(rX86_ARG0);
-  LockTemp(rX86_ARG1);
-  LockTemp(rX86_ARG2);
+  LockTemp(rs_rX86_ARG0);
+  LockTemp(rs_rX86_ARG1);
+  LockTemp(rs_rX86_ARG2);
 
   /* Build frame, return address already on stack */
   // TODO: 64 bit.
@@ -240,7 +240,7 @@
     // in case a signal comes in that's not using an alternate signal stack and the large frame may
     // have moved us outside of the reserved area at the end of the stack.
     // cmp rX86_SP, fs:[stack_end_]; jcc throw_slowpath
-    OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset<4>());
+    OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<4>());
     LIR* branch = OpCondBranch(kCondUlt, nullptr);
     AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_ - 4));
   }
@@ -249,15 +249,15 @@
 
   if (base_of_code_ != nullptr) {
     // We have been asked to save the address of the method start for later use.
-    setup_method_address_[0] = NewLIR1(kX86StartOfMethod, rX86_ARG0);
+    setup_method_address_[0] = NewLIR1(kX86StartOfMethod, rs_rX86_ARG0.GetReg());
     int displacement = SRegOffset(base_of_code_->s_reg_low);
     // Native pointer - must be natural word size.
     setup_method_address_[1] = StoreWordDisp(rs_rX86_SP, displacement, rs_rX86_ARG0);
   }
 
-  FreeTemp(rX86_ARG0);
-  FreeTemp(rX86_ARG1);
-  FreeTemp(rX86_ARG2);
+  FreeTemp(rs_rX86_ARG0);
+  FreeTemp(rs_rX86_ARG1);
+  FreeTemp(rs_rX86_ARG2);
 }
 
 void X86Mir2Lir::GenExitSequence() {
@@ -265,8 +265,8 @@
    * In the exit path, rX86_RET0/rX86_RET1 are live - make sure they aren't
    * allocated by the register utilities as temps.
    */
-  LockTemp(rX86_RET0);
-  LockTemp(rX86_RET1);
+  LockTemp(rs_rX86_RET0);
+  LockTemp(rs_rX86_RET1);
 
   NewLIR0(kPseudoMethodExit);
   UnSpillCoreRegs();
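Note (illustrative, not code from this change): the large-frame check emitted in GenEntrySequence above reduces to an unsigned compare of the adjusted stack pointer against the thread's stack_end_ value, taking the slow path when the new frame dips below the reserved region:

    #include <cstdint>

    // Hypothetical condition behind the kOpCmp/kCondUlt pair.
    bool NeedsStackOverflowSlowPath(uintptr_t sp_after_frame, uintptr_t stack_end) {
      return sp_after_frame < stack_end;  // branch taken -> StackOverflowSlowPath
    }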
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 760290c..c3ea55f 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -37,27 +37,21 @@
     LIR* LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest, int s_reg);
     LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
                          OpSize size);
-    // TODO: collapse r_dest, r_dest_hi
     LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                             RegStorage r_dest, RegStorage r_dest_hi, OpSize size, int s_reg);
+                             RegStorage r_dest, OpSize size, int s_reg);
     LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
     LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
     LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
     LIR* StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src);
     LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
                           OpSize size);
-    // TODO: collapse r_src, r_src_hi
     LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                              RegStorage r_src, RegStorage r_src_hi, OpSize size, int s_reg);
+                              RegStorage r_src, OpSize size, int s_reg);
     void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
 
     // Required for target - register utilities.
-    bool IsFpReg(int reg);
-    bool IsFpReg(RegStorage reg);
-    bool SameRegType(int reg1, int reg2);
     RegStorage AllocTypedTemp(bool fp_hint, int reg_class);
     RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
-    int S2d(int low_reg, int high_reg);
     RegStorage TargetReg(SpecialTargetRegister reg);
     RegStorage GetArgMappingToPhysicalReg(int arg_num);
     RegLocation GetReturnAlt();
@@ -66,16 +60,14 @@
     RegLocation LocCReturnDouble();
     RegLocation LocCReturnFloat();
     RegLocation LocCReturnWide();
-    uint32_t FpRegMask();
-    uint64_t GetRegMaskCommon(int reg);
+    uint64_t GetRegMaskCommon(RegStorage reg);
     void AdjustSpillMask();
     void ClobberCallerSave();
-    void FlushReg(RegStorage reg);
-    void FlushRegWide(RegStorage reg);
     void FreeCallTemps();
     void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
     void LockCallTemps();
-    void MarkPreservedSingle(int v_reg, int reg);
+    void MarkPreservedSingle(int v_reg, RegStorage reg);
+    void MarkPreservedDouble(int v_reg, RegStorage reg);
     void CompilerInitializeRegAlloc();
 
     // Required for target - miscellaneous.
@@ -252,7 +244,7 @@
     void OpRegCopyWide(RegStorage dest, RegStorage src);
     void OpTlsCmp(ThreadOffset<4> offset, int val);
 
-    void OpRegThreadMem(OpKind op, int r_dest, ThreadOffset<4> thread_offset);
+    void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset);
     void SpillCoreRegs();
     void UnSpillCoreRegs();
     static const X86EncodingMap EncodingMap[kX86Last];
@@ -261,12 +253,6 @@
     bool InexpensiveConstantLong(int64_t value);
     bool InexpensiveConstantDouble(int64_t value);
 
-    RegLocation UpdateLocWide(RegLocation loc);
-    RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update);
-    RegLocation EvalLoc(RegLocation loc, int reg_class, bool update);
-    RegStorage AllocTempDouble();
-    void ResetDefLocWide(RegLocation rl);
-
     /*
      * @brief x86 specific codegen for int operations.
      * @param opcode Operation to perform.
@@ -379,7 +365,6 @@
     void EmitUnimplemented(const X86EncodingMap* entry, LIR* lir);
     void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
                                   int64_t val, ConditionCode ccode);
-    void OpVectorRegCopyWide(uint8_t fp_reg, uint8_t low_reg, uint8_t high_reg);
     void GenConstWide(RegLocation rl_dest, int64_t value);
 
     static bool ProvidesFullMemoryBarrier(X86OpCode opcode);
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index f7b0c9d..1ed0b63 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -67,7 +67,7 @@
   RegStorage r_src1 = rl_src1.reg;
   RegStorage r_src2 = rl_src2.reg;
   if (r_dest == r_src2) {
-    r_src2 = AllocTempFloat();
+    r_src2 = AllocTempSingle();
     OpRegCopy(r_src2, r_dest);
   }
   OpRegCopy(r_dest, r_src1);
@@ -77,6 +77,12 @@
 
 void X86Mir2Lir::GenArithOpDouble(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+  DCHECK(rl_dest.wide);
+  DCHECK(rl_dest.fp);
+  DCHECK(rl_src1.wide);
+  DCHECK(rl_src1.fp);
+  DCHECK(rl_src2.wide);
+  DCHECK(rl_src2.fp);
   X86OpCode op = kX86Nop;
   RegLocation rl_result;
 
@@ -112,22 +118,14 @@
       LOG(FATAL) << "Unexpected opcode: " << opcode;
   }
   rl_src1 = LoadValueWide(rl_src1, kFPReg);
-  DCHECK(rl_src1.wide);
   rl_src2 = LoadValueWide(rl_src2, kFPReg);
-  DCHECK(rl_src2.wide);
   rl_result = EvalLoc(rl_dest, kFPReg, true);
-  DCHECK(rl_dest.wide);
-  DCHECK(rl_result.wide);
-  // TODO: update with direct 64-bit reg.
-  int r_dest = S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg());
-  int r_src1 = S2d(rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
-  int r_src2 = S2d(rl_src2.reg.GetLowReg(), rl_src2.reg.GetHighReg());
-  if (r_dest == r_src2) {
-    r_src2 = AllocTempDouble().GetLowReg() | X86_FP_DOUBLE;
-    OpRegCopy(RegStorage::Solo64(r_src2), RegStorage::Solo64(r_dest));
+  if (rl_result.reg == rl_src2.reg) {
+    rl_src2.reg = AllocTempDouble();
+    OpRegCopy(rl_src2.reg, rl_result.reg);
   }
-  OpRegCopy(RegStorage::Solo64(r_dest), RegStorage::Solo64(r_src1));
-  NewLIR2(op, r_dest, r_src2);
+  OpRegCopy(rl_result.reg, rl_src1.reg);
+  NewLIR2(op, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
   StoreValueWide(rl_dest, rl_result);
 }
 
@@ -141,16 +139,13 @@
 
   // If the source is in physical register, then put it in its location on stack.
   if (rl_src.location == kLocPhysReg) {
-    RegisterInfo* lo_info = GetRegInfo(rl_src.reg.GetLowReg());
+    RegisterInfo* reg_info = GetRegInfo(rl_src.reg);
 
-    if (lo_info != nullptr && lo_info->is_temp) {
+    if (reg_info != nullptr && reg_info->IsTemp()) {
       // Calling FlushSpecificReg because it will only write back VR if it is dirty.
-      FlushSpecificReg(lo_info);
-      // ResetDef for low/high to prevent NullifyRange from removing stores.
-      ResetDef(rl_src.reg.GetLowReg());
-      if (rl_src.reg.GetLowReg() != rl_src.reg.GetHighReg() && GetRegInfo(rl_src.reg.GetHighReg()) != nullptr) {
-        ResetDef(rl_src.reg.GetHighReg());
-      }
+      FlushSpecificReg(reg_info);
+      // ResetDef to prevent NullifyRange from removing stores.
+      ResetDef(rl_src.reg);
     } else {
       // It must have been register promoted if it is not a temp but is still in physical
       // register. Since we need it to be in memory to convert, we place it there now.
@@ -159,9 +154,10 @@
   }
 
   // Push the source virtual register onto the x87 stack.
-  LIR *fild64 = NewLIR2NoDest(kX86Fild64M, TargetReg(kSp).GetReg(), src_v_reg_offset + LOWORD_OFFSET);
+  LIR *fild64 = NewLIR2NoDest(kX86Fild64M, TargetReg(kSp).GetReg(),
+                              src_v_reg_offset + LOWORD_OFFSET);
   AnnotateDalvikRegAccess(fild64, (src_v_reg_offset + LOWORD_OFFSET) >> 2,
-      true /* is_load */, true /* is64bit */);
+                          true /* is_load */, true /* is64bit */);
 
   // Now pop off x87 stack and store it in the destination VR's stack location.
   int opcode = is_double ? kX86Fstp64M : kX86Fstp32M;
@@ -204,7 +200,6 @@
                                RegLocation rl_src) {
   RegisterClass rcSrc = kFPReg;
   X86OpCode op = kX86Nop;
-  int src_reg;
   RegLocation rl_result;
   switch (opcode) {
     case Instruction::INT_TO_FLOAT:
@@ -225,18 +220,17 @@
       break;
     case Instruction::FLOAT_TO_INT: {
       rl_src = LoadValue(rl_src, kFPReg);
-      src_reg = rl_src.reg.GetReg();
       // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
       ClobberSReg(rl_dest.s_reg_low);
       rl_result = EvalLoc(rl_dest, kCoreReg, true);
-      int temp_reg = AllocTempFloat().GetReg();
+      RegStorage temp_reg = AllocTempSingle();
 
       LoadConstant(rl_result.reg, 0x7fffffff);
-      NewLIR2(kX86Cvtsi2ssRR, temp_reg, rl_result.reg.GetReg());
-      NewLIR2(kX86ComissRR, src_reg, temp_reg);
+      NewLIR2(kX86Cvtsi2ssRR, temp_reg.GetReg(), rl_result.reg.GetReg());
+      NewLIR2(kX86ComissRR, rl_src.reg.GetReg(), temp_reg.GetReg());
       LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
       LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
-      NewLIR2(kX86Cvttss2siRR, rl_result.reg.GetReg(), src_reg);
+      NewLIR2(kX86Cvttss2siRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
       LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
       branch_na_n->target = NewLIR0(kPseudoTargetLabel);
       NewLIR2(kX86Xor32RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
@@ -247,18 +241,17 @@
     }
     case Instruction::DOUBLE_TO_INT: {
       rl_src = LoadValueWide(rl_src, kFPReg);
-      src_reg = rl_src.reg.GetLowReg();
       // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
       ClobberSReg(rl_dest.s_reg_low);
       rl_result = EvalLoc(rl_dest, kCoreReg, true);
-      int temp_reg = AllocTempDouble().GetLowReg() | X86_FP_DOUBLE;
+      RegStorage temp_reg = AllocTempDouble();
 
       LoadConstant(rl_result.reg, 0x7fffffff);
-      NewLIR2(kX86Cvtsi2sdRR, temp_reg, rl_result.reg.GetReg());
-      NewLIR2(kX86ComisdRR, src_reg, temp_reg);
+      NewLIR2(kX86Cvtsi2sdRR, temp_reg.GetReg(), rl_result.reg.GetReg());
+      NewLIR2(kX86ComisdRR, rl_src.reg.GetReg(), temp_reg.GetReg());
       LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
       LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
-      NewLIR2(kX86Cvttsd2siRR, rl_result.reg.GetReg(), src_reg);
+      NewLIR2(kX86Cvttsd2siRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
       LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
       branch_na_n->target = NewLIR0(kPseudoTargetLabel);
       NewLIR2(kX86Xor32RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
@@ -282,20 +275,18 @@
     default:
       LOG(INFO) << "Unexpected opcode: " << opcode;
   }
+  // At this point, target will be either float or double.
+  DCHECK(rl_dest.fp);
   if (rl_src.wide) {
     rl_src = LoadValueWide(rl_src, rcSrc);
-    src_reg = S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg());
   } else {
     rl_src = LoadValue(rl_src, rcSrc);
-    src_reg = rl_src.reg.GetReg();
   }
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR2(op, rl_result.reg.GetReg(), rl_src.reg.GetReg());
   if (rl_dest.wide) {
-    rl_result = EvalLoc(rl_dest, kFPReg, true);
-    NewLIR2(op, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()), src_reg);
     StoreValueWide(rl_dest, rl_result);
   } else {
-    rl_result = EvalLoc(rl_dest, kFPReg, true);
-    NewLIR2(op, rl_result.reg.GetReg(), src_reg);
     StoreValue(rl_dest, rl_result);
   }
 }
@@ -304,34 +295,28 @@
                           RegLocation rl_src1, RegLocation rl_src2) {
   bool single = (code == Instruction::CMPL_FLOAT) || (code == Instruction::CMPG_FLOAT);
   bool unordered_gt = (code == Instruction::CMPG_DOUBLE) || (code == Instruction::CMPG_FLOAT);
-  int src_reg1;
-  int src_reg2;
   if (single) {
     rl_src1 = LoadValue(rl_src1, kFPReg);
-    src_reg1 = rl_src1.reg.GetReg();
     rl_src2 = LoadValue(rl_src2, kFPReg);
-    src_reg2 = rl_src2.reg.GetReg();
   } else {
     rl_src1 = LoadValueWide(rl_src1, kFPReg);
-    src_reg1 = S2d(rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
     rl_src2 = LoadValueWide(rl_src2, kFPReg);
-    src_reg2 = S2d(rl_src2.reg.GetLowReg(), rl_src2.reg.GetHighReg());
   }
   // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
   ClobberSReg(rl_dest.s_reg_low);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   LoadConstantNoClobber(rl_result.reg, unordered_gt ? 1 : 0);
   if (single) {
-    NewLIR2(kX86UcomissRR, src_reg1, src_reg2);
+    NewLIR2(kX86UcomissRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
   } else {
-    NewLIR2(kX86UcomisdRR, src_reg1, src_reg2);
+    NewLIR2(kX86UcomisdRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
   }
   LIR* branch = NULL;
   if (unordered_gt) {
     branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
   }
   // If the result reg can't be byte accessed, use a jump and move instead of a set.
-  if (rl_result.reg.GetReg() >= 4) {
+  if (rl_result.reg.GetReg() >= rs_rX86_SP.GetReg()) {
     LIR* branch2 = NULL;
     if (unordered_gt) {
       branch2 = NewLIR2(kX86Jcc8, 0, kX86CondA);
@@ -363,8 +348,7 @@
     rl_src2 = mir_graph_->GetSrcWide(mir, 2);
     rl_src1 = LoadValueWide(rl_src1, kFPReg);
     rl_src2 = LoadValueWide(rl_src2, kFPReg);
-    NewLIR2(kX86UcomisdRR, S2d(rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg()),
-            S2d(rl_src2.reg.GetLowReg(), rl_src2.reg.GetHighReg()));
+    NewLIR2(kX86UcomisdRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
   } else {
     rl_src1 = mir_graph_->GetSrc(mir, 0);
     rl_src2 = mir_graph_->GetSrc(mir, 1);
@@ -442,8 +426,7 @@
   RegLocation rl_dest = InlineTargetWide(info);  // double place for result
   rl_src = LoadValueWide(rl_src, kFPReg);
   RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(kX86SqrtsdRR, S2d(rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg()),
-          S2d(rl_src.reg.GetLowReg(), rl_src.reg.GetHighReg()));
+  NewLIR2(kX86SqrtsdRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
   StoreValueWide(rl_dest, rl_result);
   return true;
 }
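Note (illustrative sketch, not code from this change): the FLOAT_TO_INT/DOUBLE_TO_INT sequences above — preload 0x7fffffff, COMISS/COMISD against it, JP for the unordered case, then CVTTSS2SI/CVTTSD2SI — target Java's saturating float-to-int semantics, with the truncating instruction supplying 0x80000000 for the negative-overflow case. The intended result, roughly:

    #include <cstdint>

    int32_t JavaFloatToInt(float f) {
      if (f != f) return 0;                         // NaN: the JP branch zeroes the result
      if (f >= 2147483648.0f) return INT32_MAX;     // overflow branch keeps 0x7fffffff
      if (f <= -2147483648.0f) return INT32_MIN;    // cvttss2si saturates to 0x80000000
      return static_cast<int32_t>(f);               // in range: truncate toward zero
    }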
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 96c4cbe..4446f43 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -33,18 +33,18 @@
                             RegLocation rl_src2) {
   FlushAllRegs();
   LockCallTemps();  // Prepare for explicit register usage
-  RegStorage r_tmp1(RegStorage::k64BitPair, r0, r1);
-  RegStorage r_tmp2(RegStorage::k64BitPair, r2, r3);
+  RegStorage r_tmp1 = RegStorage::MakeRegPair(rs_r0, rs_r1);
+  RegStorage r_tmp2 = RegStorage::MakeRegPair(rs_r2, rs_r3);
   LoadValueDirectWideFixed(rl_src1, r_tmp1);
   LoadValueDirectWideFixed(rl_src2, r_tmp2);
   // Compute (r1:r0) = (r1:r0) - (r3:r2)
   OpRegReg(kOpSub, rs_r0, rs_r2);  // r0 = r0 - r2
   OpRegReg(kOpSbc, rs_r1, rs_r3);  // r1 = r1 - r3 - CF
-  NewLIR2(kX86Set8R, r2, kX86CondL);  // r2 = (r1:r0) < (r3:r2) ? 1 : 0
-  NewLIR2(kX86Movzx8RR, r2, r2);
+  NewLIR2(kX86Set8R, rs_r2.GetReg(), kX86CondL);  // r2 = (r1:r0) < (r3:r2) ? 1 : 0
+  NewLIR2(kX86Movzx8RR, rs_r2.GetReg(), rs_r2.GetReg());
   OpReg(kOpNeg, rs_r2);         // r2 = -r2
   OpRegReg(kOpOr, rs_r0, rs_r1);   // r0 = high | low - sets ZF
-  NewLIR2(kX86Set8R, r0, kX86CondNz);  // r0 = (r1:r0) != (r3:r2) ? 1 : 0
+  NewLIR2(kX86Set8R, rs_r0.GetReg(), kX86CondNz);  // r0 = (r1:r0) != (r3:r2) ? 1 : 0
   NewLIR2(kX86Movzx8RR, r0, r0);
   OpRegReg(kOpOr, rs_r0, rs_r2);   // r0 = r0 | r2
   RegLocation rl_result = LocCReturn();
@@ -106,7 +106,7 @@
   if (r_src.IsPair()) {
     r_src = r_src.GetLow();
   }
-  if (X86_FPREG(r_dest.GetReg()) || X86_FPREG(r_src.GetReg()))
+  if (r_dest.IsFloat() || r_src.IsFloat())
     return OpFpRegCopy(r_dest, r_src);
   LIR* res = RawLIR(current_dalvik_offset_, kX86Mov32RR,
                     r_dest.GetReg(), r_src.GetReg());
@@ -125,31 +125,28 @@
 
 void X86Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
   if (r_dest != r_src) {
-    // FIXME: handle k64BitSolo when we start using them.
-    DCHECK(r_dest.IsPair());
-    DCHECK(r_src.IsPair());
-    bool dest_fp = X86_FPREG(r_dest.GetLowReg());
-    bool src_fp = X86_FPREG(r_src.GetLowReg());
+    bool dest_fp = r_dest.IsFloat();
+    bool src_fp = r_src.IsFloat();
     if (dest_fp) {
       if (src_fp) {
-        // TODO: we ought to handle this case here - reserve OpRegCopy for 32-bit copies.
-        OpRegCopy(RegStorage::Solo64(S2d(r_dest.GetLowReg(), r_dest.GetHighReg())),
-                  RegStorage::Solo64(S2d(r_src.GetLowReg(), r_src.GetHighReg())));
+        OpRegCopy(r_dest, r_src);
       } else {
         // TODO: Prevent this from happening in the code. The result is often
         // unused or could have been loaded more easily from memory.
-        NewLIR2(kX86MovdxrRR, r_dest.GetLowReg(), r_src.GetLowReg());
+        NewLIR2(kX86MovdxrRR, r_dest.GetReg(), r_src.GetLowReg());
         RegStorage r_tmp = AllocTempDouble();
-        NewLIR2(kX86MovdxrRR, r_tmp.GetLowReg(), r_src.GetHighReg());
-        NewLIR2(kX86PunpckldqRR, r_dest.GetLowReg(), r_tmp.GetLowReg());
+        NewLIR2(kX86MovdxrRR, r_tmp.GetReg(), r_src.GetHighReg());
+        NewLIR2(kX86PunpckldqRR, r_dest.GetReg(), r_tmp.GetReg());
         FreeTemp(r_tmp);
       }
     } else {
       if (src_fp) {
-        NewLIR2(kX86MovdrxRR, r_dest.GetLowReg(), r_src.GetLowReg());
-        NewLIR2(kX86PsrlqRI, r_src.GetLowReg(), 32);
-        NewLIR2(kX86MovdrxRR, r_dest.GetHighReg(), r_src.GetLowReg());
+        NewLIR2(kX86MovdrxRR, r_dest.GetLowReg(), r_src.GetReg());
+        NewLIR2(kX86PsrlqRI, r_src.GetReg(), 32);
+        NewLIR2(kX86MovdrxRR, r_dest.GetHighReg(), r_src.GetReg());
       } else {
+        DCHECK(r_dest.IsPair());
+        DCHECK(r_src.IsPair());
         // Handle overlap
         if (r_src.GetHighReg() == r_dest.GetLowReg() && r_src.GetLowReg() == r_dest.GetHighReg()) {
           // Deal with cycles.
@@ -289,8 +286,8 @@
 
   FlushAllRegs();
   LockCallTemps();  // Prepare for explicit register usage
-  RegStorage r_tmp1(RegStorage::k64BitPair, r0, r1);
-  RegStorage r_tmp2(RegStorage::k64BitPair, r2, r3);
+  RegStorage r_tmp1 = RegStorage::MakeRegPair(rs_r0, rs_r1);
+  RegStorage r_tmp2 = RegStorage::MakeRegPair(rs_r2, rs_r3);
   LoadValueDirectWideFixed(rl_src1, r_tmp1);
   LoadValueDirectWideFixed(rl_src2, r_tmp2);
   // Swap operands and condition code to prevent use of zero flag.
@@ -452,8 +449,7 @@
   LockCallTemps();  // Prepare for explicit register usage.
 
   // Assume that the result will be in EDX.
-  RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, rs_r2,
-                           INVALID_SREG, INVALID_SREG};
+  RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rs_r2, INVALID_SREG, INVALID_SREG};
 
   // handle div/rem by 1 special case.
   if (imm == 1) {
@@ -516,7 +512,7 @@
       // We will need the value later.
       if (rl_src.location == kLocPhysReg) {
         // We can use it directly.
-        DCHECK(rl_src.reg.GetReg() != r0 && rl_src.reg.GetReg() != r2);
+        DCHECK(rl_src.reg.GetReg() != rs_r0.GetReg() && rl_src.reg.GetReg() != rs_r2.GetReg());
         numerator_reg = rl_src.reg;
       } else {
         numerator_reg = rs_r1;
@@ -532,21 +528,21 @@
     LoadConstantNoClobber(rs_r2, magic);
 
     // EDX:EAX = magic & dividend.
-    NewLIR1(kX86Imul32DaR, r2);
+    NewLIR1(kX86Imul32DaR, rs_r2.GetReg());
 
     if (imm > 0 && magic < 0) {
       // Add numerator to EDX.
       DCHECK(numerator_reg.Valid());
-      NewLIR2(kX86Add32RR, r2, numerator_reg.GetReg());
+      NewLIR2(kX86Add32RR, rs_r2.GetReg(), numerator_reg.GetReg());
     } else if (imm < 0 && magic > 0) {
       DCHECK(numerator_reg.Valid());
-      NewLIR2(kX86Sub32RR, r2, numerator_reg.GetReg());
+      NewLIR2(kX86Sub32RR, rs_r2.GetReg(), numerator_reg.GetReg());
     }
 
     // Do we need the shift?
     if (shift != 0) {
       // Shift EDX by 'shift' bits.
-      NewLIR2(kX86Sar32RI, r2, shift);
+      NewLIR2(kX86Sar32RI, rs_r2.GetReg(), shift);
     }
 
     // Add 1 to EDX if EDX < 0.
@@ -555,10 +551,10 @@
     OpRegCopy(rs_r0, rs_r2);
 
     // Move sign bit to bit 0, zeroing the rest.
-    NewLIR2(kX86Shr32RI, r2, 31);
+    NewLIR2(kX86Shr32RI, rs_r2.GetReg(), 31);
 
     // EDX = EDX + EAX.
-    NewLIR2(kX86Add32RR, r2, r0);
+    NewLIR2(kX86Add32RR, rs_r2.GetReg(), rs_r0.GetReg());
 
     // Quotient is in EDX.
     if (!is_div) {
@@ -571,7 +567,7 @@
       OpRegRegImm(kOpMul, rs_r2, rs_r2, imm);
 
       // EDX -= EAX.
-      NewLIR2(kX86Sub32RR, r0, r2);
+      NewLIR2(kX86Sub32RR, rs_r0.GetReg(), rs_r2.GetReg());
 
       // For this case, return the result in EAX.
       rl_result.reg.SetReg(r0);
@@ -625,12 +621,11 @@
   // Expected case.
   minus_one_branch->target = NewLIR0(kPseudoTargetLabel);
   minint_branch->target = minus_one_branch->target;
-  NewLIR1(kX86Idivmod32DaR, r1);
+  NewLIR1(kX86Idivmod32DaR, rs_r1.GetReg());
   done->target = NewLIR0(kPseudoTargetLabel);
 
   // Result is in EAX for div and EDX for rem.
-  RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, rs_r0,
-                           INVALID_SREG, INVALID_SREG};
+  RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rs_r0, INVALID_SREG, INVALID_SREG};
   if (!is_div) {
     rl_result.reg.SetReg(r2);
   }
@@ -741,16 +736,16 @@
     // TODO: CFI support.
     FlushAllRegs();
     LockCallTemps();
-    RegStorage r_tmp1(RegStorage::k64BitPair, rAX, rDX);
-    RegStorage r_tmp2(RegStorage::k64BitPair, rBX, rCX);
+    RegStorage r_tmp1 = RegStorage::MakeRegPair(rs_rAX, rs_rDX);
+    RegStorage r_tmp2 = RegStorage::MakeRegPair(rs_rBX, rs_rCX);
     LoadValueDirectWideFixed(rl_src_expected, r_tmp1);
     LoadValueDirectWideFixed(rl_src_new_value, r_tmp2);
-    NewLIR1(kX86Push32R, rDI);
-    MarkTemp(rDI);
-    LockTemp(rDI);
-    NewLIR1(kX86Push32R, rSI);
-    MarkTemp(rSI);
-    LockTemp(rSI);
+    NewLIR1(kX86Push32R, rs_rDI.GetReg());
+    MarkTemp(rs_rDI);
+    LockTemp(rs_rDI);
+    NewLIR1(kX86Push32R, rs_rSI.GetReg());
+    MarkTemp(rs_rSI);
+    LockTemp(rs_rSI);
     const int push_offset = 4 /* push edi */ + 4 /* push esi */;
     int srcObjSp = IsInReg(this, rl_src_obj, rs_rSI) ? 0
                 : (IsInReg(this, rl_src_obj, rs_rDI) ? 4
@@ -761,22 +756,23 @@
                    : (IsInReg(this, rl_src_offset, rs_rDI) ? 4
                    : (SRegOffset(rl_src_offset.s_reg_low) + push_offset));
     LoadWordDisp(TargetReg(kSp), srcOffsetSp, rs_rSI);
-    NewLIR4(kX86LockCmpxchg8bA, rDI, rSI, 0, 0);
+    NewLIR4(kX86LockCmpxchg8bA, rs_rDI.GetReg(), rs_rSI.GetReg(), 0, 0);
 
     // After a store we need to insert barrier in case of potential load. Since the
     // locked cmpxchg has full barrier semantics, only a scheduling barrier will be generated.
     GenMemBarrier(kStoreLoad);
 
-    FreeTemp(rSI);
-    UnmarkTemp(rSI);
-    NewLIR1(kX86Pop32R, rSI);
-    FreeTemp(rDI);
-    UnmarkTemp(rDI);
-    NewLIR1(kX86Pop32R, rDI);
+    FreeTemp(rs_rSI);
+    UnmarkTemp(rs_rSI);
+    NewLIR1(kX86Pop32R, rs_rSI.GetReg());
+    FreeTemp(rs_rDI);
+    UnmarkTemp(rs_rDI);
+    NewLIR1(kX86Pop32R, rs_rDI.GetReg());
     FreeCallTemps();
   } else {
     // EAX must hold expected for CMPXCHG. Neither rl_new_value, nor r_ptr may be in EAX.
     FlushReg(rs_r0);
+    Clobber(rs_r0);
     LockTemp(rs_r0);
 
     RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
@@ -784,9 +780,9 @@
 
     if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
       // Mark card for object assuming new value is stored.
-      FreeTemp(r0);  // Temporarily release EAX for MarkGCCard().
+      FreeTemp(rs_r0);  // Temporarily release EAX for MarkGCCard().
       MarkGCCard(rl_new_value.reg, rl_object.reg);
-      LockTemp(r0);
+      LockTemp(rs_r0);
     }
 
     RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
@@ -797,7 +793,7 @@
     // locked cmpxchg has full barrier semantics, only a scheduling barrier will be generated.
     GenMemBarrier(kStoreLoad);
 
-    FreeTemp(r0);
+    FreeTemp(rs_r0);
   }
 
   // Convert ZF to boolean
@@ -1003,8 +999,8 @@
       LoadBaseDisp(rs_rX86_SP, displacement, dest, k32, sreg);
       break;
     default:
-      m = NewLIR4(IS_SIMM8(val) ? kX86Imul32RMI8 : kX86Imul32RMI, dest.GetReg(), rX86_SP,
-                  displacement, val);
+      m = NewLIR4(IS_SIMM8(val) ? kX86Imul32RMI8 : kX86Imul32RMI, dest.GetReg(),
+                  rs_rX86_SP.GetReg(), displacement, val);
       AnnotateDalvikRegAccess(m, displacement >> 2, true /* is_load */, true /* is_64bit */);
       break;
   }
@@ -1062,7 +1058,7 @@
     }
 
     // ECX <- ECX + EAX  (2H * 1L) + (1H * 2L)
-    NewLIR2(kX86Add32RR, r1, r0);
+    NewLIR2(kX86Add32RR, rs_r1.GetReg(), rs_r0.GetReg());
 
     // EAX <- 2L
     LoadConstantNoClobber(rs_r0, val_lo);
@@ -1071,18 +1067,17 @@
     if (src1_in_reg) {
       NewLIR1(kX86Mul32DaR, rl_src1.reg.GetLowReg());
     } else {
-      LIR *m = NewLIR2(kX86Mul32DaM, rX86_SP, displacement + LOWORD_OFFSET);
+      LIR *m = NewLIR2(kX86Mul32DaM, rs_rX86_SP.GetReg(), displacement + LOWORD_OFFSET);
       AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
                               true /* is_load */, true /* is_64bit */);
     }
 
     // EDX <- EDX + ECX (add high words)
-    NewLIR2(kX86Add32RR, r2, r1);
+    NewLIR2(kX86Add32RR, rs_r2.GetReg(), rs_r1.GetReg());
 
     // Result is EDX:EAX
-    RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
-                             RegStorage::MakeRegPair(rs_r0, rs_r2),
-                             INVALID_SREG, INVALID_SREG};
+    RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
+                             RegStorage::MakeRegPair(rs_r0, rs_r2), INVALID_SREG, INVALID_SREG};
     StoreValueWide(rl_dest, rl_result);
     return;
   }
@@ -1103,7 +1098,7 @@
 
   // ECX <- 1H
   if (src1_in_reg) {
-    NewLIR2(kX86Mov32RR, r1, rl_src1.reg.GetHighReg());
+    NewLIR2(kX86Mov32RR, rs_r1.GetReg(), rl_src1.reg.GetHighReg());
   } else {
     LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1,
                  k32, GetSRegHi(rl_src1.s_reg_low));
@@ -1113,20 +1108,21 @@
     // Take advantage of the fact that the values are the same.
     // ECX <- ECX * 2L  (1H * 2L)
     if (src2_in_reg) {
-      NewLIR2(kX86Imul32RR, r1, rl_src2.reg.GetLowReg());
+      NewLIR2(kX86Imul32RR, rs_r1.GetReg(), rl_src2.reg.GetLowReg());
     } else {
       int displacement = SRegOffset(rl_src2.s_reg_low);
-      LIR *m = NewLIR3(kX86Imul32RM, r1, rX86_SP, displacement + LOWORD_OFFSET);
+      LIR *m = NewLIR3(kX86Imul32RM, rs_r1.GetReg(), rs_rX86_SP.GetReg(),
+                       displacement + LOWORD_OFFSET);
       AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
                               true /* is_load */, true /* is_64bit */);
     }
 
     // ECX <- 2*ECX (2H * 1L) + (1H * 2L)
-    NewLIR2(kX86Add32RR, r1, r1);
+    NewLIR2(kX86Add32RR, rs_r1.GetReg(), rs_r1.GetReg());
   } else {
     // EAX <- 2H
     if (src2_in_reg) {
-      NewLIR2(kX86Mov32RR, r0, rl_src2.reg.GetHighReg());
+      NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetHighReg());
     } else {
       LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0,
                    k32, GetSRegHi(rl_src2.s_reg_low));
@@ -1134,31 +1130,33 @@
 
     // EAX <- EAX * 1L  (2H * 1L)
     if (src1_in_reg) {
-      NewLIR2(kX86Imul32RR, r0, rl_src1.reg.GetLowReg());
+      NewLIR2(kX86Imul32RR, rs_r0.GetReg(), rl_src1.reg.GetLowReg());
     } else {
       int displacement = SRegOffset(rl_src1.s_reg_low);
-      LIR *m = NewLIR3(kX86Imul32RM, r0, rX86_SP, displacement + LOWORD_OFFSET);
+      LIR *m = NewLIR3(kX86Imul32RM, rs_r0.GetReg(), rs_rX86_SP.GetReg(),
+                       displacement + LOWORD_OFFSET);
       AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
                               true /* is_load */, true /* is_64bit */);
     }
 
     // ECX <- ECX * 2L  (1H * 2L)
     if (src2_in_reg) {
-      NewLIR2(kX86Imul32RR, r1, rl_src2.reg.GetLowReg());
+      NewLIR2(kX86Imul32RR, rs_r1.GetReg(), rl_src2.reg.GetLowReg());
     } else {
       int displacement = SRegOffset(rl_src2.s_reg_low);
-      LIR *m = NewLIR3(kX86Imul32RM, r1, rX86_SP, displacement + LOWORD_OFFSET);
+      LIR *m = NewLIR3(kX86Imul32RM, rs_r1.GetReg(), rs_rX86_SP.GetReg(),
+                       displacement + LOWORD_OFFSET);
       AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
                               true /* is_load */, true /* is_64bit */);
     }
 
     // ECX <- ECX + EAX  (2H * 1L) + (1H * 2L)
-    NewLIR2(kX86Add32RR, r1, r0);
+    NewLIR2(kX86Add32RR, rs_r1.GetReg(), rs_r0.GetReg());
   }
 
   // EAX <- 2L
   if (src2_in_reg) {
-    NewLIR2(kX86Mov32RR, r0, rl_src2.reg.GetLowReg());
+    NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetLowReg());
   } else {
     LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0,
                  k32, rl_src2.s_reg_low);
@@ -1169,16 +1167,16 @@
     NewLIR1(kX86Mul32DaR, rl_src1.reg.GetLowReg());
   } else {
     int displacement = SRegOffset(rl_src1.s_reg_low);
-    LIR *m = NewLIR2(kX86Mul32DaM, rX86_SP, displacement + LOWORD_OFFSET);
+    LIR *m = NewLIR2(kX86Mul32DaM, rs_rX86_SP.GetReg(), displacement + LOWORD_OFFSET);
     AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
                             true /* is_load */, true /* is_64bit */);
   }
 
   // EDX <- EDX + ECX (add high words)
-  NewLIR2(kX86Add32RR, r2, r1);
+  NewLIR2(kX86Add32RR, rs_r2.GetReg(), rs_r1.GetReg());
 
   // Result is EDX:EAX
-  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
                            RegStorage::MakeRegPair(rs_r0, rs_r2), INVALID_SREG, INVALID_SREG};
   StoreValueWide(rl_dest, rl_result);
 }
@@ -1295,12 +1293,12 @@
 
   // Get one of the source operands into temporary register.
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  if (IsTemp(rl_src1.reg.GetLowReg()) && IsTemp(rl_src1.reg.GetHighReg())) {
+  if (IsTemp(rl_src1.reg.GetLow()) && IsTemp(rl_src1.reg.GetHigh())) {
     GenLongRegOrMemOp(rl_src1, rl_src2, op);
   } else if (is_commutative) {
     rl_src2 = LoadValueWide(rl_src2, kCoreReg);
     // We need at least one of them to be a temporary.
-    if (!(IsTemp(rl_src2.reg.GetLowReg()) && IsTemp(rl_src2.reg.GetHighReg()))) {
+    if (!(IsTemp(rl_src2.reg.GetLow()) && IsTemp(rl_src2.reg.GetHigh()))) {
       rl_src1 = ForceTempWide(rl_src1);
       GenLongRegOrMemOp(rl_src1, rl_src2, op);
     } else {
@@ -1358,7 +1356,7 @@
   StoreValueWide(rl_dest, rl_result);
 }
 
-void X86Mir2Lir::OpRegThreadMem(OpKind op, int r_dest, ThreadOffset<4> thread_offset) {
+void X86Mir2Lir::OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset) {
   X86OpCode opcode = kX86Bkpt;
   switch (op) {
   case kOpCmp: opcode = kX86Cmp32RT;  break;
@@ -1367,7 +1365,7 @@
     LOG(FATAL) << "Bad opcode: " << op;
     break;
   }
-  NewLIR2(opcode, r_dest, thread_offset.Int32Value());
+  NewLIR2(opcode, r_dest.GetReg(), thread_offset.Int32Value());
 }
 
 /*
@@ -1375,7 +1373,7 @@
  */
 void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                              RegLocation rl_index, RegLocation rl_dest, int scale) {
-  RegisterClass reg_class = oat_reg_class_by_size(size);
+  RegisterClass reg_class = RegClassBySize(size);
   int len_offset = mirror::Array::LengthOffset().Int32Value();
   RegLocation rl_result;
   rl_array = LoadValue(rl_array, kCoreReg);
@@ -1410,13 +1408,11 @@
     }
   }
   rl_result = EvalLoc(rl_dest, reg_class, true);
+  LoadBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_result.reg, size,
+                      INVALID_SREG);
   if ((size == k64) || (size == kDouble)) {
-    LoadBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_result.reg.GetLow(),
-                        rl_result.reg.GetHigh(), size, INVALID_SREG);
     StoreValueWide(rl_dest, rl_result);
   } else {
-    LoadBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_result.reg,
-                        RegStorage::InvalidReg(), size, INVALID_SREG);
     StoreValue(rl_dest, rl_result);
   }
 }
@@ -1427,7 +1423,7 @@
  */
 void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                              RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
-  RegisterClass reg_class = oat_reg_class_by_size(size);
+  RegisterClass reg_class = RegClassBySize(size);
   int len_offset = mirror::Array::LengthOffset().Int32Value();
   int data_offset;
 
@@ -1466,24 +1462,19 @@
     rl_src = LoadValue(rl_src, reg_class);
   }
   // If the src reg can't be byte accessed, move it to a temp first.
-  if ((size == kSignedByte || size == kUnsignedByte) && rl_src.reg.GetReg() >= 4) {
+  if ((size == kSignedByte || size == kUnsignedByte) &&
+      rl_src.reg.GetRegNum() >= rs_rX86_SP.GetRegNum()) {
     RegStorage temp = AllocTemp();
     OpRegCopy(temp, rl_src.reg);
-    StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, temp,
-                         RegStorage::InvalidReg(), size, INVALID_SREG);
+    StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, temp, size, INVALID_SREG);
   } else {
-    if (rl_src.wide) {
-      StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_src.reg.GetLow(),
-                           rl_src.reg.GetHigh(), size, INVALID_SREG);
-    } else {
-      StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_src.reg,
-                           RegStorage::InvalidReg(), size, INVALID_SREG);
-    }
+    StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_src.reg, size,
+                         INVALID_SREG);
   }
   if (card_mark) {
     // Free rl_index if its a temp. Ensures there are 2 free regs for card mark.
     if (!constant_index) {
-      FreeTemp(rl_index.reg.GetReg());
+      FreeTemp(rl_index.reg);
     }
     MarkGCCard(rl_src.reg, rl_array.reg);
   }
@@ -1501,7 +1492,7 @@
         LoadConstant(rl_result.reg.GetLow(), 0);
       } else if (shift_amount > 31) {
         OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetLow());
-        FreeTemp(rl_src.reg.GetHighReg());
+        FreeTemp(rl_src.reg.GetHigh());
         NewLIR2(kX86Sal32RI, rl_result.reg.GetHighReg(), shift_amount - 32);
         LoadConstant(rl_result.reg.GetLow(), 0);
       } else {
@@ -1679,7 +1670,7 @@
                                 int32_t value) {
   bool in_mem = loc.location != kLocPhysReg;
   bool byte_imm = IS_SIMM8(value);
-  DCHECK(in_mem || !IsFpReg(loc.reg));
+  DCHECK(in_mem || !loc.reg.IsFloat());
   switch (op) {
     case Instruction::ADD_LONG:
     case Instruction::ADD_LONG_2ADDR:
@@ -1763,7 +1754,7 @@
 
   RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
   DCHECK_EQ(rl_result.location, kLocPhysReg);
-  DCHECK(!IsFpReg(rl_result.reg));
+  DCHECK(!rl_result.reg.IsFloat());
 
   if (!IsNoOp(op, val_lo)) {
     X86OpCode x86op = GetOpcode(op, rl_result, false, val_lo);
@@ -1788,8 +1779,7 @@
   // Can we do this directly into the destination registers?
   if (rl_dest.location == kLocPhysReg && rl_src1.location == kLocPhysReg &&
       rl_dest.reg.GetLowReg() == rl_src1.reg.GetLowReg() &&
-      rl_dest.reg.GetHighReg() == rl_src1.reg.GetHighReg() &&
-      !IsFpReg(rl_dest.reg)) {
+      rl_dest.reg.GetHighReg() == rl_src1.reg.GetHighReg() && !rl_dest.reg.IsFloat()) {
     if (!IsNoOp(op, val_lo)) {
       X86OpCode x86op = GetOpcode(op, rl_dest, false, val_lo);
       NewLIR2(x86op, rl_dest.reg.GetLowReg(), val_lo);
@@ -1829,9 +1819,9 @@
   RegStorage result_reg = rl_result.reg;
 
   // SETcc only works with EAX..EDX.
-  if (result_reg == object.reg || result_reg.GetReg() >= 4) {
+  if (result_reg == object.reg || result_reg.GetRegNum() >= rs_rX86_SP.GetRegNum()) {
     result_reg = AllocTypedTemp(false, kCoreReg);
-    DCHECK_LT(result_reg.GetReg(), 4);
+    DCHECK_LT(result_reg.GetRegNum(), rs_rX86_SP.GetRegNum());
   }
 
   // Assume that there is no match.
@@ -1930,7 +1920,7 @@
   RegLocation rl_result = GetReturn(false);
 
   // SETcc only works with EAX..EDX.
-  DCHECK_LT(rl_result.reg.GetReg(), 4);
+  DCHECK_LT(rl_result.reg.GetRegNum(), 4);
 
   // Is the class NULL?
   LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
@@ -2091,7 +2081,7 @@
           OpMemReg(op, rl_result, t_reg.GetReg());
           FreeTemp(t_reg);
           return;
-        } else if (!IsFpReg(rl_result.reg.GetReg())) {
+        } else if (!rl_result.reg.IsFloat()) {
           // Can do this directly into the result register
           OpRegReg(op, rl_result.reg, t_reg);
           FreeTemp(t_reg);
@@ -2118,7 +2108,7 @@
             OpRegMem(op, rl_result.reg, rl_rhs);
             StoreFinalValue(rl_dest, rl_result);
             return;
-          } else if (!IsFpReg(rl_rhs.reg)) {
+          } else if (!rl_rhs.reg.IsFloat()) {
             OpRegReg(op, rl_result.reg, rl_rhs.reg);
             StoreFinalValue(rl_dest, rl_result);
             return;
@@ -2129,7 +2119,7 @@
           // Okay, we can do this into memory.
           OpMemReg(op, rl_result, rl_rhs.reg.GetReg());
           return;
-        } else if (!IsFpReg(rl_result.reg)) {
+        } else if (!rl_result.reg.IsFloat()) {
           // Can do this directly into the result register.
           OpRegReg(op, rl_result.reg, rl_rhs.reg);
           StoreFinalValue(rl_dest, rl_result);
@@ -2195,10 +2185,10 @@
 
 bool X86Mir2Lir::IsOperationSafeWithoutTemps(RegLocation rl_lhs, RegLocation rl_rhs) {
   // If we have non-core registers, then we can't do good things.
-  if (rl_lhs.location == kLocPhysReg && IsFpReg(rl_lhs.reg.GetReg())) {
+  if (rl_lhs.location == kLocPhysReg && rl_lhs.reg.IsFloat()) {
     return false;
   }
-  if (rl_rhs.location == kLocPhysReg && IsFpReg(rl_rhs.reg.GetReg())) {
+  if (rl_rhs.location == kLocPhysReg && rl_rhs.reg.IsFloat()) {
     return false;
   }
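Note (illustrative sketch, not code from this change): the divide-by-constant lowering above (imul, conditional add/sub of the numerator, sar, shr 31, add) is the usual reciprocal-multiplication scheme; 'magic' and 'shift' are assumed to be precomputed for the divisor elsewhere and are not shown in this hunk. A rough scalar equivalent:

    #include <cstdint>

    // n: numerator, d: constant divisor, magic/shift: its reciprocal parameters.
    int32_t DivByConstant(int32_t n, int32_t d, int32_t magic, int shift) {
      int64_t prod = static_cast<int64_t>(magic) * n;   // imul -> EDX:EAX
      int32_t q = static_cast<int32_t>(prod >> 32);     // high half lands in EDX
      if (d > 0 && magic < 0) q += n;                   // add EDX, numerator
      if (d < 0 && magic > 0) q -= n;                   // sub EDX, numerator
      q >>= shift;                                      // sar EDX, shift (arithmetic)
      q += static_cast<int32_t>(static_cast<uint32_t>(q) >> 31);  // +1 when negative
      return q;                                         // remainder path: n - q * d
    }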
 
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 3e3fa72..05bef52 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -26,27 +26,53 @@
 
 namespace art {
 
-// FIXME: restore "static" when usage uncovered
-/*static*/ int core_regs[] = {
-  rAX, rCX, rDX, rBX, rX86_SP, rBP, rSI, rDI
+static const RegStorage core_regs_arr[] = {
+    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP, rs_rBP, rs_rSI, rs_rDI
 #ifdef TARGET_REX_SUPPORT
-  r8, r9, r10, r11, r12, r13, r14, 15
+    rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15
 #endif
 };
-/*static*/ int ReservedRegs[] = {rX86_SP};
-/*static*/ int core_temps[] = {rAX, rCX, rDX, rBX};
-/*static*/ int FpRegs[] = {
-  fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+static const RegStorage sp_regs_arr[] = {
+    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
 #ifdef TARGET_REX_SUPPORT
-  fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
+    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
 #endif
 };
-/*static*/ int fp_temps[] = {
-  fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+static const RegStorage dp_regs_arr[] = {
+    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
 #ifdef TARGET_REX_SUPPORT
-  fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
+    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
 #endif
 };
+static const RegStorage reserved_regs_arr[] = {rs_rX86_SP};
+static const RegStorage core_temps_arr[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX};
+static const RegStorage sp_temps_arr[] = {
+    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
+#ifdef TARGET_REX_SUPPORT
+    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
+#endif
+};
+static const RegStorage dp_temps_arr[] = {
+    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
+#ifdef TARGET_REX_SUPPORT
+    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
+#endif
+};
+
+static const std::vector<RegStorage> core_regs(core_regs_arr,
+    core_regs_arr + sizeof(core_regs_arr) / sizeof(core_regs_arr[0]));
+static const std::vector<RegStorage> sp_regs(sp_regs_arr,
+    sp_regs_arr + sizeof(sp_regs_arr) / sizeof(sp_regs_arr[0]));
+static const std::vector<RegStorage> dp_regs(dp_regs_arr,
+    dp_regs_arr + sizeof(dp_regs_arr) / sizeof(dp_regs_arr[0]));
+static const std::vector<RegStorage> reserved_regs(reserved_regs_arr,
+    reserved_regs_arr + sizeof(reserved_regs_arr) / sizeof(reserved_regs_arr[0]));
+static const std::vector<RegStorage> core_temps(core_temps_arr,
+    core_temps_arr + sizeof(core_temps_arr) / sizeof(core_temps_arr[0]));
+static const std::vector<RegStorage> sp_temps(sp_temps_arr,
+    sp_temps_arr + sizeof(sp_temps_arr) / sizeof(sp_temps_arr[0]));
+static const std::vector<RegStorage> dp_temps(dp_temps_arr,
+    dp_temps_arr + sizeof(dp_temps_arr) / sizeof(dp_temps_arr[0]));
 
 RegLocation X86Mir2Lir::LocCReturn() {
   return x86_loc_c_return;
@@ -66,29 +92,29 @@
 
 // Return a target-dependent special register.
 RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
-  int res_reg = RegStorage::kInvalidRegVal;
+  RegStorage res_reg = RegStorage::InvalidReg();
   switch (reg) {
-    case kSelf: res_reg = rX86_SELF; break;
-    case kSuspend: res_reg =  rX86_SUSPEND; break;
-    case kLr: res_reg =  rX86_LR; break;
-    case kPc: res_reg =  rX86_PC; break;
-    case kSp: res_reg =  rX86_SP; break;
-    case kArg0: res_reg = rX86_ARG0; break;
-    case kArg1: res_reg = rX86_ARG1; break;
-    case kArg2: res_reg = rX86_ARG2; break;
-    case kArg3: res_reg = rX86_ARG3; break;
-    case kFArg0: res_reg = rX86_FARG0; break;
-    case kFArg1: res_reg = rX86_FARG1; break;
-    case kFArg2: res_reg = rX86_FARG2; break;
-    case kFArg3: res_reg = rX86_FARG3; break;
-    case kRet0: res_reg = rX86_RET0; break;
-    case kRet1: res_reg = rX86_RET1; break;
-    case kInvokeTgt: res_reg = rX86_INVOKE_TGT; break;
-    case kHiddenArg: res_reg = rAX; break;
-    case kHiddenFpArg: res_reg = fr0; break;
-    case kCount: res_reg = rX86_COUNT; break;
+    case kSelf: res_reg = RegStorage::InvalidReg(); break;
+    case kSuspend: res_reg =  RegStorage::InvalidReg(); break;
+    case kLr: res_reg =  RegStorage::InvalidReg(); break;
+    case kPc: res_reg =  RegStorage::InvalidReg(); break;
+    case kSp: res_reg =  rs_rX86_SP; break;
+    case kArg0: res_reg = rs_rX86_ARG0; break;
+    case kArg1: res_reg = rs_rX86_ARG1; break;
+    case kArg2: res_reg = rs_rX86_ARG2; break;
+    case kArg3: res_reg = rs_rX86_ARG3; break;
+    case kFArg0: res_reg = rs_rX86_FARG0; break;
+    case kFArg1: res_reg = rs_rX86_FARG1; break;
+    case kFArg2: res_reg = rs_rX86_FARG2; break;
+    case kFArg3: res_reg = rs_rX86_FARG3; break;
+    case kRet0: res_reg = rs_rX86_RET0; break;
+    case kRet1: res_reg = rs_rX86_RET1; break;
+    case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break;
+    case kHiddenArg: res_reg = rs_rAX; break;
+    case kHiddenFpArg: res_reg = rs_fr0; break;
+    case kCount: res_reg = rs_rX86_COUNT; break;
   }
-  return RegStorage::Solo32(res_reg);
+  return res_reg;
 }
 
 RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
@@ -106,34 +132,19 @@
   }
 }
 
-// Create a double from a pair of singles.
-int X86Mir2Lir::S2d(int low_reg, int high_reg) {
-  return X86_S2D(low_reg, high_reg);
-}
-
-// Return mask to strip off fp reg flags and bias.
-uint32_t X86Mir2Lir::FpRegMask() {
-  return X86_FP_REG_MASK;
-}
-
-// True if both regs single, both core or both double.
-bool X86Mir2Lir::SameRegType(int reg1, int reg2) {
-  return (X86_REGTYPE(reg1) == X86_REGTYPE(reg2));
-}
-
 /*
  * Decode the register id.
  */
-uint64_t X86Mir2Lir::GetRegMaskCommon(int reg) {
+uint64_t X86Mir2Lir::GetRegMaskCommon(RegStorage reg) {
   uint64_t seed;
   int shift;
   int reg_id;
 
-  reg_id = reg & 0xf;
+  reg_id = reg.GetRegNum();
   /* Double registers in x86 are just a single FP register */
   seed = 1;
   /* FP register starts at bit position 16 */
-  shift = X86_FPREG(reg) ? kX86FPReg0 : 0;
+  shift = reg.IsFloat() ? kX86FPReg0 : 0;
   /* Expand the double register id into single offset */
   shift += reg_id;
   return (seed << shift);
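
For illustration, not part of this CL: because a double on x86 occupies a single xmm register, the single- and double-precision views of the same register fold onto one resource bit. A small sketch under that reading, using only names from this change:

  // Both views of xmm3 have reg_id 3 and are floating point, so:
  DCHECK_EQ(GetRegMaskCommon(rs_fr3), 1ULL << (kX86FPReg0 + 3));
  DCHECK_EQ(GetRegMaskCommon(rs_fr3), GetRegMaskCommon(rs_dr3));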
@@ -162,34 +173,34 @@
   }
 
   if (flags & REG_DEFA) {
-    SetupRegMask(&lir->u.m.def_mask, rAX);
+    SetupRegMask(&lir->u.m.def_mask, rs_rAX.GetReg());
   }
 
   if (flags & REG_DEFD) {
-    SetupRegMask(&lir->u.m.def_mask, rDX);
+    SetupRegMask(&lir->u.m.def_mask, rs_rDX.GetReg());
   }
   if (flags & REG_USEA) {
-    SetupRegMask(&lir->u.m.use_mask, rAX);
+    SetupRegMask(&lir->u.m.use_mask, rs_rAX.GetReg());
   }
 
   if (flags & REG_USEC) {
-    SetupRegMask(&lir->u.m.use_mask, rCX);
+    SetupRegMask(&lir->u.m.use_mask, rs_rCX.GetReg());
   }
 
   if (flags & REG_USED) {
-    SetupRegMask(&lir->u.m.use_mask, rDX);
+    SetupRegMask(&lir->u.m.use_mask, rs_rDX.GetReg());
   }
 
   if (flags & REG_USEB) {
-    SetupRegMask(&lir->u.m.use_mask, rBX);
+    SetupRegMask(&lir->u.m.use_mask, rs_rBX.GetReg());
   }
 
   // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI.
   if (lir->opcode == kX86RepneScasw) {
-    SetupRegMask(&lir->u.m.use_mask, rAX);
-    SetupRegMask(&lir->u.m.use_mask, rCX);
-    SetupRegMask(&lir->u.m.use_mask, rDI);
-    SetupRegMask(&lir->u.m.def_mask, rDI);
+    SetupRegMask(&lir->u.m.use_mask, rs_rAX.GetReg());
+    SetupRegMask(&lir->u.m.use_mask, rs_rCX.GetReg());
+    SetupRegMask(&lir->u.m.use_mask, rs_rDI.GetReg());
+    SetupRegMask(&lir->u.m.def_mask, rs_rDI.GetReg());
   }
 
   if (flags & USE_FP_STACK) {
@@ -261,12 +272,13 @@
             break;
           }
           case 'r':
-            if (X86_FPREG(operand) || X86_DOUBLEREG(operand)) {
-              int fp_reg = operand & X86_FP_REG_MASK;
+            if (RegStorage::IsFloat(operand)) {
+              int fp_reg = RegStorage::RegNum(operand);
               buf += StringPrintf("xmm%d", fp_reg);
             } else {
-              DCHECK_LT(static_cast<size_t>(operand), sizeof(x86RegName));
-              buf += x86RegName[operand];
+              int reg_num = RegStorage::RegNum(operand);
+              DCHECK_LT(static_cast<size_t>(reg_num), sizeof(x86RegName));
+              buf += x86RegName[reg_num];
             }
             break;
           case 't':
@@ -329,7 +341,7 @@
 
 void X86Mir2Lir::AdjustSpillMask() {
   // Adjustment for LR spilling, x86 has no LR so nothing to do here
-  core_spill_mask_ |= (1 << rRET);
+  core_spill_mask_ |= (1 << rs_rRET.GetRegNum());
   num_core_spills_++;
 }
 
@@ -339,97 +351,56 @@
  * include any holes in the mask.  Associate holes with
  * Dalvik register INVALID_VREG (0xFFFFU).
  */
-void X86Mir2Lir::MarkPreservedSingle(int v_reg, int reg) {
-  UNIMPLEMENTED(WARNING) << "MarkPreservedSingle";
-#if 0
-  LOG(FATAL) << "No support yet for promoted FP regs";
-#endif
+void X86Mir2Lir::MarkPreservedSingle(int v_reg, RegStorage reg) {
+  UNIMPLEMENTED(FATAL) << "MarkPreservedSingle";
 }
 
-void X86Mir2Lir::FlushRegWide(RegStorage reg) {
-  RegisterInfo* info1 = GetRegInfo(reg.GetLowReg());
-  RegisterInfo* info2 = GetRegInfo(reg.GetHighReg());
-  DCHECK(info1 && info2 && info1->pair && info2->pair &&
-         (info1->partner == info2->reg) &&
-         (info2->partner == info1->reg));
-  if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
-    if (!(info1->is_temp && info2->is_temp)) {
-      /* Should not happen.  If it does, there's a problem in eval_loc */
-      LOG(FATAL) << "Long half-temp, half-promoted";
-    }
-
-    info1->dirty = false;
-    info2->dirty = false;
-    if (mir_graph_->SRegToVReg(info2->s_reg) < mir_graph_->SRegToVReg(info1->s_reg))
-      info1 = info2;
-    int v_reg = mir_graph_->SRegToVReg(info1->s_reg);
-    StoreBaseDispWide(rs_rX86_SP, VRegOffset(v_reg),
-                      RegStorage(RegStorage::k64BitPair, info1->reg, info1->partner));
-  }
-}
-
-void X86Mir2Lir::FlushReg(RegStorage reg) {
-  // FIXME: need to handle 32 bits in 64-bit register as well as wide values held in single reg.
-  DCHECK(!reg.IsPair());
-  RegisterInfo* info = GetRegInfo(reg.GetReg());
-  if (info->live && info->dirty) {
-    info->dirty = false;
-    int v_reg = mir_graph_->SRegToVReg(info->s_reg);
-    StoreBaseDisp(rs_rX86_SP, VRegOffset(v_reg), reg, k32);
-  }
-}
-
-/* Give access to the target-dependent FP register encoding to common code */
-bool X86Mir2Lir::IsFpReg(int reg) {
-  return X86_FPREG(reg);
-}
-
-bool X86Mir2Lir::IsFpReg(RegStorage reg) {
-  return IsFpReg(reg.IsPair() ? reg.GetLowReg() : reg.GetReg());
+void X86Mir2Lir::MarkPreservedDouble(int v_reg, RegStorage reg) {
+  UNIMPLEMENTED(FATAL) << "MarkPreservedDouble";
 }
 
 /* Clobber all regs that might be used by an external C call */
 void X86Mir2Lir::ClobberCallerSave() {
-  Clobber(rAX);
-  Clobber(rCX);
-  Clobber(rDX);
-  Clobber(rBX);
+  Clobber(rs_rAX);
+  Clobber(rs_rCX);
+  Clobber(rs_rDX);
+  Clobber(rs_rBX);
 }
 
 RegLocation X86Mir2Lir::GetReturnWideAlt() {
   RegLocation res = LocCReturnWide();
-  CHECK(res.reg.GetLowReg() == rAX);
-  CHECK(res.reg.GetHighReg() == rDX);
-  Clobber(rAX);
-  Clobber(rDX);
-  MarkInUse(rAX);
-  MarkInUse(rDX);
-  MarkPair(res.reg.GetLowReg(), res.reg.GetHighReg());
+  DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg());
+  DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg());
+  Clobber(rs_rAX);
+  Clobber(rs_rDX);
+  MarkInUse(rs_rAX);
+  MarkInUse(rs_rDX);
+  MarkWide(res.reg);
   return res;
 }
 
 RegLocation X86Mir2Lir::GetReturnAlt() {
   RegLocation res = LocCReturn();
-  res.reg.SetReg(rDX);
-  Clobber(rDX);
-  MarkInUse(rDX);
+  res.reg.SetReg(rs_rDX.GetReg());
+  Clobber(rs_rDX);
+  MarkInUse(rs_rDX);
   return res;
 }
 
 /* To be used when explicitly managing register use */
 void X86Mir2Lir::LockCallTemps() {
-  LockTemp(rX86_ARG0);
-  LockTemp(rX86_ARG1);
-  LockTemp(rX86_ARG2);
-  LockTemp(rX86_ARG3);
+  LockTemp(rs_rX86_ARG0);
+  LockTemp(rs_rX86_ARG1);
+  LockTemp(rs_rX86_ARG2);
+  LockTemp(rs_rX86_ARG3);
 }
 
 /* To be used when explicitly managing register use */
 void X86Mir2Lir::FreeCallTemps() {
-  FreeTemp(rX86_ARG0);
-  FreeTemp(rX86_ARG1);
-  FreeTemp(rX86_ARG2);
-  FreeTemp(rX86_ARG3);
+  FreeTemp(rs_rX86_ARG0);
+  FreeTemp(rs_rX86_ARG1);
+  FreeTemp(rs_rX86_ARG2);
+  FreeTemp(rs_rX86_ARG3);
 }
 
 bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
@@ -495,40 +466,35 @@
 
 RegStorage X86Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
-    return AllocTempFloat();
+    return AllocTempSingle();
   }
   return AllocTemp();
 }
 
 void X86Mir2Lir::CompilerInitializeRegAlloc() {
-  int num_regs = sizeof(core_regs)/sizeof(*core_regs);
-  int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
-  int num_temps = sizeof(core_temps)/sizeof(*core_temps);
-  int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
-  int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
-  reg_pool_ = static_cast<RegisterPool*>(arena_->Alloc(sizeof(*reg_pool_),
-                                                       kArenaAllocRegAlloc));
-  reg_pool_->num_core_regs = num_regs;
-  reg_pool_->core_regs =
-      static_cast<RegisterInfo*>(arena_->Alloc(num_regs * sizeof(*reg_pool_->core_regs),
-                                               kArenaAllocRegAlloc));
-  reg_pool_->num_fp_regs = num_fp_regs;
-  reg_pool_->FPRegs =
-      static_cast<RegisterInfo *>(arena_->Alloc(num_fp_regs * sizeof(*reg_pool_->FPRegs),
-                                                kArenaAllocRegAlloc));
-  CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
-  CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
-  // Keep special registers from being allocated
-  for (int i = 0; i < num_reserved; i++) {
-    MarkInUse(ReservedRegs[i]);
+  reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs, sp_regs, dp_regs, reserved_regs,
+                                        core_temps, sp_temps, dp_temps);
+
+  // Target-specific adjustments.
+
+  // Alias single precision xmm to double xmms.
+  // TODO: as needed, add larger vector sizes - alias all to the largest.
+  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
+  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
+    int sp_reg_num = info->GetReg().GetRegNum();
+    RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | sp_reg_num);
+    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
+    // 64-bit xmm vector register's master storage should refer to itself.
+    DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
+    // Redirect 32-bit vector's master storage to 64-bit vector.
+    info->SetMaster(dp_reg_info);
   }
-  // Mark temp regs - all others not in use can be used for promotion
-  for (int i = 0; i < num_temps; i++) {
-    MarkTemp(core_temps[i]);
-  }
-  for (int i = 0; i < num_fp_temps; i++) {
-    MarkTemp(fp_temps[i]);
-  }
+
+  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
+  // TODO: adjust for x86/hard float calling convention.
+  reg_pool_->next_core_reg_ = 2;
+  reg_pool_->next_sp_reg_ = 2;
+  reg_pool_->next_dp_reg_ = 1;
 }
 
 void X86Mir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) {
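
For illustration, not part of this CL: once CompilerInitializeRegAlloc() has run, the aliasing loop above leaves each 32-bit xmm view pointing at the 64-bit view of the same physical register, so dirty/liveness bookkeeping for either view lands on one master. A sketch of the resulting invariant, using only names that appear in this change:

  RegisterInfo* sp3 = GetRegInfo(rs_fr3);   // single-precision view of xmm3
  RegisterInfo* dp3 = GetRegInfo(rs_dr3);   // double-precision view of xmm3
  DCHECK_EQ(sp3->Master(), dp3);            // single's master redirected to the double
  DCHECK_EQ(dp3->Master(), dp3);            // double remains its own master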
@@ -541,8 +507,7 @@
   if ((free_low != keep_low) && (free_low != keep_high) &&
       (free_high != keep_low) && (free_high != keep_high)) {
     // No overlap, free both
-    FreeTemp(free_low);
-    FreeTemp(free_high);
+    FreeTemp(rl_free.reg);
   }
 }
 
@@ -551,7 +516,7 @@
     return;
   }
   // Spill mask not including fake return address register
-  uint32_t mask = core_spill_mask_ & ~(1 << rRET);
+  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
   int offset = frame_size_ - (4 * num_core_spills_);
   for (int reg = 0; mask; mask >>= 1, reg++) {
     if (mask & 0x1) {
@@ -566,7 +531,7 @@
     return;
   }
   // Spill mask not including fake return address register
-  uint32_t mask = core_spill_mask_ & ~(1 << rRET);
+  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
   int offset = frame_size_ - (4 * num_core_spills_);
   for (int reg = 0; mask; mask >>= 1, reg++) {
     if (mask & 0x1) {
@@ -629,215 +594,6 @@
   return X86Mir2Lir::EncodingMap[opcode].fmt;
 }
 
-/*
- * Return an updated location record with current in-register status.
- * If the value lives in live temps, reflect that fact.  No code
- * is generated.  If the live value is part of an older pair,
- * clobber both low and high.
- */
-// TODO: Reunify with common code after 'pair mess' has been fixed
-RegLocation X86Mir2Lir::UpdateLocWide(RegLocation loc) {
-  DCHECK(loc.wide);
-  DCHECK(CheckCorePoolSanity());
-  if (loc.location != kLocPhysReg) {
-    DCHECK((loc.location == kLocDalvikFrame) ||
-         (loc.location == kLocCompilerTemp));
-    // Are the dalvik regs already live in physical registers?
-    RegisterInfo* info_lo = AllocLive(loc.s_reg_low, kAnyReg);
-
-    // Handle FP registers specially on x86.
-    if (info_lo && IsFpReg(info_lo->reg)) {
-      bool match = true;
-
-      // We can't match a FP register with a pair of Core registers.
-      match = match && (info_lo->pair == 0);
-
-      if (match) {
-        // We can reuse;update the register usage info.
-        loc.location = kLocPhysReg;
-        loc.vec_len = kVectorLength8;
-        // TODO: use k64BitVector
-        loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_lo->reg);
-        DCHECK(IsFpReg(loc.reg.GetLowReg()));
-        return loc;
-      }
-      // We can't easily reuse; clobber and free any overlaps.
-      if (info_lo) {
-        Clobber(info_lo->reg);
-        FreeTemp(info_lo->reg);
-        if (info_lo->pair)
-          Clobber(info_lo->partner);
-      }
-    } else {
-      RegisterInfo* info_hi = AllocLive(GetSRegHi(loc.s_reg_low), kAnyReg);
-      bool match = true;
-      match = match && (info_lo != NULL);
-      match = match && (info_hi != NULL);
-      // Are they both core or both FP?
-      match = match && (IsFpReg(info_lo->reg) == IsFpReg(info_hi->reg));
-      // If a pair of floating point singles, are they properly aligned?
-      if (match && IsFpReg(info_lo->reg)) {
-        match &= ((info_lo->reg & 0x1) == 0);
-        match &= ((info_hi->reg - info_lo->reg) == 1);
-      }
-      // If previously used as a pair, it is the same pair?
-      if (match && (info_lo->pair || info_hi->pair)) {
-        match = (info_lo->pair == info_hi->pair);
-        match &= ((info_lo->reg == info_hi->partner) &&
-              (info_hi->reg == info_lo->partner));
-      }
-      if (match) {
-        // Can reuse - update the register usage info
-        loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_hi->reg);
-        loc.location = kLocPhysReg;
-        MarkPair(loc.reg.GetLowReg(), loc.reg.GetHighReg());
-        DCHECK(!IsFpReg(loc.reg.GetLowReg()) || ((loc.reg.GetLowReg() & 0x1) == 0));
-        return loc;
-      }
-      // Can't easily reuse - clobber and free any overlaps
-      if (info_lo) {
-        Clobber(info_lo->reg);
-        FreeTemp(info_lo->reg);
-        if (info_lo->pair)
-          Clobber(info_lo->partner);
-      }
-      if (info_hi) {
-        Clobber(info_hi->reg);
-        FreeTemp(info_hi->reg);
-        if (info_hi->pair)
-          Clobber(info_hi->partner);
-      }
-    }
-  }
-  return loc;
-}
-
-// TODO: Reunify with common code after 'pair mess' has been fixed
-RegLocation X86Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
-  DCHECK(loc.wide);
-
-  loc = UpdateLocWide(loc);
-
-  /* If it is already in a register, we can assume proper form.  Is it the right reg class? */
-  if (loc.location == kLocPhysReg) {
-    DCHECK_EQ(IsFpReg(loc.reg.GetLowReg()), loc.IsVectorScalar());
-    if (!RegClassMatches(reg_class, loc.reg)) {
-      /* It is the wrong register class.  Reallocate and copy. */
-      if (!IsFpReg(loc.reg.GetLowReg())) {
-        // We want this in a FP reg, and it is in core registers.
-        DCHECK(reg_class != kCoreReg);
-        // Allocate this into any FP reg, and mark it with the right size.
-        int32_t low_reg = AllocTypedTemp(true, reg_class).GetReg();
-        OpVectorRegCopyWide(low_reg, loc.reg.GetLowReg(), loc.reg.GetHighReg());
-        CopyRegInfo(low_reg, loc.reg.GetLowReg());
-        Clobber(loc.reg);
-        loc.reg.SetReg(low_reg);
-        loc.reg.SetHighReg(low_reg);  // Play nice with existing code.
-        loc.vec_len = kVectorLength8;
-      } else {
-        // The value is in a FP register, and we want it in a pair of core registers.
-        DCHECK_EQ(reg_class, kCoreReg);
-        DCHECK_EQ(loc.reg.GetLowReg(), loc.reg.GetHighReg());
-        RegStorage new_regs = AllocTypedTempWide(false, kCoreReg);  // Force to core registers.
-        OpRegCopyWide(new_regs, loc.reg);
-        CopyRegInfo(new_regs.GetLowReg(), loc.reg.GetLowReg());
-        CopyRegInfo(new_regs.GetHighReg(), loc.reg.GetHighReg());
-        Clobber(loc.reg);
-        loc.reg = new_regs;
-        MarkPair(loc.reg.GetLowReg(), loc.reg.GetHighReg());
-        DCHECK(!IsFpReg(loc.reg.GetLowReg()) || ((loc.reg.GetLowReg() & 0x1) == 0));
-      }
-    }
-    return loc;
-  }
-
-  DCHECK_NE(loc.s_reg_low, INVALID_SREG);
-  DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);
-
-  loc.reg = AllocTypedTempWide(loc.fp, reg_class);
-
-  // FIXME: take advantage of RegStorage notation.
-  if (loc.reg.GetLowReg() == loc.reg.GetHighReg()) {
-    DCHECK(IsFpReg(loc.reg.GetLowReg()));
-    loc.vec_len = kVectorLength8;
-  } else {
-    MarkPair(loc.reg.GetLowReg(), loc.reg.GetHighReg());
-  }
-  if (update) {
-    loc.location = kLocPhysReg;
-    MarkLive(loc.reg.GetLow(), loc.s_reg_low);
-    if (loc.reg.GetLowReg() != loc.reg.GetHighReg()) {
-      MarkLive(loc.reg.GetHigh(), GetSRegHi(loc.s_reg_low));
-    }
-  }
-  return loc;
-}
-
-// TODO: Reunify with common code after 'pair mess' has been fixed
-RegLocation X86Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
-  if (loc.wide)
-    return EvalLocWide(loc, reg_class, update);
-
-  loc = UpdateLoc(loc);
-
-  if (loc.location == kLocPhysReg) {
-    if (!RegClassMatches(reg_class, loc.reg)) {
-      /* Wrong register class.  Realloc, copy and transfer ownership. */
-      RegStorage new_reg = AllocTypedTemp(loc.fp, reg_class);
-      OpRegCopy(new_reg, loc.reg);
-      CopyRegInfo(new_reg, loc.reg);
-      Clobber(loc.reg);
-      loc.reg = new_reg;
-      if (IsFpReg(loc.reg.GetReg()) && reg_class != kCoreReg)
-        loc.vec_len = kVectorLength4;
-    }
-    return loc;
-  }
-
-  DCHECK_NE(loc.s_reg_low, INVALID_SREG);
-
-  loc.reg = AllocTypedTemp(loc.fp, reg_class);
-  if (IsFpReg(loc.reg.GetReg()) && reg_class != kCoreReg)
-    loc.vec_len = kVectorLength4;
-
-  if (update) {
-    loc.location = kLocPhysReg;
-    MarkLive(loc.reg, loc.s_reg_low);
-  }
-  return loc;
-}
-
-RegStorage X86Mir2Lir::AllocTempDouble() {
-  // We really don't need a pair of registers.
-  // FIXME - update to double
-  int reg = AllocTempFloat().GetReg();
-  return RegStorage(RegStorage::k64BitPair, reg, reg);
-}
-
-// TODO: Reunify with common code after 'pair mess' has been fixed
-void X86Mir2Lir::ResetDefLocWide(RegLocation rl) {
-  DCHECK(rl.wide);
-  RegisterInfo* p_low = IsTemp(rl.reg.GetLowReg());
-  if (IsFpReg(rl.reg.GetLowReg())) {
-    // We are using only the low register.
-    if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
-      NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
-    }
-    ResetDef(rl.reg.GetLowReg());
-  } else {
-    RegisterInfo* p_high = IsTemp(rl.reg.GetHighReg());
-    if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
-      DCHECK(p_low->pair);
-      NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
-    }
-    if (p_high && !(cu_->disable_opt & (1 << kSuppressLoads))) {
-      DCHECK(p_high->pair);
-    }
-    ResetDef(rl.reg.GetLowReg());
-    ResetDef(rl.reg.GetHighReg());
-  }
-}
-
 void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
   // Can we do this directly to memory?
   rl_dest = UpdateLocWide(rl_dest);
@@ -872,7 +628,6 @@
              << (loc.ref ? " r" : "  ")
              << (loc.high_word ? " h" : "  ")
              << (loc.home ? " H" : "  ")
-             << " vec_len: " << loc.vec_len
              << ", low: " << static_cast<int>(loc.reg.GetLowReg())
              << ", high: " << static_cast<int>(loc.reg.GetHighReg())
              << ", s_reg: " << loc.s_reg_low
@@ -1059,7 +814,7 @@
 
   // We need to preserve EDI, but have no spare registers, so push it on the stack.
   // We have to remember that all stack addresses after this are offset by sizeof(EDI).
-  NewLIR1(kX86Push32R, rDI);
+  NewLIR1(kX86Push32R, rs_rDI.GetReg());
 
   // Compute the number of words to search in to rCX.
   Load32Disp(rs_rDX, count_offset, rs_rCX);
@@ -1096,7 +851,7 @@
         OpRegReg(kOpSub, rs_rCX, rl_start.reg);
         if (rl_start.reg == rs_rDI) {
           // The special case. We will use EDI further, so let's put the start index on the stack.
-          NewLIR1(kX86Push32R, rDI);
+          NewLIR1(kX86Push32R, rs_rDI.GetReg());
           is_index_on_stack = true;
         }
       } else {
@@ -1110,7 +865,7 @@
         length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rBX, nullptr);
         OpRegReg(kOpSub, rs_rCX, rs_rBX);
         // Put the start index on the stack.
-        NewLIR1(kX86Push32R, rBX);
+        NewLIR1(kX86Push32R, rs_rBX.GetReg());
         is_index_on_stack = true;
       }
     }
@@ -1130,12 +885,12 @@
     if (start_value == 0) {
       OpRegCopy(rs_rDI, rs_rBX);
     } else {
-      NewLIR3(kX86Lea32RM, rDI, rBX, 2 * start_value);
+      NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), rs_rBX.GetReg(), 2 * start_value);
     }
   } else {
     if (is_index_on_stack == true) {
       // Load the start index from stack.
-      NewLIR1(kX86Pop32R, rDX);
+      NewLIR1(kX86Pop32R, rs_rDX.GetReg());
       OpLea(rs_rDI, rs_rBX, rs_rDX, 1, 0);
     } else {
       OpLea(rs_rDI, rs_rBX, rl_start.reg, 1, 0);
@@ -1153,7 +908,7 @@
   // index = ((curr_ptr - orig_ptr) / 2) - 1.
   OpRegReg(kOpSub, rs_rDI, rs_rBX);
   OpRegImm(kOpAsr, rs_rDI, 1);
-  NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rDI, -1);
+  NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1);
   LIR *all_done = NewLIR1(kX86Jmp8, 0);
 
   // Failed to match; return -1.
@@ -1165,7 +920,7 @@
   // And join up at the end.
   all_done->target = NewLIR0(kPseudoTargetLabel);
   // Restore EDI from the stack.
-  NewLIR1(kX86Pop32R, rDI);
+  NewLIR1(kX86Pop32R, rs_rDI.GetReg());
 
   // Out of line code returns here.
   if (slowpath_branch != nullptr) {
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index b972d08..da6ded5 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -26,18 +26,19 @@
 LIR* X86Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
   int opcode;
   /* must be both DOUBLE or both not DOUBLE */
-  DCHECK_EQ(X86_DOUBLEREG(r_dest.GetReg()), X86_DOUBLEREG(r_src.GetReg()));
-  if (X86_DOUBLEREG(r_dest.GetReg())) {
+  DCHECK(r_dest.IsFloat() || r_src.IsFloat());
+  DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
+  if (r_dest.IsDouble()) {
     opcode = kX86MovsdRR;
   } else {
-    if (X86_SINGLEREG(r_dest.GetReg())) {
-      if (X86_SINGLEREG(r_src.GetReg())) {
+    if (r_dest.IsSingle()) {
+      if (r_src.IsSingle()) {
         opcode = kX86MovssRR;
       } else {  // Fpr <- Gpr
         opcode = kX86MovdxrRR;
       }
     } else {  // Gpr <- Fpr
-      DCHECK(X86_SINGLEREG(r_src.GetReg()));
+      DCHECK(r_src.IsSingle()) << "Raw: 0x" << std::hex << r_src.GetRawBits();
       opcode = kX86MovdrxRR;
     }
   }
@@ -76,11 +77,10 @@
  */
 LIR* X86Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
   RegStorage r_dest_save = r_dest;
-  if (X86_FPREG(r_dest.GetReg())) {
+  if (r_dest.IsFloat()) {
     if (value == 0) {
       return NewLIR2(kX86XorpsRR, r_dest.GetReg(), r_dest.GetReg());
     }
-    DCHECK(X86_SINGLEREG(r_dest.GetReg()));
     r_dest = AllocTemp();
   }
 
@@ -92,7 +92,7 @@
     res = NewLIR2(kX86Mov32RI, r_dest.GetReg(), value);
   }
 
-  if (X86_FPREG(r_dest_save.GetReg())) {
+  if (r_dest_save.IsFloat()) {
     NewLIR2(kX86MovdxrRR, r_dest_save.GetReg(), r_dest.GetReg());
     FreeTemp(r_dest);
   }
@@ -129,7 +129,7 @@
 LIR* X86Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
   X86OpCode opcode = kX86Bkpt;
   bool byte_imm = IS_SIMM8(value);
-  DCHECK(!X86_FPREG(r_dest_src1.GetReg()));
+  DCHECK(!r_dest_src1.IsFloat());
   switch (op) {
     case kOpLsl: opcode = kX86Sal32RI; break;
     case kOpLsr: opcode = kX86Shr32RI; break;
@@ -191,8 +191,10 @@
       case kOpOr:  opcode = kX86Or32RR; break;
       case kOpXor: opcode = kX86Xor32RR; break;
       case kOp2Byte:
+        // TODO: there are several instances of this check.  A utility function perhaps?
+        // TODO: Similar to Arm's reg < 8 check.  Perhaps add attribute checks to RegStorage?
         // Use shifts instead of a byte operand if the source can't be byte accessed.
-        if (r_src2.GetReg() >= 4) {
+        if (r_src2.GetRegNum() >= rs_rX86_SP.GetRegNum()) {
           NewLIR2(kX86Mov32RR, r_dest_src1.GetReg(), r_src2.GetReg());
           NewLIR2(kX86Sal32RI, r_dest_src1.GetReg(), 24);
           return NewLIR2(kX86Sar32RI, r_dest_src1.GetReg(), 24);
@@ -207,49 +209,49 @@
         LOG(FATAL) << "Bad case in OpRegReg " << op;
         break;
     }
-    CHECK(!src2_must_be_cx || r_src2.GetReg() == rCX);
+    CHECK(!src2_must_be_cx || r_src2.GetReg() == rs_rCX.GetReg());
     return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
 }
 
 LIR* X86Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
-  DCHECK(!(X86_FPREG(r_base.GetReg())));
+  DCHECK(!r_base.IsFloat());
   X86OpCode opcode = kX86Nop;
   int dest = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
   switch (move_type) {
     case kMov8GP:
-      CHECK(!X86_FPREG(dest));
+      CHECK(!r_dest.IsFloat());
       opcode = kX86Mov8RM;
       break;
     case kMov16GP:
-      CHECK(!X86_FPREG(dest));
+      CHECK(!r_dest.IsFloat());
       opcode = kX86Mov16RM;
       break;
     case kMov32GP:
-      CHECK(!X86_FPREG(dest));
+      CHECK(!r_dest.IsFloat());
       opcode = kX86Mov32RM;
       break;
     case kMov32FP:
-      CHECK(X86_FPREG(dest));
+      CHECK(r_dest.IsFloat());
       opcode = kX86MovssRM;
       break;
     case kMov64FP:
-      CHECK(X86_FPREG(dest));
+      CHECK(r_dest.IsFloat());
       opcode = kX86MovsdRM;
       break;
     case kMovU128FP:
-      CHECK(X86_FPREG(dest));
+      CHECK(r_dest.IsFloat());
       opcode = kX86MovupsRM;
       break;
     case kMovA128FP:
-      CHECK(X86_FPREG(dest));
+      CHECK(r_dest.IsFloat());
       opcode = kX86MovapsRM;
       break;
     case kMovLo128FP:
-      CHECK(X86_FPREG(dest));
+      CHECK(r_dest.IsFloat());
       opcode = kX86MovlpsRM;
       break;
     case kMovHi128FP:
-      CHECK(X86_FPREG(dest));
+      CHECK(r_dest.IsFloat());
       opcode = kX86MovhpsRM;
       break;
     case kMov64GP:
@@ -264,45 +266,45 @@
 }
 
 LIR* X86Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
-  DCHECK(!(X86_FPREG(r_base.GetReg())));
+  DCHECK(!r_base.IsFloat());
   int src = r_src.IsPair() ? r_src.GetLowReg() : r_src.GetReg();
 
   X86OpCode opcode = kX86Nop;
   switch (move_type) {
     case kMov8GP:
-      CHECK(!X86_FPREG(src));
+      CHECK(!r_src.IsFloat());
       opcode = kX86Mov8MR;
       break;
     case kMov16GP:
-      CHECK(!X86_FPREG(src));
+      CHECK(!r_src.IsFloat());
       opcode = kX86Mov16MR;
       break;
     case kMov32GP:
-      CHECK(!X86_FPREG(src));
+      CHECK(!r_src.IsFloat());
       opcode = kX86Mov32MR;
       break;
     case kMov32FP:
-      CHECK(X86_FPREG(src));
+      CHECK(r_src.IsFloat());
       opcode = kX86MovssMR;
       break;
     case kMov64FP:
-      CHECK(X86_FPREG(src));
+      CHECK(r_src.IsFloat());
       opcode = kX86MovsdMR;
       break;
     case kMovU128FP:
-      CHECK(X86_FPREG(src));
+      CHECK(r_src.IsFloat());
       opcode = kX86MovupsMR;
       break;
     case kMovA128FP:
-      CHECK(X86_FPREG(src));
+      CHECK(r_src.IsFloat());
       opcode = kX86MovapsMR;
       break;
     case kMovLo128FP:
-      CHECK(X86_FPREG(src));
+      CHECK(r_src.IsFloat());
       opcode = kX86MovlpsMR;
       break;
     case kMovHi128FP:
-      CHECK(X86_FPREG(src));
+      CHECK(r_src.IsFloat());
       opcode = kX86MovhpsMR;
       break;
     case kMov64GP:
@@ -367,7 +369,7 @@
       LOG(FATAL) << "Bad case in OpMemReg " << op;
       break;
   }
-  LIR *l = NewLIR3(opcode, rX86_SP, displacement, r_value);
+  LIR *l = NewLIR3(opcode, rs_rX86_SP.GetReg(), displacement, r_value);
   AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, false /* is_64bit */);
   AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, false /* is_64bit */);
   return l;
@@ -390,7 +392,7 @@
       LOG(FATAL) << "Bad case in OpRegMem " << op;
       break;
   }
-  LIR *l = NewLIR3(opcode, r_dest.GetReg(), rX86_SP, displacement);
+  LIR *l = NewLIR3(opcode, r_dest.GetReg(), rs_rX86_SP.GetReg(), displacement);
   AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, false /* is_64bit */);
   return l;
 }
@@ -449,7 +451,7 @@
     X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
     return NewLIR3(opcode, r_dest.GetReg(), r_src.GetReg(), value);
   } else if (op == kOpAnd) {
-    if (value == 0xFF && r_src.GetReg() < 4) {
+    if (value == 0xFF && r_src.Low4()) {
       return NewLIR2(kX86Movzx8RR, r_dest.GetReg(), r_src.GetReg());
     } else if (value == 0xFFFF) {
       return NewLIR2(kX86Movzx16RR, r_dest.GetReg(), r_src.GetReg());
@@ -497,7 +499,7 @@
     int32_t val_hi = High32Bits(value);
     int32_t low_reg_val = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
     LIR *res;
-    bool is_fp = X86_FPREG(low_reg_val);
+    bool is_fp = RegStorage::IsFloat(low_reg_val);
     // TODO: clean this up once we fully recognize 64-bit storage containers.
     if (is_fp) {
       if (value == 0) {
@@ -530,10 +532,9 @@
           res = LoadConstantNoClobber(RegStorage::Solo32(low_reg_val), val_lo);
         }
         if (val_hi != 0) {
-          // FIXME: clean up when AllocTempDouble no longer returns a pair.
           RegStorage r_dest_hi = AllocTempDouble();
-          LoadConstantNoClobber(RegStorage::Solo32(r_dest_hi.GetLowReg()), val_hi);
-          NewLIR2(kX86PunpckldqRR, low_reg_val, r_dest_hi.GetLowReg());
+          LoadConstantNoClobber(r_dest_hi, val_hi);
+          NewLIR2(kX86PunpckldqRR, low_reg_val, r_dest_hi.GetReg());
           FreeTemp(r_dest_hi);
         }
       }
@@ -544,25 +545,20 @@
     return res;
 }
 
-// FIXME: don't split r_dest into two storage units.
 LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                     int displacement, RegStorage r_dest, RegStorage r_dest_hi,
-                                     OpSize size, int s_reg) {
+                                     int displacement, RegStorage r_dest, OpSize size, int s_reg) {
   LIR *load = NULL;
   LIR *load2 = NULL;
   bool is_array = r_index.Valid();
-  bool pair = false;
-  bool is64bit = false;
+  bool pair = r_dest.IsPair();
+  bool is64bit = ((size == k64) || (size == kDouble));
   X86OpCode opcode = kX86Nop;
   switch (size) {
     case k64:
     case kDouble:
-      // TODO: use regstorage attributes here.
-      is64bit = true;
-      if (X86_FPREG(r_dest.GetReg())) {
+      if (r_dest.IsFloat()) {
         opcode = is_array ? kX86MovsdRA : kX86MovsdRM;
       } else {
-        pair = true;
         opcode = is_array ? kX86Mov32RA  : kX86Mov32RM;
       }
       // TODO: double store is to unaligned address
@@ -572,9 +568,9 @@
     case kSingle:
     case kReference:  // TODO: update for reference decompression on 64-bit targets.
       opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
-      if (X86_FPREG(r_dest.GetReg())) {
+      if (r_dest.IsFloat()) {
         opcode = is_array ? kX86MovssRA : kX86MovssRM;
-        DCHECK(X86_SINGLEREG(r_dest.GetReg()));
+        DCHECK(r_dest.IsFloat());
       }
       DCHECK_EQ((displacement & 0x3), 0);
       break;
@@ -600,13 +596,14 @@
     if (!pair) {
       load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
     } else {
-      if (r_base == r_dest) {
-        load2 = NewLIR3(opcode, r_dest_hi.GetReg(), r_base.GetReg(),
+      DCHECK(!r_dest.IsFloat());  // Make sure we're not still using a pair here.
+      if (r_base == r_dest.GetLow()) {
+        load2 = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
                         displacement + HIWORD_OFFSET);
-        load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
+        load = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
       } else {
-        load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
-        load2 = NewLIR3(opcode, r_dest_hi.GetReg(), r_base.GetReg(),
+        load = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
+        load2 = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
                         displacement + HIWORD_OFFSET);
       }
     }
@@ -623,36 +620,37 @@
       load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                      displacement + LOWORD_OFFSET);
     } else {
-      if (r_base == r_dest) {
-        if (r_dest_hi == r_index) {
+      DCHECK(!r_dest.IsFloat());  // Make sure we're not still using a pair here.
+      if (r_base == r_dest.GetLow()) {
+        if (r_dest.GetHigh() == r_index) {
           // We can't use either register for the first load.
           RegStorage temp = AllocTemp();
           load2 = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                           displacement + HIWORD_OFFSET);
-          load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
+          load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + LOWORD_OFFSET);
-          OpRegCopy(r_dest_hi, temp);
+          OpRegCopy(r_dest.GetHigh(), temp);
           FreeTemp(temp);
         } else {
-          load2 = NewLIR5(opcode, r_dest_hi.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
+          load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
                           displacement + HIWORD_OFFSET);
-          load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
+          load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + LOWORD_OFFSET);
         }
       } else {
-        if (r_dest == r_index) {
+        if (r_dest.GetLow() == r_index) {
           // We can't use either register for the first load.
           RegStorage temp = AllocTemp();
           load = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + LOWORD_OFFSET);
-          load2 = NewLIR5(opcode, r_dest_hi.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
+          load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
                           displacement + HIWORD_OFFSET);
-          OpRegCopy(r_dest, temp);
+          OpRegCopy(r_dest.GetLow(), temp);
           FreeTemp(temp);
         } else {
-          load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
+          load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + LOWORD_OFFSET);
-          load2 = NewLIR5(opcode, r_dest_hi.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
+          load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
                           displacement + HIWORD_OFFSET);
         }
       }
@@ -665,44 +663,38 @@
 /* Load value from base + scaled index. */
 LIR* X86Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                  int scale, OpSize size) {
-  return LoadBaseIndexedDisp(r_base, r_index, scale, 0,
-                             r_dest, RegStorage::InvalidReg(), size, INVALID_SREG);
+  return LoadBaseIndexedDisp(r_base, r_index, scale, 0, r_dest, size, INVALID_SREG);
 }
 
-LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement,
-                  RegStorage r_dest, OpSize size, int s_reg) {
+LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+                              OpSize size, int s_reg) {
   // TODO: base this on target.
   if (size == kWord) {
     size = k32;
   }
-  return LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement,
-                             r_dest, RegStorage::InvalidReg(), size, s_reg);
+  return LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest,
+                             size, s_reg);
 }
 
 LIR* X86Mir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
                                   int s_reg) {
-  return LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement,
-                             r_dest.GetLow(), r_dest.GetHigh(), k64, s_reg);
+  return LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest, k64, s_reg);
 }
 
 LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                      int displacement, RegStorage r_src, RegStorage r_src_hi,
-                                      OpSize size, int s_reg) {
+                                      int displacement, RegStorage r_src, OpSize size, int s_reg) {
   LIR *store = NULL;
   LIR *store2 = NULL;
   bool is_array = r_index.Valid();
-  // FIXME: use regstorage attributes in place of these.
-  bool pair = false;
-  bool is64bit = false;
+  bool pair = r_src.IsPair();
+  bool is64bit = (size == k64) || (size == kDouble);
   X86OpCode opcode = kX86Nop;
   switch (size) {
     case k64:
     case kDouble:
-      is64bit = true;
-      if (X86_FPREG(r_src.GetReg())) {
+      if (r_src.IsFloat()) {
         opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
       } else {
-        pair = true;
         opcode = is_array ? kX86Mov32AR  : kX86Mov32MR;
       }
       // TODO: double store is to unaligned address
@@ -712,9 +704,9 @@
     case kSingle:
     case kReference:
       opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
-      if (X86_FPREG(r_src.GetReg())) {
+      if (r_src.IsFloat()) {
         opcode = is_array ? kX86MovssAR : kX86MovssMR;
-        DCHECK(X86_SINGLEREG(r_src.GetReg()));
+        DCHECK(r_src.IsSingle());
       }
       DCHECK_EQ((displacement & 0x3), 0);
       break;
@@ -735,8 +727,9 @@
     if (!pair) {
       store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetReg());
     } else {
-      store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetReg());
-      store2 = NewLIR3(opcode, r_base.GetReg(), displacement + HIWORD_OFFSET, r_src_hi.GetReg());
+      DCHECK(!r_src.IsFloat());  // Make sure we're not still using a pair here.
+      store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetLowReg());
+      store2 = NewLIR3(opcode, r_base.GetReg(), displacement + HIWORD_OFFSET, r_src.GetHighReg());
     }
     if (r_base == rs_rX86_SP) {
       AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
@@ -751,21 +744,20 @@
       store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                       displacement + LOWORD_OFFSET, r_src.GetReg());
     } else {
+      DCHECK(!r_src.IsFloat());  // Make sure we're not still using a pair here.
       store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
-                      displacement + LOWORD_OFFSET, r_src.GetReg());
+                      displacement + LOWORD_OFFSET, r_src.GetLowReg());
       store2 = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
-                       displacement + HIWORD_OFFSET, r_src_hi.GetReg());
+                       displacement + HIWORD_OFFSET, r_src.GetHighReg());
     }
   }
-
   return store;
 }
 
 /* store value at base + scaled index. */
 LIR* X86Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                       int scale, OpSize size) {
-  return StoreBaseIndexedDisp(r_base, r_index, scale, 0,
-                              r_src, RegStorage::InvalidReg(), size, INVALID_SREG);
+  return StoreBaseIndexedDisp(r_base, r_index, scale, 0, r_src, size, INVALID_SREG);
 }
 
 LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement,
@@ -774,25 +766,13 @@
   if (size == kWord) {
     size = k32;
   }
-  return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src,
-                              RegStorage::InvalidReg(), size, INVALID_SREG);
+  return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src, size,
+                              INVALID_SREG);
 }
 
 LIR* X86Mir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) {
   return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement,
-                              r_src.GetLow(), r_src.GetHigh(), k64, INVALID_SREG);
-}
-
-/*
- * Copy a long value in Core registers to an XMM register
- *
- */
-void X86Mir2Lir::OpVectorRegCopyWide(uint8_t fp_reg, uint8_t low_reg, uint8_t high_reg) {
-  NewLIR2(kX86MovdxrRR, fp_reg, low_reg);
-  int tmp_reg = AllocTempDouble().GetLowReg();
-  NewLIR2(kX86MovdxrRR, tmp_reg, high_reg);
-  NewLIR2(kX86PunpckldqRR, fp_reg, tmp_reg);
-  FreeTemp(tmp_reg);
+                              r_src, k64, INVALID_SREG);
 }
 
 LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
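
For illustration, not part of this CL: with the reworked Load/StoreBaseIndexedDisp signatures, a 64-bit value travels as one RegStorage and the helper derives pair/is64bit from the operand itself. A hedged sketch of the two caller-side shapes inside X86Mir2Lir, using only constructors and constants from this change (the displacement is a placeholder):

  int disp = 0;  // placeholder displacement
  RegStorage core_wide(RegStorage::k64BitPair, rAX, rDX);       // core pair view of a wide value
  LoadBaseDispWide(rs_rX86_SP, disp, core_wide, INVALID_SREG);  // pair path: two 32-bit loads
  LoadBaseDispWide(rs_rX86_SP, disp, rs_dr0, INVALID_SREG);     // fp path: one movsd into xmm0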
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 1759cbe..9bf49c3 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -102,27 +102,6 @@
  * +========================+
  */
 
-// Offset to distingish FP regs.
-#define X86_FP_REG_OFFSET 32
-// Offset to distinguish DP FP regs.
-#define X86_FP_DOUBLE (X86_FP_REG_OFFSET + 16)
-// Reg types.
-#define X86_REGTYPE(x) (x & (X86_FP_REG_OFFSET | X86_FP_DOUBLE))
-#define X86_FPREG(x) ((x & X86_FP_REG_OFFSET) == X86_FP_REG_OFFSET)
-#define X86_DOUBLEREG(x) ((x & X86_FP_DOUBLE) == X86_FP_DOUBLE)
-#define X86_SINGLEREG(x) (X86_FPREG(x) && !X86_DOUBLEREG(x))
-
-/*
- * Note: the low register of a floating point pair is sufficient to
- * create the name of a double, but require both names to be passed to
- * allow for asserts to verify that the pair is consecutive if significant
- * rework is done in this area.  Also, it is a good reminder in the calling
- * code that reg locations always describe doubles as a pair of singles.
- */
-#define X86_S2D(x, y) ((x) | X86_FP_DOUBLE)
-/* Mask to strip off fp flags */
-#define X86_FP_REG_MASK 0xF
-
 enum X86ResourceEncodingPos {
   kX86GPReg0   = 0,
   kX86RegSP    = 4,
@@ -135,72 +114,119 @@
 #define ENCODE_X86_REG_SP           (1ULL << kX86RegSP)
 #define ENCODE_X86_FP_STACK         (1ULL << kX86FPStack)
 
+// FIXME: for 64-bit, perhaps add an X86_64NativeRegisterPool enum?
 enum X86NativeRegisterPool {
-  r0     = 0,
-  rAX    = r0,
-  r1     = 1,
-  rCX    = r1,
-  r2     = 2,
-  rDX    = r2,
-  r3     = 3,
-  rBX    = r3,
-  r4sp   = 4,
-  rX86_SP    = r4sp,
+  r0             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
+  rAX            = r0,
+  r1             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
+  rCX            = r1,
+  r2             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
+  rDX            = r2,
+  r3             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 3,
+  rBX            = r3,
+  r4sp           = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 4,
+  rX86_SP        = r4sp,
   r4sib_no_index = r4sp,
-  r5     = 5,
-  rBP    = r5,
-  r5sib_no_base = r5,
-  r6     = 6,
-  rSI    = r6,
-  r7     = 7,
-  rDI    = r7,
+  r5             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 5,
+  rBP            = r5,
+  r5sib_no_base  = r5,
+  r6             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 6,
+  rSI            = r6,
+  r7             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 7,
+  rDI            = r7,
 #ifndef TARGET_REX_SUPPORT
-  rRET   = 8,  // fake return address register for core spill mask.
+  // fake return address register for core spill mask.
+  rRET           = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 8,
 #else
-  r8     = 8,
-  r9     = 9,
-  r10    = 10,
-  r11    = 11,
-  r12    = 12,
-  r13    = 13,
-  r14    = 14,
-  r15    = 15,
-  rRET   = 16,  // fake return address register for core spill mask.
+  r8             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 8,
+  r9             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 9,
+  r10            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
+  r11            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
+  r12            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
+  r13            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
+  r14            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
+  r15            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
+  // fake return address register for core spill mask.
+  rRET           = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 16,
 #endif
-  fr0  =  0 + X86_FP_REG_OFFSET,
-  fr1  =  1 + X86_FP_REG_OFFSET,
-  fr2  =  2 + X86_FP_REG_OFFSET,
-  fr3  =  3 + X86_FP_REG_OFFSET,
-  fr4  =  4 + X86_FP_REG_OFFSET,
-  fr5  =  5 + X86_FP_REG_OFFSET,
-  fr6  =  6 + X86_FP_REG_OFFSET,
-  fr7  =  7 + X86_FP_REG_OFFSET,
-  fr8  =  8 + X86_FP_REG_OFFSET,
-  fr9  =  9 + X86_FP_REG_OFFSET,
-  fr10 = 10 + X86_FP_REG_OFFSET,
-  fr11 = 11 + X86_FP_REG_OFFSET,
-  fr12 = 12 + X86_FP_REG_OFFSET,
-  fr13 = 13 + X86_FP_REG_OFFSET,
-  fr14 = 14 + X86_FP_REG_OFFSET,
-  fr15 = 15 + X86_FP_REG_OFFSET,
+
+  // xmm registers, single precision view
+  fr0  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 0,
+  fr1  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 1,
+  fr2  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 2,
+  fr3  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 3,
+  fr4  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 4,
+  fr5  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 5,
+  fr6  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 6,
+  fr7  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 7,
+
+  // xmm registers, double precision aliases
+  dr0  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
+  dr1  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 1,
+  dr2  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
+  dr3  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 3,
+  dr4  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
+  dr5  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 5,
+  dr6  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
+  dr7  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 7,
+
+  // xmm registers, quad precision aliases
+  qr0  = RegStorage::k128BitSolo | RegStorage::kFloatingPoint | 0,
+  qr1  = RegStorage::k128BitSolo | RegStorage::kFloatingPoint | 1,
+  qr2  = RegStorage::k128BitSolo | RegStorage::kFloatingPoint | 2,
+  qr3  = RegStorage::k128BitSolo | RegStorage::kFloatingPoint | 3,
+  qr4  = RegStorage::k128BitSolo | RegStorage::kFloatingPoint | 4,
+  qr5  = RegStorage::k128BitSolo | RegStorage::kFloatingPoint | 5,
+  qr6  = RegStorage::k128BitSolo | RegStorage::kFloatingPoint | 6,
+  qr7  = RegStorage::k128BitSolo | RegStorage::kFloatingPoint | 7,
+
+  // TODO: as needed, add 256, 512 and 1024-bit xmm views.
 };
 
-const RegStorage rs_r0(RegStorage::k32BitSolo, r0);
-const RegStorage rs_rAX = rs_r0;
-const RegStorage rs_r1(RegStorage::k32BitSolo, r1);
-const RegStorage rs_rCX = rs_r1;
-const RegStorage rs_r2(RegStorage::k32BitSolo, r2);
-const RegStorage rs_rDX = rs_r2;
-const RegStorage rs_r3(RegStorage::k32BitSolo, r3);
-const RegStorage rs_rBX = rs_r3;
-const RegStorage rs_r4sp(RegStorage::k32BitSolo, r4sp);
-const RegStorage rs_rX86_SP = rs_r4sp;
-const RegStorage rs_r5(RegStorage::k32BitSolo, r5);
-const RegStorage rs_rBP = rs_r5;
-const RegStorage rs_r6(RegStorage::k32BitSolo, r6);
-const RegStorage rs_rSI = rs_r6;
-const RegStorage rs_r7(RegStorage::k32BitSolo, r7);
-const RegStorage rs_rDI = rs_r7;
+constexpr RegStorage rs_r0(RegStorage::kValid | r0);
+constexpr RegStorage rs_rAX = rs_r0;
+constexpr RegStorage rs_r1(RegStorage::kValid | r1);
+constexpr RegStorage rs_rCX = rs_r1;
+constexpr RegStorage rs_r2(RegStorage::kValid | r2);
+constexpr RegStorage rs_rDX = rs_r2;
+constexpr RegStorage rs_r3(RegStorage::kValid | r3);
+constexpr RegStorage rs_rBX = rs_r3;
+constexpr RegStorage rs_r4sp(RegStorage::kValid | r4sp);
+constexpr RegStorage rs_rX86_SP = rs_r4sp;
+constexpr RegStorage rs_r5(RegStorage::kValid | r5);
+constexpr RegStorage rs_rBP = rs_r5;
+constexpr RegStorage rs_r6(RegStorage::kValid | r6);
+constexpr RegStorage rs_rSI = rs_r6;
+constexpr RegStorage rs_r7(RegStorage::kValid | r7);
+constexpr RegStorage rs_rDI = rs_r7;
+constexpr RegStorage rs_rRET(RegStorage::kValid | rRET);
+
+constexpr RegStorage rs_fr0(RegStorage::kValid | fr0);
+constexpr RegStorage rs_fr1(RegStorage::kValid | fr1);
+constexpr RegStorage rs_fr2(RegStorage::kValid | fr2);
+constexpr RegStorage rs_fr3(RegStorage::kValid | fr3);
+constexpr RegStorage rs_fr4(RegStorage::kValid | fr4);
+constexpr RegStorage rs_fr5(RegStorage::kValid | fr5);
+constexpr RegStorage rs_fr6(RegStorage::kValid | fr6);
+constexpr RegStorage rs_fr7(RegStorage::kValid | fr7);
+
+constexpr RegStorage rs_dr0(RegStorage::kValid | dr0);
+constexpr RegStorage rs_dr1(RegStorage::kValid | dr1);
+constexpr RegStorage rs_dr2(RegStorage::kValid | dr2);
+constexpr RegStorage rs_dr3(RegStorage::kValid | dr3);
+constexpr RegStorage rs_dr4(RegStorage::kValid | dr4);
+constexpr RegStorage rs_dr5(RegStorage::kValid | dr5);
+constexpr RegStorage rs_dr6(RegStorage::kValid | dr6);
+constexpr RegStorage rs_dr7(RegStorage::kValid | dr7);
+
+constexpr RegStorage rs_qr0(RegStorage::kValid | qr0);
+constexpr RegStorage rs_qr1(RegStorage::kValid | qr1);
+constexpr RegStorage rs_qr2(RegStorage::kValid | qr2);
+constexpr RegStorage rs_qr3(RegStorage::kValid | qr3);
+constexpr RegStorage rs_qr4(RegStorage::kValid | qr4);
+constexpr RegStorage rs_qr5(RegStorage::kValid | qr5);
+constexpr RegStorage rs_qr6(RegStorage::kValid | qr6);
+constexpr RegStorage rs_qr7(RegStorage::kValid | qr7);
 
 // TODO: eliminate these #defines?
 #define rX86_ARG0 rAX
@@ -234,19 +260,17 @@
 
 // RegisterLocation templates return values (r_V0, or r_V0/r_V1).
 const RegLocation x86_loc_c_return
-    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
      RegStorage(RegStorage::k32BitSolo, rAX), INVALID_SREG, INVALID_SREG};
 const RegLocation x86_loc_c_return_wide
-    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
      RegStorage(RegStorage::k64BitPair, rAX, rDX), INVALID_SREG, INVALID_SREG};
-// TODO: update to use k32BitVector (must encode in 7 bits, including fp flag).
 const RegLocation x86_loc_c_return_float
-    {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, kVectorLength4,
+    {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1,
      RegStorage(RegStorage::k32BitSolo, fr0), INVALID_SREG, INVALID_SREG};
-// TODO: update to use k64BitVector (must encode in 7 bits, including fp flag).
 const RegLocation x86_loc_c_return_double
-    {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, kVectorLength8,
-     RegStorage(RegStorage::k64BitPair, fr0, fr0), INVALID_SREG, INVALID_SREG};
+    {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
+     RegStorage(RegStorage::k64BitSolo, dr0), INVALID_SREG, INVALID_SREG};
 
 /*
  * The following enum defines the list of supported X86 instructions by the
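
For illustration, not part of this CL: the removed X86_FPREG/X86_DOUBLEREG/X86_SINGLEREG/X86_FP_REG_MASK arithmetic is now answered by attribute queries on the encoded constants themselves. A sketch of the old-to-new mapping, using only methods exercised elsewhere in this change:

  RegStorage r = rs_dr3;           // 64-bit (double-precision) view of xmm3
  DCHECK(r.IsFloat());             // was X86_FPREG(reg)
  DCHECK(r.IsDouble());            // was X86_DOUBLEREG(reg)
  DCHECK(!r.IsSingle());           // was X86_SINGLEREG(reg)
  DCHECK_EQ(r.GetRegNum(), 3);     // was (reg & X86_FP_REG_MASK)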
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
index 11bec99..df5aa7b 100644
--- a/compiler/dex/reg_storage.h
+++ b/compiler/dex/reg_storage.h
@@ -21,77 +21,105 @@
 namespace art {
 
 /*
- * Representation of the physical register, register pair or vector holding a Dalvik value.
- * The basic configuration of the storage (i.e. solo reg, pair, vector) is common across all
- * targets, but the encoding of the actual storage element is target independent.
+ * 16-bit representation of the physical register container holding a Dalvik value.
+ * The encoding allows up to 32 physical register numbers per register type (core or
+ * floating point) and supports eight register container shapes.
  *
- * The two most-significant bits describe the basic shape of the storage, while meaning of the
- * lower 14 bits depends on the shape:
+ * [V] [D] [HHHHH] [SSS] [F] [LLLLL]
  *
- *  [PW]
- *       P: 0 -> pair, 1 -> solo (or vector)
- *       W: 1 -> 64 bits, 0 -> 32 bits
+ * [LLLLL]
+ *  Physical register number for the low or solo register.
+ *    0..31
  *
- *  [00] [xxxxxxxxxxxxxx]     Invalid (typically all zeros)
- *  [01] [HHHHHHH] [LLLLLLL]  64-bit storage, composed of 2 32-bit registers
- *  [10] [0] [xxxxxx] [RRRRRRR]  32-bit solo register
- *  [11] [0] [xxxxxx] [RRRRRRR]  64-bit solo register
- *  [10] [1] [xxxxxx] [VVVVVVV]  32-bit vector storage
- *  [11] [1] [xxxxxx] [VVVVVVV]  64-bit vector storage
+ * [F]
+ *  Describes type of the [LLLLL] register.
+ *    0: Core
+ *    1: Floating point
  *
- * x - don't care
- * L - low register number of a pair
- * H - high register number of a pair
- * R - register number of a solo reg
- * V - vector description
+ * [SSS]
+ *  Shape of the register container.
+ *    000: Invalid
+ *    001: 32-bit solo register
+ *    010: 64-bit solo register
+ *    011: 64-bit pair consisting of two 32-bit solo registers
+ *    100: 128-bit solo register
+ *    101: 256-bit solo register
+ *    110: 512-bit solo register
+ *    111: 1024-bit solo register
  *
- * Note that in all non-invalid cases, the low 7 bits must be sufficient to describe
- * whether the storage element is floating point (see IsFloatReg()).
+ * [HHHHH]
+ *  Physical register number of the high register (valid only for register pair).
+ *    0..31
  *
+ * [D]
+ *  Describes type of the [HHHHH] register (valid only for register pair).
+ *    0: Core
+ *    1: Floating point
+ *
+ * [V]
+ *    0 -> Invalid
+ *    1 -> Valid
+ *
+ * Note that in all non-invalid cases, we can determine whether the storage is floating
+ * point by testing the [F] bit (kFloatingPoint).  Although the format would permit a
+ * mismatch, the [F] and [D] bits of a pair must match (this makes it easier to manipulate
+ * the high and low regs of a pair individually).
+ *
+ * On some target architectures, the same underlying physical register container can be given
+ * different views.  For example, Arm's 32-bit single-precision floating point registers
+ * s2 and s3 map to the low and high halves of double-precision d1.  Similarly, X86's xmm3
+ * vector register can be viewed as 32-bit, 64-bit, 128-bit, etc.  In these cases the use of
+ * one view will affect the other views.  The RegStorage class does not concern itself
+ * with potential aliasing.  That will be done using the associated RegisterInfo struct.
+ * Distinct RegStorage elements should be created for each view of a physical register
+ * container.  The management of the aliased physical elements will be handled via RegisterInfo
+ * records.
  */
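+
+// Worked encodings for the layout above (editorial sketch, not part of the original change;
+// the values follow directly from the masks defined in RegStorage below):
+//   RegStorage(RegStorage::k32BitSolo, 5)            // core register number 5:
+//     0x8000 (valid) | 0x0040 (32-bit solo) | 5                        = 0x8045
+//   RegStorage(RegStorage::k32BitSolo, 5 | RegStorage::kFloatingPoint) // fp single 5:
+//     0x8000 | 0x0040 | 0x0020 (fp) | 5                                = 0x8065
+//   RegStorage(RegStorage::k64BitPair, 0, 2)         // core pair, low num 0, high num 2:
+//     0x8000 | 0x00c0 (pair) | (2 << 9) | 0                            = 0x84c0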
 
 class RegStorage {
  public:
   enum RegStorageKind {
-    kInvalid     = 0x0000,
-    k64BitPair   = 0x4000,
-    k32BitSolo   = 0x8000,
-    k64BitSolo   = 0xc000,
-    k32BitVector = 0xa000,
-    k64BitVector = 0xe000,
-    kPairMask    = 0x8000,
-    kPair        = 0x0000,
-    kSizeMask    = 0x4000,
-    k64Bit       = 0x4000,
-    k32Bit       = 0x0000,
-    kVectorMask  = 0xa000,
-    kVector      = 0xa000,
-    kSolo        = 0x8000,
-    kShapeMask   = 0xc000,
-    kKindMask    = 0xe000
+    kValidMask     = 0x8000,
+    kValid         = 0x8000,
+    kInvalid       = 0x0000,
+    kShapeMask     = 0x01c0,
+    k32BitSolo     = 0x0040,
+    k64BitSolo     = 0x0080,
+    k64BitPair     = 0x00c0,
+    k128BitSolo    = 0x0100,
+    k256BitSolo    = 0x0140,
+    k512BitSolo    = 0x0180,
+    k1024BitSolo   = 0x01c0,
+    k64BitMask     = 0x0180,
+    k64Bits        = 0x0080,
+    kShapeTypeMask = 0x01e0,
+    kFloatingPoint = 0x0020,
+    kCoreRegister  = 0x0000,
   };
 
-  static const uint16_t kRegValMask = 0x007f;
-  static const uint16_t kInvalidRegVal = 0x007f;
-  static const uint16_t kHighRegShift = 7;
-  static const uint16_t kHighRegMask = kRegValMask << kHighRegShift;
+  static const uint16_t kRegValMask  = 0x01ff;  // Num, type and shape.
+  static const uint16_t kRegTypeMask = 0x003f;  // Num and type.
+  static const uint16_t kRegNumMask  = 0x001f;  // Num only.
+  static const uint16_t kMaxRegs     = kRegValMask + 1;
+  // TODO: deprecate use of kInvalidRegVal and speed up GetReg().
+  static const uint16_t kInvalidRegVal = 0x01ff;
+  static const uint16_t kHighRegShift = 9;
+  static const uint16_t kShapeMaskShift = 6;
+  static const uint16_t kHighRegMask = (kRegTypeMask << kHighRegShift);
 
+  // Reg is [F][LLLLL]; any existing shape in reg is ignored and rs_kind is used.
   RegStorage(RegStorageKind rs_kind, int reg) {
-    DCHECK_NE(rs_kind & kShapeMask, kInvalid);
-    DCHECK_NE(rs_kind & kShapeMask, k64BitPair);
-    DCHECK_EQ(rs_kind & ~kKindMask, 0);
-    DCHECK_EQ(reg & ~kRegValMask, 0);
-    reg_ = rs_kind | reg;
+    DCHECK_NE(rs_kind, k64BitPair);
+    DCHECK_EQ(rs_kind & ~kShapeMask, 0);
+    reg_ = kValid | rs_kind | (reg & kRegTypeMask);
   }
   RegStorage(RegStorageKind rs_kind, int low_reg, int high_reg) {
     DCHECK_EQ(rs_kind, k64BitPair);
-    DCHECK_EQ(low_reg & ~kRegValMask, 0);
-    DCHECK_EQ(high_reg & ~kRegValMask, 0);
-    reg_ = rs_kind | (high_reg << kHighRegShift) | low_reg;
+    DCHECK_EQ(low_reg & kFloatingPoint, high_reg & kFloatingPoint);
+    reg_ = kValid | rs_kind | ((high_reg & kRegTypeMask) << kHighRegShift) | (low_reg & kRegTypeMask);
   }
-  explicit RegStorage(uint16_t val) : reg_(val) {}
+  constexpr explicit RegStorage(uint16_t val) : reg_(val) {}
   RegStorage() : reg_(kInvalid) {}
-  ~RegStorage() {}
 
   bool operator==(const RegStorage rhs) const {
     return (reg_ == rhs.GetRawBits());
@@ -102,73 +130,127 @@
   }
 
   bool Valid() const {
-    return ((reg_ & kShapeMask) != kInvalid);
+    return ((reg_ & kValidMask) == kValid);
   }
 
   bool Is32Bit() const {
-    return ((reg_ & kSizeMask) == k32Bit);
+    return ((reg_ & kShapeMask) == k32BitSolo);
   }
 
   bool Is64Bit() const {
-    return ((reg_ & kSizeMask) == k64Bit);
+    return ((reg_ & k64BitMask) == k64Bits);
   }
 
   bool IsPair() const {
-    return ((reg_ & kPairMask) == kPair);
+    return ((reg_ & kShapeMask) == k64BitPair);
   }
 
-  bool IsSolo() const {
-    return ((reg_ & kVectorMask) == kSolo);
+  bool IsFloat() const {
+    DCHECK(Valid());
+    return ((reg_ & kFloatingPoint) == kFloatingPoint);
   }
 
-  bool IsVector() const {
-    return ((reg_ & kVectorMask) == kVector);
+  bool IsDouble() const {
+    DCHECK(Valid());
+    return (reg_ & (kFloatingPoint | k64BitMask)) == (kFloatingPoint | k64Bits);
+  }
+
+  bool IsSingle() const {
+    DCHECK(Valid());
+    return (reg_ & (kFloatingPoint | k64BitMask)) == kFloatingPoint;
+  }
+
+  static bool IsFloat(uint16_t reg) {
+    return ((reg & kFloatingPoint) == kFloatingPoint);
+  }
+
+  static bool IsDouble(uint16_t reg) {
+    return (reg & (kFloatingPoint | k64BitMask)) == (kFloatingPoint | k64Bits);
+  }
+
+  static bool IsSingle(uint16_t reg) {
+    return (reg & (kFloatingPoint | k64BitMask)) == kFloatingPoint;
   }
 
   // Used to retrieve either the low register of a pair, or the only register.
   int GetReg() const {
-    DCHECK(!IsPair());
+    DCHECK(!IsPair()) << "reg_ = 0x" << std::hex << reg_;
     return Valid() ? (reg_ & kRegValMask) : kInvalidRegVal;
   }
 
+  // Sets shape, type and num of solo.
   void SetReg(int reg) {
     DCHECK(Valid());
+    DCHECK(!IsPair());
     reg_ = (reg_ & ~kRegValMask) | reg;
   }
 
+  // Set the register number and type only; the target remains a 64-bit pair.
   void SetLowReg(int reg) {
     DCHECK(IsPair());
-    reg_ = (reg_ & ~kRegValMask) | reg;
+    reg_ = (reg_ & ~kRegTypeMask) | (reg & kRegTypeMask);
   }
 
-  // Retrieve the least significant register of a pair.
+  // Retrieve the least significant register of a pair and return as 32-bit solo.
   int GetLowReg() const {
     DCHECK(IsPair());
-    return (reg_ & kRegValMask);
+    return ((reg_ & kRegTypeMask) | k32BitSolo);
   }
 
   // Create a stand-alone RegStorage from the low reg of a pair.
   RegStorage GetLow() const {
     DCHECK(IsPair());
-    return RegStorage(k32BitSolo, reg_ & kRegValMask);
+    return RegStorage(k32BitSolo, reg_ & kRegTypeMask);
   }
 
   // Retrieve the most significant register of a pair.
   int GetHighReg() const {
     DCHECK(IsPair());
-    return (reg_ & kHighRegMask) >> kHighRegShift;
+    return k32BitSolo | ((reg_ & kHighRegMask) >> kHighRegShift);
   }
 
   // Create a stand-alone RegStorage from the high reg of a pair.
   RegStorage GetHigh() const {
     DCHECK(IsPair());
-    return RegStorage(k32BitSolo, (reg_ & kHighRegMask) >> kHighRegShift);
+    return RegStorage(kValid | GetHighReg());
   }
 
   void SetHighReg(int reg) {
     DCHECK(IsPair());
-    reg_ = (reg_ & ~kHighRegMask) | (reg << kHighRegShift);
-    DCHECK_EQ(GetHighReg(), reg);
+    reg_ = (reg_ & ~kHighRegMask) | ((reg & kRegTypeMask) << kHighRegShift);
+  }
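+
+  // Illustrative round trip for the pair accessors above (editorial sketch, not part of
+  // the original change):
+  //   RegStorage p(k64BitPair, 0, 2);        // core pair, low num 0, high num 2
+  //   p.GetLowReg()  == (k32BitSolo | 0)     // 0x0040
+  //   p.GetHighReg() == (k32BitSolo | 2)     // 0x0042
+  //   p.GetLow()     == Solo32(0)            // raw bits 0x8040
+  //   p.GetHigh()    == Solo32(2)            // raw bits 0x8042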
+
+  // Return the register number of low or solo.
+  int GetRegNum() const {
+    return reg_ & kRegNumMask;
+  }
+
+  // Aliased double to low single.
+  RegStorage DoubleToLowSingle() const {
+    DCHECK(IsDouble());
+    return FloatSolo32(GetRegNum() << 1);
+  }
+
+  // Aliased double to high single.
+  RegStorage DoubleToHighSingle() const {
+    DCHECK(IsDouble());
+    return FloatSolo32((GetRegNum() << 1) + 1);
+  }
+
+  // Single to aliased double.
+  RegStorage SingleToDouble() const {
+    DCHECK(IsSingle());
+    return FloatSolo64(GetRegNum() >> 1);
+  }
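+
+  // Illustrative use of the aliasing conversions above (editorial sketch, not part of the
+  // original change), using Arm's s/d overlap from the header comment:
+  //   FloatSolo64(1).DoubleToLowSingle()  == FloatSolo32(2)   // d1 -> s2
+  //   FloatSolo64(1).DoubleToHighSingle() == FloatSolo32(3)   // d1 -> s3
+  //   FloatSolo32(3).SingleToDouble()     == FloatSolo64(1)   // s3 -> d1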
+
+  // Is register number in 0..7?
+  bool Low8() const {
+    return GetRegNum() < 8;
+  }
+
+  // Is register number in 0..3?
+  bool Low4() const {
+    return GetRegNum() < 4;
   }
 
   // Combine 2 32-bit solo regs into a pair.
@@ -180,24 +262,61 @@
     return RegStorage(k64BitPair, low.GetReg(), high.GetReg());
   }
 
+  static bool SameRegType(RegStorage reg1, RegStorage reg2) {
+    return (reg1.IsDouble() == reg2.IsDouble()) && (reg1.IsSingle() == reg2.IsSingle());
+  }
+
+  static bool SameRegType(int reg1, int reg2) {
+    return (IsDouble(reg1) == IsDouble(reg2)) && (IsSingle(reg1) == IsSingle(reg2));
+  }
+
   // Create a 32-bit solo.
   static RegStorage Solo32(int reg_num) {
-    return RegStorage(k32BitSolo, reg_num);
+    return RegStorage(k32BitSolo, reg_num & kRegTypeMask);
+  }
+
+  // Create a floating point 32-bit solo.
+  static RegStorage FloatSolo32(int reg_num) {
+    return RegStorage(k32BitSolo, (reg_num & kRegNumMask) | kFloatingPoint);
   }
 
   // Create a 64-bit solo.
   static RegStorage Solo64(int reg_num) {
-    return RegStorage(k64BitSolo, reg_num);
+    return RegStorage(k64BitSolo, reg_num & kRegTypeMask);
+  }
+
+  // Create a floating point 64-bit solo.
+  static RegStorage FloatSolo64(int reg_num) {
+    return RegStorage(k64BitSolo, (reg_num & kRegNumMask) | kFloatingPoint);
   }
 
   static RegStorage InvalidReg() {
     return RegStorage(kInvalid);
   }
 
+  static uint16_t RegNum(int raw_reg_bits) {
+    return raw_reg_bits & kRegNumMask;
+  }
+
   int GetRawBits() const {
     return reg_;
   }
 
+  size_t StorageSize() {
+    switch (reg_ & kShapeMask) {
+      case kInvalid: return 0;
+      case k32BitSolo: return 4;
+      case k64BitSolo: return 8;
+      case k64BitPair: return 8;  // Is this useful?  Might want to disallow taking size of pair.
+      case k128BitSolo: return 16;
+      case k256BitSolo: return 32;
+      case k512BitSolo: return 64;
+      case k1024BitSolo: return 128;
+      default: LOG(FATAL) << "Unexpected shape";
+    }
+    return 0;
+  }
+
  private:
   uint16_t reg_;
 };
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index d5c2598..95b3d86 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -403,7 +403,7 @@
 }
 
 // FIXME - will likely need to revisit all uses of this.
-static const RegLocation fresh_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, kVectorNotUsed,
+static const RegLocation fresh_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
                                       RegStorage(), INVALID_SREG, INVALID_SREG};
 
 void MIRGraph::InitRegLocations() {