ARM: VIXL32: Fix breaking changes from recent VIXL update.

Test: m test-art-host
Test: m test-art-target
Change-Id: I02a608bf51b889a2bfff43272a3619582bf9cf20
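Most of the changes below are mechanical adaptations to the updated VIXL API: CodeBufferCheckScope now lives directly in the vixl namespace and takes a kReserveBufferSpace policy instead of kCheck, bare registers and raw integers are no longer accepted where MemOperand or Operand values are expected, the code-buffer accessors changed shape, and registers that ART manages by hand have to be excluded from the macro assembler's scratch pool. The sketch below is illustrative only (it assumes VIXL's aarch32 headers and is not part of the patch); it shows the wrapping pattern that most of the one-line hunks apply.

    #include <cstdint>
    #include "aarch32/macro-assembler-aarch32.h"

    using namespace vixl::aarch32;

    void WrappingSketch(MacroAssembler* masm) {
      // Addresses must be wrapped explicitly; a bare register no longer converts.
      //   before: masm->Ldr(r0, r1);
      masm->Ldr(r0, MemOperand(r1));

      // Immediates of assorted integral types go through Operand::From(), which
      // deduces the integral type and, per the call sites in this patch, replaces
      // the previous implicit conversions.
      //   before: masm->Add(r0, sp, sizeof(uint32_t));
      masm->Add(r0, sp, Operand::From(sizeof(uint32_t)));
    }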
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 59e1784..a78b3da 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -572,8 +572,10 @@
 
   // We are about to use the assembler to place literals directly. Make sure we have enough
   // underlying code buffer and we have generated the jump table with right size.
-  CodeBufferCheckScope scope(codegen->GetVIXLAssembler(), num_entries * sizeof(int32_t),
-                             CodeBufferCheckScope::kCheck, CodeBufferCheckScope::kExactSize);
+  vixl::CodeBufferCheckScope scope(codegen->GetVIXLAssembler(),
+                                   num_entries * sizeof(int32_t),
+                                   vixl::CodeBufferCheckScope::kReserveBufferSpace,
+                                   vixl::CodeBufferCheckScope::kExactSize);
 
   __ Bind(&table_start_);
   const ArenaVector<HBasicBlock*>& successors = switch_instr_->GetBlock()->GetSuccessors();
@@ -2260,10 +2262,10 @@
         masm->GetCursorAddress<vixl::aarch64::Instruction*>() - kInstructionSize;
     if (prev->IsLoadOrStore()) {
       // Make sure we emit only exactly one nop.
-      vixl::aarch64::CodeBufferCheckScope scope(masm,
-                                                kInstructionSize,
-                                                vixl::aarch64::CodeBufferCheckScope::kCheck,
-                                                vixl::aarch64::CodeBufferCheckScope::kExactSize);
+      vixl::CodeBufferCheckScope scope(masm,
+                                       kInstructionSize,
+                                       vixl::CodeBufferCheckScope::kReserveBufferSpace,
+                                       vixl::CodeBufferCheckScope::kExactSize);
       __ nop();
     }
   }
@@ -4036,7 +4038,8 @@
       vixl::aarch64::Label* label = &relative_call_patches_.back().label;
       SingleEmissionCheckScope guard(GetVIXLAssembler());
       __ Bind(label);
-      __ bl(0);  // Branch and link to itself. This will be overriden at link time.
+      // Branch and link to itself. This will be overridden at link time.
+      __ bl(static_cast<int64_t>(0));
       break;
     }
     case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
@@ -4167,7 +4170,7 @@
   DCHECK(reg.IsX());
   SingleEmissionCheckScope guard(GetVIXLAssembler());
   __ Bind(fixup_label);
-  __ adrp(reg, /* offset placeholder */ 0);
+  __ adrp(reg, /* offset placeholder */ static_cast<int64_t>(0));
 }
 
 void CodeGeneratorARM64::EmitAddPlaceholder(vixl::aarch64::Label* fixup_label,
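The explicit int64_t casts above are presumably needed because the updated aarch64 assembler exposes both an immediate-offset overload and a Label* overload for bl and adrp, and a literal 0 is also a valid null pointer constant, which would make the uncast calls ambiguous. A hypothetical stand-alone illustration (the function name is invented; only the bl overloads are assumed):

    #include <cstdint>
    #include "aarch64/assembler-aarch64.h"

    // Hypothetical illustration: select the immediate-offset overload explicitly so
    // that the literal 0 is not also viable as a null Label*.
    void EmitPlaceholderBranch(vixl::aarch64::Assembler* assm) {
      // assm->bl(0);                     // ambiguous: bl(int64_t) vs. bl(Label*)
      assm->bl(static_cast<int64_t>(0));  // placeholder offset, patched at link time
    }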
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 7aea616..e399f32 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -47,6 +47,7 @@
 using helpers::InputRegisterAt;
 using helpers::InputSRegisterAt;
 using helpers::InputVRegisterAt;
+using helpers::Int32ConstantFrom;
 using helpers::LocationFrom;
 using helpers::LowRegisterFrom;
 using helpers::LowSRegisterFrom;
@@ -132,7 +133,7 @@
       vixl32::Register base = sp;
       if (stack_offset != 0) {
         base = temps.Acquire();
-        __ Add(base, sp, stack_offset);
+        __ Add(base, sp, Operand::From(stack_offset));
       }
       __ Vstm(F64, base, NO_WRITE_BACK, DRegisterList(d_reg, number_of_d_regs));
     }
@@ -180,7 +181,7 @@
       vixl32::Register base = sp;
       if (stack_offset != 0) {
         base = temps.Acquire();
-        __ Add(base, sp, stack_offset);
+        __ Add(base, sp, Operand::From(stack_offset));
       }
       __ Vldm(F64, base, NO_WRITE_BACK, DRegisterList(d_reg, number_of_d_regs));
     }
@@ -673,8 +674,8 @@
   DCHECK_GE(num_entries, kPackedSwitchCompareJumpThreshold);
 
   // We are about to use the assembler to place literals directly. Make sure we have enough
-  // underlying code buffer and we have generated the jump table with right size.
-  codegen->GetVIXLAssembler()->GetBuffer().Align();
+  // underlying code buffer and we have generated a jump table of the right size, using
+  // codegen->GetVIXLAssembler()->GetBuffer().Align();
   AssemblerAccurateScope aas(codegen->GetVIXLAssembler(),
                              num_entries * sizeof(int32_t),
                              CodeBufferCheckScope::kMaximumSize);
@@ -701,7 +702,7 @@
     DCHECK_GT(jump_offset, std::numeric_limits<int32_t>::min());
     DCHECK_LE(jump_offset, std::numeric_limits<int32_t>::max());
 
-    bb_addresses_[i].get()->UpdateValue(jump_offset, &codegen->GetVIXLAssembler()->GetBuffer());
+    bb_addresses_[i].get()->UpdateValue(jump_offset, codegen->GetVIXLAssembler()->GetBuffer());
   }
 }
 
@@ -1667,7 +1668,20 @@
   // Set the hidden (in r12) argument. It is done here, right before a BLX to prevent other
   // instruction from clobbering it as they might use r12 as a scratch register.
   DCHECK(hidden_reg.Is(r12));
-  __ Mov(hidden_reg, invoke->GetDexMethodIndex());
+
+  {
+    // The VIXL macro assembler may clobber any of the scratch registers that are available to it,
+    // so it checks if the application is using them (by passing them to the macro assembler
+    // methods). The following application of UseScratchRegisterScope corrects VIXL's notion of
+    // what is available, and is the opposite of the standard usage: Instead of requesting a
+    // temporary location, it imposes an external constraint (i.e. a specific register is reserved
+    // for the hidden argument). Note that this works even if VIXL needs a scratch register itself
+    // (to materialize the constant), since the destination register becomes available for such use
+    // internally for the duration of the macro instruction.
+    UseScratchRegisterScope temps(GetVIXLAssembler());
+    temps.Exclude(hidden_reg);
+    __ Mov(hidden_reg, invoke->GetDexMethodIndex());
+  }
 
   {
     AssemblerAccurateScope aas(GetVIXLAssembler(),
@@ -2458,13 +2472,13 @@
   vixl32::Register dividend = InputRegisterAt(instruction, 0);
   vixl32::Register temp1 = RegisterFrom(locations->GetTemp(0));
   vixl32::Register temp2 = RegisterFrom(locations->GetTemp(1));
-  int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+  int32_t imm = Int32ConstantFrom(second);
 
   int64_t magic;
   int shift;
   CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
 
-  __ Mov(temp1, magic);
+  __ Mov(temp1, Operand::From(magic));
   __ Smull(temp2, temp1, dividend, temp1);
 
   if (imm > 0 && magic < 0) {
@@ -2857,9 +2871,9 @@
     }
     // Rotate, or mov to out for zero or word size rotations.
     if (rot != 0u) {
-      __ Lsr(out_reg_hi, in_reg_hi, rot);
+      __ Lsr(out_reg_hi, in_reg_hi, Operand::From(rot));
       __ Orr(out_reg_hi, out_reg_hi, Operand(in_reg_lo, ShiftType::LSL, kArmBitsPerWord - rot));
-      __ Lsr(out_reg_lo, in_reg_lo, rot);
+      __ Lsr(out_reg_lo, in_reg_lo, Operand::From(rot));
       __ Orr(out_reg_lo, out_reg_lo, Operand(in_reg_hi, ShiftType::LSL, kArmBitsPerWord - rot));
     } else {
       __ Mov(out_reg_lo, in_reg_lo);
@@ -2874,7 +2888,7 @@
     __ And(shift_right, RegisterFrom(rhs), 0x1F);
     __ Lsrs(shift_left, RegisterFrom(rhs), 6);
     // TODO(VIXL): Check that flags are kept after "vixl32::LeaveFlags" enabled.
-    __ Rsb(shift_left, shift_right, kArmBitsPerWord);
+    __ Rsb(shift_left, shift_right, Operand::From(kArmBitsPerWord));
     __ B(cc, &shift_by_32_plus_shift_right);
 
     // out_reg_hi = (reg_hi << shift_left) | (reg_lo >> shift_right).
@@ -3040,11 +3054,11 @@
           // Shift the high part
           __ Lsl(o_h, high, o_l);
           // Shift the low part and `or` what overflew on the high part
-          __ Rsb(temp, o_l, kArmBitsPerWord);
+          __ Rsb(temp, o_l, Operand::From(kArmBitsPerWord));
           __ Lsr(temp, low, temp);
           __ Orr(o_h, o_h, temp);
           // If the shift is > 32 bits, override the high part
-          __ Subs(temp, o_l, kArmBitsPerWord);
+          __ Subs(temp, o_l, Operand::From(kArmBitsPerWord));
           {
             AssemblerAccurateScope guard(GetVIXLAssembler(),
                                          3 * kArmInstrMaxSizeInBytes,
@@ -3059,11 +3073,11 @@
           // Shift the low part
           __ Lsr(o_l, low, o_h);
           // Shift the high part and `or` what underflew on the low part
-          __ Rsb(temp, o_h, kArmBitsPerWord);
+          __ Rsb(temp, o_h, Operand::From(kArmBitsPerWord));
           __ Lsl(temp, high, temp);
           __ Orr(o_l, o_l, temp);
           // If the shift is > 32 bits, override the low part
-          __ Subs(temp, o_h, kArmBitsPerWord);
+          __ Subs(temp, o_h, Operand::From(kArmBitsPerWord));
           {
             AssemblerAccurateScope guard(GetVIXLAssembler(),
                                          3 * kArmInstrMaxSizeInBytes,
@@ -3077,10 +3091,10 @@
           __ And(o_h, second_reg, kMaxLongShiftDistance);
           // same as Shr except we use `Lsr`s and not `Asr`s
           __ Lsr(o_l, low, o_h);
-          __ Rsb(temp, o_h, kArmBitsPerWord);
+          __ Rsb(temp, o_h, Operand::From(kArmBitsPerWord));
           __ Lsl(temp, high, temp);
           __ Orr(o_l, o_l, temp);
-          __ Subs(temp, o_h, kArmBitsPerWord);
+          __ Subs(temp, o_h, Operand::From(kArmBitsPerWord));
           {
             AssemblerAccurateScope guard(GetVIXLAssembler(),
                                          3 * kArmInstrMaxSizeInBytes,
@@ -3424,7 +3438,7 @@
     __ Add(temp, addr, offset);
     addr = temp;
   }
-  __ Ldrexd(out_lo, out_hi, addr);
+  __ Ldrexd(out_lo, out_hi, MemOperand(addr));
 }
 
 void InstructionCodeGeneratorARMVIXL::GenerateWideAtomicStore(vixl32::Register addr,
@@ -3444,9 +3458,9 @@
   __ Bind(&fail);
   // We need a load followed by store. (The address used in a STREX instruction must
   // be the same as the address in the most recently executed LDREX instruction.)
-  __ Ldrexd(temp1, temp2, addr);
+  __ Ldrexd(temp1, temp2, MemOperand(addr));
   codegen_->MaybeRecordImplicitNullCheck(instruction);
-  __ Strexd(temp1, value_lo, value_hi, addr);
+  __ Strexd(temp1, value_lo, value_hi, MemOperand(addr));
   __ CompareAndBranchIfNonZero(temp1, &fail);
 }
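The comment in GenerateWideAtomicStore above states the constraint that drives this shape: a STREXD only succeeds against the address used by the most recently executed LDREXD, so both must target the same MemOperand and the loop retries until the status register reads zero. A condensed sketch of the same pattern, assuming VIXL's aarch32 macro assembler (register choices are arbitrary; for A32 encodings the doubleword pairs would additionally need to be consecutive even/odd registers):

    #include "aarch32/macro-assembler-aarch32.h"

    using namespace vixl::aarch32;

    void WideAtomicStoreSketch(MacroAssembler* masm, Register addr,
                               Register value_lo, Register value_hi,
                               Register status, Register old_lo, Register old_hi) {
      Label retry;
      masm->Bind(&retry);
      masm->Ldrexd(old_lo, old_hi, MemOperand(addr));              // claim exclusive access
      masm->Strexd(status, value_lo, value_hi, MemOperand(addr));  // status == 0 on success
      masm->Cmp(status, 0);
      masm->B(ne, &retry);                                         // lost exclusivity: retry
    }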
 
@@ -4648,7 +4662,7 @@
   }
   GetAssembler()->LoadFromOffset(
       kLoadWord, card, tr, Thread::CardTableOffset<kArmPointerSize>().Int32Value());
-  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
+  __ Lsr(temp, object, Operand::From(gc::accounting::CardTable::kCardShift));
   __ Strb(card, MemOperand(card, temp));
   if (can_be_null) {
     __ Bind(&is_null);
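The UseScratchRegisterScope/Exclude idiom introduced above for the hidden r12 argument recurs throughout the JNI macro assembler changes further down, so it is worth isolating. A minimal sketch, assuming VIXL's aarch32 headers (the method-index value and function name are invented):

    #include <cstdint>
    #include "aarch32/macro-assembler-aarch32.h"

    using namespace vixl::aarch32;

    void SetHiddenArgumentSketch(MacroAssembler* masm, uint32_t dex_method_index) {
      UseScratchRegisterScope temps(masm);
      temps.Exclude(r12);                // r12 carries the hidden argument; VIXL must not clobber it
      masm->Mov(r12, dex_method_index);  // r12 itself is still usable as the destination
    }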
diff --git a/compiler/optimizing/common_arm.h b/compiler/optimizing/common_arm.h
index 5129daf..d3623f1 100644
--- a/compiler/optimizing/common_arm.h
+++ b/compiler/optimizing/common_arm.h
@@ -139,9 +139,14 @@
   HConstant* instr = location.GetConstant();
   if (instr->IsIntConstant()) {
     return instr->AsIntConstant()->GetValue();
-  } else {
-    DCHECK(instr->IsNullConstant()) << instr->DebugName();
+  } else if (instr->IsNullConstant()) {
     return 0;
+  } else {
+    DCHECK(instr->IsLongConstant()) << instr->DebugName();
+    const int64_t ret = instr->AsLongConstant()->GetValue();
+    DCHECK_GE(ret, std::numeric_limits<int32_t>::min());
+    DCHECK_LE(ret, std::numeric_limits<int32_t>::max());
+    return ret;
   }
 }
 
@@ -161,7 +166,7 @@
   if (location.IsRegister()) {
     return vixl::aarch32::Operand(RegisterFrom(location, type));
   } else {
-    return vixl::aarch32::Operand(Int64ConstantFrom(location));
+    return vixl::aarch32::Operand(Int32ConstantFrom(location));
   }
 }
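The new branch accepts long constants whose value happens to fit in 32 bits, which is what OperandFrom below now relies on instead of Int64ConstantFrom. Stripped of the HConstant plumbing, the check amounts to the following stand-alone sketch (the helper name is invented):

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Narrow an int64_t only when the value is representable as an int32_t,
    // mirroring the DCHECKs in Int32ConstantFrom above.
    int32_t CheckedNarrowToInt32(int64_t value) {
      assert(value >= std::numeric_limits<int32_t>::min());
      assert(value <= std::numeric_limits<int32_t>::max());
      return static_cast<int32_t>(value);
    }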
 
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 7a1ec9f..c8e3534 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -518,7 +518,7 @@
 void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) {
   ArmVIXLAssembler* assembler = GetAssembler();
   // Ignore upper 4B of long address.
-  __ Ldrsb(OutputRegister(invoke), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+  __ Ldrsb(OutputRegister(invoke), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -528,7 +528,7 @@
 void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) {
   ArmVIXLAssembler* assembler = GetAssembler();
   // Ignore upper 4B of long address.
-  __ Ldr(OutputRegister(invoke), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+  __ Ldr(OutputRegister(invoke), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -545,9 +545,9 @@
   vixl32::Register hi = HighRegisterFrom(invoke->GetLocations()->Out());
   if (addr.Is(lo)) {
     __ Ldr(hi, MemOperand(addr, 4));
-    __ Ldr(lo, addr);
+    __ Ldr(lo, MemOperand(addr));
   } else {
-    __ Ldr(lo, addr);
+    __ Ldr(lo, MemOperand(addr));
     __ Ldr(hi, MemOperand(addr, 4));
   }
 }
@@ -559,7 +559,7 @@
 void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekShortNative(HInvoke* invoke) {
   ArmVIXLAssembler* assembler = GetAssembler();
   // Ignore upper 4B of long address.
-  __ Ldrsh(OutputRegister(invoke), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+  __ Ldrsh(OutputRegister(invoke), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
 }
 
 static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
@@ -576,7 +576,7 @@
 
 void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeByte(HInvoke* invoke) {
   ArmVIXLAssembler* assembler = GetAssembler();
-  __ Strb(InputRegisterAt(invoke, 1), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+  __ Strb(InputRegisterAt(invoke, 1), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -585,7 +585,7 @@
 
 void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) {
   ArmVIXLAssembler* assembler = GetAssembler();
-  __ Str(InputRegisterAt(invoke, 1), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+  __ Str(InputRegisterAt(invoke, 1), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -598,7 +598,7 @@
   vixl32::Register addr = LowRegisterFrom(invoke->GetLocations()->InAt(0));
   // Worst case: Control register bit SCTLR.A = 0. Then unaligned accesses throw a processor
   // exception. So we can't use ldrd as addr may be unaligned.
-  __ Str(LowRegisterFrom(invoke->GetLocations()->InAt(1)), addr);
+  __ Str(LowRegisterFrom(invoke->GetLocations()->InAt(1)), MemOperand(addr));
   __ Str(HighRegisterFrom(invoke->GetLocations()->InAt(1)), MemOperand(addr, 4));
 }
 
@@ -608,7 +608,7 @@
 
 void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeShortNative(HInvoke* invoke) {
   ArmVIXLAssembler* assembler = GetAssembler();
-  __ Strh(InputRegisterAt(invoke, 1), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+  __ Strh(InputRegisterAt(invoke, 1), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
 }
 
 void IntrinsicLocationsBuilderARMVIXL::VisitThreadCurrentThread(HInvoke* invoke) {
@@ -842,8 +842,8 @@
       __ Add(temp_reg, base, offset);
       vixl32::Label loop_head;
       __ Bind(&loop_head);
-      __ Ldrexd(temp_lo, temp_hi, temp_reg);
-      __ Strexd(temp_lo, value_lo, value_hi, temp_reg);
+      __ Ldrexd(temp_lo, temp_hi, MemOperand(temp_reg));
+      __ Strexd(temp_lo, value_lo, value_hi, MemOperand(temp_reg));
       __ Cmp(temp_lo, 0);
       __ B(ne, &loop_head);
     } else {
@@ -1042,7 +1042,7 @@
   vixl32::Label loop_head;
   __ Bind(&loop_head);
 
-  __ Ldrex(tmp, tmp_ptr);
+  __ Ldrex(tmp, MemOperand(tmp_ptr));
 
   __ Subs(tmp, tmp, expected);
 
@@ -1052,7 +1052,7 @@
                                CodeBufferCheckScope::kMaximumSize);
 
     __ itt(eq);
-    __ strex(eq, tmp, value, tmp_ptr);
+    __ strex(eq, tmp, value, MemOperand(tmp_ptr));
     __ cmp(eq, tmp, 1);
   }
 
@@ -1220,7 +1220,7 @@
   static_assert(IsAligned<8>(kObjectAlignment),
                 "String data must be 8-byte aligned for unrolled CompareTo loop.");
 
-  const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+  const unsigned char_size = Primitive::ComponentSize(Primitive::kPrimChar);
   DCHECK_EQ(char_size, 2u);
 
   UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
@@ -1469,7 +1469,7 @@
   __ Bind(&loop);
   __ Ldr(out, MemOperand(str, temp1));
   __ Ldr(temp2, MemOperand(arg, temp1));
-  __ Add(temp1, temp1, sizeof(uint32_t));
+  __ Add(temp1, temp1, Operand::From(sizeof(uint32_t)));
   __ Cmp(out, temp2);
   __ B(ne, &return_false);
   // With string compression, we have compared 4 bytes, otherwise 2 chars.
diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc
index 1aaa231..c35c393 100644
--- a/compiler/utils/arm/assembler_arm_vixl.cc
+++ b/compiler/utils/arm/assembler_arm_vixl.cc
@@ -43,12 +43,12 @@
 }
 
 const uint8_t* ArmVIXLAssembler::CodeBufferBaseAddress() const {
-  return vixl_masm_.GetStartAddress<uint8_t*>();
+  return vixl_masm_.GetBuffer().GetStartAddress<const uint8_t*>();
 }
 
 void ArmVIXLAssembler::FinalizeInstructions(const MemoryRegion& region) {
   // Copy the instructions from the buffer.
-  MemoryRegion from(vixl_masm_.GetStartAddress<void*>(), CodeSize());
+  MemoryRegion from(vixl_masm_.GetBuffer()->GetStartAddress<void*>(), CodeSize());
   region.CopyFrom(0, from);
 }
 
@@ -365,7 +365,7 @@
       if (stack_offset != 0) {
         base = temps.Acquire();
         DCHECK_EQ(regs & (1u << base.GetCode()), 0u);
-        ___ Add(base, sp, stack_offset);
+        ___ Add(base, sp, Operand::From(stack_offset));
       }
       ___ Stm(base, NO_WRITE_BACK, RegisterList(regs));
     } else {
@@ -385,7 +385,7 @@
       vixl32::Register base = sp;
       if (stack_offset != 0) {
         base = temps.Acquire();
-        ___ Add(base, sp, stack_offset);
+        ___ Add(base, sp, Operand::From(stack_offset));
       }
       ___ Ldm(base, NO_WRITE_BACK, RegisterList(regs));
     } else {
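The two buffer accessors in this file differ on purpose: in the const CodeBufferBaseAddress() the updated GetBuffer() appears to return a const reference, while in the non-const FinalizeInstructions() it returns a pointer, hence '.' in one call and '->' in the other (the arm64 assembler below shows the same asymmetry). A sketch of both shapes, using only the calls that appear in these hunks (the overload-on-constness reading is inferred from the call sites, not from VIXL documentation):

    #include <cstdint>
    #include "aarch32/macro-assembler-aarch32.h"

    const uint8_t* StartOfCode(const vixl::aarch32::MacroAssembler& masm) {
      return masm.GetBuffer().GetStartAddress<const uint8_t*>();  // const: reference
    }

    void* StartOfCodeRaw(vixl::aarch32::MacroAssembler* masm) {
      return masm->GetBuffer()->GetStartAddress<void*>();         // non-const: pointer
    }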
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index b2bbd72..f20ed0a 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -168,6 +168,8 @@
     CHECK_EQ(0u, size);
   } else if (src.IsCoreRegister()) {
     CHECK_EQ(4u, size);
+    UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+    temps.Exclude(src.AsVIXLRegister());
     asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
   } else if (src.IsRegisterPair()) {
     CHECK_EQ(8u, size);
@@ -186,12 +188,16 @@
 void ArmVIXLJNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
   ArmManagedRegister src = msrc.AsArm();
   CHECK(src.IsCoreRegister()) << src;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(src.AsVIXLRegister());
   asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
 }
 
 void ArmVIXLJNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
   ArmManagedRegister src = msrc.AsArm();
   CHECK(src.IsCoreRegister()) << src;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(src.AsVIXLRegister());
   asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
 }
 
@@ -202,6 +208,8 @@
   ArmManagedRegister src = msrc.AsArm();
   ArmManagedRegister scratch = mscratch.AsArm();
   asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, in_off.Int32Value());
   asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, dest.Int32Value() + 4);
 }
@@ -210,6 +218,8 @@
                                        FrameOffset src,
                                        ManagedRegister mscratch) {
   ArmManagedRegister scratch = mscratch.AsArm();
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, src.Int32Value());
   asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, dest.Int32Value());
 }
@@ -220,6 +230,8 @@
                                        bool unpoison_reference) {
   ArmManagedRegister dst = dest.AsArm();
   CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(dst.AsVIXLRegister(), base.AsArm().AsVIXLRegister());
   asm_.LoadFromOffset(kLoadWord,
                       dst.AsVIXLRegister(),
                       base.AsArm().AsVIXLRegister(),
@@ -246,6 +258,8 @@
                                                      ManagedRegister scratch) {
   ArmManagedRegister mscratch = scratch.AsArm();
   CHECK(mscratch.IsCoreRegister()) << mscratch;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(mscratch.AsVIXLRegister());
   asm_.LoadImmediate(mscratch.AsVIXLRegister(), imm);
   asm_.StoreToOffset(kStoreWord, mscratch.AsVIXLRegister(), sp, dest.Int32Value());
 }
@@ -263,6 +277,8 @@
 void ArmVIXLJNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset32 offs) {
   ArmManagedRegister dst = m_dst.AsArm();
   CHECK(dst.IsCoreRegister()) << dst;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(dst.AsVIXLRegister());
   asm_.LoadFromOffset(kLoadWord, dst.AsVIXLRegister(), tr, offs.Int32Value());
 }
 
@@ -271,6 +287,8 @@
                                                     ManagedRegister mscratch) {
   ArmManagedRegister scratch = mscratch.AsArm();
   CHECK(scratch.IsCoreRegister()) << scratch;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), tr, thr_offs.Int32Value());
   asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, fr_offs.Int32Value());
 }
@@ -286,6 +304,8 @@
                                                         ManagedRegister mscratch) {
   ArmManagedRegister scratch = mscratch.AsArm();
   CHECK(scratch.IsCoreRegister()) << scratch;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   asm_.AddConstant(scratch.AsVIXLRegister(), sp, fr_offs.Int32Value());
   asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), tr, thr_offs.Int32Value());
 }
@@ -312,6 +332,8 @@
   if (!dst.Equals(src)) {
     if (dst.IsCoreRegister()) {
       CHECK(src.IsCoreRegister()) << src;
+      UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+      temps.Exclude(dst.AsVIXLRegister());
       ___ Mov(dst.AsVIXLRegister(), src.AsVIXLRegister());
     } else if (dst.IsDRegister()) {
       if (src.IsDRegister()) {
@@ -351,6 +373,8 @@
   ArmManagedRegister temp = scratch.AsArm();
   CHECK(temp.IsCoreRegister()) << temp;
   CHECK(size == 4 || size == 8) << size;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(temp.AsVIXLRegister());
   if (size == 4) {
     asm_.LoadFromOffset(kLoadWord, temp.AsVIXLRegister(), sp, src.Int32Value());
     asm_.StoreToOffset(kStoreWord, temp.AsVIXLRegister(), sp, dest.Int32Value());
@@ -414,6 +438,8 @@
   ArmManagedRegister in_reg = min_reg.AsArm();
   CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
   CHECK(out_reg.IsCoreRegister()) << out_reg;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(out_reg.AsVIXLRegister());
   if (null_allowed) {
     // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
     // the address in the handle scope holding the reference.
@@ -425,6 +451,8 @@
                           handle_scope_offset.Int32Value());
       in_reg = out_reg;
     }
+
+    temps.Exclude(in_reg.AsVIXLRegister());
     ___ Cmp(in_reg.AsVIXLRegister(), 0);
 
     if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value(), kCcDontCare)) {
@@ -457,6 +485,8 @@
                                                       bool null_allowed) {
   ArmManagedRegister scratch = mscratch.AsArm();
   CHECK(scratch.IsCoreRegister()) << scratch;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   if (null_allowed) {
     asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, handle_scope_offset.Int32Value());
     // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
@@ -503,6 +533,8 @@
   ArmManagedRegister scratch = mscratch.AsArm();
   CHECK(base.IsCoreRegister()) << base;
   CHECK(scratch.IsCoreRegister()) << scratch;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   asm_.LoadFromOffset(kLoadWord,
                       scratch.AsVIXLRegister(),
                       base.AsVIXLRegister(),
@@ -514,6 +546,8 @@
 void ArmVIXLJNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
   ArmManagedRegister scratch = mscratch.AsArm();
   CHECK(scratch.IsCoreRegister()) << scratch;
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   // Call *(*(SP + base) + offset)
   asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, base.Int32Value());
   asm_.LoadFromOffset(kLoadWord,
@@ -541,6 +575,8 @@
 void ArmVIXLJNIMacroAssembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
   CHECK_ALIGNED(stack_adjust, kStackAlignment);
   ArmManagedRegister scratch = m_scratch.AsArm();
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch.AsVIXLRegister());
   exception_blocks_.emplace_back(
       new ArmVIXLJNIMacroAssembler::ArmException(scratch, stack_adjust));
   asm_.LoadFromOffset(kLoadWord,
@@ -598,11 +634,14 @@
   if (exception->stack_adjust_ != 0) {  // Fix up the frame.
     DecreaseFrameSize(exception->stack_adjust_);
   }
+
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(exception->scratch_.AsVIXLRegister());
   // Pass exception object as argument.
   // Don't care about preserving r0 as this won't return.
   ___ Mov(r0, exception->scratch_.AsVIXLRegister());
+  temps.Include(exception->scratch_.AsVIXLRegister());
   // TODO: check that exception->scratch_ is dead by this point.
-  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
   vixl32::Register temp = temps.Acquire();
   ___ Ldr(temp,
           MemOperand(tr,
@@ -624,6 +663,9 @@
   } else if (dest.IsCoreRegister()) {
     CHECK(!dest.AsVIXLRegister().Is(sp)) << dest;
 
+    UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+    temps.Exclude(dest.AsVIXLRegister());
+
     if (size == 1u) {
       ___ Ldrb(dest.AsVIXLRegister(), MemOperand(base, offset));
     } else {
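Most hunks in this file apply a single discipline: a ManagedRegister whose VIXL counterpart sits in the scratch pool (notably r12/IP) is excluded while its value is live, and, as in the exception path above, included again once the value is dead so that later macro instructions may acquire it. A condensed sketch of that lifecycle, assuming VIXL's aarch32 headers (constants and the function name are invented):

    #include "aarch32/macro-assembler-aarch32.h"

    using namespace vixl::aarch32;

    void ScratchDisciplineSketch(MacroAssembler* masm) {
      UseScratchRegisterScope temps(masm);
      temps.Exclude(r12);              // ART owns r12's value from here on
      masm->Mov(r12, 0x12345678);      // ... ART-managed uses of r12 ...
      masm->Mov(r0, r12);
      temps.Include(r12);              // r12 is dead again; return it to the pool
      Register tmp = temps.Acquire();  // VIXL may now legitimately hand out r12
      masm->Mov(tmp, 42);
    }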
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index f91bcfa..6ed0e9b 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -40,12 +40,12 @@
 }
 
 const uint8_t* Arm64Assembler::CodeBufferBaseAddress() const {
-  return vixl_masm_.GetStartAddress<uint8_t*>();
+  return vixl_masm_.GetBuffer().GetStartAddress<const uint8_t*>();
 }
 
 void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) {
   // Copy the instructions from the buffer.
-  MemoryRegion from(vixl_masm_.GetStartAddress<void*>(), CodeSize());
+  MemoryRegion from(vixl_masm_.GetBuffer()->GetStartAddress<void*>(), CodeSize());
   region.CopyFrom(0, from);
 }
 
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 10bed13..50a1d9f 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -1753,7 +1753,10 @@
   __ LoadFromOffset(kLoadWordPair, R2, R4, 0x40400);
   __ LoadFromOffset(kLoadWordPair, R4, R4, 0x40400);
 
+  vixl::aarch32::UseScratchRegisterScope temps(assembler.asm_.GetVIXLAssembler());
+  temps.Exclude(R12);
   __ LoadFromOffset(kLoadWord, R0, R12, 12);  // 32-bit because of R12.
+  temps.Include(R12);
   __ LoadFromOffset(kLoadWord, R2, R4, 0xa4 - 0x100000);
 
   __ LoadFromOffset(kLoadSignedByte, R2, R4, 12);
@@ -1783,7 +1786,10 @@
   __ StoreToOffset(kStoreWordPair, R2, R4, 0x40400);
   __ StoreToOffset(kStoreWordPair, R4, R4, 0x40400);
 
+  vixl::aarch32::UseScratchRegisterScope temps(assembler.asm_.GetVIXLAssembler());
+  temps.Exclude(R12);
   __ StoreToOffset(kStoreWord, R0, R12, 12);  // 32-bit because of R12.
+  temps.Include(R12);
   __ StoreToOffset(kStoreWord, R2, R4, 0xa4 - 0x100000);
 
   __ StoreToOffset(kStoreByte, R2, R4, 12);
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 30b708c..3347dac 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -63,9 +63,7 @@
         case kVld2Location:
         case kVld3Location:
         case kVld4Location: {
-          const uintptr_t pc_delta = disasm_->IsT32()
-              ? vixl::aarch32::kT32PcDelta
-              : vixl::aarch32::kA32PcDelta;
+          const uintptr_t pc_delta = label.GetLabel()->GetPcOffset();
           const int32_t offset = label.GetLabel()->GetLocation();
 
           os() << "[pc, #" << offset - pc_delta << "]";
@@ -77,7 +75,7 @@
       }
     }
 
-    DisassemblerStream& operator<<(const vixl::aarch32::Register reg) OVERRIDE {
+    DisassemblerStream& operator<<(vixl::aarch32::Register reg) OVERRIDE {
       if (reg.Is(tr)) {
         os() << "tr";
         return *this;
@@ -118,20 +116,11 @@
   CustomDisassembler(std::ostream& os, const DisassemblerOptions* options)
       : PrintDisassembler(&disassembler_stream_), disassembler_stream_(os, this, options) {}
 
-  void PrintPc(uint32_t prog_ctr) OVERRIDE {
+  void PrintCodeAddress(uint32_t prog_ctr) OVERRIDE {
     os() << "0x" << std::hex << std::setw(8) << std::setfill('0') << prog_ctr << ": ";
   }
 
-  bool IsT32() const {
-    return is_t32_;
-  }
-
-  void SetT32(bool is_t32) {
-    is_t32_ = is_t32;
-  }
-
  private:
-  bool is_t32_;
   CustomDisassemblerStream disassembler_stream_;
 };
 
@@ -152,7 +141,7 @@
       sizeof(unaligned_float), sizeof(unaligned_double)};
   const uintptr_t begin = reinterpret_cast<uintptr_t>(options_->base_address_);
   const uintptr_t end = reinterpret_cast<uintptr_t>(options_->end_address_);
-  uintptr_t literal_addr = RoundDown(disasm_->GetPc(), vixl::aarch32::kRegSizeInBytes) + offset;
+  uintptr_t literal_addr = RoundDown(disasm_->GetCodeAddress(), vixl::aarch32::kRegSizeInBytes) + offset;
 
   if (!options_->absolute_addresses_) {
     literal_addr += begin;
@@ -208,12 +197,14 @@
   // Remove the Thumb specifier bit; no effect if begin does not point to T32 code.
   const uintptr_t instr_ptr = reinterpret_cast<uintptr_t>(begin) & ~1;
 
-  disasm_->SetT32((reinterpret_cast<uintptr_t>(begin) & 1) != 0);
-  disasm_->JumpToPc(GetPc(instr_ptr));
+  const bool is_t32 = (reinterpret_cast<uintptr_t>(begin) & 1) != 0;
+  disasm_->SetCodeAddress(GetPc(instr_ptr));
 
-  if (disasm_->IsT32()) {
+  if (is_t32) {
     const uint16_t* const ip = reinterpret_cast<const uint16_t*>(instr_ptr);
-    next = reinterpret_cast<uintptr_t>(disasm_->DecodeT32At(ip));
+    const uint16_t* const end_address = reinterpret_cast<const uint16_t*>(
+        GetDisassemblerOptions()->end_address_);
+    next = reinterpret_cast<uintptr_t>(disasm_->DecodeT32At(ip, end_address));
   } else {
     const uint32_t* const ip = reinterpret_cast<const uint32_t*>(instr_ptr);
     next = reinterpret_cast<uintptr_t>(disasm_->DecodeA32At(ip));
@@ -230,10 +221,10 @@
   // Remove the Thumb specifier bit; no effect if begin does not point to T32 code.
   const uintptr_t base = reinterpret_cast<uintptr_t>(begin) & ~1;
 
-  disasm_->SetT32((reinterpret_cast<uintptr_t>(begin) & 1) != 0);
-  disasm_->JumpToPc(GetPc(base));
+  const bool is_t32 = (reinterpret_cast<uintptr_t>(begin) & 1) != 0;
+  disasm_->SetCodeAddress(GetPc(base));
 
-  if (disasm_->IsT32()) {
+  if (is_t32) {
     // The Thumb specifier bits cancel each other.
     disasm_->DisassembleT32Buffer(reinterpret_cast<const uint16_t*>(base), end - begin);
   } else {
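The driver changes above track a renamed disassembler interface: the IsT32()/SetT32() pair and JumpToPc() are gone, the code address is now set once via SetCodeAddress(), and DecodeT32At() takes an explicit end pointer, presumably so that a trailing 16-bit halfword is not read as the first half of a 32-bit encoding. A duck-typed sketch of a single decode step, using only the calls visible in these hunks (the template parameter stands for a PrintDisassembler-derived type such as the CustomDisassembler in this file):

    #include <cstdint>

    template <typename Disasm>
    auto DecodeOneT32(Disasm* disasm,
                      uint32_t code_address,
                      const uint16_t* instr,
                      const uint16_t* end) {
      disasm->SetCodeAddress(code_address);    // replaces the old SetT32() + JumpToPc() pair
      return disasm->DecodeT32At(instr, end);  // the end pointer bounds the read
    }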
diff --git a/test/538-checker-embed-constants/src/Main.java b/test/538-checker-embed-constants/src/Main.java
index 02c609e..6b25747 100644
--- a/test/538-checker-embed-constants/src/Main.java
+++ b/test/538-checker-embed-constants/src/Main.java
@@ -30,7 +30,7 @@
 
   /// CHECK-START-ARM: int Main.and255(int) disassembly (after)
   /// CHECK-NOT:            movs {{r\d+}}, #255
-  /// CHECK:                and {{r\d+}}, {{r\d+}}, #255
+  /// CHECK:                and {{r\d+}}, {{r\d+}}, #0xff
 
   public static int and255(int arg) {
     return arg & 255;
@@ -46,7 +46,7 @@
 
   /// CHECK-START-ARM: int Main.andNot15(int) disassembly (after)
   /// CHECK-NOT:            mvn {{r\d+}}, #15
-  /// CHECK:                bic {{r\d+}}, {{r\d+}}, #15
+  /// CHECK:                bic {{r\d+}}, {{r\d+}}, #0xf
 
   public static int andNot15(int arg) {
     return arg & ~15;
@@ -54,7 +54,7 @@
 
   /// CHECK-START-ARM: int Main.or255(int) disassembly (after)
   /// CHECK-NOT:            movs {{r\d+}}, #255
-  /// CHECK:                orr {{r\d+}}, {{r\d+}}, #255
+  /// CHECK:                orr {{r\d+}}, {{r\d+}}, #0xff
 
   public static int or255(int arg) {
     return arg | 255;
@@ -70,7 +70,7 @@
 
   /// CHECK-START-ARM: int Main.orNot15(int) disassembly (after)
   /// CHECK-NOT:            mvn {{r\d+}}, #15
-  /// CHECK:                orn {{r\d+}}, {{r\d+}}, #15
+  /// CHECK:                orn {{r\d+}}, {{r\d+}}, #0xf
 
   public static int orNot15(int arg) {
     return arg | ~15;
@@ -78,7 +78,7 @@
 
   /// CHECK-START-ARM: int Main.xor255(int) disassembly (after)
   /// CHECK-NOT:            movs {{r\d+}}, #255
-  /// CHECK:                eor {{r\d+}}, {{r\d+}}, #255
+  /// CHECK:                eor {{r\d+}}, {{r\d+}}, #0xff
 
   public static int xor255(int arg) {
     return arg ^ 255;
@@ -104,7 +104,7 @@
   /// CHECK-NOT:            movs {{r\d+}}, #255
   /// CHECK-NOT:            and{{(\.w)?}}
   /// CHECK-NOT:            bic{{(\.w)?}}
-  /// CHECK-DAG:            and {{r\d+}}, {{r\d+}}, #255
+  /// CHECK-DAG:            and {{r\d+}}, {{r\d+}}, #0xff
   /// CHECK-DAG:            movs {{r\d+}}, #0
   /// CHECK-NOT:            and{{(\.w)?}}
   /// CHECK-NOT:            bic{{(\.w)?}}
@@ -131,7 +131,7 @@
   /// CHECK-NOT:            mvn {{r\d+}}, #15
   /// CHECK-NOT:            and{{(\.w)?}}
   /// CHECK-NOT:            bic{{(\.w)?}}
-  /// CHECK:                bic {{r\d+}}, {{r\d+}}, #15
+  /// CHECK:                bic {{r\d+}}, {{r\d+}}, #0xf
   /// CHECK-NOT:            and{{(\.w)?}}
   /// CHECK-NOT:            bic{{(\.w)?}}
 
@@ -144,8 +144,8 @@
   /// CHECK-NOT:            mvn {{r\d+}}, #15
   /// CHECK-NOT:            and{{(\.w)?}}
   /// CHECK-NOT:            bic{{(\.w)?}}
-  /// CHECK-DAG:            and {{r\d+}}, {{r\d+}}, #15
-  /// CHECK-DAG:            bic {{r\d+}}, {{r\d+}}, #15
+  /// CHECK-DAG:            and {{r\d+}}, {{r\d+}}, #0xf
+  /// CHECK-DAG:            bic {{r\d+}}, {{r\d+}}, #0xf
   /// CHECK-NOT:            and{{(\.w)?}}
   /// CHECK-NOT:            bic{{(\.w)?}}
 
@@ -157,7 +157,7 @@
   /// CHECK-NOT:            movs {{r\d+}}, #255
   /// CHECK-NOT:            orr{{(\.w)?}}
   /// CHECK-NOT:            orn
-  /// CHECK:                orr {{r\d+}}, {{r\d+}}, #255
+  /// CHECK:                orr {{r\d+}}, {{r\d+}}, #0xff
   /// CHECK-NOT:            orr{{(\.w)?}}
   /// CHECK-NOT:            orn
 
@@ -183,7 +183,7 @@
   /// CHECK-NOT:            mvn {{r\d+}}, #15
   /// CHECK-NOT:            orr{{(\.w)?}}
   /// CHECK-NOT:            orn
-  /// CHECK-DAG:            orn {{r\d+}}, {{r\d+}}, #15
+  /// CHECK-DAG:            orn {{r\d+}}, {{r\d+}}, #0xf
   /// CHECK-DAG:            mvn {{r\d+}}, #0
   /// CHECK-NOT:            orr{{(\.w)?}}
   /// CHECK-NOT:            orn
@@ -197,8 +197,8 @@
   /// CHECK-NOT:            mvn {{r\d+}}, #15
   /// CHECK-NOT:            orr{{(\.w)?}}
   /// CHECK-NOT:            orn
-  /// CHECK-DAG:            orr {{r\d+}}, {{r\d+}}, #15
-  /// CHECK-DAG:            orn {{r\d+}}, {{r\d+}}, #15
+  /// CHECK-DAG:            orr {{r\d+}}, {{r\d+}}, #0xf
+  /// CHECK-DAG:            orn {{r\d+}}, {{r\d+}}, #0xf
   /// CHECK-NOT:            orr{{(\.w)?}}
   /// CHECK-NOT:            orn
 
@@ -209,7 +209,7 @@
   /// CHECK-START-ARM: long Main.xor255(long) disassembly (after)
   /// CHECK-NOT:            movs {{r\d+}}, #255
   /// CHECK-NOT:            eor{{(\.w)?}}
-  /// CHECK:                eor {{r\d+}}, {{r\d+}}, #255
+  /// CHECK:                eor {{r\d+}}, {{r\d+}}, #0xff
   /// CHECK-NOT:            eor{{(\.w)?}}
 
   public static long xor255(long arg) {
@@ -257,8 +257,8 @@
   /// CHECK-NOT:            movs {{r\d+}}, #15
   /// CHECK-NOT:            mov.w {{r\d+}}, #-268435456
   /// CHECK-NOT:            eor{{(\.w)?}}
-  /// CHECK-DAG:            eor {{r\d+}}, {{r\d+}}, #15
-  /// CHECK-DAG:            eor {{r\d+}}, {{r\d+}}, #4026531840
+  /// CHECK-DAG:            eor {{r\d+}}, {{r\d+}}, #0xf
+  /// CHECK-DAG:            eor {{r\d+}}, {{r\d+}}, #0xf0000000
   /// CHECK-NOT:            eor{{(\.w)?}}
 
   public static long xor0xf00000000000000f(long arg) {