AArch64: Fix the usage of IP0, IP1 as temporary registers

This patch fixes the use of temporary registers by switching to
VIXL's UseScratchRegisterScope. For the primitives used by the
trampoline compiler we explicitly exclude IP0 and IP1 from the
scratch register list.
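
The pattern is the one introduced in AddConstant below (a minimal
sketch; rd, rn, value and cond stand for the parameters of the
surrounding primitive):

    vixl::UseScratchRegisterScope temps(vixl_masm_);
    temps.Exclude(reg_x(rd), reg_x(rn));     // keep live inputs out of the pool
    vixl::Register temp = temps.AcquireX();  // borrow a scratch (IP0/IP1)
    ___ Add(temp, reg_x(rn), value);
    ___ Csel(reg_x(rd), temp, reg_x(rd), COND_OP(cond));
    // temp is returned to the pool when temps goes out of scope.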

Change-Id: Icf9afbabd93214302891ddd536ce03a9c181463b
Signed-off-by: Serban Constantinescu <serban.constantinescu@arm.com>
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index fb909a8..d03b99f 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -62,18 +62,15 @@
 
   switch (abi) {
     case kInterpreterAbi:  // Thread* is first argument (X0) in interpreter ABI.
-      // FIXME IPx used by VIXL - this is unsafe.
       __ JumpTo(Arm64ManagedRegister::FromCoreRegister(X0), Offset(offset.Int32Value()),
           Arm64ManagedRegister::FromCoreRegister(IP1));
 
       break;
     case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (X0).
-
       __ LoadRawPtr(Arm64ManagedRegister::FromCoreRegister(IP1),
                       Arm64ManagedRegister::FromCoreRegister(X0),
                       Offset(JNIEnvExt::SelfOffset().Int32Value()));
 
-      // FIXME IPx used by VIXL - this is unsafe.
       __ JumpTo(Arm64ManagedRegister::FromCoreRegister(IP1), Offset(offset.Int32Value()),
                 Arm64ManagedRegister::FromCoreRegister(IP0));
 
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index f728abf..f486b3c 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -79,11 +79,13 @@
     // VIXL macro-assembler handles all variants.
     ___ Add(reg_x(rd), reg_x(rn), value);
   } else {
-    // ip1 = rd + value
-    // rd = cond ? ip1 : rn
-    CHECK_NE(rn, IP1);
-    ___ Add(reg_x(IP1), reg_x(rn), value);
-    ___ Csel(reg_x(rd), reg_x(IP1), reg_x(rd), COND_OP(cond));
+    // temp = rn + value
+    // rd = cond ? temp : rd
+    vixl::UseScratchRegisterScope temps(vixl_masm_);
+    temps.Exclude(reg_x(rd), reg_x(rn));
+    vixl::Register temp = temps.AcquireX();
+    ___ Add(temp, reg_x(rn), value);
+    ___ Csel(reg_x(rd), temp, reg_x(rd), COND_OP(cond));
   }
 }
 
@@ -175,9 +177,10 @@
 }
 
 void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset<8> tr_offs) {
-  // Arm64 does not support: "str sp, [dest]" therefore we use IP1 as a temp reg.
-  ___ Mov(reg_x(IP1), reg_x(SP));
-  StoreToOffset(IP1, ETR, tr_offs.Int32Value());
+  vixl::UseScratchRegisterScope temps(vixl_masm_);
+  vixl::Register temp = temps.AcquireX();
+  ___ Mov(temp, reg_x(SP));
+  ___ Str(temp, MEM_OP(reg_x(ETR), tr_offs.Int32Value()));
 }
 
 void Arm64Assembler::StoreSpanning(FrameOffset dest_off, ManagedRegister m_source,
@@ -195,12 +198,14 @@
   if ((cond == AL) || (cond == NV)) {
     ___ Mov(reg_x(dest), value);
   } else {
-    // ip1 = value
-    // rd = cond ? ip1 : rd
+    // temp = value
+    // rd = cond ? temp : rd
     if (value != 0) {
-      CHECK_NE(dest, IP1);
-      ___ Mov(reg_x(IP1), value);
-      ___ Csel(reg_x(dest), reg_x(IP1), reg_x(dest), COND_OP(cond));
+      vixl::UseScratchRegisterScope temps(vixl_masm_);
+      temps.Exclude(reg_x(dest));
+      vixl::Register temp = temps.AcquireX();
+      ___ Mov(temp, value);
+      ___ Csel(reg_x(dest), temp, reg_x(dest), COND_OP(cond));
     } else {
       ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), COND_OP(cond));
     }
@@ -298,7 +303,10 @@
   Arm64ManagedRegister dst = m_dst.AsArm64();
   Arm64ManagedRegister base = m_base.AsArm64();
   CHECK(dst.IsCoreRegister() && base.IsCoreRegister());
-  LoadFromOffset(dst.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value());
+  // Remove dst and base from the temp list - higher level API uses IP1, IP0.
+  vixl::UseScratchRegisterScope temps(vixl_masm_);
+  temps.Exclude(reg_x(dst.AsCoreRegister()), reg_x(base.AsCoreRegister()));
+  ___ Ldr(reg_x(dst.AsCoreRegister()), MEM_OP(reg_x(base.AsCoreRegister()), offs.Int32Value()));
 }
 
 void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset<8> offs) {
@@ -511,7 +519,10 @@
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
   CHECK(base.IsCoreRegister()) << base;
   CHECK(scratch.IsCoreRegister()) << scratch;
-  LoadFromOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value());
+  // Remove base and scratch from the temp list - higher level API uses IP1, IP0.
+  vixl::UseScratchRegisterScope temps(vixl_masm_);
+  temps.Exclude(reg_x(base.AsCoreRegister()), reg_x(scratch.AsCoreRegister()));
+  ___ Ldr(reg_x(scratch.AsCoreRegister()), MEM_OP(reg_x(base.AsCoreRegister()), offs.Int32Value()));
   ___ Br(reg_x(scratch.AsCoreRegister()));
 }
 
@@ -601,7 +612,11 @@
 }
 
 void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
-    // Bind exception poll entry.
+  vixl::UseScratchRegisterScope temps(vixl_masm_);
+  temps.Exclude(reg_x(exception->scratch_.AsCoreRegister()));
+  vixl::Register temp = temps.AcquireX();
+
+  // Bind exception poll entry.
   ___ Bind(exception->Entry());
   if (exception->stack_adjust_ != 0) {  // Fix up the frame.
     DecreaseFrameSize(exception->stack_adjust_);
@@ -609,14 +624,14 @@
   // Pass exception object as argument.
   // Don't care about preserving X0 as this won't return.
   ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsCoreRegister()));
-  LoadFromOffset(IP1, ETR, QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value());
+  ___ Ldr(temp, MEM_OP(reg_x(ETR), QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value()));
 
   // Move ETR(Callee saved) back to TR(Caller saved) reg. We use ETR on calls
   // to external functions that might trash TR. We do not need the original
   // X19 saved in BuildFrame().
   ___ Mov(reg_x(TR), reg_x(ETR));
 
-  ___ Blr(reg_x(IP1));
+  ___ Blr(temp);
   // Call should never return.
   ___ Brk();
 }
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 97fb93a..583150c 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -85,6 +85,7 @@
   vixl_masm_(new vixl::MacroAssembler(vixl_buf_, kBufferSizeArm64)) {}
 
   virtual ~Arm64Assembler() {
+    delete vixl_masm_;
     delete[] vixl_buf_;
   }
 
@@ -237,8 +238,8 @@
   // Vixl buffer.
   byte* vixl_buf_;
 
-  // Unique ptr - vixl assembler.
-  UniquePtr<vixl::MacroAssembler> vixl_masm_;
+  // Vixl assembler.
+  vixl::MacroAssembler* vixl_masm_;
 
   // List of exception blocks to generate at the end of the code cache.
   std::vector<Arm64Exception*> exception_blocks_;