ART: Extract JNI macro assembler for x86-64

Extract the JNI macro assembler parts from the regular x86-64
assembler into a dedicated X86_64JNIMacroAssembler class, leaving
X86_64Assembler responsible only for emitting machine instructions.
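
A minimal usage sketch (illustrative only, not part of this change;
the Create() factory and its exact signature are assumed from
compiler/utils/jni_macro_assembler.cc, and EmitFrameResize is a
hypothetical caller):

    #include "utils/jni_macro_assembler.h"

    void EmitFrameResize(ArenaAllocator* arena) {  // hypothetical helper
      // With this change, the kX86_64 case of Create() returns the
      // extracted x86_64::X86_64JNIMacroAssembler instead of the
      // monolithic X86_64Assembler.
      std::unique_ptr<JNIMacroAssembler<PointerSize::k64>> jni_asm =
          JNIMacroAssembler<PointerSize::k64>::Create(arena, kX86_64);
      jni_asm->IncreaseFrameSize(kStackAlignment);  // addq $-16, %rsp + CFI
      jni_asm->DecreaseFrameSize(kStackAlignment);  // addq $16, %rsp + CFI
      jni_asm->FinalizeCode();
    }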

Test: m test-art-host
Change-Id: I291fb76ad9232123b4c1992488ee81fec3c1db47
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 3b7b1e6..f022c87 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -152,6 +152,7 @@
 	optimizing/intrinsics_x86_64.cc \
 	optimizing/code_generator_x86_64.cc \
 	utils/x86_64/assembler_x86_64.cc \
+	utils/x86_64/jni_macro_assembler_x86_64.cc \
 	utils/x86_64/managed_register_x86_64.cc \
 
 
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc
index 6c14888..0b1aa08 100644
--- a/compiler/utils/jni_macro_assembler.cc
+++ b/compiler/utils/jni_macro_assembler.cc
@@ -36,7 +36,7 @@
 #include "x86/assembler_x86.h"
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
-#include "x86_64/assembler_x86_64.h"
+#include "x86_64/jni_macro_assembler_x86_64.h"
 #endif
 #include "base/casts.h"
 #include "globals.h"
@@ -98,7 +98,7 @@
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
     case kX86_64:
-      return MacroAsm64UniquePtr(new (arena) x86_64::X86_64Assembler(arena));
+      return MacroAsm64UniquePtr(new (arena) x86_64::X86_64JNIMacroAssembler(arena));
 #endif
     default:
       LOG(FATAL) << "Unknown/unsupported 8B InstructionSet: " << instruction_set;
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index ce4ea1d..ddc8244 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2639,543 +2639,6 @@
   }
 }
 
-static dwarf::Reg DWARFReg(Register reg) {
-  return dwarf::Reg::X86_64Core(static_cast<int>(reg));
-}
-static dwarf::Reg DWARFReg(FloatRegister reg) {
-  return dwarf::Reg::X86_64Fp(static_cast<int>(reg));
-}
-
-constexpr size_t kFramePointerSize = 8;
-
-void X86_64Assembler::BuildFrame(size_t frame_size,
-                                 ManagedRegister method_reg,
-                                 ArrayRef<const ManagedRegister> spill_regs,
-                                 const ManagedRegisterEntrySpills& entry_spills) {
-  DCHECK_EQ(buffer_.Size(), 0U);  // Nothing emitted yet.
-  cfi_.SetCurrentCFAOffset(8);  // Return address on stack.
-  CHECK_ALIGNED(frame_size, kStackAlignment);
-  int gpr_count = 0;
-  for (int i = spill_regs.size() - 1; i >= 0; --i) {
-    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
-    if (spill.IsCpuRegister()) {
-      pushq(spill.AsCpuRegister());
-      gpr_count++;
-      cfi_.AdjustCFAOffset(kFramePointerSize);
-      cfi_.RelOffset(DWARFReg(spill.AsCpuRegister().AsRegister()), 0);
-    }
-  }
-  // return address then method on stack.
-  int64_t rest_of_frame = static_cast<int64_t>(frame_size)
-                          - (gpr_count * kFramePointerSize)
-                          - kFramePointerSize /*return address*/;
-  subq(CpuRegister(RSP), Immediate(rest_of_frame));
-  cfi_.AdjustCFAOffset(rest_of_frame);
-
-  // spill xmms
-  int64_t offset = rest_of_frame;
-  for (int i = spill_regs.size() - 1; i >= 0; --i) {
-    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
-    if (spill.IsXmmRegister()) {
-      offset -= sizeof(double);
-      movsd(Address(CpuRegister(RSP), offset), spill.AsXmmRegister());
-      cfi_.RelOffset(DWARFReg(spill.AsXmmRegister().AsFloatRegister()), offset);
-    }
-  }
-
-  static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
-                "Unexpected frame pointer size.");
-
-  movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
-
-  for (size_t i = 0; i < entry_spills.size(); ++i) {
-    ManagedRegisterSpill spill = entry_spills.at(i);
-    if (spill.AsX86_64().IsCpuRegister()) {
-      if (spill.getSize() == 8) {
-        movq(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
-             spill.AsX86_64().AsCpuRegister());
-      } else {
-        CHECK_EQ(spill.getSize(), 4);
-        movl(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()), spill.AsX86_64().AsCpuRegister());
-      }
-    } else {
-      if (spill.getSize() == 8) {
-        movsd(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()), spill.AsX86_64().AsXmmRegister());
-      } else {
-        CHECK_EQ(spill.getSize(), 4);
-        movss(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()), spill.AsX86_64().AsXmmRegister());
-      }
-    }
-  }
-}
-
-void X86_64Assembler::RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> spill_regs) {
-  CHECK_ALIGNED(frame_size, kStackAlignment);
-  cfi_.RememberState();
-  int gpr_count = 0;
-  // unspill xmms
-  int64_t offset = static_cast<int64_t>(frame_size) - (spill_regs.size() * kFramePointerSize) - 2 * kFramePointerSize;
-  for (size_t i = 0; i < spill_regs.size(); ++i) {
-    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
-    if (spill.IsXmmRegister()) {
-      offset += sizeof(double);
-      movsd(spill.AsXmmRegister(), Address(CpuRegister(RSP), offset));
-      cfi_.Restore(DWARFReg(spill.AsXmmRegister().AsFloatRegister()));
-    } else {
-      gpr_count++;
-    }
-  }
-  int adjust = static_cast<int>(frame_size) - (gpr_count * kFramePointerSize) - kFramePointerSize;
-  addq(CpuRegister(RSP), Immediate(adjust));
-  cfi_.AdjustCFAOffset(-adjust);
-  for (size_t i = 0; i < spill_regs.size(); ++i) {
-    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
-    if (spill.IsCpuRegister()) {
-      popq(spill.AsCpuRegister());
-      cfi_.AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
-      cfi_.Restore(DWARFReg(spill.AsCpuRegister().AsRegister()));
-    }
-  }
-  ret();
-  // The CFI should be restored for any code that follows the exit block.
-  cfi_.RestoreState();
-  cfi_.DefCFAOffset(frame_size);
-}
-
-void X86_64Assembler::IncreaseFrameSize(size_t adjust) {
-  CHECK_ALIGNED(adjust, kStackAlignment);
-  addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));
-  cfi_.AdjustCFAOffset(adjust);
-}
-
-void X86_64Assembler::DecreaseFrameSize(size_t adjust) {
-  CHECK_ALIGNED(adjust, kStackAlignment);
-  addq(CpuRegister(RSP), Immediate(adjust));
-  cfi_.AdjustCFAOffset(-adjust);
-}
-
-void X86_64Assembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
-  X86_64ManagedRegister src = msrc.AsX86_64();
-  if (src.IsNoRegister()) {
-    CHECK_EQ(0u, size);
-  } else if (src.IsCpuRegister()) {
-    if (size == 4) {
-      CHECK_EQ(4u, size);
-      movl(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
-    } else {
-      CHECK_EQ(8u, size);
-      movq(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
-    }
-  } else if (src.IsRegisterPair()) {
-    CHECK_EQ(0u, size);
-    movq(Address(CpuRegister(RSP), offs), src.AsRegisterPairLow());
-    movq(Address(CpuRegister(RSP), FrameOffset(offs.Int32Value()+4)),
-         src.AsRegisterPairHigh());
-  } else if (src.IsX87Register()) {
-    if (size == 4) {
-      fstps(Address(CpuRegister(RSP), offs));
-    } else {
-      fstpl(Address(CpuRegister(RSP), offs));
-    }
-  } else {
-    CHECK(src.IsXmmRegister());
-    if (size == 4) {
-      movss(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
-    } else {
-      movsd(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
-    }
-  }
-}
-
-void X86_64Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
-  X86_64ManagedRegister src = msrc.AsX86_64();
-  CHECK(src.IsCpuRegister());
-  movl(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
-}
-
-void X86_64Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
-  X86_64ManagedRegister src = msrc.AsX86_64();
-  CHECK(src.IsCpuRegister());
-  movq(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
-}
-
-void X86_64Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
-                                            ManagedRegister) {
-  movl(Address(CpuRegister(RSP), dest), Immediate(imm));  // TODO(64) movq?
-}
-
-void X86_64Assembler::StoreStackOffsetToThread(ThreadOffset64 thr_offs,
-                                                 FrameOffset fr_offs,
-                                                 ManagedRegister mscratch) {
-  X86_64ManagedRegister scratch = mscratch.AsX86_64();
-  CHECK(scratch.IsCpuRegister());
-  leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), fr_offs));
-  gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
-}
-
-void X86_64Assembler::StoreStackPointerToThread(ThreadOffset64 thr_offs) {
-  gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
-}
-
-void X86_64Assembler::StoreSpanning(FrameOffset /*dst*/, ManagedRegister /*src*/,
-                                 FrameOffset /*in_off*/, ManagedRegister /*scratch*/) {
-  UNIMPLEMENTED(FATAL);  // this case only currently exists for ARM
-}
-
-void X86_64Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
-  X86_64ManagedRegister dest = mdest.AsX86_64();
-  if (dest.IsNoRegister()) {
-    CHECK_EQ(0u, size);
-  } else if (dest.IsCpuRegister()) {
-    if (size == 4) {
-      CHECK_EQ(4u, size);
-      movl(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
-    } else {
-      CHECK_EQ(8u, size);
-      movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
-    }
-  } else if (dest.IsRegisterPair()) {
-    CHECK_EQ(0u, size);
-    movq(dest.AsRegisterPairLow(), Address(CpuRegister(RSP), src));
-    movq(dest.AsRegisterPairHigh(), Address(CpuRegister(RSP), FrameOffset(src.Int32Value()+4)));
-  } else if (dest.IsX87Register()) {
-    if (size == 4) {
-      flds(Address(CpuRegister(RSP), src));
-    } else {
-      fldl(Address(CpuRegister(RSP), src));
-    }
-  } else {
-    CHECK(dest.IsXmmRegister());
-    if (size == 4) {
-      movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
-    } else {
-      movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
-    }
-  }
-}
-
-void X86_64Assembler::LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) {
-  X86_64ManagedRegister dest = mdest.AsX86_64();
-  if (dest.IsNoRegister()) {
-    CHECK_EQ(0u, size);
-  } else if (dest.IsCpuRegister()) {
-    CHECK_EQ(4u, size);
-    gs()->movl(dest.AsCpuRegister(), Address::Absolute(src, true));
-  } else if (dest.IsRegisterPair()) {
-    CHECK_EQ(8u, size);
-    gs()->movq(dest.AsRegisterPairLow(), Address::Absolute(src, true));
-  } else if (dest.IsX87Register()) {
-    if (size == 4) {
-      gs()->flds(Address::Absolute(src, true));
-    } else {
-      gs()->fldl(Address::Absolute(src, true));
-    }
-  } else {
-    CHECK(dest.IsXmmRegister());
-    if (size == 4) {
-      gs()->movss(dest.AsXmmRegister(), Address::Absolute(src, true));
-    } else {
-      gs()->movsd(dest.AsXmmRegister(), Address::Absolute(src, true));
-    }
-  }
-}
-
-void X86_64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
-  X86_64ManagedRegister dest = mdest.AsX86_64();
-  CHECK(dest.IsCpuRegister());
-  movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
-}
-
-void X86_64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
-                              bool unpoison_reference) {
-  X86_64ManagedRegister dest = mdest.AsX86_64();
-  CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
-  movl(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
-  if (unpoison_reference) {
-    MaybeUnpoisonHeapReference(dest.AsCpuRegister());
-  }
-}
-
-void X86_64Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
-                              Offset offs) {
-  X86_64ManagedRegister dest = mdest.AsX86_64();
-  CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
-  movq(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
-}
-
-void X86_64Assembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) {
-  X86_64ManagedRegister dest = mdest.AsX86_64();
-  CHECK(dest.IsCpuRegister());
-  gs()->movq(dest.AsCpuRegister(), Address::Absolute(offs, true));
-}
-
-void X86_64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
-  X86_64ManagedRegister reg = mreg.AsX86_64();
-  CHECK(size == 1 || size == 2) << size;
-  CHECK(reg.IsCpuRegister()) << reg;
-  if (size == 1) {
-    movsxb(reg.AsCpuRegister(), reg.AsCpuRegister());
-  } else {
-    movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
-  }
-}
-
-void X86_64Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
-  X86_64ManagedRegister reg = mreg.AsX86_64();
-  CHECK(size == 1 || size == 2) << size;
-  CHECK(reg.IsCpuRegister()) << reg;
-  if (size == 1) {
-    movzxb(reg.AsCpuRegister(), reg.AsCpuRegister());
-  } else {
-    movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
-  }
-}
-
-void X86_64Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
-  X86_64ManagedRegister dest = mdest.AsX86_64();
-  X86_64ManagedRegister src = msrc.AsX86_64();
-  if (!dest.Equals(src)) {
-    if (dest.IsCpuRegister() && src.IsCpuRegister()) {
-      movq(dest.AsCpuRegister(), src.AsCpuRegister());
-    } else if (src.IsX87Register() && dest.IsXmmRegister()) {
-      // Pass via stack and pop X87 register
-      subl(CpuRegister(RSP), Immediate(16));
-      if (size == 4) {
-        CHECK_EQ(src.AsX87Register(), ST0);
-        fstps(Address(CpuRegister(RSP), 0));
-        movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
-      } else {
-        CHECK_EQ(src.AsX87Register(), ST0);
-        fstpl(Address(CpuRegister(RSP), 0));
-        movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
-      }
-      addq(CpuRegister(RSP), Immediate(16));
-    } else {
-      // TODO: x87, SSE
-      UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
-    }
-  }
-}
-
-void X86_64Assembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
-  X86_64ManagedRegister scratch = mscratch.AsX86_64();
-  CHECK(scratch.IsCpuRegister());
-  movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), src));
-  movl(Address(CpuRegister(RSP), dest), scratch.AsCpuRegister());
-}
-
-void X86_64Assembler::CopyRawPtrFromThread(FrameOffset fr_offs,
-                                             ThreadOffset64 thr_offs,
-                                             ManagedRegister mscratch) {
-  X86_64ManagedRegister scratch = mscratch.AsX86_64();
-  CHECK(scratch.IsCpuRegister());
-  gs()->movq(scratch.AsCpuRegister(), Address::Absolute(thr_offs, true));
-  Store(fr_offs, scratch, 8);
-}
-
-void X86_64Assembler::CopyRawPtrToThread(ThreadOffset64 thr_offs,
-                                           FrameOffset fr_offs,
-                                           ManagedRegister mscratch) {
-  X86_64ManagedRegister scratch = mscratch.AsX86_64();
-  CHECK(scratch.IsCpuRegister());
-  Load(scratch, fr_offs, 8);
-  gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
-}
-
-void X86_64Assembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch,
-                           size_t size) {
-  X86_64ManagedRegister scratch = mscratch.AsX86_64();
-  if (scratch.IsCpuRegister() && size == 8) {
-    Load(scratch, src, 4);
-    Store(dest, scratch, 4);
-    Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
-    Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
-  } else {
-    Load(scratch, src, size);
-    Store(dest, scratch, size);
-  }
-}
-
-void X86_64Assembler::Copy(FrameOffset /*dst*/, ManagedRegister /*src_base*/, Offset /*src_offset*/,
-                        ManagedRegister /*scratch*/, size_t /*size*/) {
-  UNIMPLEMENTED(FATAL);
-}
-
-void X86_64Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
-                        ManagedRegister scratch, size_t size) {
-  CHECK(scratch.IsNoRegister());
-  CHECK_EQ(size, 4u);
-  pushq(Address(CpuRegister(RSP), src));
-  popq(Address(dest_base.AsX86_64().AsCpuRegister(), dest_offset));
-}
-
-void X86_64Assembler::Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
-                        ManagedRegister mscratch, size_t size) {
-  CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
-  CHECK_EQ(size, 4u);
-  movq(scratch, Address(CpuRegister(RSP), src_base));
-  movq(scratch, Address(scratch, src_offset));
-  movq(Address(CpuRegister(RSP), dest), scratch);
-}
-
-void X86_64Assembler::Copy(ManagedRegister dest, Offset dest_offset,
-                        ManagedRegister src, Offset src_offset,
-                        ManagedRegister scratch, size_t size) {
-  CHECK_EQ(size, 4u);
-  CHECK(scratch.IsNoRegister());
-  pushq(Address(src.AsX86_64().AsCpuRegister(), src_offset));
-  popq(Address(dest.AsX86_64().AsCpuRegister(), dest_offset));
-}
-
-void X86_64Assembler::Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
-                        ManagedRegister mscratch, size_t size) {
-  CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
-  CHECK_EQ(size, 4u);
-  CHECK_EQ(dest.Int32Value(), src.Int32Value());
-  movq(scratch, Address(CpuRegister(RSP), src));
-  pushq(Address(scratch, src_offset));
-  popq(Address(scratch, dest_offset));
-}
-
-void X86_64Assembler::MemoryBarrier(ManagedRegister) {
-  mfence();
-}
-
-void X86_64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
-                                   FrameOffset handle_scope_offset,
-                                   ManagedRegister min_reg, bool null_allowed) {
-  X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
-  X86_64ManagedRegister in_reg = min_reg.AsX86_64();
-  if (in_reg.IsNoRegister()) {  // TODO(64): && null_allowed
-    // Use out_reg as indicator of null.
-    in_reg = out_reg;
-    // TODO: movzwl
-    movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
-  }
-  CHECK(in_reg.IsCpuRegister());
-  CHECK(out_reg.IsCpuRegister());
-  VerifyObject(in_reg, null_allowed);
-  if (null_allowed) {
-    Label null_arg;
-    if (!out_reg.Equals(in_reg)) {
-      xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
-    }
-    testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
-    j(kZero, &null_arg);
-    leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
-    Bind(&null_arg);
-  } else {
-    leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
-  }
-}
-
-void X86_64Assembler::CreateHandleScopeEntry(FrameOffset out_off,
-                                   FrameOffset handle_scope_offset,
-                                   ManagedRegister mscratch,
-                                   bool null_allowed) {
-  X86_64ManagedRegister scratch = mscratch.AsX86_64();
-  CHECK(scratch.IsCpuRegister());
-  if (null_allowed) {
-    Label null_arg;
-    movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
-    testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
-    j(kZero, &null_arg);
-    leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
-    Bind(&null_arg);
-  } else {
-    leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
-  }
-  Store(out_off, scratch, 8);
-}
-
-// Given a handle scope entry, load the associated reference.
-void X86_64Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
-                                         ManagedRegister min_reg) {
-  X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
-  X86_64ManagedRegister in_reg = min_reg.AsX86_64();
-  CHECK(out_reg.IsCpuRegister());
-  CHECK(in_reg.IsCpuRegister());
-  Label null_arg;
-  if (!out_reg.Equals(in_reg)) {
-    xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
-  }
-  testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
-  j(kZero, &null_arg);
-  movq(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
-  Bind(&null_arg);
-}
-
-void X86_64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
-  // TODO: not validating references
-}
-
-void X86_64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
-  // TODO: not validating references
-}
-
-void X86_64Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
-  X86_64ManagedRegister base = mbase.AsX86_64();
-  CHECK(base.IsCpuRegister());
-  call(Address(base.AsCpuRegister(), offset.Int32Value()));
-  // TODO: place reference map on call
-}
-
-void X86_64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
-  CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
-  movq(scratch, Address(CpuRegister(RSP), base));
-  call(Address(scratch, offset));
-}
-
-void X86_64Assembler::CallFromThread(ThreadOffset64 offset, ManagedRegister /*mscratch*/) {
-  gs()->call(Address::Absolute(offset, true));
-}
-
-void X86_64Assembler::GetCurrentThread(ManagedRegister tr) {
-  gs()->movq(tr.AsX86_64().AsCpuRegister(),
-             Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
-}
-
-void X86_64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister mscratch) {
-  X86_64ManagedRegister scratch = mscratch.AsX86_64();
-  gs()->movq(scratch.AsCpuRegister(),
-             Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
-  movq(Address(CpuRegister(RSP), offset), scratch.AsCpuRegister());
-}
-
-// Slowpath entered when Thread::Current()->_exception is non-null
-class X86_64ExceptionSlowPath FINAL : public SlowPath {
- public:
-  explicit X86_64ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
-  virtual void Emit(Assembler *sp_asm) OVERRIDE;
- private:
-  const size_t stack_adjust_;
-};
-
-void X86_64Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
-  X86_64ExceptionSlowPath* slow = new (GetArena()) X86_64ExceptionSlowPath(stack_adjust);
-  buffer_.EnqueueSlowPath(slow);
-  gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true), Immediate(0));
-  j(kNotEqual, slow->Entry());
-}
-
-void X86_64ExceptionSlowPath::Emit(Assembler *sasm) {
-  X86_64Assembler* sp_asm = down_cast<X86_64Assembler*>(sasm);
-#define __ sp_asm->
-  __ Bind(&entry_);
-  // Note: the return value is dead
-  if (stack_adjust_ != 0) {  // Fix up the frame.
-    __ DecreaseFrameSize(stack_adjust_);
-  }
-  // Pass exception as argument in RDI
-  __ gs()->movq(CpuRegister(RDI),
-                Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true));
-  __ gs()->call(
-      Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pDeliverException), true));
-  // this call should never return
-  __ int3();
-#undef __
-}
-
 void X86_64Assembler::AddConstantArea() {
   ArrayRef<const int32_t> area = constant_area_.GetBuffer();
   for (size_t i = 0, e = area.size(); i < e; i++) {
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index d298da2..370f49c 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -333,20 +333,11 @@
 };
 
 
-class X86_64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
+class X86_64Assembler FINAL : public Assembler {
  public:
   explicit X86_64Assembler(ArenaAllocator* arena) : Assembler(arena), constant_area_(arena) {}
   virtual ~X86_64Assembler() {}
 
-  size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); }
-  DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
-  void FinalizeCode() OVERRIDE {
-    Assembler::FinalizeCode();
-  }
-  void FinalizeInstructions(const MemoryRegion& region) {
-    Assembler::FinalizeInstructions(region);
-  }
-
   /*
    * Emit Machine Instructions.
    */
@@ -709,124 +700,6 @@
   }
   void Bind(NearLabel* label);
 
-  //
-  // Overridden common assembler high-level functionality
-  //
-
-  // Emit code that will create an activation on the stack
-  void BuildFrame(size_t frame_size,
-                  ManagedRegister method_reg,
-                  ArrayRef<const ManagedRegister> callee_save_regs,
-                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
-
-  // Emit code that will remove an activation from the stack
-  void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
-      OVERRIDE;
-
-  void IncreaseFrameSize(size_t adjust) OVERRIDE;
-  void DecreaseFrameSize(size_t adjust) OVERRIDE;
-
-  // Store routines
-  void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
-  void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
-  void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
-
-  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
-
-  void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
-                                FrameOffset fr_offs,
-                                ManagedRegister scratch) OVERRIDE;
-
-  void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
-
-  void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
-                     ManagedRegister scratch) OVERRIDE;
-
-  // Load routines
-  void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
-
-  void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
-
-  void LoadRef(ManagedRegister dest, FrameOffset  src) OVERRIDE;
-
-  void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
-               bool unpoison_reference) OVERRIDE;
-
-  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
-
-  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
-
-  // Copying routines
-  void Move(ManagedRegister dest, ManagedRegister src, size_t size);
-
-  void CopyRawPtrFromThread(FrameOffset fr_offs,
-                            ThreadOffset64 thr_offs,
-                            ManagedRegister scratch) OVERRIDE;
-
-  void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
-      OVERRIDE;
-
-  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
-
-  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
-
-  void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
-            size_t size) OVERRIDE;
-
-  void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
-            size_t size) OVERRIDE;
-
-  void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
-            size_t size) OVERRIDE;
-
-  void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
-            ManagedRegister scratch, size_t size) OVERRIDE;
-
-  void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
-            ManagedRegister scratch, size_t size) OVERRIDE;
-
-  void MemoryBarrier(ManagedRegister) OVERRIDE;
-
-  // Sign extension
-  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
-  // Zero extension
-  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
-  // Exploit fast access in managed code to Thread::Current()
-  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
-  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
-
-  // Set up out_reg to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed. in_reg holds a possibly stale reference
-  // that can be used to avoid loading the handle scope entry to see if the value is
-  // null.
-  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
-                              ManagedRegister in_reg, bool null_allowed) OVERRIDE;
-
-  // Set up out_off to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed.
-  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
-                              ManagedRegister scratch, bool null_allowed) OVERRIDE;
-
-  // src holds a handle scope entry (Object**) load this into dst
-  virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
-                                     ManagedRegister src);
-
-  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
-  // know that src may not be null.
-  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
-  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
-
-  // Call to address held at [base+offset]
-  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
-
-  // Generate code to check if Thread::Current()->exception_ is non-null
-  // and branch to a ExceptionSlowPath if it is.
-  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
-
   // Add a double to the constant area, returning the offset into
   // the constant area where the literal resides.
   size_t AddDouble(double v) { return constant_area_.AddDouble(v); }
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 788c725..36c966b 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -22,7 +22,9 @@
 
 #include "base/bit_utils.h"
 #include "base/stl_util.h"
+#include "jni_macro_assembler_x86_64.h"
 #include "utils/assembler_test.h"
+#include "utils/jni_macro_assembler_test.h"
 
 namespace art {
 
@@ -1485,6 +1487,62 @@
   DriverFn(&setcc_test_fn, "setcc");
 }
 
+TEST_F(AssemblerX86_64Test, MovzxbRegs) {
+  DriverStr(Repeatrb(&x86_64::X86_64Assembler::movzxb, "movzbl %{reg2}, %{reg1}"), "movzxb");
+}
+
+TEST_F(AssemblerX86_64Test, MovsxbRegs) {
+  DriverStr(Repeatrb(&x86_64::X86_64Assembler::movsxb, "movsbl %{reg2}, %{reg1}"), "movsxb");
+}
+
+TEST_F(AssemblerX86_64Test, Repnescasw) {
+  GetAssembler()->repne_scasw();
+  const char* expected = "repne scasw\n";
+  DriverStr(expected, "Repnescasw");
+}
+
+TEST_F(AssemblerX86_64Test, Repecmpsw) {
+  GetAssembler()->repe_cmpsw();
+  const char* expected = "repe cmpsw\n";
+  DriverStr(expected, "Repecmpsw");
+}
+
+TEST_F(AssemblerX86_64Test, Repecmpsl) {
+  GetAssembler()->repe_cmpsl();
+  const char* expected = "repe cmpsl\n";
+  DriverStr(expected, "Repecmpsl");
+}
+
+TEST_F(AssemblerX86_64Test, Repecmpsq) {
+  GetAssembler()->repe_cmpsq();
+  const char* expected = "repe cmpsq\n";
+  DriverStr(expected, "Repecmpsq");
+}
+
+TEST_F(AssemblerX86_64Test, Cmpb) {
+  GetAssembler()->cmpb(x86_64::Address(x86_64::CpuRegister(x86_64::RDI), 128),
+                       x86_64::Immediate(0));
+  const char* expected = "cmpb $0, 128(%RDI)\n";
+  DriverStr(expected, "cmpb");
+}
+
+class JNIMacroAssemblerX86_64Test : public JNIMacroAssemblerTest<x86_64::X86_64JNIMacroAssembler> {
+ public:
+  using Base = JNIMacroAssemblerTest<x86_64::X86_64JNIMacroAssembler>;
+
+ protected:
+  // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
+  std::string GetArchitectureString() OVERRIDE {
+    return "x86_64";
+  }
+
+  std::string GetDisassembleParameters() OVERRIDE {
+    return " -D -bbinary -mi386:x86-64 -Mx86-64,addr64,data32 --no-show-raw-insn";
+  }
+
+ private:
+};
+
 static x86_64::X86_64ManagedRegister ManagedFromCpu(x86_64::Register r) {
   return x86_64::X86_64ManagedRegister::FromCpuRegister(r);
 }
@@ -1493,8 +1551,8 @@
   return x86_64::X86_64ManagedRegister::FromXmmRegister(r);
 }
 
-std::string buildframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
-                               x86_64::X86_64Assembler* assembler) {
+std::string buildframe_test_fn(JNIMacroAssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+                               x86_64::X86_64JNIMacroAssembler* assembler) {
   // TODO: more interesting spill registers / entry spills.
 
   // Two random spill regs.
@@ -1536,12 +1594,12 @@
   return str.str();
 }
 
-TEST_F(AssemblerX86_64Test, BuildFrame) {
+TEST_F(JNIMacroAssemblerX86_64Test, BuildFrame) {
   DriverFn(&buildframe_test_fn, "BuildFrame");
 }
 
-std::string removeframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
-                                x86_64::X86_64Assembler* assembler) {
+std::string removeframe_test_fn(JNIMacroAssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+                                x86_64::X86_64JNIMacroAssembler* assembler) {
   // TODO: more interesting spill registers / entry spills.
 
   // Two random spill regs.
@@ -1567,12 +1625,13 @@
   return str.str();
 }
 
-TEST_F(AssemblerX86_64Test, RemoveFrame) {
+TEST_F(JNIMacroAssemblerX86_64Test, RemoveFrame) {
   DriverFn(&removeframe_test_fn, "RemoveFrame");
 }
 
-std::string increaseframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
-                                  x86_64::X86_64Assembler* assembler) {
+std::string increaseframe_test_fn(
+    JNIMacroAssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+    x86_64::X86_64JNIMacroAssembler* assembler) {
   assembler->IncreaseFrameSize(0U);
   assembler->IncreaseFrameSize(kStackAlignment);
   assembler->IncreaseFrameSize(10 * kStackAlignment);
@@ -1586,12 +1645,13 @@
   return str.str();
 }
 
-TEST_F(AssemblerX86_64Test, IncreaseFrame) {
+TEST_F(JNIMacroAssemblerX86_64Test, IncreaseFrame) {
   DriverFn(&increaseframe_test_fn, "IncreaseFrame");
 }
 
-std::string decreaseframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
-                                  x86_64::X86_64Assembler* assembler) {
+std::string decreaseframe_test_fn(
+    JNIMacroAssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+    x86_64::X86_64JNIMacroAssembler* assembler) {
   assembler->DecreaseFrameSize(0U);
   assembler->DecreaseFrameSize(kStackAlignment);
   assembler->DecreaseFrameSize(10 * kStackAlignment);
@@ -1605,47 +1665,8 @@
   return str.str();
 }
 
-TEST_F(AssemblerX86_64Test, DecreaseFrame) {
+TEST_F(JNIMacroAssemblerX86_64Test, DecreaseFrame) {
   DriverFn(&decreaseframe_test_fn, "DecreaseFrame");
 }
 
-TEST_F(AssemblerX86_64Test, MovzxbRegs) {
-  DriverStr(Repeatrb(&x86_64::X86_64Assembler::movzxb, "movzbl %{reg2}, %{reg1}"), "movzxb");
-}
-
-TEST_F(AssemblerX86_64Test, MovsxbRegs) {
-  DriverStr(Repeatrb(&x86_64::X86_64Assembler::movsxb, "movsbl %{reg2}, %{reg1}"), "movsxb");
-}
-
-TEST_F(AssemblerX86_64Test, Repnescasw) {
-  GetAssembler()->repne_scasw();
-  const char* expected = "repne scasw\n";
-  DriverStr(expected, "Repnescasw");
-}
-
-TEST_F(AssemblerX86_64Test, Repecmpsw) {
-  GetAssembler()->repe_cmpsw();
-  const char* expected = "repe cmpsw\n";
-  DriverStr(expected, "Repecmpsw");
-}
-
-TEST_F(AssemblerX86_64Test, Repecmpsl) {
-  GetAssembler()->repe_cmpsl();
-  const char* expected = "repe cmpsl\n";
-  DriverStr(expected, "Repecmpsl");
-}
-
-TEST_F(AssemblerX86_64Test, Repecmpsq) {
-  GetAssembler()->repe_cmpsq();
-  const char* expected = "repe cmpsq\n";
-  DriverStr(expected, "Repecmpsq");
-}
-
-TEST_F(AssemblerX86_64Test, Cmpb) {
-  GetAssembler()->cmpb(x86_64::Address(x86_64::CpuRegister(x86_64::RDI), 128),
-                       x86_64::Immediate(0));
-  const char* expected = "cmpb $0, 128(%RDI)\n";
-  DriverStr(expected, "cmpb");
-}
-
 }  // namespace art
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
new file mode 100644
index 0000000..47fb59b
--- /dev/null
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
@@ -0,0 +1,603 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_macro_assembler_x86_64.h"
+
+#include "base/casts.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "memory_region.h"
+#include "thread.h"
+
+namespace art {
+namespace x86_64 {
+
+static dwarf::Reg DWARFReg(Register reg) {
+  return dwarf::Reg::X86_64Core(static_cast<int>(reg));
+}
+static dwarf::Reg DWARFReg(FloatRegister reg) {
+  return dwarf::Reg::X86_64Fp(static_cast<int>(reg));
+}
+
+constexpr size_t kFramePointerSize = 8;
+
+#define __ asm_.
+
+void X86_64JNIMacroAssembler::BuildFrame(size_t frame_size,
+                                         ManagedRegister method_reg,
+                                         ArrayRef<const ManagedRegister> spill_regs,
+                                         const ManagedRegisterEntrySpills& entry_spills) {
+  DCHECK_EQ(CodeSize(), 0U);  // Nothing emitted yet.
+  cfi().SetCurrentCFAOffset(8);  // Return address on stack.
+  CHECK_ALIGNED(frame_size, kStackAlignment);
+  int gpr_count = 0;
+  for (int i = spill_regs.size() - 1; i >= 0; --i) {
+    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
+    if (spill.IsCpuRegister()) {
+      __ pushq(spill.AsCpuRegister());
+      gpr_count++;
+      cfi().AdjustCFAOffset(kFramePointerSize);
+      cfi().RelOffset(DWARFReg(spill.AsCpuRegister().AsRegister()), 0);
+    }
+  }
+  // return address then method on stack.
+  int64_t rest_of_frame = static_cast<int64_t>(frame_size)
+                          - (gpr_count * kFramePointerSize)
+                          - kFramePointerSize /*return address*/;
+  __ subq(CpuRegister(RSP), Immediate(rest_of_frame));
+  cfi().AdjustCFAOffset(rest_of_frame);
+
+  // spill xmms
+  int64_t offset = rest_of_frame;
+  for (int i = spill_regs.size() - 1; i >= 0; --i) {
+    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
+    if (spill.IsXmmRegister()) {
+      offset -= sizeof(double);
+      __ movsd(Address(CpuRegister(RSP), offset), spill.AsXmmRegister());
+      cfi().RelOffset(DWARFReg(spill.AsXmmRegister().AsFloatRegister()), offset);
+    }
+  }
+
+  static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
+                "Unexpected frame pointer size.");
+
+  __ movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
+
+  for (size_t i = 0; i < entry_spills.size(); ++i) {
+    ManagedRegisterSpill spill = entry_spills.at(i);
+    if (spill.AsX86_64().IsCpuRegister()) {
+      if (spill.getSize() == 8) {
+        __ movq(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
+                spill.AsX86_64().AsCpuRegister());
+      } else {
+        CHECK_EQ(spill.getSize(), 4);
+        __ movl(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
+                spill.AsX86_64().AsCpuRegister());
+      }
+    } else {
+      if (spill.getSize() == 8) {
+        __ movsd(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
+                 spill.AsX86_64().AsXmmRegister());
+      } else {
+        CHECK_EQ(spill.getSize(), 4);
+        __ movss(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
+                 spill.AsX86_64().AsXmmRegister());
+      }
+    }
+  }
+}
+
+void X86_64JNIMacroAssembler::RemoveFrame(size_t frame_size,
+                                          ArrayRef<const ManagedRegister> spill_regs) {
+  CHECK_ALIGNED(frame_size, kStackAlignment);
+  cfi().RememberState();
+  int gpr_count = 0;
+  // unspill xmms
+  int64_t offset = static_cast<int64_t>(frame_size)
+      - (spill_regs.size() * kFramePointerSize)
+      - 2 * kFramePointerSize;
+  for (size_t i = 0; i < spill_regs.size(); ++i) {
+    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
+    if (spill.IsXmmRegister()) {
+      offset += sizeof(double);
+      __ movsd(spill.AsXmmRegister(), Address(CpuRegister(RSP), offset));
+      cfi().Restore(DWARFReg(spill.AsXmmRegister().AsFloatRegister()));
+    } else {
+      gpr_count++;
+    }
+  }
+  int adjust = static_cast<int>(frame_size) - (gpr_count * kFramePointerSize) - kFramePointerSize;
+  __ addq(CpuRegister(RSP), Immediate(adjust));
+  cfi().AdjustCFAOffset(-adjust);
+  for (size_t i = 0; i < spill_regs.size(); ++i) {
+    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
+    if (spill.IsCpuRegister()) {
+      __ popq(spill.AsCpuRegister());
+      cfi().AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
+      cfi().Restore(DWARFReg(spill.AsCpuRegister().AsRegister()));
+    }
+  }
+  __ ret();
+  // The CFI should be restored for any code that follows the exit block.
+  cfi().RestoreState();
+  cfi().DefCFAOffset(frame_size);
+}
+
+void X86_64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
+  CHECK_ALIGNED(adjust, kStackAlignment);
+  __ addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));
+  cfi().AdjustCFAOffset(adjust);
+}
+
+static void DecreaseFrameSizeImpl(size_t adjust, X86_64Assembler* assembler) {
+  CHECK_ALIGNED(adjust, kStackAlignment);
+  assembler->addq(CpuRegister(RSP), Immediate(adjust));
+  assembler->cfi().AdjustCFAOffset(-adjust);
+}
+
+void X86_64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
+  DecreaseFrameSizeImpl(adjust, &asm_);
+}
+
+void X86_64JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
+  X86_64ManagedRegister src = msrc.AsX86_64();
+  if (src.IsNoRegister()) {
+    CHECK_EQ(0u, size);
+  } else if (src.IsCpuRegister()) {
+    if (size == 4) {
+      CHECK_EQ(4u, size);
+      __ movl(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
+    } else {
+      CHECK_EQ(8u, size);
+      __ movq(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
+    }
+  } else if (src.IsRegisterPair()) {
+    CHECK_EQ(0u, size);
+    __ movq(Address(CpuRegister(RSP), offs), src.AsRegisterPairLow());
+    __ movq(Address(CpuRegister(RSP), FrameOffset(offs.Int32Value()+4)),
+            src.AsRegisterPairHigh());
+  } else if (src.IsX87Register()) {
+    if (size == 4) {
+      __ fstps(Address(CpuRegister(RSP), offs));
+    } else {
+      __ fstpl(Address(CpuRegister(RSP), offs));
+    }
+  } else {
+    CHECK(src.IsXmmRegister());
+    if (size == 4) {
+      __ movss(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
+    } else {
+      __ movsd(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
+    }
+  }
+}
+
+void X86_64JNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+  X86_64ManagedRegister src = msrc.AsX86_64();
+  CHECK(src.IsCpuRegister());
+  __ movl(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+  X86_64ManagedRegister src = msrc.AsX86_64();
+  CHECK(src.IsCpuRegister());
+  __ movq(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest,
+                                                    uint32_t imm,
+                                                    ManagedRegister) {
+  __ movl(Address(CpuRegister(RSP), dest), Immediate(imm));  // TODO(64) movq?
+}
+
+void X86_64JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset64 thr_offs,
+                                                       FrameOffset fr_offs,
+                                                       ManagedRegister mscratch) {
+  X86_64ManagedRegister scratch = mscratch.AsX86_64();
+  CHECK(scratch.IsCpuRegister());
+  __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), fr_offs));
+  __ gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 thr_offs) {
+  __ gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
+}
+
+void X86_64JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/,
+                                            ManagedRegister /*src*/,
+                                            FrameOffset /*in_off*/,
+                                            ManagedRegister /*scratch*/) {
+  UNIMPLEMENTED(FATAL);  // this case only currently exists for ARM
+}
+
+void X86_64JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
+  X86_64ManagedRegister dest = mdest.AsX86_64();
+  if (dest.IsNoRegister()) {
+    CHECK_EQ(0u, size);
+  } else if (dest.IsCpuRegister()) {
+    if (size == 4) {
+      CHECK_EQ(4u, size);
+      __ movl(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
+    } else {
+      CHECK_EQ(8u, size);
+      __ movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
+    }
+  } else if (dest.IsRegisterPair()) {
+    CHECK_EQ(0u, size);
+    __ movq(dest.AsRegisterPairLow(), Address(CpuRegister(RSP), src));
+    __ movq(dest.AsRegisterPairHigh(), Address(CpuRegister(RSP), FrameOffset(src.Int32Value()+4)));
+  } else if (dest.IsX87Register()) {
+    if (size == 4) {
+      __ flds(Address(CpuRegister(RSP), src));
+    } else {
+      __ fldl(Address(CpuRegister(RSP), src));
+    }
+  } else {
+    CHECK(dest.IsXmmRegister());
+    if (size == 4) {
+      __ movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
+    } else {
+      __ movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
+    }
+  }
+}
+
+void X86_64JNIMacroAssembler::LoadFromThread(ManagedRegister mdest,
+                                             ThreadOffset64 src, size_t size) {
+  X86_64ManagedRegister dest = mdest.AsX86_64();
+  if (dest.IsNoRegister()) {
+    CHECK_EQ(0u, size);
+  } else if (dest.IsCpuRegister()) {
+    CHECK_EQ(4u, size);
+    __ gs()->movl(dest.AsCpuRegister(), Address::Absolute(src, true));
+  } else if (dest.IsRegisterPair()) {
+    CHECK_EQ(8u, size);
+    __ gs()->movq(dest.AsRegisterPairLow(), Address::Absolute(src, true));
+  } else if (dest.IsX87Register()) {
+    if (size == 4) {
+      __ gs()->flds(Address::Absolute(src, true));
+    } else {
+      __ gs()->fldl(Address::Absolute(src, true));
+    }
+  } else {
+    CHECK(dest.IsXmmRegister());
+    if (size == 4) {
+      __ gs()->movss(dest.AsXmmRegister(), Address::Absolute(src, true));
+    } else {
+      __ gs()->movsd(dest.AsXmmRegister(), Address::Absolute(src, true));
+    }
+  }
+}
+
+void X86_64JNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+  X86_64ManagedRegister dest = mdest.AsX86_64();
+  CHECK(dest.IsCpuRegister());
+  __ movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
+}
+
+void X86_64JNIMacroAssembler::LoadRef(ManagedRegister mdest,
+                                      ManagedRegister base,
+                                      MemberOffset offs,
+                                      bool unpoison_reference) {
+  X86_64ManagedRegister dest = mdest.AsX86_64();
+  CHECK(dest.IsCpuRegister() && base.AsX86_64().IsCpuRegister());
+  __ movl(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
+  if (unpoison_reference) {
+    __ MaybeUnpoisonHeapReference(dest.AsCpuRegister());
+  }
+}
+
+void X86_64JNIMacroAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) {
+  X86_64ManagedRegister dest = mdest.AsX86_64();
+  CHECK(dest.IsCpuRegister() && base.AsX86_64().IsCpuRegister());
+  __ movq(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
+}
+
+void X86_64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) {
+  X86_64ManagedRegister dest = mdest.AsX86_64();
+  CHECK(dest.IsCpuRegister());
+  __ gs()->movq(dest.AsCpuRegister(), Address::Absolute(offs, true));
+}
+
+void X86_64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
+  X86_64ManagedRegister reg = mreg.AsX86_64();
+  CHECK(size == 1 || size == 2) << size;
+  CHECK(reg.IsCpuRegister()) << reg;
+  if (size == 1) {
+    __ movsxb(reg.AsCpuRegister(), reg.AsCpuRegister());
+  } else {
+    __ movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+  }
+}
+
+void X86_64JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
+  X86_64ManagedRegister reg = mreg.AsX86_64();
+  CHECK(size == 1 || size == 2) << size;
+  CHECK(reg.IsCpuRegister()) << reg;
+  if (size == 1) {
+    __ movzxb(reg.AsCpuRegister(), reg.AsCpuRegister());
+  } else {
+    __ movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+  }
+}
+
+void X86_64JNIMacroAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
+  X86_64ManagedRegister dest = mdest.AsX86_64();
+  X86_64ManagedRegister src = msrc.AsX86_64();
+  if (!dest.Equals(src)) {
+    if (dest.IsCpuRegister() && src.IsCpuRegister()) {
+      __ movq(dest.AsCpuRegister(), src.AsCpuRegister());
+    } else if (src.IsX87Register() && dest.IsXmmRegister()) {
+      // Pass via stack and pop X87 register
+      __ subq(CpuRegister(RSP), Immediate(16));
+      if (size == 4) {
+        CHECK_EQ(src.AsX87Register(), ST0);
+        __ fstps(Address(CpuRegister(RSP), 0));
+        __ movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
+      } else {
+        CHECK_EQ(src.AsX87Register(), ST0);
+        __ fstpl(Address(CpuRegister(RSP), 0));
+        __ movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
+      }
+      __ addq(CpuRegister(RSP), Immediate(16));
+    } else {
+      // TODO: x87, SSE
+      UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
+    }
+  }
+}
+
+void X86_64JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
+  X86_64ManagedRegister scratch = mscratch.AsX86_64();
+  CHECK(scratch.IsCpuRegister());
+  __ movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), src));
+  __ movl(Address(CpuRegister(RSP), dest), scratch.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+                                                   ThreadOffset64 thr_offs,
+                                                   ManagedRegister mscratch) {
+  X86_64ManagedRegister scratch = mscratch.AsX86_64();
+  CHECK(scratch.IsCpuRegister());
+  __ gs()->movq(scratch.AsCpuRegister(), Address::Absolute(thr_offs, true));
+  Store(fr_offs, scratch, 8);
+}
+
+void X86_64JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset64 thr_offs,
+                                                 FrameOffset fr_offs,
+                                                 ManagedRegister mscratch) {
+  X86_64ManagedRegister scratch = mscratch.AsX86_64();
+  CHECK(scratch.IsCpuRegister());
+  Load(scratch, fr_offs, 8);
+  __ gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
+                                   FrameOffset src,
+                                   ManagedRegister mscratch,
+                                   size_t size) {
+  X86_64ManagedRegister scratch = mscratch.AsX86_64();
+  if (scratch.IsCpuRegister() && size == 8) {
+    Load(scratch, src, 4);
+    Store(dest, scratch, 4);
+    Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
+    Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
+  } else {
+    Load(scratch, src, size);
+    Store(dest, scratch, size);
+  }
+}
+
+void X86_64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
+                                   ManagedRegister /*src_base*/,
+                                   Offset /*src_offset*/,
+                                   ManagedRegister /*scratch*/,
+                                   size_t /*size*/) {
+  UNIMPLEMENTED(FATAL);
+}
+
+void X86_64JNIMacroAssembler::Copy(ManagedRegister dest_base,
+                                   Offset dest_offset,
+                                   FrameOffset src,
+                                   ManagedRegister scratch,
+                                   size_t size) {
+  CHECK(scratch.IsNoRegister());
+  CHECK_EQ(size, 4u);
+  __ pushq(Address(CpuRegister(RSP), src));
+  __ popq(Address(dest_base.AsX86_64().AsCpuRegister(), dest_offset));
+}
+
+void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
+                                   FrameOffset src_base,
+                                   Offset src_offset,
+                                   ManagedRegister mscratch,
+                                   size_t size) {
+  CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
+  CHECK_EQ(size, 4u);
+  __ movq(scratch, Address(CpuRegister(RSP), src_base));
+  __ movq(scratch, Address(scratch, src_offset));
+  __ movq(Address(CpuRegister(RSP), dest), scratch);
+}
+
+void X86_64JNIMacroAssembler::Copy(ManagedRegister dest,
+                                   Offset dest_offset,
+                                   ManagedRegister src,
+                                   Offset src_offset,
+                                   ManagedRegister scratch,
+                                   size_t size) {
+  CHECK_EQ(size, 4u);
+  CHECK(scratch.IsNoRegister());
+  __ pushq(Address(src.AsX86_64().AsCpuRegister(), src_offset));
+  __ popq(Address(dest.AsX86_64().AsCpuRegister(), dest_offset));
+}
+
+void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
+                                   Offset dest_offset,
+                                   FrameOffset src,
+                                   Offset src_offset,
+                                   ManagedRegister mscratch,
+                                   size_t size) {
+  CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
+  CHECK_EQ(size, 4u);
+  CHECK_EQ(dest.Int32Value(), src.Int32Value());
+  __ movq(scratch, Address(CpuRegister(RSP), src));
+  __ pushq(Address(scratch, src_offset));
+  __ popq(Address(scratch, dest_offset));
+}
+
+void X86_64JNIMacroAssembler::MemoryBarrier(ManagedRegister) {
+  __ mfence();
+}
+
+void X86_64JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+                                                     FrameOffset handle_scope_offset,
+                                                     ManagedRegister min_reg,
+                                                     bool null_allowed) {
+  X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
+  X86_64ManagedRegister in_reg = min_reg.AsX86_64();
+  if (in_reg.IsNoRegister()) {  // TODO(64): && null_allowed
+    // Use out_reg as indicator of null.
+    in_reg = out_reg;
+    // TODO: movzwl
+    __ movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+  }
+  CHECK(in_reg.IsCpuRegister());
+  CHECK(out_reg.IsCpuRegister());
+  VerifyObject(in_reg, null_allowed);
+  if (null_allowed) {
+    Label null_arg;
+    if (!out_reg.Equals(in_reg)) {
+      __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+    }
+    __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+    __ j(kZero, &null_arg);
+    __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+    __ Bind(&null_arg);
+  } else {
+    __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+  }
+}
+
+void X86_64JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+                                                     FrameOffset handle_scope_offset,
+                                                     ManagedRegister mscratch,
+                                                     bool null_allowed) {
+  X86_64ManagedRegister scratch = mscratch.AsX86_64();
+  CHECK(scratch.IsCpuRegister());
+  if (null_allowed) {
+    Label null_arg;
+    __ movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+    __ testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
+    __ j(kZero, &null_arg);
+    __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+    __ Bind(&null_arg);
+  } else {
+    __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+  }
+  Store(out_off, scratch, 8);
+}
+
+// Given a handle scope entry, load the associated reference.
+void X86_64JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
+                                                           ManagedRegister min_reg) {
+  X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
+  X86_64ManagedRegister in_reg = min_reg.AsX86_64();
+  CHECK(out_reg.IsCpuRegister());
+  CHECK(in_reg.IsCpuRegister());
+  Label null_arg;
+  if (!out_reg.Equals(in_reg)) {
+    __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+  }
+  __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+  __ j(kZero, &null_arg);
+  __ movq(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
+  __ Bind(&null_arg);
+}
+
+void X86_64JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+  // TODO: not validating references
+}
+
+void X86_64JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+  // TODO: not validating references
+}
+
+void X86_64JNIMacroAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
+  X86_64ManagedRegister base = mbase.AsX86_64();
+  CHECK(base.IsCpuRegister());
+  __ call(Address(base.AsCpuRegister(), offset.Int32Value()));
+  // TODO: place reference map on call
+}
+
+void X86_64JNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
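+  // Load the call target's base from the frame, then call indirectly through
+  // [base + offset].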
+  CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
+  __ movq(scratch, Address(CpuRegister(RSP), base));
+  __ call(Address(scratch, offset));
+}
+
+void X86_64JNIMacroAssembler::CallFromThread(ThreadOffset64 offset, ManagedRegister /*mscratch*/) {
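+  // Call through the pointer stored at the given offset in the current Thread,
+  // addressed via the gs segment on x86-64.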
+  __ gs()->call(Address::Absolute(offset, true));
+}
+
+void X86_64JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
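+  // Thread::Current() lives at a fixed gs-relative offset; load it directly.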
+  __ gs()->movq(tr.AsX86_64().AsCpuRegister(),
+                Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
+}
+
+void X86_64JNIMacroAssembler::GetCurrentThread(FrameOffset offset, ManagedRegister mscratch) {
+  X86_64ManagedRegister scratch = mscratch.AsX86_64();
+  __ gs()->movq(scratch.AsCpuRegister(),
+                Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
+  __ movq(Address(CpuRegister(RSP), offset), scratch.AsCpuRegister());
+}
+
+// Slow path entered when Thread::Current()->exception_ is non-null.
+class X86_64ExceptionSlowPath FINAL : public SlowPath {
+ public:
+  explicit X86_64ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
+  void Emit(Assembler* sp_asm) OVERRIDE;
+ private:
+  const size_t stack_adjust_;
+};
+
+void X86_64JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
+  X86_64ExceptionSlowPath* slow = new (__ GetArena()) X86_64ExceptionSlowPath(stack_adjust);
+  __ GetBuffer()->EnqueueSlowPath(slow);
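+  // Compare the thread-local exception slot against null and take the slow path
+  // if an exception is pending.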
+  __ gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true),
+                Immediate(0));
+  __ j(kNotEqual, slow->Entry());
+}
+
+#undef __
+
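+// The slow path undoes any local stack adjustment, loads the pending exception
+// into RDI (the first argument register in the x86-64 ABI), and calls the
+// pDeliverException entrypoint, which does not return.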
+void X86_64ExceptionSlowPath::Emit(Assembler* sasm) {
+  X86_64Assembler* sp_asm = down_cast<X86_64Assembler*>(sasm);
+#define __ sp_asm->
+  __ Bind(&entry_);
+  // Note: the return value is dead
+  if (stack_adjust_ != 0) {  // Fix up the frame.
+    DecreaseFrameSizeImpl(stack_adjust_, sp_asm);
+  }
+  // Pass exception as argument in RDI
+  __ gs()->movq(CpuRegister(RDI),
+                Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true));
+  __ gs()->call(
+      Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pDeliverException), true));
+  // This call should never return.
+  __ int3();
+#undef __
+}
+
+}  // namespace x86_64
+}  // namespace art
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
new file mode 100644
index 0000000..cc4e57c
--- /dev/null
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_X86_64_JNI_MACRO_ASSEMBLER_X86_64_H_
+#define ART_COMPILER_UTILS_X86_64_JNI_MACRO_ASSEMBLER_X86_64_H_
+
+#include <vector>
+
+#include "assembler_x86_64.h"
+#include "base/arena_containers.h"
+#include "base/enums.h"
+#include "base/macros.h"
+#include "offsets.h"
+#include "utils/array_ref.h"
+#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
+
+namespace art {
+namespace x86_64 {
+
+class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assembler,
+                                                                  PointerSize::k64> {
+ public:
+  explicit X86_64JNIMacroAssembler(ArenaAllocator* arena)
+      : JNIMacroAssemblerFwd<X86_64Assembler, PointerSize::k64>(arena) {}
+  virtual ~X86_64JNIMacroAssembler() {}
+
+  //
+  // Overridden common assembler high-level functionality
+  //
+
+  // Emit code that will create an activation on the stack
+  void BuildFrame(size_t frame_size,
+                  ManagedRegister method_reg,
+                  ArrayRef<const ManagedRegister> callee_save_regs,
+                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+
+  // Emit code that will remove an activation from the stack
+  void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
+      OVERRIDE;
+
+  void IncreaseFrameSize(size_t adjust) OVERRIDE;
+  void DecreaseFrameSize(size_t adjust) OVERRIDE;
+
+  // Store routines
+  void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
+  void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
+  void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+
+  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+
+  void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
+                                FrameOffset fr_offs,
+                                ManagedRegister scratch) OVERRIDE;
+
+  void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+
+  void StoreSpanning(FrameOffset dest,
+                     ManagedRegister src,
+                     FrameOffset in_off,
+                     ManagedRegister scratch) OVERRIDE;
+
+  // Load routines
+  void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+
+  void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
+
+  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+
+  void LoadRef(ManagedRegister dest,
+               ManagedRegister base,
+               MemberOffset offs,
+               bool unpoison_reference) OVERRIDE;
+
+  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+
+  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
+
+  // Copying routines
+  void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+
+  void CopyRawPtrFromThread(FrameOffset fr_offs,
+                            ThreadOffset64 thr_offs,
+                            ManagedRegister scratch) OVERRIDE;
+
+  void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+      OVERRIDE;
+
+  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+
+  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+
+  void Copy(FrameOffset dest,
+            ManagedRegister src_base,
+            Offset src_offset,
+            ManagedRegister scratch,
+            size_t size) OVERRIDE;
+
+  void Copy(ManagedRegister dest_base,
+            Offset dest_offset,
+            FrameOffset src,
+            ManagedRegister scratch,
+            size_t size) OVERRIDE;
+
+  void Copy(FrameOffset dest,
+            FrameOffset src_base,
+            Offset src_offset,
+            ManagedRegister scratch,
+            size_t size) OVERRIDE;
+
+  void Copy(ManagedRegister dest,
+            Offset dest_offset,
+            ManagedRegister src,
+            Offset src_offset,
+            ManagedRegister scratch,
+            size_t size) OVERRIDE;
+
+  void Copy(FrameOffset dest,
+            Offset dest_offset,
+            FrameOffset src,
+            Offset src_offset,
+            ManagedRegister scratch,
+            size_t size) OVERRIDE;
+
+  void MemoryBarrier(ManagedRegister) OVERRIDE;
+
+  // Sign extension
+  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+  // Zero extension
+  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+  // Exploit fast access in managed code to Thread::Current()
+  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
+  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
+  // value is null and null_allowed. in_reg holds a possibly stale reference
+  // that can be used to avoid loading the handle scope entry to see if the value is
+  // null.
+  void CreateHandleScopeEntry(ManagedRegister out_reg,
+                              FrameOffset handlescope_offset,
+                              ManagedRegister in_reg,
+                              bool null_allowed) OVERRIDE;
+
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
+  // value is null and null_allowed.
+  void CreateHandleScopeEntry(FrameOffset out_off,
+                              FrameOffset handlescope_offset,
+                              ManagedRegister scratch,
+                              bool null_allowed) OVERRIDE;
+
+  // src holds a handle scope entry (Object**); load the reference it refers to into dst.
+  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+
+  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+  // know that src cannot be null.
+  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
+  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+
+  // Call to address held at [base+offset]
+  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
+  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
+  void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
+
+  // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to an ExceptionSlowPath if it is.
+  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(X86_64JNIMacroAssembler);
+};
+
+}  // namespace x86_64
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_X86_64_JNI_MACRO_ASSEMBLER_X86_64_H_