x86_64: Load double constant using core register

This changes the double constant loading scheme for x86_64 in order
to reduce the number of memory accesses.
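
Instead of loading the value from the literal pool (a memory access)
or assembling it from two 32-bit halves, x86_64 can materialize the
64-bit bit pattern in a core register with a single movabs and move
it into the XMM register. A minimal standalone sketch of the idea
(illustrative only, not the actual compiler code; the function name
is hypothetical):

    #include <cstdint>
    #include <cstring>

    // Produce a double from its raw 64-bit bit pattern. On x86_64 a
    // compiler typically lowers this to:
    //   movabs rax, <bits>   ; 64-bit immediate into a core register
    //   movq   xmm0, rax     ; one reg-to-reg move into the XMM register
    // with no load from a constant pool in memory.
    double LoadDoubleViaCoreReg(int64_t bits) {
      double result;
      std::memcpy(&result, &bits, sizeof(result));
      return result;
    }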

Change-Id: If6ee34b890287ed616ca4ea5d1c13f06a06293a9
Signed-off-by: Alexei Zavjalov <alexei.zavjalov@intel.com>
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index a77d79e..a48613f 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -565,6 +565,7 @@
     bool is_fp = r_dest.IsFloat();
     // TODO: clean this up once we fully recognize 64-bit storage containers.
     if (is_fp) {
+      DCHECK(r_dest.IsDouble());
       if (value == 0) {
         return NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
       } else if (base_of_code_ != nullptr) {
@@ -594,16 +595,25 @@
         Clobber(rl_method.reg);
         store_method_addr_used_ = true;
       } else {
-        if (val_lo == 0) {
-          res = NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
+        if (r_dest.IsPair()) {
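+          // Register pair: assemble the double from its two 32-bit halves.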
+          if (val_lo == 0) {
+            res = NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
+          } else {
+            res = LoadConstantNoClobber(RegStorage::FloatSolo32(low_reg_val), val_lo);
+          }
+          if (val_hi != 0) {
+            RegStorage r_dest_hi = AllocTempDouble();
+            LoadConstantNoClobber(r_dest_hi, val_hi);
+            NewLIR2(kX86PunpckldqRR, low_reg_val, r_dest_hi.GetReg());
+            FreeTemp(r_dest_hi);
+          }
         } else {
-          res = LoadConstantNoClobber(RegStorage::FloatSolo32(low_reg_val), val_lo);
-        }
-        if (val_hi != 0) {
-          RegStorage r_dest_hi = AllocTempDouble();
-          LoadConstantNoClobber(r_dest_hi, val_hi);
-          NewLIR2(kX86PunpckldqRR, low_reg_val, r_dest_hi.GetReg());
-          FreeTemp(r_dest_hi);
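+          // Solo FP register: materialize the immediate in a core register and copy it over.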
+          RegStorage r_temp = AllocTypedTempWide(false, kCoreReg);
+          res = LoadConstantWide(r_temp, value);
+          OpRegCopyWide(r_dest, r_temp);
+          FreeTemp(r_temp);
         }
       }
     } else {
@@ -1008,8 +1018,8 @@
 }
 
 void X86Mir2Lir::AnalyzeDoubleUse(RegLocation use) {
-  // If this is a double literal, we will want it in the literal pool.
-  if (use.is_const) {
+  // If this is a double literal, we will want it in the literal pool on 32-bit platforms.
+  if (use.is_const && !cu_->target64) {
     store_method_addr_ = true;
   }
 }
@@ -1043,12 +1053,18 @@
 }
 
 void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir) {
+  // For now this is only relevant for x86-32.
+  if (cu_->target64) {
+    return;
+  }
+
   uint32_t index = mir->dalvikInsn.vB;
   if (!(mir->optimization_flags & MIR_INLINED)) {
     DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
+    DexFileMethodInliner* method_inliner =
+      cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file);
     InlineMethod method;
-    if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
-        ->IsIntrinsic(index, &method)) {
+    if (method_inliner->IsIntrinsic(index, &method)) {
       switch (method.opcode) {
         case kIntrinsicAbsDouble:
         case kIntrinsicMinMaxDouble: