ART: Address issues with kIntrinsicMinMaxDouble for x86

This patch fixes the following issues:
- Makes sure that invoke-static/range is considered by the analysis
path that decides whether the base of code register is needed.
- Invalidates the code pointer register in the intrinsic
implementations of min/max for FP, since the generated code has a
diamond shape and Quick does not correctly handle control flow
merges (see the sketch after this list).
- Reverts the clobbering of the base of code register in the constant
loading path, since it reduces performance in linear code that
needs the register.
- Ensures that no assumption is made about whether the base of code
register exists, such as on 64-bit where the rip register may be used.
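
For reference, below is a minimal standalone C++ sketch (not part of
the patch; the helper name is illustrative) of the Math.min(double,
double) semantics the intrinsic emits. The NaN case is what produces
the diamond shape: only the NaN arm loads the canonical quiet NaN
(0x7ff8000000000000) from the constant pool, so only that arm may use
the base of code register.

    #include <cmath>
    #include <cstdio>
    #include <limits>

    // Sketch of the semantics only; the real intrinsic additionally
    // distinguishes +0.0 from -0.0 when the operands compare equal.
    static double IntrinsicMinDouble(double a, double b) {
      if (std::isnan(a) || std::isnan(b)) {
        // NaN arm of the diamond: the generated code loads the
        // canonical quiet NaN from the constant pool here.
        return std::numeric_limits<double>::quiet_NaN();
      }
      // Fast arm: a plain compare, no constant pool access.
      return (a < b) ? a : b;
    }

    int main() {
      std::printf("%f\n", IntrinsicMinDouble(1.0, 2.0));          // 1.000000
      std::printf("%f\n", IntrinsicMinDouble(1.0, std::nan(""))); // nan
      return 0;
    }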

Change-Id: I96463ae1197e5cfa2a8bd3571163b38fb338a340
Signed-off-by: Razvan A Lupusoru <razvan.a.lupusoru@intel.com>
Signed-off-by: Alexei Zavjalov <alexei.zavjalov@intel.com>
Signed-off-by: Haitao Feng <haitao.feng@intel.com>
Signed-off-by: Serguei Katkov <serguei.i.katkov@intel.com>
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 2920fb6..21d1a5c 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -730,6 +730,25 @@
     // Handle NaN.
     branch_nan->target = NewLIR0(kPseudoTargetLabel);
     LoadConstantWide(rl_result.reg, INT64_C(0x7ff8000000000000));
+
+    // The base_of_code_ compiler temp is non-null when it has been
+    // reserved to allow data accesses relative to the method start.
+    if (base_of_code_ != nullptr) {
+      // Loading from the constant pool may have used the base of code register.
+      // However, the code generated here has a diamond shape, and not all
+      // paths load the base of code register. Therefore, we clobber it so that
+      // the temp caching system does not consider it live at the merge point.
+      RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
+      if (rl_method.wide) {
+        rl_method = UpdateLocWide(rl_method);
+      } else {
+        rl_method = UpdateLoc(rl_method);
+      }
+      if (rl_method.location == kLocPhysReg) {
+        Clobber(rl_method.reg);
+      }
+    }
+
     LIR* branch_exit_nan = NewLIR1(kX86Jmp8, 0);
     // Handle Min/Max. Copy greater/lesser value from src2.
     branch_cond1->target = NewLIR0(kPseudoTargetLabel);
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 022fd80..4f65a0f 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -592,7 +592,6 @@
                            kDouble, kNotVolatile);
         res->target = data_target;
         res->flags.fixup = kFixupLoad;
-        Clobber(rl_method.reg);
         store_method_addr_used_ = true;
       } else {
         if (r_dest.IsPair()) {
@@ -1025,6 +1024,7 @@
       store_method_addr_ = true;
       break;
     case Instruction::INVOKE_STATIC:
+    case Instruction::INVOKE_STATIC_RANGE:
       AnalyzeInvokeStatic(opcode, bb, mir);
       break;
     default:
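
For reviewers unfamiliar with Quick's temp caching, here is a small
standalone C++ toy model (all names are hypothetical, not Quick APIs)
of why a value cached on only one arm of a diamond must be clobbered
before the merge point:

    #include <cassert>
    #include <optional>

    // Toy model: Quick tracks facts like "this register currently
    // holds this SSA value" while emitting code linearly. Such a fact
    // is only sound at a point if every incoming path establishes it.
    struct RegCache {
      std::optional<int> cached_sreg;  // SSA value the register holds
    };

    int main() {
      RegCache entry;               // state at the conditional branch
      RegCache nan_arm = entry;     // arm that loads via base of code
      nan_arm.cached_sreg = 42;     // caches "register holds method ptr"
      RegCache fast_arm = entry;    // arm with no constant pool access

      // Quick generates code linearly and does not re-verify cached
      // state at merge points, so a fact established on only one arm
      // must be discarded explicitly before the paths rejoin.
      RegCache merged = nan_arm;
      if (nan_arm.cached_sreg != fast_arm.cached_sreg) {
        merged.cached_sreg.reset();  // models the Clobber() this patch adds
      }
      assert(!merged.cached_sreg.has_value());
      return 0;
    }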