Update VR use counting for promotion

For 64-bit targets it makes sense to count VR uses for int and long
together, because they share the same core register.
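
A minimal sketch of the counting rule in isolation (the helper name, the
flat counts array and the parameters below are illustrative only, not the
actual CountRefs()/RefCounts API in ralloc_util.cc):

    // Hypothetical helper mirroring the core-register branch of CountRefs().
    void CountCoreVRUse(bool wide, bool wide_gprs_are_aliases,
                        int p_map_idx, int use_count,
                        int* counts, int* ssa_reg_idx) {
      if (wide && wide_gprs_are_aliases) {
        // 64-bit: both halves of the long map to one core register, so skip
        // the high VR half and let the long share the counter used by ints.
        ++*ssa_reg_idx;
      }
      counts[p_map_idx] += use_count;
    }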

Change-Id: Ie8676ece12c928d090da2465dfb4de4e91411920
Signed-off-by: Serguei Katkov <serguei.i.katkov@intel.com>
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 43db24c..d4b0de7 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -198,6 +198,13 @@
     RegStorage AllocPreservedDouble(int s_reg);
     RegStorage AllocPreservedSingle(int s_reg);
 
+    bool WideGPRsAreAliases() OVERRIDE {
+      return false;  // Wide GPRs are formed by pairing.
+    }
+    bool WideFPRsAreAliases() OVERRIDE {
+      return false;  // Wide FPRs are formed by pairing.
+    }
+
   private:
     void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1, int64_t val,
                                   ConditionCode ccode);
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 7d75da9..060509b 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -298,6 +298,13 @@
                            bool skip_this);
     InToRegStorageMapping in_to_reg_storage_mapping_;
 
+    bool WideGPRsAreAliases() OVERRIDE {
+      return true;  // 64b architecture.
+    }
+    bool WideFPRsAreAliases() OVERRIDE {
+      return true;  // 64b architecture.
+    }
+
   private:
     /**
      * @brief Given register xNN (dNN), returns register wNN (sNN).
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 025f97a..2c33377 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -192,6 +192,13 @@
     bool InexpensiveConstantLong(int64_t value);
     bool InexpensiveConstantDouble(int64_t value);
 
+    bool WideGPRsAreAliases() OVERRIDE {
+      return false;  // Wide GPRs are formed by pairing.
+    }
+    bool WideFPRsAreAliases() OVERRIDE {
+      return false;  // Wide FPRs are formed by pairing.
+    }
+
   private:
     void ConvertShortToLongBranch(LIR* lir);
     RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 87509b6..95781fb 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1635,6 +1635,16 @@
      */
     virtual void GenConst(RegLocation rl_dest, int value);
 
+    /**
+     * Returns true iff wide GPRs are just different views on the same physical register.
+     */
+    virtual bool WideGPRsAreAliases() = 0;
+
+    /**
+     * Returns true iff wide FPRs are just different views on the same physical register.
+     */
+    virtual bool WideFPRsAreAliases() = 0;
+
     enum class WidenessCheck {  // private
       kIgnoreWide,
       kCheckWide,
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index e8fc919..fa1c36e 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -1157,20 +1157,23 @@
     int use_count = mir_graph_->GetUseCount(i);
     if (loc.fp) {
       if (loc.wide) {
-        // Treat doubles as a unit, using upper half of fp_counts array.
-        counts[p_map_idx + num_regs].count += use_count;
+        if (WideFPRsAreAliases()) {
+          // Floats and doubles can be counted together.
+          counts[p_map_idx].count += use_count;
+        } else {
+          // Treat doubles as a unit, using upper half of fp_counts array.
+          counts[p_map_idx + num_regs].count += use_count;
+        }
         i++;
       } else {
         counts[p_map_idx].count += use_count;
       }
     } else if (!IsInexpensiveConstant(loc)) {
-      if (loc.wide && cu_->target64) {
-        // Treat long as a unit, using upper half of core_counts array.
-        counts[p_map_idx + num_regs].count += use_count;
+      if (loc.wide && WideGPRsAreAliases()) {
+        // Ints and longs can be counted together.
         i++;
-      } else {
-        counts[p_map_idx].count += use_count;
       }
+      counts[p_map_idx].count += use_count;
     }
   }
 }
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index b0c54e8..6655a59 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -933,6 +933,13 @@
 
   InToRegStorageMapping in_to_reg_storage_mapping_;
 
+  bool WideGPRsAreAliases() OVERRIDE {
+    return cu_->target64;  // On 64b, we have 64b GPRs.
+  }
+  bool WideFPRsAreAliases() OVERRIDE {
+    return true;  // xmm registers have 64b views even on x86.
+  }
+
  private:
   // The number of vector registers [0..N] reserved by a call to ReserveVectorRegisters
   int num_reserved_vector_regs_;