ARM64: Move from FPRegister to VRegister based API

VIXL has had FPRegister as an alias for VRegister for backward
compatibility. In the latest upstream VIXL the alias has been removed
and the FPRegister-based API has become VRegister-based. As AOSP VIXL
is being updated to the latest upstream VIXL, all uses of the
FPRegister-based API must be replaced with the VRegister-based API.
This CL moves ART from the FPRegister-based API to the VRegister-based
API.
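
The rename is mechanical. As a minimal sketch (mirroring the reg_d()
helper changed in assembler_arm64.h below; assumes the usual VIXL
aarch64 headers are included):

  // Before: compiles only while VIXL still provides the FPRegister alias.
  static vixl::aarch64::FPRegister reg_d(int code) {
    return vixl::aarch64::FPRegister::GetDRegFromCode(code);
  }
  // After: the same helper spelled with the canonical VRegister type.
  static vixl::aarch64::VRegister reg_d(int code) {
    return vixl::aarch64::VRegister::GetDRegFromCode(code);
  }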

Test: test.py --host --optimizing --jit --gtest
Test: test.py --target --optimizing --jit
Test: run-gtests.sh
Change-Id: I12541c16d0557835ea19c8667ae18c6601359b05
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 3a2988f..10397e8 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1183,7 +1183,7 @@
 CPURegList CodeGeneratorARM64::GetFramePreservedFPRegisters() const {
   DCHECK(ArtVixlRegCodeCoherentForRegSet(0, 0, fpu_spill_mask_,
                                          GetNumberOfFloatingPointRegisters()));
-  return CPURegList(CPURegister::kFPRegister, kDRegSize,
+  return CPURegList(CPURegister::kVRegister, kDRegSize,
                     fpu_spill_mask_);
 }
 
@@ -1316,10 +1316,10 @@
   } else if (constant->IsNullConstant()) {
     __ Mov(Register(destination), 0);
   } else if (constant->IsFloatConstant()) {
-    __ Fmov(FPRegister(destination), constant->AsFloatConstant()->GetValue());
+    __ Fmov(VRegister(destination), constant->AsFloatConstant()->GetValue());
   } else {
     DCHECK(constant->IsDoubleConstant());
-    __ Fmov(FPRegister(destination), constant->AsDoubleConstant()->GetValue());
+    __ Fmov(VRegister(destination), constant->AsDoubleConstant()->GetValue());
   }
 }
 
@@ -1343,7 +1343,7 @@
 static CPURegister AcquireFPOrCoreCPURegisterOfSize(vixl::aarch64::MacroAssembler* masm,
                                                     vixl::aarch64::UseScratchRegisterScope* temps,
                                                     int size_in_bits) {
-  return masm->GetScratchFPRegisterList()->IsEmpty()
+  return masm->GetScratchVRegisterList()->IsEmpty()
       ? CPURegister(temps->AcquireRegisterOfSize(size_in_bits))
       : CPURegister(temps->AcquireVRegisterOfSize(size_in_bits));
 }
@@ -1411,7 +1411,7 @@
         if (GetGraph()->HasSIMD()) {
           __ Mov(QRegisterFrom(destination), QRegisterFrom(source));
         } else {
-          __ Fmov(FPRegister(dst), FPRegisterFrom(source, dst_type));
+          __ Fmov(VRegister(dst), FPRegisterFrom(source, dst_type));
         }
       }
     }
@@ -1421,14 +1421,14 @@
     } else {
       DCHECK(source.IsSIMDStackSlot());
       UseScratchRegisterScope temps(GetVIXLAssembler());
-      if (GetVIXLAssembler()->GetScratchFPRegisterList()->IsEmpty()) {
+      if (GetVIXLAssembler()->GetScratchVRegisterList()->IsEmpty()) {
         Register temp = temps.AcquireX();
         __ Ldr(temp, MemOperand(sp, source.GetStackIndex()));
         __ Str(temp, MemOperand(sp, destination.GetStackIndex()));
         __ Ldr(temp, MemOperand(sp, source.GetStackIndex() + kArm64WordSize));
         __ Str(temp, MemOperand(sp, destination.GetStackIndex() + kArm64WordSize));
       } else {
-        FPRegister temp = temps.AcquireVRegisterOfSize(kQRegSize);
+        VRegister temp = temps.AcquireVRegisterOfSize(kQRegSize);
         __ Ldr(temp, StackOperandFrom(source));
         __ Str(temp, StackOperandFrom(destination));
       }
@@ -1602,7 +1602,7 @@
             MaybeRecordImplicitNullCheck(instruction);
           }
         }
-        __ Fmov(FPRegister(dst), temp);
+        __ Fmov(VRegister(dst), temp);
         break;
       }
       case DataType::Type::kUint32:
@@ -1702,7 +1702,7 @@
       } else {
         DCHECK(src.IsFPRegister());
         temp_src = src.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
-        __ Fmov(temp_src, FPRegister(src));
+        __ Fmov(temp_src, VRegister(src));
       }
       {
         ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
@@ -2057,9 +2057,9 @@
     }
     case DataType::Type::kFloat32:
     case DataType::Type::kFloat64: {
-      FPRegister dst = OutputFPRegister(instr);
-      FPRegister lhs = InputFPRegisterAt(instr, 0);
-      FPRegister rhs = InputFPRegisterAt(instr, 1);
+      VRegister dst = OutputFPRegister(instr);
+      VRegister lhs = InputFPRegisterAt(instr, 0);
+      VRegister rhs = InputFPRegisterAt(instr, 1);
       if (instr->IsAdd()) {
         __ Fadd(dst, lhs, rhs);
       } else if (instr->IsSub()) {
@@ -2805,7 +2805,7 @@
 }
 
 void InstructionCodeGeneratorARM64::GenerateFcmp(HInstruction* instruction) {
-  FPRegister lhs_reg = InputFPRegisterAt(instruction, 0);
+  VRegister lhs_reg = InputFPRegisterAt(instruction, 0);
   Location rhs_loc = instruction->GetLocations()->InAt(1);
   if (rhs_loc.IsConstant()) {
     // 0.0 is the only immediate that can be encoded directly in
@@ -5428,8 +5428,8 @@
     }
     case DataType::Type::kFloat32:
     case DataType::Type::kFloat64: {
-      FPRegister in_reg = InputFPRegisterAt(abs, 0);
-      FPRegister out_reg = OutputFPRegister(abs);
+      VRegister in_reg = InputFPRegisterAt(abs, 0);
+      VRegister out_reg = OutputFPRegister(abs);
       __ Fabs(out_reg, in_reg);
       break;
     }
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 1a9b700..a669094 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -63,7 +63,7 @@
   vixl::aarch64::x7
 };
 static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
-static const vixl::aarch64::FPRegister kParameterFPRegisters[] = {
+static const vixl::aarch64::VRegister kParameterFPRegisters[] = {
   vixl::aarch64::d0,
   vixl::aarch64::d1,
   vixl::aarch64::d2,
@@ -111,7 +111,7 @@
          ? vixl::aarch64::x21.GetCode()
          : vixl::aarch64::x20.GetCode()),
      vixl::aarch64::x30.GetCode());
-const vixl::aarch64::CPURegList callee_saved_fp_registers(vixl::aarch64::CPURegister::kFPRegister,
+const vixl::aarch64::CPURegList callee_saved_fp_registers(vixl::aarch64::CPURegister::kVRegister,
                                                           vixl::aarch64::kDRegSize,
                                                           vixl::aarch64::d8.GetCode(),
                                                           vixl::aarch64::d15.GetCode());
@@ -162,7 +162,7 @@
       vixl::aarch64::x7 };
 static constexpr size_t kRuntimeParameterCoreRegistersLength =
     arraysize(kRuntimeParameterCoreRegisters);
-static const vixl::aarch64::FPRegister kRuntimeParameterFpuRegisters[] =
+static const vixl::aarch64::VRegister kRuntimeParameterFpuRegisters[] =
     { vixl::aarch64::d0,
       vixl::aarch64::d1,
       vixl::aarch64::d2,
@@ -175,7 +175,7 @@
     arraysize(kRuntimeParameterCoreRegisters);
 
 class InvokeRuntimeCallingConvention : public CallingConvention<vixl::aarch64::Register,
-                                                                vixl::aarch64::FPRegister> {
+                                                                vixl::aarch64::VRegister> {
  public:
   static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
 
@@ -193,7 +193,7 @@
 };
 
 class InvokeDexCallingConvention : public CallingConvention<vixl::aarch64::Register,
-                                                            vixl::aarch64::FPRegister> {
+                                                            vixl::aarch64::VRegister> {
  public:
   InvokeDexCallingConvention()
       : CallingConvention(kParameterCoreRegisters,
@@ -480,7 +480,7 @@
   // requirements, etc.). This also facilitates our task as all other registers
   // can easily be mapped via to or from their type and index or code.
   static const int kNumberOfAllocatableRegisters = vixl::aarch64::kNumberOfRegisters - 1;
-  static const int kNumberOfAllocatableFPRegisters = vixl::aarch64::kNumberOfFPRegisters;
+  static const int kNumberOfAllocatableFPRegisters = vixl::aarch64::kNumberOfVRegisters;
   static constexpr int kNumberOfAllocatableRegisterPairs = 0;
 
   void DumpCoreRegister(std::ostream& stream, int reg) const override;
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 5556f16..9c80f32 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -87,36 +87,36 @@
                       instr->InputAt(input_index)->GetType());
 }
 
-inline vixl::aarch64::FPRegister DRegisterFrom(Location location) {
+inline vixl::aarch64::VRegister DRegisterFrom(Location location) {
   DCHECK(location.IsFpuRegister()) << location;
-  return vixl::aarch64::FPRegister::GetDRegFromCode(location.reg());
+  return vixl::aarch64::VRegister::GetDRegFromCode(location.reg());
 }
 
-inline vixl::aarch64::FPRegister QRegisterFrom(Location location) {
+inline vixl::aarch64::VRegister QRegisterFrom(Location location) {
   DCHECK(location.IsFpuRegister()) << location;
-  return vixl::aarch64::FPRegister::GetQRegFromCode(location.reg());
+  return vixl::aarch64::VRegister::GetQRegFromCode(location.reg());
 }
 
-inline vixl::aarch64::FPRegister VRegisterFrom(Location location) {
+inline vixl::aarch64::VRegister VRegisterFrom(Location location) {
   DCHECK(location.IsFpuRegister()) << location;
-  return vixl::aarch64::FPRegister::GetVRegFromCode(location.reg());
+  return vixl::aarch64::VRegister::GetVRegFromCode(location.reg());
 }
 
-inline vixl::aarch64::FPRegister SRegisterFrom(Location location) {
+inline vixl::aarch64::VRegister SRegisterFrom(Location location) {
   DCHECK(location.IsFpuRegister()) << location;
-  return vixl::aarch64::FPRegister::GetSRegFromCode(location.reg());
+  return vixl::aarch64::VRegister::GetSRegFromCode(location.reg());
 }
 
-inline vixl::aarch64::FPRegister FPRegisterFrom(Location location, DataType::Type type) {
+inline vixl::aarch64::VRegister FPRegisterFrom(Location location, DataType::Type type) {
   DCHECK(DataType::IsFloatingPointType(type)) << type;
   return type == DataType::Type::kFloat64 ? DRegisterFrom(location) : SRegisterFrom(location);
 }
 
-inline vixl::aarch64::FPRegister OutputFPRegister(HInstruction* instr) {
+inline vixl::aarch64::VRegister OutputFPRegister(HInstruction* instr) {
   return FPRegisterFrom(instr->GetLocations()->Out(), instr->GetType());
 }
 
-inline vixl::aarch64::FPRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
+inline vixl::aarch64::VRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
   return FPRegisterFrom(instr->GetLocations()->InAt(input_index),
                         instr->InputAt(input_index)->GetType());
 }
@@ -201,7 +201,7 @@
   return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.GetCode()));
 }
 
-inline Location LocationFrom(const vixl::aarch64::FPRegister& fpreg) {
+inline Location LocationFrom(const vixl::aarch64::VRegister& fpreg) {
   return Location::FpuRegisterLocation(fpreg.GetCode());
 }
 
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 6a666c9..c48aaf5 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -90,8 +90,8 @@
     Register res_reg = RegisterFrom(ARM64ReturnLocation(type), type);
     __ Mov(trg_reg, res_reg, kDiscardForSameWReg);
   } else {
-    FPRegister trg_reg = FPRegisterFrom(trg, type);
-    FPRegister res_reg = FPRegisterFrom(ARM64ReturnLocation(type), type);
+    VRegister trg_reg = FPRegisterFrom(trg, type);
+    VRegister res_reg = FPRegisterFrom(ARM64ReturnLocation(type), type);
     __ Fmov(trg_reg, res_reg);
   }
 }
@@ -435,7 +435,7 @@
 
   Register src = InputRegisterAt(instr, 0);
   Register dst = RegisterFrom(instr->GetLocations()->Out(), type);
-  FPRegister fpr = (type == DataType::Type::kInt64) ? temps.AcquireD() : temps.AcquireS();
+  VRegister fpr = (type == DataType::Type::kInt64) ? temps.AcquireD() : temps.AcquireS();
 
   __ Fmov(fpr, src);
   __ Cnt(fpr.V8B(), fpr.V8B());
@@ -591,8 +591,8 @@
   // For example, FCVTPS(-1.9) = -1 and FCVTPS(1.1) = 2.
   // If we were using this instruction, for most inputs, more handling code would be needed.
   LocationSummary* l = invoke->GetLocations();
-  FPRegister in_reg = is_double ? DRegisterFrom(l->InAt(0)) : SRegisterFrom(l->InAt(0));
-  FPRegister tmp_fp = is_double ? DRegisterFrom(l->GetTemp(0)) : SRegisterFrom(l->GetTemp(0));
+  VRegister in_reg = is_double ? DRegisterFrom(l->InAt(0)) : SRegisterFrom(l->InAt(0));
+  VRegister tmp_fp = is_double ? DRegisterFrom(l->GetTemp(0)) : SRegisterFrom(l->GetTemp(0));
   Register out_reg = is_double ? XRegisterFrom(l->Out()) : WRegisterFrom(l->Out());
   vixl::aarch64::Label done;
 
@@ -2015,7 +2015,7 @@
 
   if (mirror::kUseStringCompression) {
     // For compressed strings, acquire a SIMD temporary register.
-    FPRegister vtmp1 = temps.AcquireVRegisterOfSize(kQRegSize);
+    VRegister vtmp1 = temps.AcquireVRegisterOfSize(kQRegSize);
     const size_t c_char_size = DataType::Size(DataType::Type::kInt8);
     DCHECK_EQ(c_char_size, 1u);
     __ Bind(&compressed_string_preloop);
@@ -3210,8 +3210,8 @@
   MacroAssembler* masm = GetVIXLAssembler();
   UseScratchRegisterScope scratch_scope(masm);
   Register bits = InputRegisterAt(invoke, 0);
-  FPRegister out = SRegisterFrom(invoke->GetLocations()->Out());
-  FPRegister half = scratch_scope.AcquireH();
+  VRegister out = SRegisterFrom(invoke->GetLocations()->Out());
+  VRegister half = scratch_scope.AcquireH();
   __ Fmov(half, bits);  // ARMv8.2
   __ Fcvt(out, half);
 }
@@ -3232,8 +3232,8 @@
   DCHECK(codegen_->GetInstructionSetFeatures().HasFP16());
   MacroAssembler* masm = GetVIXLAssembler();
   UseScratchRegisterScope scratch_scope(masm);
-  FPRegister in = SRegisterFrom(invoke->GetLocations()->InAt(0));
-  FPRegister half = scratch_scope.AcquireH();
+  VRegister in = SRegisterFrom(invoke->GetLocations()->InAt(0));
+  VRegister half = scratch_scope.AcquireH();
   Register out = WRegisterFrom(invoke->GetLocations()->Out());
   __ Fcvt(half, in);
   __ Fmov(out, half);
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 594c6b4..fe2f176 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -150,12 +150,12 @@
     return vixl::aarch64::Register::GetWRegFromCode(code);
   }
 
-  static vixl::aarch64::FPRegister reg_d(int code) {
-    return vixl::aarch64::FPRegister::GetDRegFromCode(code);
+  static vixl::aarch64::VRegister reg_d(int code) {
+    return vixl::aarch64::VRegister::GetDRegFromCode(code);
   }
 
-  static vixl::aarch64::FPRegister reg_s(int code) {
-    return vixl::aarch64::FPRegister::GetSRegFromCode(code);
+  static vixl::aarch64::VRegister reg_s(int code) {
+    return vixl::aarch64::VRegister::GetSRegFromCode(code);
   }
 
  private:
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
index d6ce033..0eab49f 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.cc
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -689,7 +689,7 @@
                                         const ManagedRegisterEntrySpills& entry_spills) {
   // Setup VIXL CPURegList for callee-saves.
   CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
-  CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
+  CPURegList fp_reg_list(CPURegister::kVRegister, kDRegSize, 0);
   for (auto r : callee_save_regs) {
     Arm64ManagedRegister reg = r.AsArm64();
     if (reg.IsXRegister()) {
@@ -745,7 +745,7 @@
                                          bool may_suspend) {
   // Setup VIXL CPURegList for callee-saves.
   CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
-  CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
+  CPURegList fp_reg_list(CPURegister::kVRegister, kDRegSize, 0);
   for (auto r : callee_save_regs) {
     Arm64ManagedRegister reg = r.AsArm64();
     if (reg.IsXRegister()) {