ART: ARM64: Improve FP IsInfinite intrinsic.

Improve the instruction choice: test the EOR result with TST and an
immediate sign-clearing bitmask instead of comparing it, shifted left
by one, against zero. The change brings a 4.5% performance improvement
on a simple microbenchmark.
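
For reference, a minimal C++ model of what the new sequence computes in
the 64-bit case. This is a sketch only: the function name is
illustrative, and the constants are written out where the patch uses
kPositiveInfinityDouble and MaskLeastSignificant<uint64_t>(63).

    #include <cstdint>
    #include <cstring>

    // value is +/-Inf iff its bits, with the sign bit cleared, equal the
    // +Inf bit pattern; XOR-ing with +Inf turns that into a zero check.
    bool IsInfiniteModel(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));           // MoveFPToInt (FMOV)
      bits ^= UINT64_C(0x7ff0000000000000);               // EOR out, out, infinity
      return (bits & UINT64_C(0x7fffffffffffffff)) == 0;  // TST tst_mask; CSET eq
    }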

Test: test-art-target.
Test: 082-inline-execute.
Change-Id: I5117d9740caf788d7f0170d3f90a3631e6e57c1b
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index bdeb6a4..d88e034 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2829,22 +2829,25 @@
 static void GenIsInfinite(LocationSummary* locations,
                           bool is64bit,
                           MacroAssembler* masm) {
   Operand infinity;
+  Operand tst_mask;
   Register out;
 
   if (is64bit) {
     infinity = kPositiveInfinityDouble;
+    tst_mask = MaskLeastSignificant<uint64_t>(63);
     out = XRegisterFrom(locations->Out());
   } else {
     infinity = kPositiveInfinityFloat;
+    tst_mask = MaskLeastSignificant<uint32_t>(31);
     out = WRegisterFrom(locations->Out());
   }
 
-  const Register zero = vixl::aarch64::Assembler::AppropriateZeroRegFor(out);
-
   MoveFPToInt(locations, is64bit, masm);
+  // Checks whether exponent bits are all 1 and fraction bits are all 0.
   __ Eor(out, out, infinity);
-  // We don't care about the sign bit, so shift left.
-  __ Cmp(zero, Operand(out, LSL, 1));
+  // TST bitmask is used to mask out the sign bit: either 0x7fffffff or 0x7fffffffffffffff
+  // depending on is64bit.
+  __ Tst(out, tst_mask);
   __ Cset(out, eq);
 }
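
The 32-bit path relies on the same identity. A matching sketch, again
with the constants written out where the patch uses
kPositiveInfinityFloat and MaskLeastSignificant<uint32_t>(31) (function
name illustrative):

    #include <cstdint>
    #include <cstring>

    // Same trick for float: XOR with the +Inf bit pattern, clear the sign
    // bit, and check for zero; NaNs keep non-zero fraction bits and fail.
    bool IsInfiniteModelFloat(float value) {
      uint32_t bits;
      std::memcpy(&bits, &value, sizeof(bits));   // MoveFPToInt (FMOV w, s)
      bits ^= UINT32_C(0x7f800000);               // EOR with +Inf bit pattern
      return (bits & UINT32_C(0x7fffffff)) == 0;  // TST tst_mask; CSET eq
    }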