ART: Fix GenSelect for ARM64

Add CSINV and replace CSNEG in GenSelect.
Some tests in 083-compiler-regressions were failing because CSNEG
was used where CSINV was intended. CSNEG on xzr negates zero and
yields 0, whereas CSINV inverts the bits and yields -1, which was
the intention.
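
A minimal C++ model of the two instructions' semantics (illustrative
only; csneg()/csinv() below are hand-written helpers, not ART code):

    #include <cassert>
    #include <cstdint>

    // CSNEG Xd, Xn, Xm, cond : Xd = cond ? Xn : -Xm
    // CSINV Xd, Xn, Xm, cond : Xd = cond ? Xn : ~Xm
    static int64_t csneg(bool cond, int64_t n, int64_t m) { return cond ? n : -m; }
    static int64_t csinv(bool cond, int64_t n, int64_t m) { return cond ? n : ~m; }

    int main() {
      const int64_t xzr = 0;                 // the zero register always reads as 0
      // GenSelect wants to materialize 0 (cond true) or -1 (cond false):
      assert(csneg(false, xzr, xzr) == 0);   // -0 == 0: the observed bug
      assert(csinv(false, xzr, xzr) == -1);  // ~0 == -1: the intended value
      return 0;
    }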

Change-Id: I60557e34483f98310f7d33f18d8db203fba6e78f
Signed-off-by: Stuart Monteith <stuart.monteith@arm.com>
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index 5077d11..7490646 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -245,6 +245,7 @@
   kA64Cmp3RdT,       // cmp [01110001] shift[23-22] imm_12[21-10] rn[9-5] [11111].
  kA64Csel4rrrc,     // csel [s0011010100] rm[20-16] cond[15-12] [00] rn[9-5] rd[4-0].
   kA64Csinc4rrrc,    // csinc [s0011010100] rm[20-16] cond[15-12] [01] rn[9-5] rd[4-0].
+  kA64Csinv4rrrc,    // csinv [s1011010100] rm[20-16] cond[15-12] [00] rn[9-5] rd[4-0].
   kA64Csneg4rrrc,    // csneg [s1011010100] rm[20-16] cond[15-12] [01] rn[9-5] rd[4-0].
   kA64Dmb1B,         // dmb [11010101000000110011] CRm[11-8] [10111111].
   kA64Eor3Rrl,       // eor [s10100100] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index e10f7cf..3bfe9a1 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -204,6 +204,10 @@
                  kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
                  kFmtBitBlt, 15, 12, IS_QUAD_OP | REG_DEF0_USE12 | USES_CCODES,
                  "csinc", "!0r, !1r, !2r, !3c", kFixupNone),
+    ENCODING_MAP(WIDE(kA64Csinv4rrrc), SF_VARIANTS(0x5a800000),
+                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
+                 kFmtBitBlt, 15, 12, IS_QUAD_OP | REG_DEF0_USE12 | USES_CCODES,
+                 "csinv", "!0r, !1r, !2r, !3c", kFixupNone),
     ENCODING_MAP(WIDE(kA64Csneg4rrrc), SF_VARIANTS(0x5a800400),
                  kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
                  kFmtBitBlt, 15, 12, IS_QUAD_OP | REG_DEF0_USE12 | USES_CCODES,
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 3ee3e2e..a7ca685 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -131,7 +131,7 @@
       }
 
       left_op = right_op = zero_reg;
-      opcode = is_wide ? WIDE(kA64Csneg4rrrc) : kA64Csneg4rrrc;
+      opcode = is_wide ? WIDE(kA64Csinv4rrrc) : kA64Csinv4rrrc;
     } else if (true_val == 0 || false_val == 0) {
       // Csel half cheap based on wzr.
       rl_result = EvalLoc(rl_dest, result_reg_class, true);
@@ -167,7 +167,7 @@
       LoadConstantNoClobber(rl_result.reg, true_val == 0xFFFFFFFF ? false_val : true_val);
       left_op = rl_result.reg.GetReg();
       right_op = zero_reg;
-      opcode = is_wide ? WIDE(kA64Csneg4rrrc) : kA64Csneg4rrrc;
+      opcode = is_wide ? WIDE(kA64Csinv4rrrc) : kA64Csinv4rrrc;
     } else {
       // Csel. The rest. Use rl_result and a temp.
       // TODO: To minimize the constants being loaded, check whether one can be inexpensively