AArch64: address some outstanding TODOs.

Fix outdated comments in arm64_lir.h. Rename the Arm*-prefixed types to
A64* and replace the floating-point alias macros FWIDE, FUNWIDE, ...
with the plain WIDE, UNWIDE, ... they aliased.
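
As an illustration (not part of the patch), a minimal standalone sketch
of the macro pattern after this change; the opcode and bit values below
are simplified stand-ins, not the real definitions from arm64_lir.h:

    // Sketch only: illustrative enum values, not the actual encodings.
    #include <cstdio>

    enum A64Opcode {
      kA64Fmov2ff = 1,    // Example opcode; the wide bit selects the d variant.
      kA64Wide = 0x1000,  // Illustrative flag bit marking the 64-bit variant.
    };

    // Same shape as the macros in arm64_lir.h after this change.
    #define WIDE(op) ((A64Opcode)((op) | kA64Wide))
    #define UNWIDE(op) ((A64Opcode)((op) & ~kA64Wide))
    #define IS_WIDE(op) (((op) & kA64Wide) != 0)

    int main() {
      // FP code now uses the same macros as core code (no FWIDE/FUNWIDE):
      bool is_double = true;
      A64Opcode op = is_double ? WIDE(kA64Fmov2ff) : kA64Fmov2ff;
      std::printf("wide=%d base=%d\n", IS_WIDE(op), UNWIDE(op));
      return 0;
    }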

Change-Id: I4900902e28463ea5e00e34ea40ddfc15704c0bfa
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index d001dd6..ab71921 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -22,77 +22,75 @@
 namespace art {
 
 /*
- * TODO(Arm64): the comments below are outdated.
- *
  * Runtime register usage conventions.
  *
- * r0-r3: Argument registers in both Dalvik and C/C++ conventions.
- *        However, for Dalvik->Dalvik calls we'll pass the target's Method*
- *        pointer in r0 as a hidden arg0. Otherwise used as codegen scratch
- *        registers.
- * r0-r1: As in C/C++ r0 is 32-bit return register and r0/r1 is 64-bit
- * r4   : (rA64_SUSPEND) is reserved (suspend check/debugger assist)
- * r5   : Callee save (promotion target)
- * r6   : Callee save (promotion target)
- * r7   : Callee save (promotion target)
- * r8   : Callee save (promotion target)
- * r9   : (rA64_SELF) is reserved (pointer to thread-local storage)
- * r10  : Callee save (promotion target)
- * r11  : Callee save (promotion target)
- * r12  : Scratch, may be trashed by linkage stubs
- * r13  : (sp) is reserved
- * r14  : (lr) is reserved
- * r15  : (pc) is reserved
+ * r0     : As in C/C++, w0 is the 32-bit return register and x0 the 64-bit one.
+ * r0-r7  : Argument registers in both Dalvik and C/C++ conventions.
+ *          However, for Dalvik->Dalvik calls we'll pass the target's Method*
+ *          pointer in x0 as a hidden arg0. Otherwise used as codegen scratch
+ *          registers.
+ * r8-r15 : Caller save registers (used as temporary registers).
+ * r16-r17: Also known as ip0-ip1, respectively. Used as scratch registers
+ *          by the linker, trampolines and other stubs (the backend also
+ *          uses these as temporary registers).
+ * r18    : (rxSELF) is reserved (pointer to thread-local storage).
+ * r19    : (rwSUSPEND) is reserved (suspend check/debugger assist).
+ * r20-r29: Callee save registers (promotion targets).
+ * r30    : (lr) is reserved (the link register).
+ * rsp    : (sp) is reserved (the stack pointer).
+ * rzr    : (zr) is reserved (the zero register).
  *
- * 5 core temps that codegen can use (r0, r1, r2, r3, r12)
- * 7 core registers that can be used for promotion
+ * 18 core temps that codegen can use (r0-r17).
+ * 10 core registers that can be used for promotion.
  *
- * Floating pointer registers
- * s0-s31
- * d0-d15, where d0={s0,s1}, d1={s2,s3}, ... , d15={s30,s31}
+ * Floating-point registers
+ * v0-v31
  *
- * s16-s31 (d8-d15) preserved across C calls
- * s0-s15 (d0-d7) trashed across C calls
+ * v0     : s0 is the return register for singles (32-bit) and d0 for doubles (64-bit).
+ *          This is analogous to the C/C++ (hard-float) calling convention.
+ * v0-v7  : Floating-point argument registers in both Dalvik and C/C++ conventions.
+ *          Also used as temporary and codegen scratch registers.
  *
- * s0-s15/d0-d7 used as codegen temp/scratch
- * s16-s31/d8-d31 can be used for promotion.
+ * v0-v7 and v16-v31 : trashed across C calls.
+ * v8-v15 : bottom 64 bits preserved across C calls (d8-d15 are preserved).
  *
- * Calling convention
- *     o On a call to a Dalvik method, pass target's Method* in r0
- *     o r1-r3 will be used for up to the first 3 words of arguments
- *     o Arguments past the first 3 words will be placed in appropriate
+ * v16-v31: Used as codegen temp/scratch.
+ * v8-v15 : Can be used for promotion.
+ *
+ * Calling convention (hard-float)
+ *     o On a call to a Dalvik method, pass target's Method* in x0
+ *     o r1-r7, v0-v7 will be used for the first 7+8 arguments
+ *     o Arguments which cannot be put in registers are placed in appropriate
  *       out slots by the caller.
- *     o If a 64-bit argument would span the register/memory argument
- *       boundary, it will instead be fully passed in the frame.
  *     o Maintain a 16-byte stack alignment
  *
  *  Stack frame diagram (stack grows down, higher addresses at top):
  *
- * +------------------------+
- * | IN[ins-1]              |  {Note: resides in caller's frame}
- * |       .                |
- * | IN[0]                  |
- * | caller's Method*       |
- * +========================+  {Note: start of callee's frame}
- * | spill region           |  {variable sized - will include lr if non-leaf.}
- * +------------------------+
- * | ...filler word...      |  {Note: used as 2nd word of V[locals-1] if long]
- * +------------------------+
- * | V[locals-1]            |
- * | V[locals-2]            |
- * |      .                 |
- * |      .                 |
- * | V[1]                   |
- * | V[0]                   |
- * +------------------------+
- * |  0 to 3 words padding  |
- * +------------------------+
- * | OUT[outs-1]            |
- * | OUT[outs-2]            |
- * |       .                |
- * | OUT[0]                 |
- * | cur_method*            | <<== sp w/ 16-byte alignment
- * +========================+
+ * +--------------------------------------------+
+ * | IN[ins-1]                                  |  {Note: resides in caller's frame}
+ * |       .                                    |
+ * | IN[0]                                      |
+ * | caller's method (StackReference<ArtMethod>)|  {This is a compressed (4-byte) reference}
+ * +============================================+  {Note: start of callee's frame}
+ * | spill region                               |  {variable sized - will include lr if non-leaf}
+ * +--------------------------------------------+
+ * |   ...filler word...                        |  {Note: used as 2nd word of V[locals-1] if long}
+ * +--------------------------------------------+
+ * | V[locals-1]                                |
+ * | V[locals-2]                                |
+ * |      .                                     |
+ * |      .                                     |
+ * | V[1]                                       |
+ * | V[0]                                       |
+ * +--------------------------------------------+
+ * |   0 to 3 words padding                     |
+ * +--------------------------------------------+
+ * | OUT[outs-1]                                |
+ * | OUT[outs-2]                                |
+ * |       .                                    |
+ * | OUT[0]                                     |
+ * | current method (StackReference<ArtMethod>) | <<== sp w/ 16-byte alignment
+ * +============================================+
  */
 
 // First FP callee save.
@@ -103,12 +101,12 @@
 #define A64_REG_IS_ZR(reg_num) ((reg_num) == rwzr || (reg_num) == rxzr)
 #define A64_REGSTORAGE_IS_SP_OR_ZR(rs) (((rs).GetRegNum() & 0x1f) == 0x1f)
 
-enum Arm64ResourceEncodingPos {
-  kArm64GPReg0   = 0,
-  kArm64RegLR    = 30,
-  kArm64RegSP    = 31,
-  kArm64FPReg0   = 32,
-  kArm64RegEnd   = 64,
+enum A64ResourceEncodingPos {
+  kA64GPReg0   = 0,
+  kA64RegLR    = 30,
+  kA64RegSP    = 31,
+  kA64FPReg0   = 32,
+  kA64RegEnd   = 64,
 };
 
 #define IS_SIGNED_IMM(size, value) \
@@ -186,15 +184,15 @@
 constexpr RegStorage rs_wLR(RegStorage::kValid | rwLR);
 
 // RegisterLocation templates return values (following the hard-float calling convention).
-const RegLocation arm_loc_c_return =
+const RegLocation a64_loc_c_return =
     {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rs_w0, INVALID_SREG, INVALID_SREG};
-const RegLocation arm_loc_c_return_ref =
+const RegLocation a64_loc_c_return_ref =
     {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1, rs_x0, INVALID_SREG, INVALID_SREG};
-const RegLocation arm_loc_c_return_wide =
+const RegLocation a64_loc_c_return_wide =
     {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, rs_x0, INVALID_SREG, INVALID_SREG};
-const RegLocation arm_loc_c_return_float =
+const RegLocation a64_loc_c_return_float =
     {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, rs_f0, INVALID_SREG, INVALID_SREG};
-const RegLocation arm_loc_c_return_double =
+const RegLocation a64_loc_c_return_double =
     {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, rs_d0, INVALID_SREG, INVALID_SREG};
 
 /**
@@ -228,7 +226,7 @@
  * assembler. Their corresponding EncodingMap positions will be defined in
  * assemble_arm64.cc.
  */
-enum ArmOpcode {
+enum A64Opcode {
   kA64First = 0,
   kA64Adc3rrr = kA64First,  // adc [00011010000] rm[20-16] [000000] rn[9-5] rd[4-0].
   kA64Add4RRdT,      // add [s001000100] imm_12[21-10] rn[9-5] rd[4-0].
@@ -375,22 +373,13 @@
  */
 
 // Return the wide and no-wide variants of the given opcode.
-#define WIDE(op) ((ArmOpcode)((op) | kA64Wide))
-#define UNWIDE(op) ((ArmOpcode)((op) & ~kA64Wide))
+#define WIDE(op) ((A64Opcode)((op) | kA64Wide))
+#define UNWIDE(op) ((A64Opcode)((op) & ~kA64Wide))
 
 // Whether the given opcode is wide.
 #define IS_WIDE(op) (((op) & kA64Wide) != 0)
 
-/*
- * Floating point variants. These are just aliases of the macros above which we use for floating
- * point instructions, just for readibility reasons.
- * TODO(Arm64): should we remove these and use the original macros?
- */
-#define FWIDE WIDE
-#define FUNWIDE UNWIDE
-#define IS_FWIDE IS_WIDE
-
-enum ArmOpDmbOptions {
+enum A64OpDmbOptions {
   kSY = 0xf,
   kST = 0xe,
   kISH = 0xb,
@@ -401,7 +390,7 @@
 };
 
 // Instruction assembly field_loc kind.
-enum ArmEncodingKind {
+enum A64EncodingKind {
   // All the formats below are encoded in the same way (as a kFmtBitBlt).
   // These are grouped together, for fast handling (e.g. "if (LIKELY(fmt <= kFmtBitBlt)) ...").
   kFmtRegW = 0,   // Word register (w) or wzr.
@@ -425,15 +414,15 @@
 };
 
 // Struct used to define the snippet positions for each A64 opcode.
-struct ArmEncodingMap {
+struct A64EncodingMap {
   uint32_t wskeleton;
   uint32_t xskeleton;
   struct {
-    ArmEncodingKind kind;
+    A64EncodingKind kind;
     int end;         // end for kFmtBitBlt, 1-bit slice end for FP regs.
     int start;       // start for kFmtBitBlt, 4-bit slice end for FP regs.
   } field_loc[4];
-  ArmOpcode opcode;  // can be WIDE()-ned to indicate it has a wide variant.
+  A64Opcode opcode;  // can be WIDE()-ned to indicate it has a wide variant.
   uint64_t flags;
   const char* name;
   const char* fmt;
@@ -441,25 +430,6 @@
   FixupKind fixup;
 };
 
-#if 0
-// TODO(Arm64): try the following alternative, which fits exactly in one cache line (64 bytes).
-struct ArmEncodingMap {
-  uint32_t wskeleton;
-  uint32_t xskeleton;
-  uint64_t flags;
-  const char* name;
-  const char* fmt;
-  struct {
-    uint8_t kind;
-    int8_t end;         // end for kFmtBitBlt, 1-bit slice end for FP regs.
-    int8_t start;       // start for kFmtBitBlt, 4-bit slice end for FP regs.
-  } field_loc[4];
-  uint32_t fixup;
-  uint32_t opcode;         // can be WIDE()-ned to indicate it has a wide variant.
-  uint32_t padding[3];
-};
-#endif
-
 }  // namespace art
 
 #endif  // ART_COMPILER_DEX_QUICK_ARM64_ARM64_LIR_H_
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 5115246..b1cf279 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -47,7 +47,7 @@
   CUSTOM_VARIANTS(type00_skeleton, (type00_skeleton | 0x00400000))
 
 /*
- * opcode: ArmOpcode enum
+ * opcode: A64Opcode enum
  * variants: instruction skeletons supplied via CUSTOM_VARIANTS or derived macros.
  * a{n}k: key to applying argument {n}    \
  * a{n}s: argument {n} start bit position | n = 0, 1, 2, 3
@@ -102,8 +102,8 @@
  *
  *  [!] escape.  To insert "!", use "!!"
  */
-/* NOTE: must be kept in sync with enum ArmOpcode from arm64_lir.h */
-const ArmEncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
+/* NOTE: must be kept in sync with enum A64Opcode from arm64_lir.h */
+const A64EncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
     ENCODING_MAP(WIDE(kA64Adc3rrr), SF_VARIANTS(0x1a000000),
                  kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
@@ -229,27 +229,27 @@
                  kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
                  kFmtBitBlt, 15, 10, IS_QUAD_OP | REG_DEF0_USE12,
                  "extr", "!0r, !1r, !2r, #!3d", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Fabs2ff), FLOAT_VARIANTS(0x1e20c000),
+    ENCODING_MAP(WIDE(kA64Fabs2ff), FLOAT_VARIANTS(0x1e20c000),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP| REG_DEF0_USE1,
                  "fabs", "!0f, !1f", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Fadd3fff), FLOAT_VARIANTS(0x1e202800),
+    ENCODING_MAP(WIDE(kA64Fadd3fff), FLOAT_VARIANTS(0x1e202800),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtRegF, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
                  "fadd", "!0f, !1f, !2f", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Fcmp1f), FLOAT_VARIANTS(0x1e202008),
+    ENCODING_MAP(WIDE(kA64Fcmp1f), FLOAT_VARIANTS(0x1e202008),
                  kFmtRegF, 9, 5, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_UNARY_OP | REG_USE0 | SETS_CCODES,
                  "fcmp", "!0f, #0", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Fcmp2ff), FLOAT_VARIANTS(0x1e202000),
+    ENCODING_MAP(WIDE(kA64Fcmp2ff), FLOAT_VARIANTS(0x1e202000),
                  kFmtRegF, 9, 5, kFmtRegF, 20, 16, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
                  "fcmp", "!0f, !1f", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Fcvtzs2wf), FLOAT_VARIANTS(0x1e380000),
+    ENCODING_MAP(WIDE(kA64Fcvtzs2wf), FLOAT_VARIANTS(0x1e380000),
                  kFmtRegW, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "fcvtzs", "!0w, !1f", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Fcvtzs2xf), FLOAT_VARIANTS(0x9e380000),
+    ENCODING_MAP(WIDE(kA64Fcvtzs2xf), FLOAT_VARIANTS(0x9e380000),
                  kFmtRegX, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "fcvtzs", "!0x, !1f", kFixupNone),
@@ -269,23 +269,23 @@
                  kFmtRegX, 4, 0, kFmtRegD, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "fcvtms", "!0x, !1S", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Fdiv3fff), FLOAT_VARIANTS(0x1e201800),
+    ENCODING_MAP(WIDE(kA64Fdiv3fff), FLOAT_VARIANTS(0x1e201800),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtRegF, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
                  "fdiv", "!0f, !1f, !2f", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Fmax3fff), FLOAT_VARIANTS(0x1e204800),
+    ENCODING_MAP(WIDE(kA64Fmax3fff), FLOAT_VARIANTS(0x1e204800),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtRegF, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
                  "fmax", "!0f, !1f, !2f", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Fmin3fff), FLOAT_VARIANTS(0x1e205800),
+    ENCODING_MAP(WIDE(kA64Fmin3fff), FLOAT_VARIANTS(0x1e205800),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtRegF, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
                  "fmin", "!0f, !1f, !2f", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Fmov2ff), FLOAT_VARIANTS(0x1e204000),
+    ENCODING_MAP(WIDE(kA64Fmov2ff), FLOAT_VARIANTS(0x1e204000),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
                  "fmov", "!0f, !1f", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Fmov2fI), FLOAT_VARIANTS(0x1e201000),
+    ENCODING_MAP(WIDE(kA64Fmov2fI), FLOAT_VARIANTS(0x1e201000),
                  kFmtRegF, 4, 0, kFmtBitBlt, 20, 13, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
                  "fmov", "!0f, #!1I", kFixupNone),
@@ -305,35 +305,35 @@
                  kFmtRegX, 4, 0, kFmtRegD, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "fmov", "!0x, !1S", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Fmul3fff), FLOAT_VARIANTS(0x1e200800),
+    ENCODING_MAP(WIDE(kA64Fmul3fff), FLOAT_VARIANTS(0x1e200800),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtRegF, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
                  "fmul", "!0f, !1f, !2f", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Fneg2ff), FLOAT_VARIANTS(0x1e214000),
+    ENCODING_MAP(WIDE(kA64Fneg2ff), FLOAT_VARIANTS(0x1e214000),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "fneg", "!0f, !1f", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Frintp2ff), FLOAT_VARIANTS(0x1e24c000),
+    ENCODING_MAP(WIDE(kA64Frintp2ff), FLOAT_VARIANTS(0x1e24c000),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "frintp", "!0f, !1f", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Frintm2ff), FLOAT_VARIANTS(0x1e254000),
+    ENCODING_MAP(WIDE(kA64Frintm2ff), FLOAT_VARIANTS(0x1e254000),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "frintm", "!0f, !1f", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Frintn2ff), FLOAT_VARIANTS(0x1e244000),
+    ENCODING_MAP(WIDE(kA64Frintn2ff), FLOAT_VARIANTS(0x1e244000),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "frintn", "!0f, !1f", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Frintz2ff), FLOAT_VARIANTS(0x1e25c000),
+    ENCODING_MAP(WIDE(kA64Frintz2ff), FLOAT_VARIANTS(0x1e25c000),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "frintz", "!0f, !1f", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Fsqrt2ff), FLOAT_VARIANTS(0x1e61c000),
+    ENCODING_MAP(WIDE(kA64Fsqrt2ff), FLOAT_VARIANTS(0x1e61c000),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "fsqrt", "!0f, !1f", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Fsub3fff), FLOAT_VARIANTS(0x1e203800),
+    ENCODING_MAP(WIDE(kA64Fsub3fff), FLOAT_VARIANTS(0x1e203800),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtRegF, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
                  "fsub", "!0f, !1f, !2f", kFixupNone),
@@ -369,7 +369,7 @@
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
                  kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
                  "ldrsh", "!0r, [!1X, !2x, lsl #!3d]", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Ldr2fp), SIZE_VARIANTS(0x1c000000),
+    ENCODING_MAP(WIDE(kA64Ldr2fp), SIZE_VARIANTS(0x1c000000),
                  kFmtRegF, 4, 0, kFmtBitBlt, 23, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1,
                  IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD | NEEDS_FIXUP,
@@ -379,7 +379,7 @@
                  kFmtUnused, -1, -1,
                  IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD | NEEDS_FIXUP,
                  "ldr", "!0r, !1p", kFixupLoad),
-    ENCODING_MAP(FWIDE(kA64Ldr3fXD), SIZE_VARIANTS(0xbd400000),
+    ENCODING_MAP(WIDE(kA64Ldr3fXD), SIZE_VARIANTS(0xbd400000),
                  kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldr", "!0f, [!1X, #!2D]", kFixupNone),
@@ -387,7 +387,7 @@
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
                  "ldr", "!0r, [!1X, #!2D]", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Ldr4fXxG), SIZE_VARIANTS(0xbc606800),
+    ENCODING_MAP(WIDE(kA64Ldr4fXxG), SIZE_VARIANTS(0xbc606800),
                  kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
                  kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
                  "ldr", "!0f, [!1X, !2x!3G]", kFixupNone),
@@ -411,7 +411,7 @@
                  kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
                  kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF012 | IS_LOAD,
                  "ldp", "!0r, !1r, [!2X], #!3D", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Ldur3fXd), CUSTOM_VARIANTS(0xbc400000, 0xfc400000),
+    ENCODING_MAP(WIDE(kA64Ldur3fXd), CUSTOM_VARIANTS(0xbc400000, 0xfc400000),
                  kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 20, 12,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
                  "ldur", "!0f, [!1X, #!2d]", kFixupNone),
@@ -507,11 +507,11 @@
                  kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 21, 16,
                  kFmtBitBlt, 15, 10, IS_QUAD_OP | REG_DEF0_USE1,
                  "sbfm", "!0r, !1r, #!2d, #!3d", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Scvtf2fw), FLOAT_VARIANTS(0x1e220000),
+    ENCODING_MAP(WIDE(kA64Scvtf2fw), FLOAT_VARIANTS(0x1e220000),
                  kFmtRegF, 4, 0, kFmtRegW, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "scvtf", "!0f, !1w", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Scvtf2fx), FLOAT_VARIANTS(0x9e220000),
+    ENCODING_MAP(WIDE(kA64Scvtf2fx), FLOAT_VARIANTS(0x9e220000),
                  kFmtRegF, 4, 0, kFmtRegX, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "scvtf", "!0f, !1x", kFixupNone),
@@ -547,11 +547,11 @@
                  kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
                  kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
                  "stp", "!0r, !1r, [!2X, #!3D]!!", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Str3fXD), CUSTOM_VARIANTS(0xbd000000, 0xfd000000),
+    ENCODING_MAP(WIDE(kA64Str3fXD), CUSTOM_VARIANTS(0xbd000000, 0xfd000000),
                  kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
                  "str", "!0f, [!1X, #!2D]", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Str4fXxG), CUSTOM_VARIANTS(0xbc206800, 0xfc206800),
+    ENCODING_MAP(WIDE(kA64Str4fXxG), CUSTOM_VARIANTS(0xbc206800, 0xfc206800),
                  kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
                  kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_USE012 | IS_STORE,
                  "str", "!0f, [!1X, !2x!3G]", kFixupNone),
@@ -583,7 +583,7 @@
                  kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 20, 12,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | REG_DEF1 | IS_STORE,
                  "str", "!0r, [!1X], #!2d", kFixupNone),
-    ENCODING_MAP(FWIDE(kA64Stur3fXd), CUSTOM_VARIANTS(0xbc000000, 0xfc000000),
+    ENCODING_MAP(WIDE(kA64Stur3fXd), CUSTOM_VARIANTS(0xbc000000, 0xfc000000),
                  kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 20, 12,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
                  "stur", "!0f, [!1X, #!2d]", kFixupNone),
@@ -667,21 +667,21 @@
 uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
   for (; lir != nullptr; lir = NEXT_LIR(lir)) {
     bool opcode_is_wide = IS_WIDE(lir->opcode);
-    ArmOpcode opcode = UNWIDE(lir->opcode);
+    A64Opcode opcode = UNWIDE(lir->opcode);
 
     if (UNLIKELY(IsPseudoLirOp(opcode))) {
       continue;
     }
 
     if (LIKELY(!lir->flags.is_nop)) {
-      const ArmEncodingMap *encoder = &EncodingMap[opcode];
+      const A64EncodingMap *encoder = &EncodingMap[opcode];
 
       // Select the right variant of the skeleton.
       uint32_t bits = opcode_is_wide ? encoder->xskeleton : encoder->wskeleton;
       DCHECK(!opcode_is_wide || IS_WIDE(encoder->opcode));
 
       for (int i = 0; i < 4; i++) {
-        ArmEncodingKind kind = encoder->field_loc[i].kind;
+        A64EncodingKind kind = encoder->field_loc[i].kind;
         uint32_t operand = lir->operands[i];
         uint32_t value;
 
@@ -1027,7 +1027,7 @@
 }
 
 size_t Arm64Mir2Lir::GetInsnSize(LIR* lir) {
-  ArmOpcode opcode = UNWIDE(lir->opcode);
+  A64Opcode opcode = UNWIDE(lir->opcode);
   DCHECK(!IsPseudoLirOp(opcode));
   return EncodingMap[opcode].size;
 }
@@ -1038,7 +1038,7 @@
 
   LIR* last_fixup = NULL;
   for (LIR* lir = head_lir; lir != end_lir; lir = NEXT_LIR(lir)) {
-    ArmOpcode opcode = UNWIDE(lir->opcode);
+    A64Opcode opcode = UNWIDE(lir->opcode);
     if (!lir->flags.is_nop) {
       if (lir->flags.fixup != kFixupNone) {
         if (!IsPseudoLirOp(opcode)) {
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 1c40292..55cc938 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -395,7 +395,7 @@
                      RegLocation rl_src2, bool is_div);
 
   InToRegStorageMapping in_to_reg_storage_mapping_;
-  static const ArmEncodingMap EncodingMap[kA64Last];
+  static const A64EncodingMap EncodingMap[kA64Last];
 };
 
 }  // namespace art
diff --git a/compiler/dex/quick/arm64/fp_arm64.cc b/compiler/dex/quick/arm64/fp_arm64.cc
index a39d151..db24d12 100644
--- a/compiler/dex/quick/arm64/fp_arm64.cc
+++ b/compiler/dex/quick/arm64/fp_arm64.cc
@@ -112,7 +112,7 @@
   rl_result = EvalLoc(rl_dest, kFPReg, true);
   DCHECK(rl_dest.wide);
   DCHECK(rl_result.wide);
-  NewLIR3(FWIDE(op), rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+  NewLIR3(WIDE(op), rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
   StoreValueWide(rl_dest, rl_result);
 }
 
@@ -145,17 +145,17 @@
       dst_reg_class = kFPReg;
       break;
     case Instruction::INT_TO_DOUBLE:
-      op = FWIDE(kA64Scvtf2fw);
+      op = WIDE(kA64Scvtf2fw);
       src_reg_class = kCoreReg;
       dst_reg_class = kFPReg;
       break;
     case Instruction::DOUBLE_TO_INT:
-      op = FWIDE(kA64Fcvtzs2wf);
+      op = WIDE(kA64Fcvtzs2wf);
       src_reg_class = kFPReg;
       dst_reg_class = kCoreReg;
       break;
     case Instruction::LONG_TO_DOUBLE:
-      op = FWIDE(kA64Scvtf2fx);
+      op = WIDE(kA64Scvtf2fx);
       src_reg_class = kCoreReg;
       dst_reg_class = kFPReg;
       break;
@@ -170,7 +170,7 @@
       dst_reg_class = kFPReg;
       break;
     case Instruction::DOUBLE_TO_LONG:
-      op = FWIDE(kA64Fcvtzs2xf);
+      op = WIDE(kA64Fcvtzs2xf);
       src_reg_class = kFPReg;
       dst_reg_class = kCoreReg;
       break;
@@ -208,7 +208,7 @@
     rl_src2 = mir_graph_->GetSrcWide(mir, 2);
     rl_src1 = LoadValueWide(rl_src1, kFPReg);
     rl_src2 = LoadValueWide(rl_src2, kFPReg);
-    NewLIR2(FWIDE(kA64Fcmp2ff), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+    NewLIR2(WIDE(kA64Fcmp2ff), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
   } else {
     rl_src1 = mir_graph_->GetSrc(mir, 0);
     rl_src2 = mir_graph_->GetSrc(mir, 1);
@@ -281,7 +281,7 @@
     ClobberSReg(rl_dest.s_reg_low);
     rl_result = EvalLoc(rl_dest, kCoreReg, true);
     LoadConstant(rl_result.reg, default_result);
-    NewLIR2(FWIDE(kA64Fcmp2ff), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+    NewLIR2(WIDE(kA64Fcmp2ff), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
   } else {
     rl_src1 = LoadValue(rl_src1, kFPReg);
     rl_src2 = LoadValue(rl_src2, kFPReg);
@@ -318,7 +318,7 @@
   RegLocation rl_result;
   rl_src = LoadValueWide(rl_src, kFPReg);
   rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(FWIDE(kA64Fneg2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  NewLIR2(WIDE(kA64Fneg2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
   StoreValueWide(rl_dest, rl_result);
 }
 
@@ -370,7 +370,7 @@
   rl_src = LoadValueWide(rl_src, reg_class);
   RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
   if (reg_class == kFPReg) {
-    NewLIR2(FWIDE(kA64Fabs2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
+    NewLIR2(WIDE(kA64Fabs2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
   } else {
     // Clear the sign bit in an integer register.
     OpRegRegImm64(kOpAnd, rl_result.reg, rl_src.reg, 0x7fffffffffffffff);
@@ -384,7 +384,7 @@
   RegLocation rl_dest = InlineTargetWide(info);  // double place for result
   rl_src = LoadValueWide(rl_src, kFPReg);
   RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(FWIDE(kA64Fsqrt2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  NewLIR2(WIDE(kA64Fsqrt2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
   StoreValueWide(rl_dest, rl_result);
   return true;
 }
@@ -394,7 +394,7 @@
   RegLocation rl_dest = InlineTargetWide(info);
   rl_src = LoadValueWide(rl_src, kFPReg);
   RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(FWIDE(kA64Frintp2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  NewLIR2(WIDE(kA64Frintp2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
   StoreValueWide(rl_dest, rl_result);
   return true;
 }
@@ -404,7 +404,7 @@
   RegLocation rl_dest = InlineTargetWide(info);
   rl_src = LoadValueWide(rl_src, kFPReg);
   RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(FWIDE(kA64Frintm2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  NewLIR2(WIDE(kA64Frintm2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
   StoreValueWide(rl_dest, rl_result);
   return true;
 }
@@ -414,14 +414,14 @@
   RegLocation rl_dest = InlineTargetWide(info);
   rl_src = LoadValueWide(rl_src, kFPReg);
   RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(FWIDE(kA64Frintn2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  NewLIR2(WIDE(kA64Frintn2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
   StoreValueWide(rl_dest, rl_result);
   return true;
 }
 
 bool Arm64Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
   int32_t encoded_imm = EncodeImmSingle(bit_cast<float, uint32_t>(0.5f));
-  ArmOpcode wide = (is_double) ? FWIDE(0) : FUNWIDE(0);
+  A64Opcode wide = (is_double) ? WIDE(0) : UNWIDE(0);
   RegLocation rl_src = info->args[0];
   RegLocation rl_dest = (is_double) ? InlineTargetWide(info) : InlineTarget(info);
   rl_src = (is_double) ? LoadValueWide(rl_src, kFPReg) : LoadValue(rl_src, kFPReg);
@@ -439,7 +439,7 @@
 bool Arm64Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
   DCHECK_EQ(cu_->instruction_set, kArm64);
   int op = (is_min) ? kA64Fmin3fff : kA64Fmax3fff;
-  ArmOpcode wide = (is_double) ? FWIDE(0) : FUNWIDE(0);
+  A64Opcode wide = (is_double) ? WIDE(0) : UNWIDE(0);
   RegLocation rl_src1 = info->args[0];
   RegLocation rl_src2 = (is_double) ? info->args[2] : info->args[1];
   rl_src1 = (is_double) ? LoadValueWide(rl_src1, kFPReg) : LoadValue(rl_src1, kFPReg);
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 094db4c..88123e1 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -262,18 +262,18 @@
   ArmConditionCode arm_cond = ArmConditionEncoding(cond);
   if (check_value == 0) {
     if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
-      ArmOpcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
-      ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
+      A64Opcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
+      A64Opcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
       branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
     } else if (arm_cond == kArmCondLs) {
       // kArmCondLs is an unsigned less or equal. A comparison r <= 0 is then the same as cbz.
       // This case happens for a bounds check of array[0].
-      ArmOpcode opcode = kA64Cbz2rt;
-      ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
+      A64Opcode opcode = kA64Cbz2rt;
+      A64Opcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
       branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
     } else if (arm_cond == kArmCondLt || arm_cond == kArmCondGe) {
-      ArmOpcode opcode = (arm_cond == kArmCondLt) ? kA64Tbnz3rht : kA64Tbz3rht;
-      ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
+      A64Opcode opcode = (arm_cond == kArmCondLt) ? kA64Tbnz3rht : kA64Tbz3rht;
+      A64Opcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
       int value = reg.Is64Bit() ? 63 : 31;
       branch = NewLIR3(opcode | wide, reg.GetReg(), value, 0);
     }
@@ -305,7 +305,7 @@
 LIR* Arm64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
   bool dest_is_fp = r_dest.IsFloat();
   bool src_is_fp = r_src.IsFloat();
-  ArmOpcode opcode = kA64Brk1d;
+  A64Opcode opcode = kA64Brk1d;
   LIR* res;
 
   if (LIKELY(dest_is_fp == src_is_fp)) {
@@ -333,7 +333,7 @@
       DCHECK_EQ(dest_is_double, src_is_double);
 
       // Homogeneous float/float copy.
-      opcode = (dest_is_double) ? FWIDE(kA64Fmov2ff) : kA64Fmov2ff;
+      opcode = (dest_is_double) ? WIDE(kA64Fmov2ff) : kA64Fmov2ff;
     }
   } else {
     // Inhomogeneous register copy.
@@ -630,7 +630,7 @@
     // temp = r_src1 / r_src2
     // dest = r_src1 - temp * r_src2
     RegStorage temp;
-    ArmOpcode wide;
+    A64Opcode wide;
     if (rl_result.reg.Is64Bit()) {
       temp = AllocTempWide();
       wide = WIDE(0);
@@ -770,7 +770,7 @@
   RegStorage r_tmp;
   RegStorage r_tmp_stored;
   RegStorage rl_new_value_stored = rl_new_value.reg;
-  ArmOpcode wide = UNWIDE(0);
+  A64Opcode wide = UNWIDE(0);
   if (is_long) {
     r_tmp_stored = r_tmp = AllocTempWide();
     wide = WIDE(0);
@@ -962,7 +962,7 @@
   // Combine sub & test using sub setflags encoding here.  We need to make sure a
   // subtract form that sets carry is used, so generate explicitly.
   // TODO: might be best to add a new op, kOpSubs, and handle it generically.
-  ArmOpcode opcode = reg.Is64Bit() ? WIDE(kA64Subs3rRd) : UNWIDE(kA64Subs3rRd);
+  A64Opcode opcode = reg.Is64Bit() ? WIDE(kA64Subs3rRd) : UNWIDE(kA64Subs3rRd);
   NewLIR3(opcode, reg.GetReg(), reg.GetReg(), 1);  // For value == 1, this should set flags.
   DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
   return OpCondBranch(c_code, target);
@@ -1459,7 +1459,7 @@
   for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
     reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
     if (UNLIKELY(reg2 < 0)) {
-      m2l->NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
+      m2l->NewLIR3(WIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
                    offset);
     } else {
       m2l->NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
@@ -1570,7 +1570,7 @@
       // Have some FP regs to do.
       fp_reg_mask = GenPairWise(fp_reg_mask, &reg1, &reg2);
       if (UNLIKELY(reg2 < 0)) {
-        m2l->NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
+        m2l->NewLIR3(WIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
                      cur_offset);
         // Do not increment offset here, as the second half will be filled by a core reg.
       } else {
@@ -1643,7 +1643,7 @@
   for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
      reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
     if (UNLIKELY(reg2 < 0)) {
-      m2l->NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
+      m2l->NewLIR3(WIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
                    offset);
     } else {
       m2l->NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
@@ -1705,7 +1705,7 @@
 }
 
 bool Arm64Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
-  ArmOpcode wide = IsWide(size) ? WIDE(0) : UNWIDE(0);
+  A64Opcode wide = IsWide(size) ? WIDE(0) : UNWIDE(0);
   RegLocation rl_src_i = info->args[0];
   RegLocation rl_dest = IsWide(size) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index fe0554c..0462530 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -84,23 +84,23 @@
 static constexpr ArrayRef<const RegStorage> dp_temps(dp_temps_arr);
 
 RegLocation Arm64Mir2Lir::LocCReturn() {
-  return arm_loc_c_return;
+  return a64_loc_c_return;
 }
 
 RegLocation Arm64Mir2Lir::LocCReturnRef() {
-  return arm_loc_c_return_ref;
+  return a64_loc_c_return_ref;
 }
 
 RegLocation Arm64Mir2Lir::LocCReturnWide() {
-  return arm_loc_c_return_wide;
+  return a64_loc_c_return_wide;
 }
 
 RegLocation Arm64Mir2Lir::LocCReturnFloat() {
-  return arm_loc_c_return_float;
+  return a64_loc_c_return_float;
 }
 
 RegLocation Arm64Mir2Lir::LocCReturnDouble() {
-  return arm_loc_c_return_double;
+  return a64_loc_c_return_double;
 }
 
 // Return a target-dependent special register.
@@ -153,7 +153,7 @@
 
   return ResourceMask::Bit(
       // FP register starts at bit position 32.
-      (reg.IsFloat() ? kArm64FPReg0 : 0) + reg.GetRegNum());
+      (reg.IsFloat() ? kA64FPReg0 : 0) + reg.GetRegNum());
 }
 
 ResourceMask Arm64Mir2Lir::GetPCUseDefEncoding() const {
@@ -173,15 +173,15 @@
   // These flags are somewhat uncommon - bypass if we can.
   if ((flags & (REG_DEF_SP | REG_USE_SP | REG_DEF_LR)) != 0) {
     if (flags & REG_DEF_SP) {
-      def_mask->SetBit(kArm64RegSP);
+      def_mask->SetBit(kA64RegSP);
     }
 
     if (flags & REG_USE_SP) {
-      use_mask->SetBit(kArm64RegSP);
+      use_mask->SetBit(kA64RegSP);
     }
 
     if (flags & REG_DEF_LR) {
-      def_mask->SetBit(kArm64RegLR);
+      def_mask->SetBit(kA64RegLR);
     }
   }
 }
@@ -408,7 +408,7 @@
              snprintf(tbuf, arraysize(tbuf), "d%d", operand & RegStorage::kRegNumMask);
              break;
            case 'f':
-             snprintf(tbuf, arraysize(tbuf), "%c%d", (IS_FWIDE(lir->opcode)) ? 'd' : 's',
+             snprintf(tbuf, arraysize(tbuf), "%c%d", (IS_WIDE(lir->opcode)) ? 'd' : 's',
                       operand & RegStorage::kRegNumMask);
              break;
            case 'l': {
@@ -534,7 +534,7 @@
     char num[8];
     int i;
 
-    for (i = 0; i < kArm64RegEnd; i++) {
+    for (i = 0; i < kA64RegEnd; i++) {
       if (mask.HasBit(i)) {
         snprintf(num, arraysize(num), "%d ", i);
         strcat(buf, num);
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 5326e74..38670ff 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -89,9 +89,9 @@
 
 size_t Arm64Mir2Lir::GetLoadStoreSize(LIR* lir) {
   bool opcode_is_wide = IS_WIDE(lir->opcode);
-  ArmOpcode opcode = UNWIDE(lir->opcode);
+  A64Opcode opcode = UNWIDE(lir->opcode);
   DCHECK(!IsPseudoLirOp(opcode));
-  const ArmEncodingMap *encoder = &EncodingMap[opcode];
+  const A64EncodingMap *encoder = &EncodingMap[opcode];
   uint32_t bits = opcode_is_wide ? encoder->xskeleton : encoder->wskeleton;
   return (bits >> 30);
 }
@@ -138,7 +138,7 @@
   } else {
     int32_t encoded_imm = EncodeImmDouble(value);
     if (encoded_imm >= 0) {
-      return NewLIR2(FWIDE(kA64Fmov2fI), r_dest.GetReg(), encoded_imm);
+      return NewLIR2(WIDE(kA64Fmov2fI), r_dest.GetReg(), encoded_imm);
     }
   }
 
@@ -151,7 +151,7 @@
   }
 
   ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
-  LIR* load_pc_rel = RawLIR(current_dalvik_offset_, FWIDE(kA64Ldr2fp),
+  LIR* load_pc_rel = RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2fp),
                             r_dest.GetReg(), 0, 0, 0, 0, data_target);
   AppendLIR(load_pc_rel);
   return load_pc_rel;
@@ -415,7 +415,7 @@
     // 1 instruction is enough to load the immediate.
     if (LIKELY(low_bits == high_bits)) {
       // Value is either 0 or -1: we can just use wzr.
-      ArmOpcode opcode = LIKELY(low_bits == 0) ? kA64Mov2rr : kA64Mvn2rr;
+      A64Opcode opcode = LIKELY(low_bits == 0) ? kA64Mov2rr : kA64Mvn2rr;
       res = NewLIR2(opcode, r_dest.GetReg(), rwzr);
     } else {
       uint16_t uniform_bits, useful_bits;
@@ -466,7 +466,7 @@
 
   if (LIKELY(value == INT64_C(0) || value == INT64_C(-1))) {
     // value is either 0 or -1: we can just use xzr.
-    ArmOpcode opcode = LIKELY(value == 0) ? WIDE(kA64Mov2rr) : WIDE(kA64Mvn2rr);
+    A64Opcode opcode = LIKELY(value == 0) ? WIDE(kA64Mov2rr) : WIDE(kA64Mvn2rr);
     return NewLIR2(opcode, r_dest.GetReg(), rxzr);
   }
 
@@ -486,7 +486,7 @@
 
   if (num_slow_halfwords <= max_num_ops_per_const_load) {
     // We can encode the number using a movz/movn followed by one or more movk.
-    ArmOpcode op;
+    A64Opcode op;
     uint16_t background;
     LIR* res = nullptr;
 
@@ -548,15 +548,11 @@
 }
 
 LIR* Arm64Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
-  ArmOpcode opcode = kA64Brk1d;
+  A64Opcode opcode = kA64Brk1d;
   switch (op) {
     case kOpBlx:
       opcode = kA64Blr1x;
       break;
-    // TODO(Arm64): port kThumbBx.
-    // case kOpBx:
-    //   opcode = kThumbBx;
-    //   break;
     default:
       LOG(FATAL) << "Bad opcode " << op;
   }
@@ -564,9 +560,9 @@
 }
 
 LIR* Arm64Mir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift) {
-  ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
+  A64Opcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
   CHECK_EQ(r_dest_src1.Is64Bit(), r_src2.Is64Bit());
-  ArmOpcode opcode = kA64Brk1d;
+  A64Opcode opcode = kA64Brk1d;
 
   switch (op) {
     case kOpCmn:
@@ -621,7 +617,7 @@
     DCHECK_EQ(shift, ENCODE_NO_SHIFT);
     return NewLIR2(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg());
   } else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
-    ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
+    A64EncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
     if (kind == kFmtShift) {
       return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(), shift);
     }
@@ -633,8 +629,8 @@
 
 LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
                                   A64RegExtEncodings ext, uint8_t amount) {
-  ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
-  ArmOpcode opcode = kA64Brk1d;
+  A64Opcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
+  A64Opcode opcode = kA64Brk1d;
 
   switch (op) {
     case kOpCmn:
@@ -655,7 +651,7 @@
 
   DCHECK(!IsPseudoLirOp(opcode));
   if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
-    ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
+    A64EncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
     if (kind == kFmtExtend) {
       return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(),
                      EncodeExtend(ext, amount));
@@ -694,7 +690,7 @@
 
 LIR* Arm64Mir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
                                     RegStorage r_src2, int shift) {
-  ArmOpcode opcode = kA64Brk1d;
+  A64Opcode opcode = kA64Brk1d;
 
   switch (op) {
     case kOpAdd:
@@ -747,7 +743,7 @@
   // The instructions above belong to two kinds:
   // - 4-operands instructions, where the last operand is a shift/extend immediate,
   // - 3-operands instructions with no shift/extend.
-  ArmOpcode widened_opcode = r_dest.Is64Bit() ? WIDE(opcode) : opcode;
+  A64Opcode widened_opcode = r_dest.Is64Bit() ? WIDE(opcode) : opcode;
   CHECK_EQ(r_dest.Is64Bit(), r_src1.Is64Bit());
   CHECK_EQ(r_dest.Is64Bit(), r_src2.Is64Bit());
   if (EncodingMap[opcode].flags & IS_QUAD_OP) {
@@ -762,7 +758,7 @@
 
 LIR* Arm64Mir2Lir::OpRegRegRegExtend(OpKind op, RegStorage r_dest, RegStorage r_src1,
                                      RegStorage r_src2, A64RegExtEncodings ext, uint8_t amount) {
-  ArmOpcode opcode = kA64Brk1d;
+  A64Opcode opcode = kA64Brk1d;
 
   switch (op) {
     case kOpAdd:
@@ -775,7 +771,7 @@
       LOG(FATAL) << "Unimplemented opcode: " << op;
       break;
   }
-  ArmOpcode widened_opcode = r_dest.Is64Bit() ? WIDE(opcode) : opcode;
+  A64Opcode widened_opcode = r_dest.Is64Bit() ? WIDE(opcode) : opcode;
 
   if (r_dest.Is64Bit()) {
     CHECK(r_src1.Is64Bit());
@@ -810,11 +806,11 @@
   LIR* res;
   bool neg = (value < 0);
   int64_t abs_value = (neg) ? -value : value;
-  ArmOpcode opcode = kA64Brk1d;
-  ArmOpcode alt_opcode = kA64Brk1d;
+  A64Opcode opcode = kA64Brk1d;
+  A64Opcode alt_opcode = kA64Brk1d;
   bool is_logical = false;
   bool is_wide = r_dest.Is64Bit();
-  ArmOpcode wide = (is_wide) ? WIDE(0) : UNWIDE(0);
+  A64Opcode wide = (is_wide) ? WIDE(0) : UNWIDE(0);
   int info = 0;
 
   switch (op) {
@@ -937,9 +933,9 @@
 }
 
 LIR* Arm64Mir2Lir::OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value) {
-  ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
-  ArmOpcode opcode = kA64Brk1d;
-  ArmOpcode neg_opcode = kA64Brk1d;
+  A64Opcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
+  A64Opcode opcode = kA64Brk1d;
+  A64Opcode neg_opcode = kA64Brk1d;
   bool shift;
   bool neg = (value < 0);
   uint64_t abs_value = (neg) ? -value : value;
@@ -1025,7 +1021,7 @@
                                    int scale, OpSize size) {
   LIR* load;
   int expected_scale = 0;
-  ArmOpcode opcode = kA64Brk1d;
+  A64Opcode opcode = kA64Brk1d;
   r_base = Check64BitReg(r_base);
 
   // TODO(Arm64): The sign extension of r_index should be carried out by using an extended
@@ -1040,7 +1036,7 @@
     if (r_dest.IsDouble()) {
       DCHECK(size == k64 || size == kDouble);
       expected_scale = 3;
-      opcode = FWIDE(kA64Ldr4fXxG);
+      opcode = WIDE(kA64Ldr4fXxG);
     } else {
       DCHECK(r_dest.IsSingle());
       DCHECK(size == k32 || size == kSingle);
@@ -1113,7 +1109,7 @@
                                     int scale, OpSize size) {
   LIR* store;
   int expected_scale = 0;
-  ArmOpcode opcode = kA64Brk1d;
+  A64Opcode opcode = kA64Brk1d;
   r_base = Check64BitReg(r_base);
 
   // TODO(Arm64): The sign extension of r_index should be carried out by using an extended
@@ -1128,7 +1124,7 @@
     if (r_src.IsDouble()) {
       DCHECK(size == k64 || size == kDouble);
       expected_scale = 3;
-      opcode = FWIDE(kA64Str4fXxG);
+      opcode = WIDE(kA64Str4fXxG);
     } else {
       DCHECK(r_src.IsSingle());
       DCHECK(size == k32 || size == kSingle);
@@ -1197,8 +1193,8 @@
 LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
                                     OpSize size) {
   LIR* load = NULL;
-  ArmOpcode opcode = kA64Brk1d;
-  ArmOpcode alt_opcode = kA64Brk1d;
+  A64Opcode opcode = kA64Brk1d;
+  A64Opcode alt_opcode = kA64Brk1d;
   int scale = 0;
 
   switch (size) {
@@ -1209,8 +1205,8 @@
       scale = 3;
       if (r_dest.IsFloat()) {
         DCHECK(r_dest.IsDouble());
-        opcode = FWIDE(kA64Ldr3fXD);
-        alt_opcode = FWIDE(kA64Ldur3fXd);
+        opcode = WIDE(kA64Ldr3fXD);
+        alt_opcode = WIDE(kA64Ldur3fXd);
       } else {
         opcode = WIDE(kA64Ldr3rXD);
         alt_opcode = WIDE(kA64Ldur3rXd);
@@ -1294,8 +1290,8 @@
 LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
                                      OpSize size) {
   LIR* store = NULL;
-  ArmOpcode opcode = kA64Brk1d;
-  ArmOpcode alt_opcode = kA64Brk1d;
+  A64Opcode opcode = kA64Brk1d;
+  A64Opcode alt_opcode = kA64Brk1d;
   int scale = 0;
 
   switch (size) {
@@ -1306,11 +1302,11 @@
       scale = 3;
       if (r_src.IsFloat()) {
         DCHECK(r_src.IsDouble());
-        opcode = FWIDE(kA64Str3fXD);
-        alt_opcode = FWIDE(kA64Stur3fXd);
+        opcode = WIDE(kA64Str3fXD);
+        alt_opcode = WIDE(kA64Stur3fXd);
       } else {
-        opcode = FWIDE(kA64Str3rXD);
-        alt_opcode = FWIDE(kA64Stur3rXd);
+        opcode = WIDE(kA64Str3rXD);
+        alt_opcode = WIDE(kA64Stur3rXd);
       }
       break;
     case kSingle:     // Intentional fall-through.