blob: 0b3bdb206a72123ef8507e273620b5f8371b3c18 [file] [log] [blame]
/*---------------------------------------------------------------*/
/*--- begin host_arm64_defs.c ---*/
/*---------------------------------------------------------------*/
/*
This file is part of Valgrind, a dynamic binary instrumentation
framework.
Copyright (C) 2013-2013 OpenWorks
info@open-works.net
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
The GNU General Public License is contained in the file COPYING.
*/
#include "libvex_basictypes.h"
#include "libvex.h"
#include "libvex_trc_values.h"
#include "main_util.h"
#include "host_generic_regs.h"
#include "host_arm64_defs.h"
//ZZ UInt arm_hwcaps = 0;
/* --------- Registers. --------- */
/* The usual HReg abstraction. We use the following classes only:
X regs (64 bit int)
D regs (64 bit float, also used for 32 bit float)
Q regs (128 bit vector)
*/
/* Print the name of |reg|.  Virtual registers are printed
   generically; real registers are shown with their architectural
   names (xN / dN / qN). */
void ppHRegARM64 ( HReg reg ) {
   /* Be generic for all virtual regs. */
   if (hregIsVirtual(reg)) {
      ppHReg(reg);
      return;
   }
   /* But specific for real regs. */
   switch (hregClass(reg)) {
      case HRcInt64: {
         Int r = hregNumber(reg);
         vassert(r >= 0 && r < 31);
         vex_printf("x%d", r);
         break;
      }
      case HRcFlt64: {
         Int r = hregNumber(reg);
         vassert(r >= 0 && r < 32);
         vex_printf("d%d", r);
         break;
      }
      case HRcVec128: {
         Int r = hregNumber(reg);
         vassert(r >= 0 && r < 32);
         vex_printf("q%d", r);
         break;
      }
      default:
         vpanic("ppHRegARM64");
   }
}
/* Print |hreg| and tag it as being used in its 32-bit (S register)
   capacity. */
static void ppHRegARM64asSreg ( HReg hreg ) {
   ppHRegARM64(hreg);
   vex_printf("(S-reg)");
}
/* Constructors for the fixed (real) hard registers this backend
   refers to.  Each returns an HReg denoting the named register.
   Note: no constructors are provided here for x8 or x16..x20. */
HReg hregARM64_X0 ( void ) { return mkHReg(0, HRcInt64, False); }
HReg hregARM64_X1 ( void ) { return mkHReg(1, HRcInt64, False); }
HReg hregARM64_X2 ( void ) { return mkHReg(2, HRcInt64, False); }
HReg hregARM64_X3 ( void ) { return mkHReg(3, HRcInt64, False); }
HReg hregARM64_X4 ( void ) { return mkHReg(4, HRcInt64, False); }
HReg hregARM64_X5 ( void ) { return mkHReg(5, HRcInt64, False); }
HReg hregARM64_X6 ( void ) { return mkHReg(6, HRcInt64, False); }
HReg hregARM64_X7 ( void ) { return mkHReg(7, HRcInt64, False); }
//ZZ HReg hregARM_R8 ( void ) { return mkHReg(8, HRcInt32, False); }
HReg hregARM64_X9 ( void ) { return mkHReg(9, HRcInt64, False); }
HReg hregARM64_X10 ( void ) { return mkHReg(10, HRcInt64, False); }
HReg hregARM64_X11 ( void ) { return mkHReg(11, HRcInt64, False); }
HReg hregARM64_X12 ( void ) { return mkHReg(12, HRcInt64, False); }
HReg hregARM64_X13 ( void ) { return mkHReg(13, HRcInt64, False); }
HReg hregARM64_X14 ( void ) { return mkHReg(14, HRcInt64, False); }
HReg hregARM64_X15 ( void ) { return mkHReg(15, HRcInt64, False); }
HReg hregARM64_X21 ( void ) { return mkHReg(21, HRcInt64, False); }
HReg hregARM64_X22 ( void ) { return mkHReg(22, HRcInt64, False); }
HReg hregARM64_X23 ( void ) { return mkHReg(23, HRcInt64, False); }
HReg hregARM64_X24 ( void ) { return mkHReg(24, HRcInt64, False); }
HReg hregARM64_X25 ( void ) { return mkHReg(25, HRcInt64, False); }
HReg hregARM64_X26 ( void ) { return mkHReg(26, HRcInt64, False); }
HReg hregARM64_X27 ( void ) { return mkHReg(27, HRcInt64, False); }
HReg hregARM64_X28 ( void ) { return mkHReg(28, HRcInt64, False); }
// Should really use D8 .. D15 for class F64, since they are callee
// save
HReg hregARM64_D8 ( void ) { return mkHReg(8, HRcFlt64, False); }
HReg hregARM64_D9 ( void ) { return mkHReg(9, HRcFlt64, False); }
HReg hregARM64_D10 ( void ) { return mkHReg(10, HRcFlt64, False); }
HReg hregARM64_D11 ( void ) { return mkHReg(11, HRcFlt64, False); }
HReg hregARM64_D12 ( void ) { return mkHReg(12, HRcFlt64, False); }
HReg hregARM64_D13 ( void ) { return mkHReg(13, HRcFlt64, False); }
//ZZ HReg hregARM_S26 ( void ) { return mkHReg(26, HRcFlt32, False); }
//ZZ HReg hregARM_S27 ( void ) { return mkHReg(27, HRcFlt32, False); }
//ZZ HReg hregARM_S28 ( void ) { return mkHReg(28, HRcFlt32, False); }
//ZZ HReg hregARM_S29 ( void ) { return mkHReg(29, HRcFlt32, False); }
//ZZ HReg hregARM_S30 ( void ) { return mkHReg(30, HRcFlt32, False); }
/* 128-bit vector (Q) registers. */
HReg hregARM64_Q16 ( void ) { return mkHReg(16, HRcVec128, False); }
HReg hregARM64_Q17 ( void ) { return mkHReg(17, HRcVec128, False); }
HReg hregARM64_Q18 ( void ) { return mkHReg(18, HRcVec128, False); }
HReg hregARM64_Q19 ( void ) { return mkHReg(19, HRcVec128, False); }
HReg hregARM64_Q20 ( void ) { return mkHReg(20, HRcVec128, False); }
//ZZ HReg hregARM_Q11 ( void ) { return mkHReg(11, HRcVec128, False); }
//ZZ HReg hregARM_Q12 ( void ) { return mkHReg(12, HRcVec128, False); }
//ZZ HReg hregARM_Q13 ( void ) { return mkHReg(13, HRcVec128, False); }
//ZZ HReg hregARM_Q14 ( void ) { return mkHReg(14, HRcVec128, False); }
//ZZ HReg hregARM_Q15 ( void ) { return mkHReg(15, HRcVec128, False); }
/* Produce, via *nregs/*arr, the set of real registers available to
   the register allocator, in preference order.  The caller receives
   a freshly allocated array. */
void getAllocableRegs_ARM64 ( Int* nregs, HReg** arr )
{
   HReg pool[26];
   Int  n = 0;
   Int  j;
   /* Callee-saved integer regs (x22 to x28) are listed first, since
      we prefer them if they're available. */
   pool[n++] = hregARM64_X22();
   pool[n++] = hregARM64_X23();
   pool[n++] = hregARM64_X24();
   pool[n++] = hregARM64_X25();
   pool[n++] = hregARM64_X26();
   pool[n++] = hregARM64_X27();
   pool[n++] = hregARM64_X28();
   /* Then the caller-saved ones. */
   pool[n++] = hregARM64_X0();
   pool[n++] = hregARM64_X1();
   pool[n++] = hregARM64_X2();
   pool[n++] = hregARM64_X3();
   pool[n++] = hregARM64_X4();
   pool[n++] = hregARM64_X5();
   pool[n++] = hregARM64_X6();
   pool[n++] = hregARM64_X7();
   /* Not offered to the allocator:
        x8  .. who knows (unassigned)
        x9  .. chaining/spill temporary
        x10 .. x15 currently unused (do we really need them?)
        x21 .. guest state pointer
        x30 .. LR
        x31 .. SP-vs-ZR overloading is too confusing, and we don't
               need it, so just avoid the problem. */
   /* Vector regs.  Unfortunately not callee-saved. */
   pool[n++] = hregARM64_Q16();
   pool[n++] = hregARM64_Q17();
   pool[n++] = hregARM64_Q18();
   pool[n++] = hregARM64_Q19();
   pool[n++] = hregARM64_Q20();
   /* F64 regs, all of which are callee-saved. */
   pool[n++] = hregARM64_D8();
   pool[n++] = hregARM64_D9();
   pool[n++] = hregARM64_D10();
   pool[n++] = hregARM64_D11();
   pool[n++] = hregARM64_D12();
   pool[n++] = hregARM64_D13();
   /* Hence the allocatable integer registers are:
        callee-saved: 22 23 24 25 26 27 28
        caller-saved: 0 1 2 3 4 5 6 7
      If the set of available registers changes or if the e/r status
      changes, be sure to re-check/sync the definition of
      getHRegUsage for ARMInstr_Call too. */
   vassert(n == 26);
   *nregs = n;
   *arr   = LibVEX_Alloc(n * sizeof(HReg));
   for (j = 0; j < n; j++)
      (*arr)[j] = pool[j];
}
/* --------- Condition codes, ARM64 encoding. --------- */
/* Return the standard assembler syntax for condition code |cond|. */
static const HChar* showARM64CondCode ( ARM64CondCode cond ) {
   const HChar* nm = NULL;
   switch (cond) {
      case ARM64cc_EQ: nm = "eq"; break;
      case ARM64cc_NE: nm = "ne"; break;
      case ARM64cc_CS: nm = "cs"; break;
      case ARM64cc_CC: nm = "cc"; break;
      case ARM64cc_MI: nm = "mi"; break;
      case ARM64cc_PL: nm = "pl"; break;
      case ARM64cc_VS: nm = "vs"; break;
      case ARM64cc_VC: nm = "vc"; break;
      case ARM64cc_HI: nm = "hi"; break;
      case ARM64cc_LS: nm = "ls"; break;
      case ARM64cc_GE: nm = "ge"; break;
      case ARM64cc_LT: nm = "lt"; break;
      case ARM64cc_GT: nm = "gt"; break;
      case ARM64cc_LE: nm = "le"; break;
      case ARM64cc_AL: nm = "al"; break; // default
      case ARM64cc_NV: nm = "nv"; break;
      default: vpanic("showARM64CondCode");
   }
   return nm;
}
/* --------- Memory address expressions (amodes). --------- */
/* Construct a reg + signed-9-bit-immediate amode.  |simm9| must be
   in -256 .. 255. */
ARM64AMode* ARM64AMode_RI9 ( HReg reg, Int simm9 ) {
   ARM64AMode* am;
   vassert(-256 <= simm9 && simm9 <= 255);
   am = LibVEX_Alloc(sizeof(ARM64AMode));
   am->tag               = ARM64am_RI9;
   am->ARM64am.RI9.reg   = reg;
   am->ARM64am.RI9.simm9 = simm9;
   return am;
}
/* Construct a reg + scaled-unsigned-12-bit-immediate amode.
   |uimm12| is in units of |szB|, which must be 1, 2, 4 or 8. */
ARM64AMode* ARM64AMode_RI12 ( HReg reg, Int uimm12, UChar szB ) {
   ARM64AMode* am;
   vassert(uimm12 >= 0 && uimm12 <= 4095);
   vassert(szB == 1 || szB == 2 || szB == 4 || szB == 8);
   am = LibVEX_Alloc(sizeof(ARM64AMode));
   am->tag                 = ARM64am_RI12;
   am->ARM64am.RI12.reg    = reg;
   am->ARM64am.RI12.uimm12 = uimm12;
   am->ARM64am.RI12.szB    = szB;
   return am;
}
/* Construct a base-register + index-register amode. */
ARM64AMode* ARM64AMode_RR ( HReg base, HReg index ) {
   ARM64AMode* am    = LibVEX_Alloc(sizeof(ARM64AMode));
   am->tag           = ARM64am_RR;
   am->ARM64am.RR.base  = base;
   am->ARM64am.RR.index = index;
   return am;
}
/* Print amode |am| in "offset(regs)" style. */
static void ppARM64AMode ( ARM64AMode* am ) {
   if (am->tag == ARM64am_RI9) {
      vex_printf("%d(", am->ARM64am.RI9.simm9);
      ppHRegARM64(am->ARM64am.RI9.reg);
      vex_printf(")");
   }
   else if (am->tag == ARM64am_RI12) {
      /* The stored immediate is scaled; print the byte offset. */
      vex_printf("%u(", (UInt)am->ARM64am.RI12.szB
                        * (UInt)am->ARM64am.RI12.uimm12);
      ppHRegARM64(am->ARM64am.RI12.reg);
      vex_printf(")");
   }
   else if (am->tag == ARM64am_RR) {
      vex_printf("(");
      ppHRegARM64(am->ARM64am.RR.base);
      vex_printf(",");
      ppHRegARM64(am->ARM64am.RR.index);
      vex_printf(")");
   }
   else {
      vassert(0);
   }
}
/* Record, in |u|, the registers read by amode |am|. */
static void addRegUsage_ARM64AMode ( HRegUsage* u, ARM64AMode* am ) {
   if (am->tag == ARM64am_RI9) {
      addHRegUse(u, HRmRead, am->ARM64am.RI9.reg);
   }
   else if (am->tag == ARM64am_RI12) {
      addHRegUse(u, HRmRead, am->ARM64am.RI12.reg);
   }
   else if (am->tag == ARM64am_RR) {
      addHRegUse(u, HRmRead, am->ARM64am.RR.base);
      addHRegUse(u, HRmRead, am->ARM64am.RR.index);
   }
   else {
      vpanic("addRegUsage_ARM64Amode");
   }
}
/* Apply register remapping |m| to the registers in amode |am|. */
static void mapRegs_ARM64AMode ( HRegRemap* m, ARM64AMode* am ) {
   if (am->tag == ARM64am_RI9) {
      am->ARM64am.RI9.reg = lookupHRegRemap(m, am->ARM64am.RI9.reg);
   }
   else if (am->tag == ARM64am_RI12) {
      am->ARM64am.RI12.reg = lookupHRegRemap(m, am->ARM64am.RI12.reg);
   }
   else if (am->tag == ARM64am_RR) {
      am->ARM64am.RR.base  = lookupHRegRemap(m, am->ARM64am.RR.base);
      am->ARM64am.RR.index = lookupHRegRemap(m, am->ARM64am.RR.index);
   }
   else {
      vpanic("mapRegs_ARM64Amode");
   }
}
//ZZ /* --------- Mem AModes: Addressing Mode 2 --------- */
//ZZ
//ZZ ARMAMode2* ARMAMode2_RI ( HReg reg, Int simm9 ) {
//ZZ ARMAMode2* am = LibVEX_Alloc(sizeof(ARMAMode2));
//ZZ am->tag = ARMam2_RI;
//ZZ am->ARMam2.RI.reg = reg;
//ZZ am->ARMam2.RI.simm9 = simm9;
//ZZ vassert(-255 <= simm9 && simm9 <= 255);
//ZZ return am;
//ZZ }
//ZZ ARMAMode2* ARMAMode2_RR ( HReg base, HReg index ) {
//ZZ ARMAMode2* am = LibVEX_Alloc(sizeof(ARMAMode2));
//ZZ am->tag = ARMam2_RR;
//ZZ am->ARMam2.RR.base = base;
//ZZ am->ARMam2.RR.index = index;
//ZZ return am;
//ZZ }
//ZZ
//ZZ void ppARMAMode2 ( ARMAMode2* am ) {
//ZZ switch (am->tag) {
//ZZ case ARMam2_RI:
//ZZ vex_printf("%d(", am->ARMam2.RI.simm9);
//ZZ ppHRegARM(am->ARMam2.RI.reg);
//ZZ vex_printf(")");
//ZZ break;
//ZZ case ARMam2_RR:
//ZZ vex_printf("(");
//ZZ ppHRegARM(am->ARMam2.RR.base);
//ZZ vex_printf(",");
//ZZ ppHRegARM(am->ARMam2.RR.index);
//ZZ vex_printf(")");
//ZZ break;
//ZZ default:
//ZZ vassert(0);
//ZZ }
//ZZ }
//ZZ
//ZZ static void addRegUsage_ARMAMode2 ( HRegUsage* u, ARMAMode2* am ) {
//ZZ switch (am->tag) {
//ZZ case ARMam2_RI:
//ZZ addHRegUse(u, HRmRead, am->ARMam2.RI.reg);
//ZZ return;
//ZZ case ARMam2_RR:
//ZZ // addHRegUse(u, HRmRead, am->ARMam2.RR.base);
//ZZ // addHRegUse(u, HRmRead, am->ARMam2.RR.index);
//ZZ // return;
//ZZ default:
//ZZ vpanic("addRegUsage_ARMAmode2");
//ZZ }
//ZZ }
//ZZ
//ZZ static void mapRegs_ARMAMode2 ( HRegRemap* m, ARMAMode2* am ) {
//ZZ switch (am->tag) {
//ZZ case ARMam2_RI:
//ZZ am->ARMam2.RI.reg = lookupHRegRemap(m, am->ARMam2.RI.reg);
//ZZ return;
//ZZ case ARMam2_RR:
//ZZ //am->ARMam2.RR.base =lookupHRegRemap(m, am->ARMam2.RR.base);
//ZZ //am->ARMam2.RR.index = lookupHRegRemap(m, am->ARMam2.RR.index);
//ZZ //return;
//ZZ default:
//ZZ vpanic("mapRegs_ARMAmode2");
//ZZ }
//ZZ }
//ZZ
//ZZ
//ZZ /* --------- Mem AModes: Addressing Mode VFP --------- */
//ZZ
//ZZ ARMAModeV* mkARMAModeV ( HReg reg, Int simm11 ) {
//ZZ ARMAModeV* am = LibVEX_Alloc(sizeof(ARMAModeV));
//ZZ vassert(simm11 >= -1020 && simm11 <= 1020);
//ZZ vassert(0 == (simm11 & 3));
//ZZ am->reg = reg;
//ZZ am->simm11 = simm11;
//ZZ return am;
//ZZ }
//ZZ
//ZZ void ppARMAModeV ( ARMAModeV* am ) {
//ZZ vex_printf("%d(", am->simm11);
//ZZ ppHRegARM(am->reg);
//ZZ vex_printf(")");
//ZZ }
//ZZ
//ZZ static void addRegUsage_ARMAModeV ( HRegUsage* u, ARMAModeV* am ) {
//ZZ addHRegUse(u, HRmRead, am->reg);
//ZZ }
//ZZ
//ZZ static void mapRegs_ARMAModeV ( HRegRemap* m, ARMAModeV* am ) {
//ZZ am->reg = lookupHRegRemap(m, am->reg);
//ZZ }
//ZZ
//ZZ
//ZZ /* --------- Mem AModes: Addressing Mode Neon ------- */
//ZZ
//ZZ ARMAModeN *mkARMAModeN_RR ( HReg rN, HReg rM ) {
//ZZ ARMAModeN* am = LibVEX_Alloc(sizeof(ARMAModeN));
//ZZ am->tag = ARMamN_RR;
//ZZ am->ARMamN.RR.rN = rN;
//ZZ am->ARMamN.RR.rM = rM;
//ZZ return am;
//ZZ }
//ZZ
//ZZ ARMAModeN *mkARMAModeN_R ( HReg rN ) {
//ZZ ARMAModeN* am = LibVEX_Alloc(sizeof(ARMAModeN));
//ZZ am->tag = ARMamN_R;
//ZZ am->ARMamN.R.rN = rN;
//ZZ return am;
//ZZ }
//ZZ
//ZZ static void addRegUsage_ARMAModeN ( HRegUsage* u, ARMAModeN* am ) {
//ZZ if (am->tag == ARMamN_R) {
//ZZ addHRegUse(u, HRmRead, am->ARMamN.R.rN);
//ZZ } else {
//ZZ addHRegUse(u, HRmRead, am->ARMamN.RR.rN);
//ZZ addHRegUse(u, HRmRead, am->ARMamN.RR.rM);
//ZZ }
//ZZ }
//ZZ
//ZZ static void mapRegs_ARMAModeN ( HRegRemap* m, ARMAModeN* am ) {
//ZZ if (am->tag == ARMamN_R) {
//ZZ am->ARMamN.R.rN = lookupHRegRemap(m, am->ARMamN.R.rN);
//ZZ } else {
//ZZ am->ARMamN.RR.rN = lookupHRegRemap(m, am->ARMamN.RR.rN);
//ZZ am->ARMamN.RR.rM = lookupHRegRemap(m, am->ARMamN.RR.rM);
//ZZ }
//ZZ }
//ZZ
//ZZ void ppARMAModeN ( ARMAModeN* am ) {
//ZZ vex_printf("[");
//ZZ if (am->tag == ARMamN_R) {
//ZZ ppHRegARM(am->ARMamN.R.rN);
//ZZ } else {
//ZZ ppHRegARM(am->ARMamN.RR.rN);
//ZZ }
//ZZ vex_printf("]");
//ZZ if (am->tag == ARMamN_RR) {
//ZZ vex_printf(", ");
//ZZ ppHRegARM(am->ARMamN.RR.rM);
//ZZ }
//ZZ }
/* --------- Reg or uimm12<<{0,12} operands --------- */
/* Construct an immediate operand: uimm12 shifted left by 0 or 12. */
ARM64RIA* ARM64RIA_I12 ( UShort imm12, UChar shift ) {
   ARM64RIA* riA;
   vassert(imm12 < 4096);
   vassert(shift == 0 || shift == 12);
   riA = LibVEX_Alloc(sizeof(ARM64RIA));
   riA->tag                = ARM64riA_I12;
   riA->ARM64riA.I12.imm12 = imm12;
   riA->ARM64riA.I12.shift = shift;
   return riA;
}
/* Construct a register operand. */
ARM64RIA* ARM64RIA_R ( HReg reg ) {
   ARM64RIA* riA      = LibVEX_Alloc(sizeof(ARM64RIA));
   riA->tag           = ARM64riA_R;
   riA->ARM64riA.R.reg = reg;
   return riA;
}
/* Print reg-or-immediate operand |riA|; immediates are shown with
   the shift already applied. */
static void ppARM64RIA ( ARM64RIA* riA ) {
   if (riA->tag == ARM64riA_I12) {
      vex_printf("#%u",(UInt)(riA->ARM64riA.I12.imm12
                              << riA->ARM64riA.I12.shift));
   }
   else if (riA->tag == ARM64riA_R) {
      ppHRegARM64(riA->ARM64riA.R.reg);
   }
   else {
      vassert(0);
   }
}
/* Record, in |u|, the registers read by |riA| (none for the
   immediate form). */
static void addRegUsage_ARM64RIA ( HRegUsage* u, ARM64RIA* riA ) {
   if (riA->tag == ARM64riA_I12) {
      /* no registers */
   }
   else if (riA->tag == ARM64riA_R) {
      addHRegUse(u, HRmRead, riA->ARM64riA.R.reg);
   }
   else {
      vpanic("addRegUsage_ARM64RIA");
   }
}
/* Apply register remapping |m| to |riA|. */
static void mapRegs_ARM64RIA ( HRegRemap* m, ARM64RIA* riA ) {
   if (riA->tag == ARM64riA_I12) {
      /* nothing to remap */
   }
   else if (riA->tag == ARM64riA_R) {
      riA->ARM64riA.R.reg = lookupHRegRemap(m, riA->ARM64riA.R.reg);
   }
   else {
      vpanic("mapRegs_ARM64RIA");
   }
}
/* --------- Reg or "bitfield" (logic immediate) operands --------- */
/* Construct a logic-immediate operand in its encoded N/immR/immS
   form. */
ARM64RIL* ARM64RIL_I13 ( UChar bitN, UChar immR, UChar immS ) {
   ARM64RIL* riL;
   vassert(bitN < 2);
   vassert(immR < 64);
   vassert(immS < 64);
   riL = LibVEX_Alloc(sizeof(ARM64RIL));
   riL->tag               = ARM64riL_I13;
   riL->ARM64riL.I13.bitN = bitN;
   riL->ARM64riL.I13.immR = immR;
   riL->ARM64riL.I13.immS = immS;
   return riL;
}
/* Construct a register operand. */
ARM64RIL* ARM64RIL_R ( HReg reg ) {
   ARM64RIL* riL       = LibVEX_Alloc(sizeof(ARM64RIL));
   riL->tag            = ARM64riL_R;
   riL->ARM64riL.R.reg = reg;
   return riL;
}
/* Print reg-or-logic-immediate operand |riL|; immediates are shown
   in raw N/immR/immS form. */
static void ppARM64RIL ( ARM64RIL* riL ) {
   if (riL->tag == ARM64riL_I13) {
      vex_printf("#nrs(%u,%u,%u)",
                 (UInt)riL->ARM64riL.I13.bitN,
                 (UInt)riL->ARM64riL.I13.immR,
                 (UInt)riL->ARM64riL.I13.immS);
   }
   else if (riL->tag == ARM64riL_R) {
      ppHRegARM64(riL->ARM64riL.R.reg);
   }
   else {
      vassert(0);
   }
}
/* Record, in |u|, the registers read by |riL| (none for the
   immediate form). */
static void addRegUsage_ARM64RIL ( HRegUsage* u, ARM64RIL* riL ) {
   if (riL->tag == ARM64riL_I13) {
      /* no registers */
   }
   else if (riL->tag == ARM64riL_R) {
      addHRegUse(u, HRmRead, riL->ARM64riL.R.reg);
   }
   else {
      vpanic("addRegUsage_ARM64RIL");
   }
}
/* Apply register remapping |m| to |riL|. */
static void mapRegs_ARM64RIL ( HRegRemap* m, ARM64RIL* riL ) {
   if (riL->tag == ARM64riL_I13) {
      /* nothing to remap */
   }
   else if (riL->tag == ARM64riL_R) {
      riL->ARM64riL.R.reg = lookupHRegRemap(m, riL->ARM64riL.R.reg);
   }
   else {
      vpanic("mapRegs_ARM64RIL");
   }
}
/* --------------- Reg or uimm6 operands --------------- */
/* Construct a shift-amount immediate operand; must be 1 .. 63. */
ARM64RI6* ARM64RI6_I6 ( UInt imm6 ) {
   ARM64RI6* ri6;
   vassert(imm6 > 0 && imm6 < 64);
   ri6 = LibVEX_Alloc(sizeof(ARM64RI6));
   ri6->tag              = ARM64ri6_I6;
   ri6->ARM64ri6.I6.imm6 = imm6;
   return ri6;
}
/* Construct a register operand. */
ARM64RI6* ARM64RI6_R ( HReg reg ) {
   ARM64RI6* ri6       = LibVEX_Alloc(sizeof(ARM64RI6));
   ri6->tag            = ARM64ri6_R;
   ri6->ARM64ri6.R.reg = reg;
   return ri6;
}
/* Print reg-or-uimm6 operand |ri6|. */
static void ppARM64RI6 ( ARM64RI6* ri6 ) {
   if (ri6->tag == ARM64ri6_I6) {
      vex_printf("#%u", ri6->ARM64ri6.I6.imm6);
   }
   else if (ri6->tag == ARM64ri6_R) {
      ppHRegARM64(ri6->ARM64ri6.R.reg);
   }
   else {
      vassert(0);
   }
}
/* Record, in |u|, the registers read by |ri6| (none for the
   immediate form). */
static void addRegUsage_ARM64RI6 ( HRegUsage* u, ARM64RI6* ri6 ) {
   if (ri6->tag == ARM64ri6_I6) {
      /* no registers */
   }
   else if (ri6->tag == ARM64ri6_R) {
      addHRegUse(u, HRmRead, ri6->ARM64ri6.R.reg);
   }
   else {
      vpanic("addRegUsage_ARM64RI6");
   }
}
/* Apply register remapping |m| to |ri6|. */
static void mapRegs_ARM64RI6 ( HRegRemap* m, ARM64RI6* ri6 ) {
   if (ri6->tag == ARM64ri6_I6) {
      /* nothing to remap */
   }
   else if (ri6->tag == ARM64ri6_R) {
      ri6->ARM64ri6.R.reg = lookupHRegRemap(m, ri6->ARM64ri6.R.reg);
   }
   else {
      vpanic("mapRegs_ARM64RI6");
   }
}
//ZZ /* -------- Neon Immediate operatnd --------- */
//ZZ
//ZZ ARMNImm* ARMNImm_TI ( UInt type, UInt imm8 ) {
//ZZ ARMNImm* i = LibVEX_Alloc(sizeof(ARMNImm));
//ZZ i->type = type;
//ZZ i->imm8 = imm8;
//ZZ return i;
//ZZ }
//ZZ
//ZZ ULong ARMNImm_to_Imm64 ( ARMNImm* imm ) {
//ZZ int i, j;
//ZZ ULong y, x = imm->imm8;
//ZZ switch (imm->type) {
//ZZ case 3:
//ZZ x = x << 8; /* fallthrough */
//ZZ case 2:
//ZZ x = x << 8; /* fallthrough */
//ZZ case 1:
//ZZ x = x << 8; /* fallthrough */
//ZZ case 0:
//ZZ return (x << 32) | x;
//ZZ case 5:
//ZZ case 6:
//ZZ if (imm->type == 5)
//ZZ x = x << 8;
//ZZ else
//ZZ x = (x << 8) | x;
//ZZ /* fallthrough */
//ZZ case 4:
//ZZ x = (x << 16) | x;
//ZZ return (x << 32) | x;
//ZZ case 8:
//ZZ x = (x << 8) | 0xFF;
//ZZ /* fallthrough */
//ZZ case 7:
//ZZ x = (x << 8) | 0xFF;
//ZZ return (x << 32) | x;
//ZZ case 9:
//ZZ x = 0;
//ZZ for (i = 7; i >= 0; i--) {
//ZZ y = ((ULong)imm->imm8 >> i) & 1;
//ZZ for (j = 0; j < 8; j++) {
//ZZ x = (x << 1) | y;
//ZZ }
//ZZ }
//ZZ return x;
//ZZ case 10:
//ZZ x |= (x & 0x80) << 5;
//ZZ x |= (~x & 0x40) << 5;
//ZZ x &= 0x187F; /* 0001 1000 0111 1111 */
//ZZ x |= (x & 0x40) << 4;
//ZZ x |= (x & 0x40) << 3;
//ZZ x |= (x & 0x40) << 2;
//ZZ x |= (x & 0x40) << 1;
//ZZ x = x << 19;
//ZZ x = (x << 32) | x;
//ZZ return x;
//ZZ default:
//ZZ vpanic("ARMNImm_to_Imm64");
//ZZ }
//ZZ }
//ZZ
//ZZ ARMNImm* Imm64_to_ARMNImm ( ULong x ) {
//ZZ ARMNImm tmp;
//ZZ if ((x & 0xFFFFFFFF) == (x >> 32)) {
//ZZ if ((x & 0xFFFFFF00) == 0)
//ZZ return ARMNImm_TI(0, x & 0xFF);
//ZZ if ((x & 0xFFFF00FF) == 0)
//ZZ return ARMNImm_TI(1, (x >> 8) & 0xFF);
//ZZ if ((x & 0xFF00FFFF) == 0)
//ZZ return ARMNImm_TI(2, (x >> 16) & 0xFF);
//ZZ if ((x & 0x00FFFFFF) == 0)
//ZZ return ARMNImm_TI(3, (x >> 24) & 0xFF);
//ZZ if ((x & 0xFFFF00FF) == 0xFF)
//ZZ return ARMNImm_TI(7, (x >> 8) & 0xFF);
//ZZ if ((x & 0xFF00FFFF) == 0xFFFF)
//ZZ return ARMNImm_TI(8, (x >> 16) & 0xFF);
//ZZ if ((x & 0xFFFF) == ((x >> 16) & 0xFFFF)) {
//ZZ if ((x & 0xFF00) == 0)
//ZZ return ARMNImm_TI(4, x & 0xFF);
//ZZ if ((x & 0x00FF) == 0)
//ZZ return ARMNImm_TI(5, (x >> 8) & 0xFF);
//ZZ if ((x & 0xFF) == ((x >> 8) & 0xFF))
//ZZ return ARMNImm_TI(6, x & 0xFF);
//ZZ }
//ZZ if ((x & 0x7FFFF) == 0) {
//ZZ tmp.type = 10;
//ZZ tmp.imm8 = ((x >> 19) & 0x7F) | ((x >> 24) & 0x80);
//ZZ if (ARMNImm_to_Imm64(&tmp) == x)
//ZZ return ARMNImm_TI(tmp.type, tmp.imm8);
//ZZ }
//ZZ } else {
//ZZ /* This can only be type 9. */
//ZZ tmp.imm8 = (((x >> 56) & 1) << 7)
//ZZ | (((x >> 48) & 1) << 6)
//ZZ | (((x >> 40) & 1) << 5)
//ZZ | (((x >> 32) & 1) << 4)
//ZZ | (((x >> 24) & 1) << 3)
//ZZ | (((x >> 16) & 1) << 2)
//ZZ | (((x >> 8) & 1) << 1)
//ZZ | (((x >> 0) & 1) << 0);
//ZZ tmp.type = 9;
//ZZ if (ARMNImm_to_Imm64 (&tmp) == x)
//ZZ return ARMNImm_TI(tmp.type, tmp.imm8);
//ZZ }
//ZZ return NULL;
//ZZ }
//ZZ
//ZZ void ppARMNImm (ARMNImm* i) {
//ZZ ULong x = ARMNImm_to_Imm64(i);
//ZZ vex_printf("0x%llX%llX", x, x);
//ZZ }
//ZZ
//ZZ /* -- Register or scalar operand --- */
//ZZ
//ZZ ARMNRS* mkARMNRS(ARMNRS_tag tag, HReg reg, UInt index)
//ZZ {
//ZZ ARMNRS *p = LibVEX_Alloc(sizeof(ARMNRS));
//ZZ p->tag = tag;
//ZZ p->reg = reg;
//ZZ p->index = index;
//ZZ return p;
//ZZ }
//ZZ
//ZZ void ppARMNRS(ARMNRS *p)
//ZZ {
//ZZ ppHRegARM(p->reg);
//ZZ if (p->tag == ARMNRS_Scalar) {
//ZZ vex_printf("[%d]", p->index);
//ZZ }
//ZZ }
/* --------- Instructions. --------- */
/* Return the mnemonic for bitwise-logic op |op|. */
static const HChar* showARM64LogicOp ( ARM64LogicOp op ) {
   const HChar* nm = NULL;
   switch (op) {
      case ARM64lo_AND: nm = "and"; break;
      case ARM64lo_OR:  nm = "orr"; break;
      case ARM64lo_XOR: nm = "eor"; break;
      default: vpanic("showARM64LogicOp");
   }
   return nm;
}
/* Return the mnemonic for scalar shift op |op|. */
static const HChar* showARM64ShiftOp ( ARM64ShiftOp op ) {
   const HChar* nm = NULL;
   switch (op) {
      case ARM64sh_SHL: nm = "lsl"; break;
      case ARM64sh_SHR: nm = "lsr"; break;
      case ARM64sh_SAR: nm = "asr"; break;
      default: vpanic("showARM64ShiftOp");
   }
   return nm;
}
/* Return the mnemonic for integer unary op |op|. */
static const HChar* showARM64UnaryOp ( ARM64UnaryOp op ) {
   const HChar* nm = NULL;
   switch (op) {
      case ARM64un_NEG: nm = "neg"; break;
      case ARM64un_NOT: nm = "not"; break;
      case ARM64un_CLZ: nm = "clz"; break;
      default: vpanic("showARM64UnaryOp");
   }
   return nm;
}
/* Return the mnemonic for widening/plain multiply op |op|. */
static const HChar* showARM64MulOp ( ARM64MulOp op ) {
   const HChar* nm = NULL;
   switch (op) {
      case ARM64mul_PLAIN: nm = "mul "; break;
      case ARM64mul_ZX:    nm = "umulh"; break;
      case ARM64mul_SX:    nm = "smulh"; break;
      default: vpanic("showARM64MulOp");
   }
   return nm;
}
/* Classify int<->FP conversion op |op|: *syn gets 's' or 'u'
   (signedness of the integer side), *fszB the FP operand size in
   bytes, and *iszB the integer operand size in bytes.  Panics on an
   invalid op before writing any output. */
static void characteriseARM64CvtOp ( /*OUT*/HChar* syn,
                                     /*OUT*/UInt* fszB, /*OUT*/UInt* iszB,
                                     ARM64CvtOp op ) {
   /* Signedness of the integer side; this also validates |op|. */
   switch (op) {
      case ARM64cvt_F32_I32S: case ARM64cvt_F64_I32S:
      case ARM64cvt_F32_I64S: case ARM64cvt_F64_I64S:
         *syn = 's'; break;
      case ARM64cvt_F32_I32U: case ARM64cvt_F64_I32U:
      case ARM64cvt_F32_I64U: case ARM64cvt_F64_I64U:
         *syn = 'u'; break;
      default:
         vpanic("characteriseARM64CvtOp");
   }
   /* FP operand size: F32 cases are 4 bytes, F64 cases 8. */
   switch (op) {
      case ARM64cvt_F32_I32S: case ARM64cvt_F32_I64S:
      case ARM64cvt_F32_I32U: case ARM64cvt_F32_I64U:
         *fszB = 4; break;
      default:
         *fszB = 8; break;
   }
   /* Integer operand size: I32 cases are 4 bytes, I64 cases 8. */
   switch (op) {
      case ARM64cvt_F32_I32S: case ARM64cvt_F64_I32S:
      case ARM64cvt_F32_I32U: case ARM64cvt_F64_I32U:
         *iszB = 4; break;
      default:
         *iszB = 8; break;
   }
}
/* Return the mnemonic suffix for scalar FP binary op |op|. */
static const HChar* showARM64FpBinOp ( ARM64FpBinOp op ) {
   const HChar* nm = NULL;
   switch (op) {
      case ARM64fpb_ADD: nm = "add"; break;
      case ARM64fpb_SUB: nm = "sub"; break;
      case ARM64fpb_MUL: nm = "mul"; break;
      case ARM64fpb_DIV: nm = "div"; break;
      default: vpanic("showARM64FpBinOp");
   }
   return nm;
}
/* Return the mnemonic suffix for scalar FP unary op |op|. */
static const HChar* showARM64FpUnaryOp ( ARM64FpUnaryOp op ) {
   const HChar* nm = NULL;
   switch (op) {
      case ARM64fpu_NEG:  nm = "neg "; break;
      case ARM64fpu_ABS:  nm = "abs "; break;
      case ARM64fpu_SQRT: nm = "sqrt "; break;
      case ARM64fpu_RINT: nm = "rinti"; break;
      default: vpanic("showARM64FpUnaryOp");
   }
   return nm;
}
/* Produce the instruction mnemonic (*nm) and vector arrangement
   specifier (*ar) for vector binary op |op|, for use in instruction
   printing. */
static void showARM64VecBinOp(/*OUT*/const HChar** nm,
/*OUT*/const HChar** ar, ARM64VecBinOp op ) {
switch (op) {
/* integer add/sub/mul */
case ARM64vecb_ADD64x2: *nm = "add "; *ar = "2d"; return;
case ARM64vecb_ADD32x4: *nm = "add "; *ar = "4s"; return;
case ARM64vecb_ADD16x8: *nm = "add "; *ar = "8h"; return;
case ARM64vecb_ADD8x16: *nm = "add "; *ar = "16b"; return;
case ARM64vecb_SUB64x2: *nm = "sub "; *ar = "2d"; return;
case ARM64vecb_SUB32x4: *nm = "sub "; *ar = "4s"; return;
case ARM64vecb_SUB16x8: *nm = "sub "; *ar = "8h"; return;
case ARM64vecb_SUB8x16: *nm = "sub "; *ar = "16b"; return;
case ARM64vecb_MUL32x4: *nm = "mul "; *ar = "4s"; return;
case ARM64vecb_MUL16x8: *nm = "mul "; *ar = "8h"; return;
case ARM64vecb_MUL8x16: *nm = "mul "; *ar = "16b"; return;
/* floating point arithmetic */
case ARM64vecb_FADD64x2: *nm = "fadd "; *ar = "2d"; return;
case ARM64vecb_FSUB64x2: *nm = "fsub "; *ar = "2d"; return;
case ARM64vecb_FMUL64x2: *nm = "fmul "; *ar = "2d"; return;
case ARM64vecb_FDIV64x2: *nm = "fdiv "; *ar = "2d"; return;
case ARM64vecb_FADD32x4: *nm = "fadd "; *ar = "4s"; return;
case ARM64vecb_FSUB32x4: *nm = "fsub "; *ar = "4s"; return;
case ARM64vecb_FMUL32x4: *nm = "fmul "; *ar = "4s"; return;
case ARM64vecb_FDIV32x4: *nm = "fdiv "; *ar = "4s"; return;
/* integer min/max */
case ARM64vecb_UMAX32x4: *nm = "umax "; *ar = "4s"; return;
case ARM64vecb_UMAX16x8: *nm = "umax "; *ar = "8h"; return;
case ARM64vecb_UMAX8x16: *nm = "umax "; *ar = "16b"; return;
case ARM64vecb_UMIN32x4: *nm = "umin "; *ar = "4s"; return;
case ARM64vecb_UMIN16x8: *nm = "umin "; *ar = "8h"; return;
case ARM64vecb_UMIN8x16: *nm = "umin "; *ar = "16b"; return;
case ARM64vecb_SMAX32x4: *nm = "smax "; *ar = "4s"; return;
case ARM64vecb_SMAX16x8: *nm = "smax "; *ar = "8h"; return;
case ARM64vecb_SMAX8x16: *nm = "smax "; *ar = "16b"; return;
case ARM64vecb_SMIN32x4: *nm = "smin "; *ar = "4s"; return;
case ARM64vecb_SMIN16x8: *nm = "smin "; *ar = "8h"; return;
case ARM64vecb_SMIN8x16: *nm = "smin "; *ar = "16b"; return;
/* bitwise logic */
case ARM64vecb_AND: *nm = "and "; *ar = "16b"; return;
case ARM64vecb_ORR: *nm = "orr "; *ar = "16b"; return;
case ARM64vecb_XOR: *nm = "eor "; *ar = "16b"; return;
/* comparisons */
case ARM64vecb_CMEQ64x2: *nm = "cmeq "; *ar = "2d"; return;
case ARM64vecb_CMEQ32x4: *nm = "cmeq "; *ar = "4s"; return;
case ARM64vecb_CMEQ16x8: *nm = "cmeq "; *ar = "8h"; return;
case ARM64vecb_CMEQ8x16: *nm = "cmeq "; *ar = "16b"; return;
case ARM64vecb_CMHI64x2: *nm = "cmhi "; *ar = "2d"; return;
case ARM64vecb_CMHI32x4: *nm = "cmhi "; *ar = "4s"; return;
case ARM64vecb_CMHI16x8: *nm = "cmhi "; *ar = "8h"; return;
case ARM64vecb_CMHI8x16: *nm = "cmhi "; *ar = "16b"; return;
case ARM64vecb_CMGT64x2: *nm = "cmgt "; *ar = "2d"; return;
case ARM64vecb_CMGT32x4: *nm = "cmgt "; *ar = "4s"; return;
case ARM64vecb_CMGT16x8: *nm = "cmgt "; *ar = "8h"; return;
case ARM64vecb_CMGT8x16: *nm = "cmgt "; *ar = "16b"; return;
case ARM64vecb_FCMEQ64x2: *nm = "fcmeq"; *ar = "2d"; return;
case ARM64vecb_FCMEQ32x4: *nm = "fcmeq"; *ar = "4s"; return;
case ARM64vecb_FCMGE64x2: *nm = "fcmge"; *ar = "2d"; return;
case ARM64vecb_FCMGE32x4: *nm = "fcmge"; *ar = "4s"; return;
case ARM64vecb_FCMGT64x2: *nm = "fcmgt"; *ar = "2d"; return;
case ARM64vecb_FCMGT32x4: *nm = "fcmgt"; *ar = "4s"; return;
/* table lookup and permutes */
case ARM64vecb_TBL1: *nm = "tbl "; *ar = "16b"; return;
case ARM64vecb_UZP164x2: *nm = "uzp1 "; *ar = "2d"; return;
case ARM64vecb_UZP132x4: *nm = "uzp1 "; *ar = "4s"; return;
case ARM64vecb_UZP116x8: *nm = "uzp1 "; *ar = "8h"; return;
case ARM64vecb_UZP18x16: *nm = "uzp1 "; *ar = "16b"; return;
case ARM64vecb_UZP264x2: *nm = "uzp2 "; *ar = "2d"; return;
case ARM64vecb_UZP232x4: *nm = "uzp2 "; *ar = "4s"; return;
case ARM64vecb_UZP216x8: *nm = "uzp2 "; *ar = "8h"; return;
case ARM64vecb_UZP28x16: *nm = "uzp2 "; *ar = "16b"; return;
case ARM64vecb_ZIP132x4: *nm = "zip1 "; *ar = "4s"; return;
case ARM64vecb_ZIP116x8: *nm = "zip1 "; *ar = "8h"; return;
case ARM64vecb_ZIP18x16: *nm = "zip1 "; *ar = "16b"; return;
case ARM64vecb_ZIP232x4: *nm = "zip2 "; *ar = "4s"; return;
case ARM64vecb_ZIP216x8: *nm = "zip2 "; *ar = "8h"; return;
case ARM64vecb_ZIP28x16: *nm = "zip2 "; *ar = "16b"; return;
/* polynomial and widening multiplies */
case ARM64vecb_PMUL8x16: *nm = "pmul "; *ar = "16b"; return;
case ARM64vecb_PMULL8x8: *nm = "pmull"; *ar = "8hbb"; return;
case ARM64vecb_UMULL2DSS: *nm = "umull"; *ar = "2dss"; return;
case ARM64vecb_UMULL4SHH: *nm = "umull"; *ar = "4shh"; return;
case ARM64vecb_UMULL8HBB: *nm = "umull"; *ar = "8hbb"; return;
case ARM64vecb_SMULL2DSS: *nm = "smull"; *ar = "2dss"; return;
case ARM64vecb_SMULL4SHH: *nm = "smull"; *ar = "4shh"; return;
case ARM64vecb_SMULL8HBB: *nm = "smull"; *ar = "8hbb"; return;
/* saturating add/sub */
case ARM64vecb_SQADD64x2: *nm = "sqadd"; *ar = "2d"; return;
case ARM64vecb_SQADD32x4: *nm = "sqadd"; *ar = "4s"; return;
case ARM64vecb_SQADD16x8: *nm = "sqadd"; *ar = "8h"; return;
case ARM64vecb_SQADD8x16: *nm = "sqadd"; *ar = "16b"; return;
case ARM64vecb_UQADD64x2: *nm = "uqadd"; *ar = "2d"; return;
case ARM64vecb_UQADD32x4: *nm = "uqadd"; *ar = "4s"; return;
case ARM64vecb_UQADD16x8: *nm = "uqadd"; *ar = "8h"; return;
case ARM64vecb_UQADD8x16: *nm = "uqadd"; *ar = "16b"; return;
case ARM64vecb_SQSUB64x2: *nm = "sqsub"; *ar = "2d"; return;
case ARM64vecb_SQSUB32x4: *nm = "sqsub"; *ar = "4s"; return;
case ARM64vecb_SQSUB16x8: *nm = "sqsub"; *ar = "8h"; return;
case ARM64vecb_SQSUB8x16: *nm = "sqsub"; *ar = "16b"; return;
case ARM64vecb_UQSUB64x2: *nm = "uqsub"; *ar = "2d"; return;
case ARM64vecb_UQSUB32x4: *nm = "uqsub"; *ar = "4s"; return;
case ARM64vecb_UQSUB16x8: *nm = "uqsub"; *ar = "8h"; return;
case ARM64vecb_UQSUB8x16: *nm = "uqsub"; *ar = "16b"; return;
default: vpanic("showARM64VecBinOp");
}
}
/* Produce the instruction mnemonic (*nm) and vector arrangement
   specifier (*ar) for vector unary op |op|, for use in instruction
   printing. */
static void showARM64VecUnaryOp(/*OUT*/const HChar** nm,
/*OUT*/const HChar** ar, ARM64VecUnaryOp op )
{
switch (op) {
case ARM64vecu_FNEG64x2: *nm = "fneg "; *ar = "2d"; return;
case ARM64vecu_FNEG32x4: *nm = "fneg "; *ar = "4s"; return;
case ARM64vecu_FABS64x2: *nm = "fabs "; *ar = "2d"; return;
case ARM64vecu_FABS32x4: *nm = "fabs "; *ar = "4s"; return;
case ARM64vecu_NOT: *nm = "not "; *ar = "all"; return;
case ARM64vecu_ABS64x2: *nm = "abs "; *ar = "2d"; return;
case ARM64vecu_ABS32x4: *nm = "abs "; *ar = "4s"; return;
case ARM64vecu_ABS16x8: *nm = "abs "; *ar = "8h"; return;
case ARM64vecu_ABS8x16: *nm = "abs "; *ar = "16b"; return;
case ARM64vecu_CLS32x4: *nm = "cls "; *ar = "4s"; return;
case ARM64vecu_CLS16x8: *nm = "cls "; *ar = "8h"; return;
case ARM64vecu_CLS8x16: *nm = "cls "; *ar = "16b"; return;
case ARM64vecu_CLZ32x4: *nm = "clz "; *ar = "4s"; return;
case ARM64vecu_CLZ16x8: *nm = "clz "; *ar = "8h"; return;
case ARM64vecu_CLZ8x16: *nm = "clz "; *ar = "16b"; return;
case ARM64vecu_CNT8x16: *nm = "cnt "; *ar = "16b"; return;
case ARM64vecu_RBIT: *nm = "rbit "; *ar = "16b"; return;
case ARM64vecu_REV1616B: *nm = "rev16"; *ar = "16b"; return;
case ARM64vecu_REV3216B: *nm = "rev32"; *ar = "16b"; return;
case ARM64vecu_REV328H: *nm = "rev32"; *ar = "8h"; return;
case ARM64vecu_REV6416B: *nm = "rev64"; *ar = "16b"; return;
case ARM64vecu_REV648H: *nm = "rev64"; *ar = "8h"; return;
case ARM64vecu_REV644S: *nm = "rev64"; *ar = "4s"; return;
default: vpanic("showARM64VecUnaryOp");
}
}
/* Produce the instruction mnemonic (*nm) and vector arrangement
   specifier (*ar) for vector shift-by-immediate op |op|, for use in
   instruction printing.

   Fix: the panic message previously read "showARM64VecShiftImmOp",
   which does not match this function's actual name and would
   misdirect anyone chasing the assertion. */
static void showARM64VecShiftOp(/*OUT*/const HChar** nm,
                                /*OUT*/const HChar** ar,
                                ARM64VecShiftOp op )
{
   switch (op) {
      case ARM64vecsh_USHR64x2: *nm = "ushr "; *ar = "2d"; return;
      case ARM64vecsh_USHR32x4: *nm = "ushr "; *ar = "4s"; return;
      case ARM64vecsh_USHR16x8: *nm = "ushr "; *ar = "8h"; return;
      case ARM64vecsh_USHR8x16: *nm = "ushr "; *ar = "16b"; return;
      case ARM64vecsh_SSHR64x2: *nm = "sshr "; *ar = "2d"; return;
      case ARM64vecsh_SSHR32x4: *nm = "sshr "; *ar = "4s"; return;
      case ARM64vecsh_SSHR16x8: *nm = "sshr "; *ar = "8h"; return;
      case ARM64vecsh_SSHR8x16: *nm = "sshr "; *ar = "16b"; return;
      case ARM64vecsh_SHL64x2: *nm = "shl "; *ar = "2d"; return;
      case ARM64vecsh_SHL32x4: *nm = "shl "; *ar = "4s"; return;
      case ARM64vecsh_SHL16x8: *nm = "shl "; *ar = "8h"; return;
      case ARM64vecsh_SHL8x16: *nm = "shl "; *ar = "16b"; return;
      default: vpanic("showARM64VecShiftOp");
   }
}
//ZZ const HChar* showARMNeonBinOp ( ARMNeonBinOp op ) {
//ZZ switch (op) {
//ZZ case ARMneon_VAND: return "vand";
//ZZ case ARMneon_VORR: return "vorr";
//ZZ case ARMneon_VXOR: return "veor";
//ZZ case ARMneon_VADD: return "vadd";
//ZZ case ARMneon_VRHADDS: return "vrhadd";
//ZZ case ARMneon_VRHADDU: return "vrhadd";
//ZZ case ARMneon_VADDFP: return "vadd";
//ZZ case ARMneon_VPADDFP: return "vpadd";
//ZZ case ARMneon_VABDFP: return "vabd";
//ZZ case ARMneon_VSUB: return "vsub";
//ZZ case ARMneon_VSUBFP: return "vsub";
//ZZ case ARMneon_VMINU: return "vmin";
//ZZ case ARMneon_VMINS: return "vmin";
//ZZ case ARMneon_VMINF: return "vmin";
//ZZ case ARMneon_VMAXU: return "vmax";
//ZZ case ARMneon_VMAXS: return "vmax";
//ZZ case ARMneon_VMAXF: return "vmax";
//ZZ case ARMneon_VQADDU: return "vqadd";
//ZZ case ARMneon_VQADDS: return "vqadd";
//ZZ case ARMneon_VQSUBU: return "vqsub";
//ZZ case ARMneon_VQSUBS: return "vqsub";
//ZZ case ARMneon_VCGTU: return "vcgt";
//ZZ case ARMneon_VCGTS: return "vcgt";
//ZZ case ARMneon_VCGTF: return "vcgt";
//ZZ case ARMneon_VCGEF: return "vcgt";
//ZZ case ARMneon_VCGEU: return "vcge";
//ZZ case ARMneon_VCGES: return "vcge";
//ZZ case ARMneon_VCEQ: return "vceq";
//ZZ case ARMneon_VCEQF: return "vceq";
//ZZ case ARMneon_VPADD: return "vpadd";
//ZZ case ARMneon_VPMINU: return "vpmin";
//ZZ case ARMneon_VPMINS: return "vpmin";
//ZZ case ARMneon_VPMINF: return "vpmin";
//ZZ case ARMneon_VPMAXU: return "vpmax";
//ZZ case ARMneon_VPMAXS: return "vpmax";
//ZZ case ARMneon_VPMAXF: return "vpmax";
//ZZ case ARMneon_VEXT: return "vext";
//ZZ case ARMneon_VMUL: return "vmuli";
//ZZ case ARMneon_VMULLU: return "vmull";
//ZZ case ARMneon_VMULLS: return "vmull";
//ZZ case ARMneon_VMULP: return "vmul";
//ZZ case ARMneon_VMULFP: return "vmul";
//ZZ case ARMneon_VMULLP: return "vmul";
//ZZ case ARMneon_VQDMULH: return "vqdmulh";
//ZZ case ARMneon_VQRDMULH: return "vqrdmulh";
//ZZ case ARMneon_VQDMULL: return "vqdmull";
//ZZ case ARMneon_VTBL: return "vtbl";
//ZZ case ARMneon_VRECPS: return "vrecps";
//ZZ case ARMneon_VRSQRTS: return "vrecps";
//ZZ /* ... */
//ZZ default: vpanic("showARMNeonBinOp");
//ZZ }
//ZZ }
//ZZ
//ZZ const HChar* showARMNeonBinOpDataType ( ARMNeonBinOp op ) {
//ZZ switch (op) {
//ZZ case ARMneon_VAND:
//ZZ case ARMneon_VORR:
//ZZ case ARMneon_VXOR:
//ZZ return "";
//ZZ case ARMneon_VADD:
//ZZ case ARMneon_VSUB:
//ZZ case ARMneon_VEXT:
//ZZ case ARMneon_VMUL:
//ZZ case ARMneon_VPADD:
//ZZ case ARMneon_VTBL:
//ZZ case ARMneon_VCEQ:
//ZZ return ".i";
//ZZ case ARMneon_VRHADDU:
//ZZ case ARMneon_VMINU:
//ZZ case ARMneon_VMAXU:
//ZZ case ARMneon_VQADDU:
//ZZ case ARMneon_VQSUBU:
//ZZ case ARMneon_VCGTU:
//ZZ case ARMneon_VCGEU:
//ZZ case ARMneon_VMULLU:
//ZZ case ARMneon_VPMINU:
//ZZ case ARMneon_VPMAXU:
//ZZ return ".u";
//ZZ case ARMneon_VRHADDS:
//ZZ case ARMneon_VMINS:
//ZZ case ARMneon_VMAXS:
//ZZ case ARMneon_VQADDS:
//ZZ case ARMneon_VQSUBS:
//ZZ case ARMneon_VCGTS:
//ZZ case ARMneon_VCGES:
//ZZ case ARMneon_VQDMULL:
//ZZ case ARMneon_VMULLS:
//ZZ case ARMneon_VPMINS:
//ZZ case ARMneon_VPMAXS:
//ZZ case ARMneon_VQDMULH:
//ZZ case ARMneon_VQRDMULH:
//ZZ return ".s";
//ZZ case ARMneon_VMULP:
//ZZ case ARMneon_VMULLP:
//ZZ return ".p";
//ZZ case ARMneon_VADDFP:
//ZZ case ARMneon_VABDFP:
//ZZ case ARMneon_VPADDFP:
//ZZ case ARMneon_VSUBFP:
//ZZ case ARMneon_VMULFP:
//ZZ case ARMneon_VMINF:
//ZZ case ARMneon_VMAXF:
//ZZ case ARMneon_VPMINF:
//ZZ case ARMneon_VPMAXF:
//ZZ case ARMneon_VCGTF:
//ZZ case ARMneon_VCGEF:
//ZZ case ARMneon_VCEQF:
//ZZ case ARMneon_VRECPS:
//ZZ case ARMneon_VRSQRTS:
//ZZ return ".f";
//ZZ /* ... */
//ZZ default: vpanic("showARMNeonBinOpDataType");
//ZZ }
//ZZ }
//ZZ
//ZZ const HChar* showARMNeonUnOp ( ARMNeonUnOp op ) {
//ZZ switch (op) {
//ZZ case ARMneon_COPY: return "vmov";
//ZZ case ARMneon_COPYLS: return "vmov";
//ZZ case ARMneon_COPYLU: return "vmov";
//ZZ case ARMneon_COPYN: return "vmov";
//ZZ case ARMneon_COPYQNSS: return "vqmovn";
//ZZ case ARMneon_COPYQNUS: return "vqmovun";
//ZZ case ARMneon_COPYQNUU: return "vqmovn";
//ZZ case ARMneon_NOT: return "vmvn";
//ZZ case ARMneon_EQZ: return "vceq";
//ZZ case ARMneon_CNT: return "vcnt";
//ZZ case ARMneon_CLS: return "vcls";
//ZZ case ARMneon_CLZ: return "vclz";
//ZZ case ARMneon_DUP: return "vdup";
//ZZ case ARMneon_PADDLS: return "vpaddl";
//ZZ case ARMneon_PADDLU: return "vpaddl";
//ZZ case ARMneon_VQSHLNSS: return "vqshl";
//ZZ case ARMneon_VQSHLNUU: return "vqshl";
//ZZ case ARMneon_VQSHLNUS: return "vqshlu";
//ZZ case ARMneon_REV16: return "vrev16";
//ZZ case ARMneon_REV32: return "vrev32";
//ZZ case ARMneon_REV64: return "vrev64";
//ZZ case ARMneon_VCVTFtoU: return "vcvt";
//ZZ case ARMneon_VCVTFtoS: return "vcvt";
//ZZ case ARMneon_VCVTUtoF: return "vcvt";
//ZZ case ARMneon_VCVTStoF: return "vcvt";
//ZZ case ARMneon_VCVTFtoFixedU: return "vcvt";
//ZZ case ARMneon_VCVTFtoFixedS: return "vcvt";
//ZZ case ARMneon_VCVTFixedUtoF: return "vcvt";
//ZZ case ARMneon_VCVTFixedStoF: return "vcvt";
//ZZ case ARMneon_VCVTF32toF16: return "vcvt";
//ZZ case ARMneon_VCVTF16toF32: return "vcvt";
//ZZ case ARMneon_VRECIP: return "vrecip";
//ZZ case ARMneon_VRECIPF: return "vrecipf";
//ZZ case ARMneon_VNEGF: return "vneg";
//ZZ case ARMneon_ABS: return "vabs";
//ZZ case ARMneon_VABSFP: return "vabsfp";
//ZZ case ARMneon_VRSQRTEFP: return "vrsqrtefp";
//ZZ case ARMneon_VRSQRTE: return "vrsqrte";
//ZZ /* ... */
//ZZ default: vpanic("showARMNeonUnOp");
//ZZ }
//ZZ }
//ZZ
//ZZ const HChar* showARMNeonUnOpDataType ( ARMNeonUnOp op ) {
//ZZ switch (op) {
//ZZ case ARMneon_COPY:
//ZZ case ARMneon_NOT:
//ZZ return "";
//ZZ case ARMneon_COPYN:
//ZZ case ARMneon_EQZ:
//ZZ case ARMneon_CNT:
//ZZ case ARMneon_DUP:
//ZZ case ARMneon_REV16:
//ZZ case ARMneon_REV32:
//ZZ case ARMneon_REV64:
//ZZ return ".i";
//ZZ case ARMneon_COPYLU:
//ZZ case ARMneon_PADDLU:
//ZZ case ARMneon_COPYQNUU:
//ZZ case ARMneon_VQSHLNUU:
//ZZ case ARMneon_VRECIP:
//ZZ case ARMneon_VRSQRTE:
//ZZ return ".u";
//ZZ case ARMneon_CLS:
//ZZ case ARMneon_CLZ:
//ZZ case ARMneon_COPYLS:
//ZZ case ARMneon_PADDLS:
//ZZ case ARMneon_COPYQNSS:
//ZZ case ARMneon_COPYQNUS:
//ZZ case ARMneon_VQSHLNSS:
//ZZ case ARMneon_VQSHLNUS:
//ZZ case ARMneon_ABS:
//ZZ return ".s";
//ZZ case ARMneon_VRECIPF:
//ZZ case ARMneon_VNEGF:
//ZZ case ARMneon_VABSFP:
//ZZ case ARMneon_VRSQRTEFP:
//ZZ return ".f";
//ZZ case ARMneon_VCVTFtoU: return ".u32.f32";
//ZZ case ARMneon_VCVTFtoS: return ".s32.f32";
//ZZ case ARMneon_VCVTUtoF: return ".f32.u32";
//ZZ case ARMneon_VCVTStoF: return ".f32.s32";
//ZZ case ARMneon_VCVTF16toF32: return ".f32.f16";
//ZZ case ARMneon_VCVTF32toF16: return ".f16.f32";
//ZZ case ARMneon_VCVTFtoFixedU: return ".u32.f32";
//ZZ case ARMneon_VCVTFtoFixedS: return ".s32.f32";
//ZZ case ARMneon_VCVTFixedUtoF: return ".f32.u32";
//ZZ case ARMneon_VCVTFixedStoF: return ".f32.s32";
//ZZ /* ... */
//ZZ default: vpanic("showARMNeonUnOpDataType");
//ZZ }
//ZZ }
//ZZ
//ZZ const HChar* showARMNeonUnOpS ( ARMNeonUnOpS op ) {
//ZZ switch (op) {
//ZZ case ARMneon_SETELEM: return "vmov";
//ZZ case ARMneon_GETELEMU: return "vmov";
//ZZ case ARMneon_GETELEMS: return "vmov";
//ZZ case ARMneon_VDUP: return "vdup";
//ZZ /* ... */
//ZZ default: vpanic("showARMNeonUnarySOp");
//ZZ }
//ZZ }
//ZZ
//ZZ const HChar* showARMNeonUnOpSDataType ( ARMNeonUnOpS op ) {
//ZZ switch (op) {
//ZZ case ARMneon_SETELEM:
//ZZ case ARMneon_VDUP:
//ZZ return ".i";
//ZZ case ARMneon_GETELEMS:
//ZZ return ".s";
//ZZ case ARMneon_GETELEMU:
//ZZ return ".u";
//ZZ /* ... */
//ZZ default: vpanic("showARMNeonUnarySOp");
//ZZ }
//ZZ }
//ZZ
//ZZ const HChar* showARMNeonShiftOp ( ARMNeonShiftOp op ) {
//ZZ switch (op) {
//ZZ case ARMneon_VSHL: return "vshl";
//ZZ case ARMneon_VSAL: return "vshl";
//ZZ case ARMneon_VQSHL: return "vqshl";
//ZZ case ARMneon_VQSAL: return "vqshl";
//ZZ /* ... */
//ZZ default: vpanic("showARMNeonShiftOp");
//ZZ }
//ZZ }
//ZZ
//ZZ const HChar* showARMNeonShiftOpDataType ( ARMNeonShiftOp op ) {
//ZZ switch (op) {
//ZZ case ARMneon_VSHL:
//ZZ case ARMneon_VQSHL:
//ZZ return ".u";
//ZZ case ARMneon_VSAL:
//ZZ case ARMneon_VQSAL:
//ZZ return ".s";
//ZZ /* ... */
//ZZ default: vpanic("showARMNeonShiftOpDataType");
//ZZ }
//ZZ }
//ZZ
//ZZ const HChar* showARMNeonDualOp ( ARMNeonDualOp op ) {
//ZZ switch (op) {
//ZZ case ARMneon_TRN: return "vtrn";
//ZZ case ARMneon_ZIP: return "vzip";
//ZZ case ARMneon_UZP: return "vuzp";
//ZZ /* ... */
//ZZ default: vpanic("showARMNeonDualOp");
//ZZ }
//ZZ }
//ZZ
//ZZ const HChar* showARMNeonDualOpDataType ( ARMNeonDualOp op ) {
//ZZ switch (op) {
//ZZ case ARMneon_TRN:
//ZZ case ARMneon_ZIP:
//ZZ case ARMneon_UZP:
//ZZ return "i";
//ZZ /* ... */
//ZZ default: vpanic("showARMNeonDualOp");
//ZZ }
//ZZ }
//ZZ
//ZZ static const HChar* showARMNeonDataSize_wrk ( UInt size )
//ZZ {
//ZZ switch (size) {
//ZZ case 0: return "8";
//ZZ case 1: return "16";
//ZZ case 2: return "32";
//ZZ case 3: return "64";
//ZZ default: vpanic("showARMNeonDataSize");
//ZZ }
//ZZ }
//ZZ
//ZZ static const HChar* showARMNeonDataSize ( ARMInstr* i )
//ZZ {
//ZZ switch (i->tag) {
//ZZ case ARMin_NBinary:
//ZZ if (i->ARMin.NBinary.op == ARMneon_VEXT)
//ZZ return "8";
//ZZ if (i->ARMin.NBinary.op == ARMneon_VAND ||
//ZZ i->ARMin.NBinary.op == ARMneon_VORR ||
//ZZ i->ARMin.NBinary.op == ARMneon_VXOR)
//ZZ return "";
//ZZ return showARMNeonDataSize_wrk(i->ARMin.NBinary.size);
//ZZ case ARMin_NUnary:
//ZZ if (i->ARMin.NUnary.op == ARMneon_COPY ||
//ZZ i->ARMin.NUnary.op == ARMneon_NOT ||
//ZZ i->ARMin.NUnary.op == ARMneon_VCVTF32toF16||
//ZZ i->ARMin.NUnary.op == ARMneon_VCVTF16toF32||
//ZZ i->ARMin.NUnary.op == ARMneon_VCVTFtoFixedS ||
//ZZ i->ARMin.NUnary.op == ARMneon_VCVTFtoFixedU ||
//ZZ i->ARMin.NUnary.op == ARMneon_VCVTFixedStoF ||
//ZZ i->ARMin.NUnary.op == ARMneon_VCVTFixedUtoF ||
//ZZ i->ARMin.NUnary.op == ARMneon_VCVTFtoS ||
//ZZ i->ARMin.NUnary.op == ARMneon_VCVTFtoU ||
//ZZ i->ARMin.NUnary.op == ARMneon_VCVTStoF ||
//ZZ i->ARMin.NUnary.op == ARMneon_VCVTUtoF)
//ZZ return "";
//ZZ if (i->ARMin.NUnary.op == ARMneon_VQSHLNSS ||
//ZZ i->ARMin.NUnary.op == ARMneon_VQSHLNUU ||
//ZZ i->ARMin.NUnary.op == ARMneon_VQSHLNUS) {
//ZZ UInt size;
//ZZ size = i->ARMin.NUnary.size;
//ZZ if (size & 0x40)
//ZZ return "64";
//ZZ if (size & 0x20)
//ZZ return "32";
//ZZ if (size & 0x10)
//ZZ return "16";
//ZZ if (size & 0x08)
//ZZ return "8";
//ZZ vpanic("showARMNeonDataSize");
//ZZ }
//ZZ return showARMNeonDataSize_wrk(i->ARMin.NUnary.size);
//ZZ case ARMin_NUnaryS:
//ZZ if (i->ARMin.NUnaryS.op == ARMneon_VDUP) {
//ZZ int size;
//ZZ size = i->ARMin.NUnaryS.size;
//ZZ if ((size & 1) == 1)
//ZZ return "8";
//ZZ if ((size & 3) == 2)
//ZZ return "16";
//ZZ if ((size & 7) == 4)
//ZZ return "32";
//ZZ vpanic("showARMNeonDataSize");
//ZZ }
//ZZ return showARMNeonDataSize_wrk(i->ARMin.NUnaryS.size);
//ZZ case ARMin_NShift:
//ZZ return showARMNeonDataSize_wrk(i->ARMin.NShift.size);
//ZZ case ARMin_NDual:
//ZZ return showARMNeonDataSize_wrk(i->ARMin.NDual.size);
//ZZ default:
//ZZ vpanic("showARMNeonDataSize");
//ZZ }
//ZZ }
/* dst = argL + argR (isAdd) or argL - argR. */
ARM64Instr* ARM64Instr_Arith ( HReg dst,
                               HReg argL, ARM64RIA* argR, Bool isAdd ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                 = ARM64in_Arith;
   insn->ARM64in.Arith.isAdd = isAdd;
   insn->ARM64in.Arith.dst   = dst;
   insn->ARM64in.Arith.argL  = argL;
   insn->ARM64in.Arith.argR  = argR;
   return insn;
}
/* Compare argL against argR; is64 selects 64- vs 32-bit compare. */
ARM64Instr* ARM64Instr_Cmp ( HReg argL, ARM64RIA* argR, Bool is64 ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag               = ARM64in_Cmp;
   insn->ARM64in.Cmp.is64  = is64;
   insn->ARM64in.Cmp.argL  = argL;
   insn->ARM64in.Cmp.argR  = argR;
   return insn;
}
/* dst = argL <op> argR for a bitwise logic op. */
ARM64Instr* ARM64Instr_Logic ( HReg dst,
                               HReg argL, ARM64RIL* argR, ARM64LogicOp op ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                = ARM64in_Logic;
   insn->ARM64in.Logic.op   = op;
   insn->ARM64in.Logic.dst  = dst;
   insn->ARM64in.Logic.argL = argL;
   insn->ARM64in.Logic.argR = argR;
   return insn;
}
/* Flag-setting test of argL against immediate/logical operand argR. */
ARM64Instr* ARM64Instr_Test ( HReg argL, ARM64RIL* argR ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag               = ARM64in_Test;
   insn->ARM64in.Test.argL = argL;
   insn->ARM64in.Test.argR = argR;
   return insn;
}
/* dst = argL shifted by argR, per the given shift op. */
ARM64Instr* ARM64Instr_Shift ( HReg dst,
                               HReg argL, ARM64RI6* argR, ARM64ShiftOp op ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                = ARM64in_Shift;
   insn->ARM64in.Shift.op   = op;
   insn->ARM64in.Shift.dst  = dst;
   insn->ARM64in.Shift.argL = argL;
   insn->ARM64in.Shift.argR = argR;
   return insn;
}
/* dst = <unary op>(src). */
ARM64Instr* ARM64Instr_Unary ( HReg dst, HReg src, ARM64UnaryOp op ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag               = ARM64in_Unary;
   insn->ARM64in.Unary.op  = op;
   insn->ARM64in.Unary.dst = dst;
   insn->ARM64in.Unary.src = src;
   return insn;
}
/* Integer register-to-register move; both regs must be 64-bit int. */
ARM64Instr* ARM64Instr_MovI ( HReg dst, HReg src ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   /* Validate classes up front, before filling in the fields. */
   vassert(hregClass(src) == HRcInt64);
   vassert(hregClass(dst) == HRcInt64);
   insn->tag              = ARM64in_MovI;
   insn->ARM64in.MovI.dst = dst;
   insn->ARM64in.MovI.src = src;
   return insn;
}
/* Load a full 64-bit immediate into dst. */
ARM64Instr* ARM64Instr_Imm64 ( HReg dst, ULong imm64 ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                 = ARM64in_Imm64;
   insn->ARM64in.Imm64.imm64 = imm64;
   insn->ARM64in.Imm64.dst   = dst;
   return insn;
}
/* 64-bit integer load (isLoad) or store of rD at amode. */
ARM64Instr* ARM64Instr_LdSt64 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                   = ARM64in_LdSt64;
   insn->ARM64in.LdSt64.isLoad = isLoad;
   insn->ARM64in.LdSt64.amode  = amode;
   insn->ARM64in.LdSt64.rD     = rD;
   return insn;
}
/* 32-bit integer load (isLoad) or store of rD at amode. */
ARM64Instr* ARM64Instr_LdSt32 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                   = ARM64in_LdSt32;
   insn->ARM64in.LdSt32.isLoad = isLoad;
   insn->ARM64in.LdSt32.amode  = amode;
   insn->ARM64in.LdSt32.rD     = rD;
   return insn;
}
/* 16-bit integer load (isLoad) or store of rD at amode. */
ARM64Instr* ARM64Instr_LdSt16 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                   = ARM64in_LdSt16;
   insn->ARM64in.LdSt16.isLoad = isLoad;
   insn->ARM64in.LdSt16.amode  = amode;
   insn->ARM64in.LdSt16.rD     = rD;
   return insn;
}
/* 8-bit integer load (isLoad) or store of rD at amode. */
ARM64Instr* ARM64Instr_LdSt8 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                  = ARM64in_LdSt8;
   insn->ARM64in.LdSt8.isLoad = isLoad;
   insn->ARM64in.LdSt8.amode  = amode;
   insn->ARM64in.LdSt8.rD     = rD;
   return insn;
}
/* Conditional direct transfer to known guest address dstGA, storing
   the new guest PC at amPC; toFastEP selects the fast entry point. */
ARM64Instr* ARM64Instr_XDirect ( Addr64 dstGA, ARM64AMode* amPC,
                                 ARM64CondCode cond, Bool toFastEP ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                      = ARM64in_XDirect;
   insn->ARM64in.XDirect.toFastEP = toFastEP;
   insn->ARM64in.XDirect.cond     = cond;
   insn->ARM64in.XDirect.dstGA    = dstGA;
   insn->ARM64in.XDirect.amPC     = amPC;
   return insn;
}
/* Conditional indirect transfer: guest PC taken from register dstGA
   and stored at amPC. */
ARM64Instr* ARM64Instr_XIndir ( HReg dstGA, ARM64AMode* amPC,
                                ARM64CondCode cond ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                  = ARM64in_XIndir;
   insn->ARM64in.XIndir.cond  = cond;
   insn->ARM64in.XIndir.dstGA = dstGA;
   insn->ARM64in.XIndir.amPC  = amPC;
   return insn;
}
/* Conditional assisted transfer: like XIndir, but also records the
   jump kind jk for the run-time assistance routine. */
ARM64Instr* ARM64Instr_XAssisted ( HReg dstGA, ARM64AMode* amPC,
                                   ARM64CondCode cond, IRJumpKind jk ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                     = ARM64in_XAssisted;
   insn->ARM64in.XAssisted.jk    = jk;
   insn->ARM64in.XAssisted.cond  = cond;
   insn->ARM64in.XAssisted.dstGA = dstGA;
   insn->ARM64in.XAssisted.amPC  = amPC;
   return insn;
}
/* Conditional select: dst = cond ? argL : argR. */
ARM64Instr* ARM64Instr_CSel ( HReg dst, HReg argL, HReg argR,
                              ARM64CondCode cond ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag               = ARM64in_CSel;
   insn->ARM64in.CSel.cond = cond;
   insn->ARM64in.CSel.dst  = dst;
   insn->ARM64in.CSel.argL = argL;
   insn->ARM64in.CSel.argR = argR;
   return insn;
}
/* Conditional call to a fixed target, with nArgRegs argument
   registers in use and return-value location rloc. */
ARM64Instr* ARM64Instr_Call ( ARM64CondCode cond, HWord target, Int nArgRegs,
                              RetLoc rloc ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   /* Reject malformed return locations before building the insn. */
   vassert(is_sane_RetLoc(rloc));
   insn->tag                   = ARM64in_Call;
   insn->ARM64in.Call.cond     = cond;
   insn->ARM64in.Call.target   = target;
   insn->ARM64in.Call.nArgRegs = nArgRegs;
   insn->ARM64in.Call.rloc     = rloc;
   return insn;
}
/* Add a small signed constant to the stack pointer.  simm must be
   16-aligned and strictly within (-4096, 4096).
   Note: dropped the redundant `extern` on this definition -- external
   linkage is the default for function definitions, and no other
   constructor in this file carries it. */
ARM64Instr* ARM64Instr_AddToSP ( Int simm ) {
   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
   vassert(-4096 < simm && simm < 4096);
   vassert(0 == (simm & 0xF));
   i->tag = ARM64in_AddToSP;
   i->ARM64in.AddToSP.simm = simm;
   return i;
}
/* Copy the stack pointer into dst.
   Note: dropped the redundant `extern` on this definition, for
   consistency with the other constructors in this file. */
ARM64Instr* ARM64Instr_FromSP ( HReg dst ) {
   ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
   i->tag = ARM64in_FromSP;
   i->ARM64in.FromSP.dst = dst;
   return i;
}
/* dst = argL * argR, variant selected by op. */
ARM64Instr* ARM64Instr_Mul ( HReg dst, HReg argL, HReg argR,
                             ARM64MulOp op ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag              = ARM64in_Mul;
   insn->ARM64in.Mul.op   = op;
   insn->ARM64in.Mul.dst  = dst;
   insn->ARM64in.Mul.argL = argL;
   insn->ARM64in.Mul.argR = argR;
   return insn;
}
/* Load-exclusive of szB bytes (1, 2, 4 or 8). */
ARM64Instr* ARM64Instr_LdrEX ( Int szB ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
   insn->tag                = ARM64in_LdrEX;
   insn->ARM64in.LdrEX.szB  = szB;
   return insn;
}
/* Store-exclusive of szB bytes (1, 2, 4 or 8). */
ARM64Instr* ARM64Instr_StrEX ( Int szB ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
   insn->tag                = ARM64in_StrEX;
   insn->ARM64in.StrEX.szB  = szB;
   return insn;
}
/* Full memory fence; carries no operands. */
ARM64Instr* ARM64Instr_MFence ( void ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag = ARM64in_MFence;
   return insn;
}
//ZZ ARM64Instr* ARM64Instr_CLREX( void ) {
//ZZ ARM64Instr* i = LibVEX_Alloc(sizeof(ARM64Instr));
//ZZ i->tag = ARM64in_CLREX;
//ZZ return i;
//ZZ }
/* 32-bit FP load/store of sD at rN + uimm12; the offset must be
   4-aligned and below 16384. */
ARM64Instr* ARM64Instr_VLdStS ( Bool isLoad, HReg sD, HReg rN, UInt uimm12 ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   vassert(uimm12 < 16384 && 0 == (uimm12 & 3));
   insn->tag                   = ARM64in_VLdStS;
   insn->ARM64in.VLdStS.isLoad = isLoad;
   insn->ARM64in.VLdStS.uimm12 = uimm12;
   insn->ARM64in.VLdStS.sD     = sD;
   insn->ARM64in.VLdStS.rN     = rN;
   return insn;
}
/* 64-bit FP load/store of dD at rN + uimm12; the offset must be
   8-aligned and below 32768. */
ARM64Instr* ARM64Instr_VLdStD ( Bool isLoad, HReg dD, HReg rN, UInt uimm12 ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   vassert(uimm12 < 32768 && 0 == (uimm12 & 7));
   insn->tag                   = ARM64in_VLdStD;
   insn->ARM64in.VLdStD.isLoad = isLoad;
   insn->ARM64in.VLdStD.uimm12 = uimm12;
   insn->ARM64in.VLdStD.dD     = dD;
   insn->ARM64in.VLdStD.rN     = rN;
   return insn;
}
/* 128-bit vector load/store of rQ at address register rN. */
ARM64Instr* ARM64Instr_VLdStQ ( Bool isLoad, HReg rQ, HReg rN ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                   = ARM64in_VLdStQ;
   insn->ARM64in.VLdStQ.isLoad = isLoad;
   insn->ARM64in.VLdStQ.rN     = rN;
   insn->ARM64in.VLdStQ.rQ     = rQ;
   return insn;
}
/* Integer-to-float conversion rD = convert(rS), per `how`. */
ARM64Instr* ARM64Instr_VCvtI2F ( ARM64CvtOp how, HReg rD, HReg rS ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                 = ARM64in_VCvtI2F;
   insn->ARM64in.VCvtI2F.how = how;
   insn->ARM64in.VCvtI2F.rS  = rS;
   insn->ARM64in.VCvtI2F.rD  = rD;
   return insn;
}
/* Float-to-integer conversion rD = convert(rS), per `how`.  armRM is
   the 2-bit ARM-encoded rounding mode (must be <= 3). */
ARM64Instr* ARM64Instr_VCvtF2I ( ARM64CvtOp how, HReg rD, HReg rS,
                                 UChar armRM ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   vassert(armRM <= 3);
   insn->tag                   = ARM64in_VCvtF2I;
   insn->ARM64in.VCvtF2I.how   = how;
   insn->ARM64in.VCvtF2I.armRM = armRM;
   insn->ARM64in.VCvtF2I.rS    = rS;
   insn->ARM64in.VCvtF2I.rD    = rD;
   return insn;
}
/* FP precision conversion: single->double when sToD, else
   double->single. */
ARM64Instr* ARM64Instr_VCvtSD ( Bool sToD, HReg dst, HReg src ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                 = ARM64in_VCvtSD;
   insn->ARM64in.VCvtSD.sToD = sToD;
   insn->ARM64in.VCvtSD.src  = src;
   insn->ARM64in.VCvtSD.dst  = dst;
   return insn;
}
/* Double-precision FP unary op: dst = op(src). */
ARM64Instr* ARM64Instr_VUnaryD ( ARM64FpUnaryOp op, HReg dst, HReg src ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                 = ARM64in_VUnaryD;
   insn->ARM64in.VUnaryD.op  = op;
   insn->ARM64in.VUnaryD.src = src;
   insn->ARM64in.VUnaryD.dst = dst;
   return insn;
}
/* Single-precision FP unary op: dst = op(src). */
ARM64Instr* ARM64Instr_VUnaryS ( ARM64FpUnaryOp op, HReg dst, HReg src ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                 = ARM64in_VUnaryS;
   insn->ARM64in.VUnaryS.op  = op;
   insn->ARM64in.VUnaryS.src = src;
   insn->ARM64in.VUnaryS.dst = dst;
   return insn;
}
/* Double-precision FP binary op: dst = argL op argR. */
ARM64Instr* ARM64Instr_VBinD ( ARM64FpBinOp op,
                               HReg dst, HReg argL, HReg argR ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                = ARM64in_VBinD;
   insn->ARM64in.VBinD.op   = op;
   insn->ARM64in.VBinD.argL = argL;
   insn->ARM64in.VBinD.argR = argR;
   insn->ARM64in.VBinD.dst  = dst;
   return insn;
}
/* Single-precision FP binary op: dst = argL op argR. */
ARM64Instr* ARM64Instr_VBinS ( ARM64FpBinOp op,
                               HReg dst, HReg argL, HReg argR ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                = ARM64in_VBinS;
   insn->ARM64in.VBinS.op   = op;
   insn->ARM64in.VBinS.argL = argL;
   insn->ARM64in.VBinS.argR = argR;
   insn->ARM64in.VBinS.dst  = dst;
   return insn;
}
/* Double-precision FP compare of argL against argR. */
ARM64Instr* ARM64Instr_VCmpD ( HReg argL, HReg argR ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                = ARM64in_VCmpD;
   insn->ARM64in.VCmpD.argR = argR;
   insn->ARM64in.VCmpD.argL = argL;
   return insn;
}
/* Single-precision FP compare of argL against argR. */
ARM64Instr* ARM64Instr_VCmpS ( HReg argL, HReg argR ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                = ARM64in_VCmpS;
   insn->ARM64in.VCmpS.argR = argR;
   insn->ARM64in.VCmpS.argL = argL;
   return insn;
}
/* Move between FPCR and integer register iReg; direction given by
   toFPCR. */
ARM64Instr* ARM64Instr_FPCR ( Bool toFPCR, HReg iReg ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                  = ARM64in_FPCR;
   insn->ARM64in.FPCR.toFPCR  = toFPCR;
   insn->ARM64in.FPCR.iReg    = iReg;
   return insn;
}
/* Vector binary op: dst = argL op argR. */
ARM64Instr* ARM64Instr_VBinV ( ARM64VecBinOp op,
                               HReg dst, HReg argL, HReg argR ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                = ARM64in_VBinV;
   insn->ARM64in.VBinV.op   = op;
   insn->ARM64in.VBinV.argL = argL;
   insn->ARM64in.VBinV.argR = argR;
   insn->ARM64in.VBinV.dst  = dst;
   return insn;
}
/* Vector unary op: dst = op(arg). */
ARM64Instr* ARM64Instr_VUnaryV ( ARM64VecUnaryOp op, HReg dst, HReg arg ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                 = ARM64in_VUnaryV;
   insn->ARM64in.VUnaryV.op  = op;
   insn->ARM64in.VUnaryV.arg = arg;
   insn->ARM64in.VUnaryV.dst = dst;
   return insn;
}
/* Vector narrowing op; dszBlg2 is log2 of the destination lane size
   in bytes and must be 0, 1 or 2. */
ARM64Instr* ARM64Instr_VNarrowV ( UInt dszBlg2, HReg dst, HReg src ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   vassert(dszBlg2 == 0 || dszBlg2 == 1 || dszBlg2 == 2);
   insn->tag                      = ARM64in_VNarrowV;
   insn->ARM64in.VNarrowV.dszBlg2 = dszBlg2;
   insn->ARM64in.VNarrowV.src     = src;
   insn->ARM64in.VNarrowV.dst     = dst;
   return insn;
}
/* Vector shift by immediate: dst = src shifted by amt.  amt must lie
   in 1 .. (lane width in bits - 1) for the chosen op. */
ARM64Instr* ARM64Instr_VShiftImmV ( ARM64VecShiftOp op,
                                    HReg dst, HReg src, UInt amt ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                  = ARM64in_VShiftImmV;
   insn->ARM64in.VShiftImmV.op  = op;
   insn->ARM64in.VShiftImmV.amt = amt;
   insn->ARM64in.VShiftImmV.src = src;
   insn->ARM64in.VShiftImmV.dst = dst;
   /* Determine the largest legal shift for this op's lane size. */
   UInt laneMax = 0;
   switch (op) {
      case ARM64vecsh_USHR64x2: case ARM64vecsh_SSHR64x2:
      case ARM64vecsh_SHL64x2:
         laneMax = 63; break;
      case ARM64vecsh_USHR32x4: case ARM64vecsh_SSHR32x4:
      case ARM64vecsh_SHL32x4:
         laneMax = 31; break;
      case ARM64vecsh_USHR16x8: case ARM64vecsh_SSHR16x8:
      case ARM64vecsh_SHL16x8:
         laneMax = 15; break;
      case ARM64vecsh_USHR8x16: case ARM64vecsh_SSHR8x16:
      case ARM64vecsh_SHL8x16:
         laneMax = 7; break;
      default:
         vassert(0);
   }
   vassert(laneMax > 0);
   vassert(amt > 0 && amt <= laneMax);
   return insn;
}
/* Vector extract: dst is built from srcLo/srcHi with byte offset
   amtB, which must lie in 1..15. */
ARM64Instr* ARM64Instr_VExtV ( HReg dst, HReg srcLo, HReg srcHi, UInt amtB ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   vassert(amtB >= 1 && amtB <= 15);
   insn->tag                  = ARM64in_VExtV;
   insn->ARM64in.VExtV.amtB   = amtB;
   insn->ARM64in.VExtV.srcLo  = srcLo;
   insn->ARM64in.VExtV.srcHi  = srcHi;
   insn->ARM64in.VExtV.dst    = dst;
   return insn;
}
//ZZ ARMInstr* ARMInstr_VAluS ( ARMVfpOp op, HReg dst, HReg argL, HReg argR ) {
//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
//ZZ i->tag = ARMin_VAluS;
//ZZ i->ARMin.VAluS.op = op;
//ZZ i->ARMin.VAluS.dst = dst;
//ZZ i->ARMin.VAluS.argL = argL;
//ZZ i->ARMin.VAluS.argR = argR;
//ZZ return i;
//ZZ }
//ZZ ARMInstr* ARMInstr_VCMovD ( ARMCondCode cond, HReg dst, HReg src ) {
//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
//ZZ i->tag = ARMin_VCMovD;
//ZZ i->ARMin.VCMovD.cond = cond;
//ZZ i->ARMin.VCMovD.dst = dst;
//ZZ i->ARMin.VCMovD.src = src;
//ZZ vassert(cond != ARMcc_AL);
//ZZ return i;
//ZZ }
//ZZ ARMInstr* ARMInstr_VCMovS ( ARMCondCode cond, HReg dst, HReg src ) {
//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
//ZZ i->tag = ARMin_VCMovS;
//ZZ i->ARMin.VCMovS.cond = cond;
//ZZ i->ARMin.VCMovS.dst = dst;
//ZZ i->ARMin.VCMovS.src = src;
//ZZ vassert(cond != ARMcc_AL);
//ZZ return i;
//ZZ }
//ZZ ARMInstr* ARMInstr_VXferD ( Bool toD, HReg dD, HReg rHi, HReg rLo ) {
//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
//ZZ i->tag = ARMin_VXferD;
//ZZ i->ARMin.VXferD.toD = toD;
//ZZ i->ARMin.VXferD.dD = dD;
//ZZ i->ARMin.VXferD.rHi = rHi;
//ZZ i->ARMin.VXferD.rLo = rLo;
//ZZ return i;
//ZZ }
//ZZ ARMInstr* ARMInstr_VXferS ( Bool toS, HReg fD, HReg rLo ) {
//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
//ZZ i->tag = ARMin_VXferS;
//ZZ i->ARMin.VXferS.toS = toS;
//ZZ i->ARMin.VXferS.fD = fD;
//ZZ i->ARMin.VXferS.rLo = rLo;
//ZZ return i;
//ZZ }
//ZZ ARMInstr* ARMInstr_VCvtID ( Bool iToD, Bool syned,
//ZZ HReg dst, HReg src ) {
//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
//ZZ i->tag = ARMin_VCvtID;
//ZZ i->ARMin.VCvtID.iToD = iToD;
//ZZ i->ARMin.VCvtID.syned = syned;
//ZZ i->ARMin.VCvtID.dst = dst;
//ZZ i->ARMin.VCvtID.src = src;
//ZZ return i;
//ZZ }
//ZZ ARMInstr* ARMInstr_NLdStD ( Bool isLoad, HReg dD, ARMAModeN *amode ) {
//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
//ZZ i->tag = ARMin_NLdStD;
//ZZ i->ARMin.NLdStD.isLoad = isLoad;
//ZZ i->ARMin.NLdStD.dD = dD;
//ZZ i->ARMin.NLdStD.amode = amode;
//ZZ return i;
//ZZ }
//ZZ
//ZZ ARMInstr* ARMInstr_NUnary ( ARMNeonUnOp op, HReg dQ, HReg nQ,
//ZZ UInt size, Bool Q ) {
//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
//ZZ i->tag = ARMin_NUnary;
//ZZ i->ARMin.NUnary.op = op;
//ZZ i->ARMin.NUnary.src = nQ;
//ZZ i->ARMin.NUnary.dst = dQ;
//ZZ i->ARMin.NUnary.size = size;
//ZZ i->ARMin.NUnary.Q = Q;
//ZZ return i;
//ZZ }
//ZZ
//ZZ ARMInstr* ARMInstr_NUnaryS ( ARMNeonUnOpS op, ARMNRS* dst, ARMNRS* src,
//ZZ UInt size, Bool Q ) {
//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
//ZZ i->tag = ARMin_NUnaryS;
//ZZ i->ARMin.NUnaryS.op = op;
//ZZ i->ARMin.NUnaryS.src = src;
//ZZ i->ARMin.NUnaryS.dst = dst;
//ZZ i->ARMin.NUnaryS.size = size;
//ZZ i->ARMin.NUnaryS.Q = Q;
//ZZ return i;
//ZZ }
//ZZ
//ZZ ARMInstr* ARMInstr_NDual ( ARMNeonDualOp op, HReg nQ, HReg mQ,
//ZZ UInt size, Bool Q ) {
//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
//ZZ i->tag = ARMin_NDual;
//ZZ i->ARMin.NDual.op = op;
//ZZ i->ARMin.NDual.arg1 = nQ;
//ZZ i->ARMin.NDual.arg2 = mQ;
//ZZ i->ARMin.NDual.size = size;
//ZZ i->ARMin.NDual.Q = Q;
//ZZ return i;
//ZZ }
//ZZ
//ZZ ARMInstr* ARMInstr_NBinary ( ARMNeonBinOp op,
//ZZ HReg dst, HReg argL, HReg argR,
//ZZ UInt size, Bool Q ) {
//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
//ZZ i->tag = ARMin_NBinary;
//ZZ i->ARMin.NBinary.op = op;
//ZZ i->ARMin.NBinary.argL = argL;
//ZZ i->ARMin.NBinary.argR = argR;
//ZZ i->ARMin.NBinary.dst = dst;
//ZZ i->ARMin.NBinary.size = size;
//ZZ i->ARMin.NBinary.Q = Q;
//ZZ return i;
//ZZ }
/* Load a 16-bit immediate pattern into vector register rQ. */
ARM64Instr* ARM64Instr_VImmQ ( HReg rQ, UShort imm ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag               = ARM64in_VImmQ;
   insn->ARM64in.VImmQ.imm = imm;
   insn->ARM64in.VImmQ.rQ  = rQ;
   return insn;
}
/* Move 64 bits from integer register rX into FP register rD. */
ARM64Instr* ARM64Instr_VDfromX ( HReg rD, HReg rX ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                 = ARM64in_VDfromX;
   insn->ARM64in.VDfromX.rX  = rX;
   insn->ARM64in.VDfromX.rD  = rD;
   return insn;
}
/* Assemble vector register rQ from two 64-bit halves: rXhi (upper)
   and rXlo (lower). */
ARM64Instr* ARM64Instr_VQfromXX ( HReg rQ, HReg rXhi, HReg rXlo ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                   = ARM64in_VQfromXX;
   insn->ARM64in.VQfromXX.rXlo = rXlo;
   insn->ARM64in.VQfromXX.rXhi = rXhi;
   insn->ARM64in.VQfromXX.rQ   = rQ;
   return insn;
}
/* Extract 64-bit lane laneNo (0 or 1) of vector rQ into integer
   register rX. */
ARM64Instr* ARM64Instr_VXfromQ ( HReg rX, HReg rQ, UInt laneNo ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   vassert(laneNo <= 1);
   insn->tag                     = ARM64in_VXfromQ;
   insn->ARM64in.VXfromQ.laneNo  = laneNo;
   insn->ARM64in.VXfromQ.rQ      = rQ;
   insn->ARM64in.VXfromQ.rX      = rX;
   return insn;
}
/* Move the bits of a D (fromD) or S register into integer register
   rX. */
ARM64Instr* ARM64Instr_VXfromDorS ( HReg rX, HReg rDorS, Bool fromD ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                      = ARM64in_VXfromDorS;
   insn->ARM64in.VXfromDorS.fromD = fromD;
   insn->ARM64in.VXfromDorS.rDorS = rDorS;
   insn->ARM64in.VXfromDorS.rX    = rX;
   return insn;
}
/* Vector/FP register move of szB bytes: 16 requires two V128 regs,
   8 requires two D (Flt64) regs; anything else panics. */
ARM64Instr* ARM64Instr_VMov ( UInt szB, HReg dst, HReg src ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag              = ARM64in_VMov;
   insn->ARM64in.VMov.szB = szB;
   insn->ARM64in.VMov.dst = dst;
   insn->ARM64in.VMov.src = src;
   if (szB == 16) {
      vassert(hregClass(src) == HRcVec128);
      vassert(hregClass(dst) == HRcVec128);
   } else if (szB == 8) {
      vassert(hregClass(src) == HRcFlt64);
      vassert(hregClass(dst) == HRcFlt64);
   } else {
      vpanic("ARM64Instr_VMov");
   }
   return insn;
}
//ZZ ARMInstr* ARMInstr_NCMovQ ( ARMCondCode cond, HReg dst, HReg src ) {
//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
//ZZ i->tag = ARMin_NCMovQ;
//ZZ i->ARMin.NCMovQ.cond = cond;
//ZZ i->ARMin.NCMovQ.dst = dst;
//ZZ i->ARMin.NCMovQ.src = src;
//ZZ vassert(cond != ARMcc_AL);
//ZZ return i;
//ZZ }
//ZZ
//ZZ ARMInstr* ARMInstr_NShift ( ARMNeonShiftOp op,
//ZZ HReg dst, HReg argL, HReg argR,
//ZZ UInt size, Bool Q ) {
//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
//ZZ i->tag = ARMin_NShift;
//ZZ i->ARMin.NShift.op = op;
//ZZ i->ARMin.NShift.argL = argL;
//ZZ i->ARMin.NShift.argR = argR;
//ZZ i->ARMin.NShift.dst = dst;
//ZZ i->ARMin.NShift.size = size;
//ZZ i->ARMin.NShift.Q = Q;
//ZZ return i;
//ZZ }
//ZZ
//ZZ ARMInstr* ARMInstr_NShl64 ( HReg dst, HReg src, UInt amt )
//ZZ {
//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
//ZZ i->tag = ARMin_NShl64;
//ZZ i->ARMin.NShl64.dst = dst;
//ZZ i->ARMin.NShl64.src = src;
//ZZ i->ARMin.NShl64.amt = amt;
//ZZ vassert(amt >= 1 && amt <= 63);
//ZZ return i;
//ZZ }
//ZZ
//ZZ /* Helper copy-pasted from isel.c */
//ZZ static Bool fitsIn8x4 ( UInt* u8, UInt* u4, UInt u )
//ZZ {
//ZZ UInt i;
//ZZ for (i = 0; i < 16; i++) {
//ZZ if (0 == (u & 0xFFFFFF00)) {
//ZZ *u8 = u;
//ZZ *u4 = i;
//ZZ return True;
//ZZ }
//ZZ u = ROR32(u, 30);
//ZZ }
//ZZ vassert(i == 16);
//ZZ return False;
//ZZ }
//ZZ
//ZZ ARMInstr* ARMInstr_Add32 ( HReg rD, HReg rN, UInt imm32 ) {
//ZZ UInt u8, u4;
//ZZ ARMInstr *i = LibVEX_Alloc(sizeof(ARMInstr));
//ZZ /* Try to generate single ADD if possible */
//ZZ if (fitsIn8x4(&u8, &u4, imm32)) {
//ZZ i->tag = ARMin_Alu;
//ZZ i->ARMin.Alu.op = ARMalu_ADD;
//ZZ i->ARMin.Alu.dst = rD;
//ZZ i->ARMin.Alu.argL = rN;
//ZZ i->ARMin.Alu.argR = ARMRI84_I84(u8, u4);
//ZZ } else {
//ZZ i->tag = ARMin_Add32;
//ZZ i->ARMin.Add32.rD = rD;
//ZZ i->ARMin.Add32.rN = rN;
//ZZ i->ARMin.Add32.imm32 = imm32;
//ZZ }
//ZZ return i;
//ZZ }
/* Event check: amCounter addresses the downcount, amFailAddr the
   address to jump to when it expires. */
ARM64Instr* ARM64Instr_EvCheck ( ARM64AMode* amCounter,
                                 ARM64AMode* amFailAddr ) {
   ARM64Instr* insn = LibVEX_Alloc(sizeof(ARM64Instr));
   insn->tag                        = ARM64in_EvCheck;
   insn->ARM64in.EvCheck.amFailAddr = amFailAddr;
   insn->ARM64in.EvCheck.amCounter  = amCounter;
   return insn;
}
//ZZ ARMInstr* ARMInstr_ProfInc ( void ) {
//ZZ ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
//ZZ i->tag = ARMin_ProfInc;
//ZZ return i;
//ZZ }
/* ... */
void ppARM64Instr ( ARM64Instr* i ) {
switch (i->tag) {
case ARM64in_Arith:
vex_printf("%s ", i->ARM64in.Arith.isAdd ? "add" : "sub");
ppHRegARM64(i->ARM64in.Arith.dst);
vex_printf(", ");
ppHRegARM64(i->ARM64in.Arith.argL);
vex_printf(", ");
ppARM64RIA(i->ARM64in.Arith.argR);
return;
case ARM64in_Cmp:
vex_printf("cmp%s ", i->ARM64in.Cmp.is64 ? " " : "(w)" );
ppHRegARM64(i->ARM64in.Cmp.argL);
vex_printf(", ");
ppARM64RIA(i->ARM64in.Cmp.argR);
return;
case ARM64in_Logic:
vex_printf("%s ", showARM64LogicOp(i->ARM64in.Logic.op));
ppHRegARM64(i->ARM64in.Logic.dst);
vex_printf(", ");
ppHRegARM64(i->ARM64in.Logic.argL);
vex_printf(", ");
ppARM64RIL(i->ARM64in.Logic.argR);
return;
case ARM64in_Test:
vex_printf("tst ");
ppHRegARM64(i->ARM64in.Test.argL);
vex_printf(", ");
ppARM64RIL(i->ARM64in.Test.argR);
return;
case ARM64in_Shift:
vex_printf("%s ", showARM64ShiftOp(i->ARM64in.Shift.op));
ppHRegARM64(i->ARM64in.Shift.dst);
vex_printf(", ");
ppHRegARM64(i->ARM64in.Shift.argL);
vex_printf(", ");
ppARM64RI6(i->ARM64in.Shift.argR);
return;
case ARM64in_Unary:
vex_printf("%s ", showARM64UnaryOp(i->ARM64in.Unary.op));
ppHRegARM64(i->ARM64in.Unary.dst);
vex_printf(", ");
ppHRegARM64(i->ARM64in.Unary.src);
return;
case ARM64in_MovI:
vex_printf("mov ");
ppHRegARM64(i->ARM64in.MovI.dst);
vex_printf(", ");
ppHRegARM64(i->ARM64in.MovI.src);
return;
case ARM64in_Imm64:
vex_printf("imm64 ");
ppHRegARM64(i->ARM64in.Imm64.dst);
vex_printf(", 0x%llx", i->ARM64in.Imm64.imm64);
return;
case ARM64in_LdSt64:
if (i->ARM64in.LdSt64.isLoad) {
vex_printf("ldr ");
ppHRegARM64(i->ARM64in.LdSt64.rD);
vex_printf(", ");
ppARM64AMode(i->ARM64in.LdSt64.amode);
} else {
vex_printf("str ");
ppARM64AMode(i->ARM64in.LdSt64.amode);
vex_printf(", ");
ppHRegARM64(i->ARM64in.LdSt64.rD);
}
return;
case ARM64in_LdSt32:
if (i->ARM64in.LdSt32.isLoad) {
vex_printf("ldruw ");
ppHRegARM64(i->ARM64in.LdSt32.rD);
vex_printf(", ");
ppARM64AMode(i->ARM64in.LdSt32.amode);
} else {
vex_printf("strw ");
ppARM64AMode(i->ARM64in.LdSt32.amode);
vex_printf(", ");
ppHRegARM64(i->ARM64in.LdSt32.rD);
}
return;
case ARM64in_LdSt16:
if (i->ARM64in.LdSt16.isLoad) {
vex_printf("ldruh ");
ppHRegARM64(i->ARM64in.LdSt16.rD);
vex_printf(", ");
ppARM64AMode(i->ARM64in.LdSt16.amode);
} else {
vex_printf("strh ");
ppARM64AMode(i->ARM64in.LdSt16.amode);
vex_printf(", ");
ppHRegARM64(i->ARM64in.LdSt16.rD);
}
return;
case ARM64in_LdSt8:
if (i->ARM64in.LdSt8.isLoad) {
vex_printf("ldrub ");
ppHRegARM64(i->ARM64in.LdSt8.rD);
vex_printf(", ");
ppARM64AMode(i->ARM64in.LdSt8.amode);
} else {
vex_printf("strb ");
ppARM64AMode(i->ARM64in.LdSt8.amode);
vex_printf(", ");
ppHRegARM64(i->ARM64in.LdSt8.rD);
}
return;
case ARM64in_XDirect:
vex_printf("(xDirect) ");
vex_printf("if (%%pstate.%s) { ",
showARM64CondCode(i->ARM64in.XDirect.cond));
vex_printf("imm64 x9,0x%llx; ", i->ARM64in.XDirect.dstGA);
vex_printf("str x9,");
ppARM64AMode(i->ARM64in.XDirect.amPC);
vex_printf("; imm64-exactly4 x9,$disp_cp_chain_me_to_%sEP; ",
i->ARM64in.XDirect.toFastEP ? "fast" : "slow");
vex_printf("blr x9 }");
return;
case ARM64in_XIndir:
vex_printf("(xIndir) ");
vex_printf("if (%%pstate.%s) { ",
showARM64CondCode(i->ARM64in.XIndir.cond));
vex_printf("str ");
ppHRegARM64(i->ARM64in.XIndir.dstGA);
vex_printf(",");
ppARM64AMode(i->ARM64in.XIndir.amPC);
vex_printf("; imm64 x9,$disp_cp_xindir; ");
vex_printf("br x9 }");
return;
case ARM64in_XAssisted:
vex_printf("(xAssisted) ");
vex_printf("if (%%pstate.%s) { ",
showARM64CondCode(i->ARM64in.XAssisted.cond));
vex_printf("str ");
ppHRegARM64(i->ARM64in.XAssisted.dstGA);
vex_printf(",");
ppARM64AMode(i->ARM64in.XAssisted.amPC);
vex_printf("; movw x21,$IRJumpKind_to_TRCVAL(%d); ",
(Int)i->ARM64in.XAssisted.jk);
vex_printf("imm64 x9,$disp_cp_xassisted; ");
vex_printf("br x9 }");
return;
case ARM64in_CSel:
vex_printf("csel ");
ppHRegARM64(i->ARM64in.CSel.dst);
vex_printf(", ");
ppHRegARM64(i->ARM64in.CSel.argL);
vex_printf(", ");
ppHRegARM64(i->ARM64in.CSel.argR);
vex_printf(", %s", showARM64CondCode(i->ARM64in.CSel.cond));
return;
case ARM64in_Call:
vex_printf("call%s ",
i->ARM64in.Call.cond==ARM64cc_AL
? " " : showARM64CondCode(i->ARM64in.Call.cond));
vex_printf("0x%lx [nArgRegs=%d, ",
i->ARM64in.Call.target, i->ARM64in.Call.nArgRegs);
ppRetLoc(i->ARM64in.Call.rloc);
vex_printf("]");
return;
case ARM64in_AddToSP: {
Int simm = i->ARM64in.AddToSP.simm;
vex_printf("%s xsp, xsp, #%d", simm < 0 ? "sub" : "add",
simm < 0 ? -simm : simm);
return;
}
case ARM64in_FromSP:
vex_printf("mov ");
ppHRegARM64(i->ARM64in.FromSP.dst);
vex_printf(", xsp");
return;
case ARM64in_Mul:
vex_printf("%s ", showARM64MulOp(i->ARM64in.Mul.op));
ppHRegARM64(i->ARM64in.Mul.dst);
vex_printf(", ");
ppHRegARM64(i->ARM64in.Mul.argL);
vex_printf(", ");
ppHRegARM64(i->ARM64in.Mul.argR);
return;
case ARM64in_LdrEX: {
/* Load-exclusive.  The size-suffix is "b"/"h" for 1/2 bytes and
   empty (a padding space) for 4/8 bytes; the destination is shown
   as w2 or x2 depending on access size, with x4 as the fixed
   address register (hardwired convention of this backend). */
const HChar* sz = " ";
switch (i->ARM64in.LdrEX.szB) {
case 1: sz = "b"; break;
case 2: sz = "h"; break;
case 4: case 8: break;
default: vassert(0);
}
vex_printf("ldxr%s %c2, [x4]",
sz, i->ARM64in.LdrEX.szB == 8 ? 'x' : 'w');
return;
}
case ARM64in_StrEX: {
/* Store-exclusive: w0 receives the success/fail status, data
   comes from w2/x2, address from x4 (same fixed-register
   convention as LdrEX). */
const HChar* sz = " ";
switch (i->ARM64in.StrEX.szB) {
case 1: sz = "b"; break;
case 2: sz = "h"; break;
case 4: case 8: break;
default: vassert(0);
}
vex_printf("stxr%s w0, %c2, [x4]",
sz, i->ARM64in.StrEX.szB == 8 ? 'x' : 'w');
return;
}
case ARM64in_MFence:
/* Full memory fence: printed as the three-barrier sequence the
   backend emits for it. */
vex_printf("(mfence) dsb sy; dmb sy; isb");
return;
//ZZ case ARM64in_CLREX:
//ZZ vex_printf("clrex");
//ZZ return;
case ARM64in_VLdStS:
/* 32-bit FP load/store: S-register view of the D register, with a
   12-bit unsigned byte offset from base rN.  Operand order is
   swapped for stores (address first). */
if (i->ARM64in.VLdStS.isLoad) {
vex_printf("ldr ");
ppHRegARM64asSreg(i->ARM64in.VLdStS.sD);
vex_printf(", %u(", i->ARM64in.VLdStS.uimm12);
ppHRegARM64(i->ARM64in.VLdStS.rN);
vex_printf(")");
} else {
vex_printf("str ");
vex_printf("%u(", i->ARM64in.VLdStS.uimm12);
ppHRegARM64(i->ARM64in.VLdStS.rN);
vex_printf("), ");
ppHRegARM64asSreg(i->ARM64in.VLdStS.sD);
}
return;
case ARM64in_VLdStD:
/* 64-bit FP load/store; same offset/ordering scheme as VLdStS. */
if (i->ARM64in.VLdStD.isLoad) {
vex_printf("ldr ");
ppHRegARM64(i->ARM64in.VLdStD.dD);
vex_printf(", %u(", i->ARM64in.VLdStD.uimm12);
ppHRegARM64(i->ARM64in.VLdStD.rN);
vex_printf(")");
} else {
vex_printf("str ");
vex_printf("%u(", i->ARM64in.VLdStD.uimm12);
ppHRegARM64(i->ARM64in.VLdStD.rN);
vex_printf("), ");
ppHRegARM64(i->ARM64in.VLdStD.dD);
}
return;
case ARM64in_VLdStQ:
/* 128-bit vector load/store, shown in ld1/st1 .2d syntax; no
   offset -- the address is exactly [rN]. */
if (i->ARM64in.VLdStQ.isLoad)
vex_printf("ld1.2d {");
else
vex_printf("st1.2d {");
ppHRegARM64(i->ARM64in.VLdStQ.rQ);
vex_printf("}, [");
ppHRegARM64(i->ARM64in.VLdStQ.rN);
vex_printf("]");
return;
case ARM64in_VCvtI2F: {
/* Int-to-FP convert.  characteriseARM64CvtOp yields the
   signedness character ('s'/'u' -- syn) and the float/int operand
   sizes in bytes, which pick the S/D and W/X register views. */
HChar syn = '?';
UInt fszB = 0;
UInt iszB = 0;
characteriseARM64CvtOp(&syn, &fszB, &iszB, i->ARM64in.VCvtI2F.how);
vex_printf("%ccvtf ", syn);
ppHRegARM64(i->ARM64in.VCvtI2F.rD);
vex_printf("(%c-reg), ", fszB == 4 ? 'S' : 'D');
ppHRegARM64(i->ARM64in.VCvtI2F.rS);
vex_printf("(%c-reg)", iszB == 4 ? 'W' : 'X');
return;
}
case ARM64in_VCvtF2I: {
/* FP-to-int convert.  armRM (2 bits) selects the FCVT
   rounding-mode suffix character from "npmz". */
HChar syn = '?';
UInt fszB = 0;
UInt iszB = 0;
HChar rmo = '?';
characteriseARM64CvtOp(&syn, &fszB, &iszB, i->ARM64in.VCvtF2I.how);
UChar armRM = i->ARM64in.VCvtF2I.armRM;
if (armRM < 4) rmo = "npmz"[armRM];
vex_printf("fcvt%c%c ", rmo, syn);
ppHRegARM64(i->ARM64in.VCvtF2I.rD);
vex_printf("(%c-reg), ", iszB == 4 ? 'W' : 'X');
ppHRegARM64(i->ARM64in.VCvtF2I.rS);
vex_printf("(%c-reg)", fszB == 4 ? 'S' : 'D');
return;
}
case ARM64in_VCvtSD:
/* Single<->double convert; the narrower operand is printed via
   its S-register view. */
vex_printf("fcvt%s ", i->ARM64in.VCvtSD.sToD ? "s2d" : "d2s");
if (i->ARM64in.VCvtSD.sToD) {
ppHRegARM64(i->ARM64in.VCvtSD.dst);
vex_printf(", ");
ppHRegARM64asSreg(i->ARM64in.VCvtSD.src);
} else {
ppHRegARM64asSreg(i->ARM64in.VCvtSD.dst);
vex_printf(", ");
ppHRegARM64(i->ARM64in.VCvtSD.src);
}
return;
case ARM64in_VUnaryD:
/* Scalar double unary FP op: "f<op> dst, src". */
vex_printf("f%s ", showARM64FpUnaryOp(i->ARM64in.VUnaryD.op));
ppHRegARM64(i->ARM64in.VUnaryD.dst);
vex_printf(", ");
ppHRegARM64(i->ARM64in.VUnaryD.src);
return;
case ARM64in_VUnaryS:
/* Scalar single unary FP op; registers shown as S regs. */
vex_printf("f%s ", showARM64FpUnaryOp(i->ARM64in.VUnaryS.op));
ppHRegARM64asSreg(i->ARM64in.VUnaryS.dst);
vex_printf(", ");
ppHRegARM64asSreg(i->ARM64in.VUnaryS.src);
return;
case ARM64in_VBinD:
/* Scalar double binary FP op: "f<op> dst, argL, argR". */
vex_printf("f%s ", showARM64FpBinOp(i->ARM64in.VBinD.op));
ppHRegARM64(i->ARM64in.VBinD.dst);
vex_printf(", ");
ppHRegARM64(i->ARM64in.VBinD.argL);
vex_printf(", ");
ppHRegARM64(i->ARM64in.VBinD.argR);
return;
case ARM64in_VBinS:
/* Scalar single binary FP op; registers shown as S regs. */
vex_printf("f%s ", showARM64FpBinOp(i->ARM64in.VBinS.op));
ppHRegARM64asSreg(i->ARM64in.VBinS.dst);
vex_printf(", ");
ppHRegARM64asSreg(i->ARM64in.VBinS.argL);
vex_printf(", ");
ppHRegARM64asSreg(i->ARM64in.VBinS.argR);
return;
case ARM64in_VCmpD:
/* Double-precision FP compare (sets flags, no destination). */
vex_printf("fcmp ");
ppHRegARM64(i->ARM64in.VCmpD.argL);
vex_printf(", ");
ppHRegARM64(i->ARM64in.VCmpD.argR);
return;
case ARM64in_VCmpS:
/* Single-precision FP compare. */
vex_printf("fcmp ");
ppHRegARM64asSreg(i->ARM64in.VCmpS.argL);
vex_printf(", ");
ppHRegARM64asSreg(i->ARM64in.VCmpS.argR);
return;
case ARM64in_FPCR:
/* Move to (msr) or from (mrs) the FP control register. */
if (i->ARM64in.FPCR.toFPCR) {
vex_printf("msr fpcr, ");
ppHRegARM64(i->ARM64in.FPCR.iReg);
} else {
vex_printf("mrs ");
ppHRegARM64(i->ARM64in.FPCR.iReg);
vex_printf(", fpcr");
}
return;
case ARM64in_VBinV: {
/* Vector binary op.  The show helper fills in both the mnemonic
   (nm) and the arrangement specifier (ar, e.g. "16b"/"4s"), which
   is appended to every register operand. */
const HChar* nm = "??";
const HChar* ar = "??";
showARM64VecBinOp(&nm, &ar, i->ARM64in.VBinV.op);
vex_printf("%s ", nm);
ppHRegARM64(i->ARM64in.VBinV.dst);
vex_printf(".%s, ", ar);
ppHRegARM64(i->ARM64in.VBinV.argL);
vex_printf(".%s, ", ar);
ppHRegARM64(i->ARM64in.VBinV.argR);
vex_printf(".%s", ar);
return;
}
case ARM64in_VUnaryV: {
/* Vector unary op; same mnemonic/arrangement scheme as VBinV. */
const HChar* nm = "??";
const HChar* ar = "??";
showARM64VecUnaryOp(&nm, &ar, i->ARM64in.VUnaryV.op);
vex_printf("%s ", nm);
ppHRegARM64(i->ARM64in.VUnaryV.dst);
vex_printf(".%s, ", ar);
ppHRegARM64(i->ARM64in.VUnaryV.arg);
vex_printf(".%s", ar);
return;
}
case ARM64in_VNarrowV: {
/* Narrowing (xtn): dszBlg2 (log2 of the destination lane size)
   indexes parallel tables of dst/src arrangements; out-of-range
   values print "??" rather than faulting. */
UInt dszBlg2 = i->ARM64in.VNarrowV.dszBlg2;
const HChar* darr[3] = { "8b", "4h", "2s" };
const HChar* sarr[3] = { "8h", "4s", "2d" };
vex_printf("xtn ");
ppHRegARM64(i->ARM64in.VNarrowV.dst);
vex_printf(".%s, ", dszBlg2 < 3 ? darr[dszBlg2] : "??");
ppHRegARM64(i->ARM64in.VNarrowV.src);
vex_printf(".%s", dszBlg2 < 3 ? sarr[dszBlg2] : "??");
return;
}
case ARM64in_VShiftImmV: {
/* Vector shift by immediate: mnemonic/arrangement from the show
   helper, then the shift amount. */
const HChar* nm = "??";
const HChar* ar = "??";
showARM64VecShiftOp(&nm, &ar, i->ARM64in.VShiftImmV.op);
vex_printf("%s ", nm);
ppHRegARM64(i->ARM64in.VShiftImmV.dst);
vex_printf(".%s, ", ar);
ppHRegARM64(i->ARM64in.VShiftImmV.src);
vex_printf(".%s, #%u", ar, i->ARM64in.VShiftImmV.amt);
return;
}
case ARM64in_VExtV: {
/* Byte-wise extract from a register pair; always shown with the
   .16b arrangement and a byte amount amtB. */
vex_printf("ext ");
ppHRegARM64(i->ARM64in.VExtV.dst);
vex_printf(".16b, ");
ppHRegARM64(i->ARM64in.VExtV.srcLo);
vex_printf(".16b, ");
ppHRegARM64(i->ARM64in.VExtV.srcHi);
vex_printf(".16b, #%u", i->ARM64in.VExtV.amtB);
return;
}
//ZZ case ARMin_VAluS:
//ZZ vex_printf("f%-3ss ", showARMVfpOp(i->ARMin.VAluS.op));
//ZZ ppHRegARM(i->ARMin.VAluS.dst);
//ZZ vex_printf(", ");
//ZZ ppHRegARM(i->ARMin.VAluS.argL);
//ZZ vex_printf(", ");
//ZZ ppHRegARM(i->ARMin.VAluS.argR);
//ZZ return;
//ZZ case ARMin_VCMovD:
//ZZ vex_printf("fcpyd%s ", showARMCondCode(i->ARMin.VCMovD.cond));
//ZZ ppHRegARM(i->ARMin.VCMovD.dst);
//ZZ vex_printf(", ");
//ZZ ppHRegARM(i->ARMin.VCMovD.src);
//ZZ return;
//ZZ case ARMin_VCMovS:
//ZZ vex_printf("fcpys%s ", showARMCondCode(i->ARMin.VCMovS.cond));
//ZZ ppHRegARM(i->ARMin.VCMovS.dst);
//ZZ vex_printf(", ");
//ZZ ppHRegARM(i->ARMin.VCMovS.src);
//ZZ return;
//ZZ case ARMin_VXferD:
//ZZ vex_printf("vmov ");
//ZZ if (i->ARMin.VXferD.toD) {
//ZZ ppHRegARM(i->ARMin.VXferD.dD);
//ZZ vex_printf(", ");
//ZZ ppHRegARM(i->ARMin.VXferD.rLo);
//ZZ vex_printf(", ");
//ZZ ppHRegARM(i->ARMin.VXferD.rHi);
//ZZ } else {
//ZZ ppHRegARM(i->ARMin.VXferD.rLo);
//ZZ vex_printf(", ");
//ZZ ppHRegARM(i->ARMin.VXferD.rHi);
//ZZ vex_printf(", ");
//ZZ ppHRegARM(i->ARMin.VXferD.dD);
//ZZ }
//ZZ return;
//ZZ case ARMin_VXferS:
//ZZ vex_printf("vmov ");
//ZZ if (i->ARMin.VXferS.toS) {
//ZZ ppHRegARM(i->ARMin.VXferS.fD);
//ZZ vex_printf(", ");
//ZZ ppHRegARM(i->ARMin.VXferS.rLo);
//ZZ } else {
//ZZ ppHRegARM(i->ARMin.VXferS.rLo);
//ZZ vex_printf(", ");
//ZZ ppHRegARM(i->ARMin.VXferS.fD);
//ZZ }
//ZZ return;
//ZZ case ARMin_VCvtID: {
//ZZ const HChar* nm = "?";
//ZZ if (i->ARMin.VCvtID.iToD) {
//ZZ nm = i->ARMin.VCvtID.syned ? "fsitod" : "fuitod";
//ZZ } else {
//ZZ nm = i->ARMin.VCvtID.syned ? "ftosid" : "ftouid";
//ZZ }
//ZZ vex_printf("%s ", nm);
//ZZ ppHRegARM(i->ARMin.VCvtID.dst);
//ZZ vex_printf(", ");
//ZZ ppHRegARM(i->ARMin.VCvtID.src);
//ZZ return;
//ZZ }
//ZZ case ARMin_NLdStD:
//ZZ if (i->ARMin.NLdStD.isLoad)
//ZZ vex_printf("vld1.32 {");
//ZZ else
//ZZ vex_printf("vst1.32 {");
//ZZ ppHRegARM(i->ARMin.NLdStD.dD);
//ZZ vex_printf("} ");
//ZZ ppARMAModeN(i->ARMin.NLdStD.amode);
//ZZ return;
//ZZ case ARMin_NUnary:
//ZZ vex_printf("%s%s%s ",
//ZZ showARMNeonUnOp(i->ARMin.NUnary.op),
//ZZ showARMNeonUnOpDataType(i->ARMin.NUnary.op),
//ZZ showARMNeonDataSize(i));
//ZZ ppHRegARM(i->ARMin.NUnary.dst);
//ZZ vex_printf(", ");
//ZZ ppHRegARM(i->ARMin.NUnary.src);
//ZZ if (i->ARMin.NUnary.op == ARMneon_EQZ)
//ZZ vex_printf(", #0");
//ZZ if (i->ARMin.NUnary.op == ARMneon_VCVTFtoFixedS ||
//ZZ i->ARMin.NUnary.op == ARMneon_VCVTFtoFixedU ||
//ZZ i->ARMin.NUnary.op == ARMneon_VCVTFixedStoF ||
//ZZ i->ARMin.NUnary.op == ARMneon_VCVTFixedUtoF) {
//ZZ vex_printf(", #%d", i->ARMin.NUnary.size);
//ZZ }
//ZZ if (i->ARMin.NUnary.op == ARMneon_VQSHLNSS ||
//ZZ i->ARMin.NUnary.op == ARMneon_VQSHLNUU ||
//ZZ i->ARMin.NUnary.op == ARMneon_VQSHLNUS) {
//ZZ UInt size;
//ZZ size = i->ARMin.NUnary.size;
//ZZ if (size & 0x40) {
//ZZ vex_printf(", #%d", size - 64);
//ZZ } else if (size & 0x20) {
//ZZ vex_printf(", #%d", size - 32);
//ZZ } else if (size & 0x10) {
//ZZ vex_printf(", #%d", size - 16);
//ZZ } else if (size & 0x08) {
//ZZ vex_printf(", #%d", size - 8);
//ZZ }
//ZZ }
//ZZ return;
//ZZ case ARMin_NUnaryS:
//ZZ vex_printf("%s%s%s ",
//ZZ showARMNeonUnOpS(i->ARMin.NUnaryS.op),
//ZZ showARMNeonUnOpSDataType(i->ARMin.NUnaryS.op),
//ZZ showARMNeonDataSize(i));
//ZZ ppARMNRS(i->ARMin.NUnaryS.dst);
//ZZ vex_printf(", ");
//ZZ ppARMNRS(i->ARMin.NUnaryS.src);
//ZZ return;
//ZZ case ARMin_NShift:
//ZZ vex_printf("%s%s%s ",
//ZZ showARMNeonShiftOp(i->ARMin.NShift.op),
//ZZ showARMNeonShiftOpDataType(i->ARMin.NShift.op),
//ZZ showARMNeonDataSize(i));
//ZZ ppHRegARM(i->ARMin.NShift.dst);
//ZZ vex_printf(", ");
//ZZ ppHRegARM(i->ARMin.NShift.argL);
//ZZ vex_printf(", ");
//ZZ ppHRegARM(i->ARMin.NShift.argR);
//ZZ return;
//ZZ case ARMin_NShl64:
//ZZ vex_printf("vshl.i64 ");
//ZZ ppHRegARM(i->ARMin.NShl64.dst);
//ZZ vex_printf(", ");
//ZZ ppHRegARM(i->ARMin.NShl64.src);
//ZZ vex_printf(", #%u", i->ARMin.NShl64.amt);
//ZZ return;
//ZZ case ARMin_NDual:
//ZZ vex_printf("%s%s%s ",
//ZZ showARMNeonDualOp(i->ARMin.NDual.op),
//ZZ showARMNeonDualOpDataType(i->ARMin.NDual.op),
//ZZ showARMNeonDataSize(i));
//ZZ ppHRegARM(i->ARMin.NDual.arg1);
//ZZ vex_printf(", ");
//ZZ ppHRegARM(i->ARMin.NDual.arg2);
//ZZ return;
//ZZ case ARMin_NBinary:
//ZZ vex_printf("%s%s%s",
//ZZ showARMNeonBinOp(i->ARMin.NBinary.op),
//ZZ showARMNeonBinOpDataType(i->ARMin.NBinary.op),
//ZZ showARMNeonDataSize(i));
//ZZ vex_printf(" ");
//ZZ ppHRegARM(i->ARMin.NBinary.dst);
//ZZ vex_printf(", ");
//ZZ ppHRegARM(i->ARMin.NBinary.argL);
//ZZ vex_printf(", ");
//ZZ ppHRegARM(i->ARMin.NBinary.argR);
//ZZ return;
case ARM64in_VImmQ:
/* Q-register immediate: imm is a 16-bit mask, one bit per byte of
   the 128-bit value (pseudo-syntax, not real A64). */
vex_printf("qimm ");
ppHRegARM64(i->ARM64in.VImmQ.rQ);
vex_printf(", Bits16toBytes16(0x%x)", (UInt)i->ARM64in.VImmQ.imm);
return;
case ARM64in_VDfromX:
/* Move a 64-bit integer register into a D register. */
vex_printf("fmov ");
ppHRegARM64(i->ARM64in.VDfromX.rD);
vex_printf(", ");
ppHRegARM64(i->ARM64in.VDfromX.rX);
return;
case ARM64in_VQfromXX:
/* Build a Q register from two X registers (hi half, lo half);
   pseudo-mnemonic, printed hi before lo. */
vex_printf("qFromXX ");
ppHRegARM64(i->ARM64in.VQfromXX.rQ);
vex_printf(", ");
ppHRegARM64(i->ARM64in.VQfromXX.rXhi);
vex_printf(", ");
ppHRegARM64(i->ARM64in.VQfromXX.rXlo);
return;
case ARM64in_VXfromQ:
/* Extract one 64-bit lane of a Q register into an X register. */
vex_printf("fmov ");
ppHRegARM64(i->ARM64in.VXfromQ.rX);
vex_printf(", ");
ppHRegARM64(i->ARM64in.VXfromQ.rQ);
vex_printf(".d[%u]", i->ARM64in.VXfromQ.laneNo);
return;
case ARM64in_VXfromDorS:
/* Move a D or S register to an integer register; fromD selects
   the 64-bit (X/D) versus 32-bit (W/S) views in the annotation. */
vex_printf("fmov ");
ppHRegARM64(i->ARM64in.VXfromDorS.rX);
vex_printf("(%c-reg), ", i->ARM64in.VXfromDorS.fromD ? 'X':'W');
ppHRegARM64(i->ARM64in.VXfromDorS.rDorS);
vex_printf("(%c-reg)", i->ARM64in.VXfromDorS.fromD ? 'D' : 'S');
return;
case ARM64in_VMov: {
UChar aux = '?';
switch (i->ARM64in.VMov.szB) {