/*---------------------------------------------------------------*/
/*--- begin host_arm_defs.c ---*/
/*---------------------------------------------------------------*/
/*
This file is part of Valgrind, a dynamic binary instrumentation
framework.
Copyright (C) 2004-2013 OpenWorks LLP
info@open-works.net
NEON support is
Copyright (C) 2010-2013 Samsung Electronics
contributed by Dmitry Zhurikhin <zhur@ispras.ru>
and Kirill Batuzov <batuzovk@ispras.ru>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
The GNU General Public License is contained in the file COPYING.
*/
#include "libvex_basictypes.h"
#include "libvex.h"
#include "libvex_trc_values.h"
#include "main_util.h"
#include "host_generic_regs.h"
#include "host_arm_defs.h"
UInt arm_hwcaps = 0;
/* --------- Registers. --------- */
/* The usual HReg abstraction.
There are 16 general purpose regs.
*/
void ppHRegARM ( HReg reg ) {
Int r;
/* Be generic for all virtual regs. */
if (hregIsVirtual(reg)) {
ppHReg(reg);
return;
}
/* But specific for real regs. */
switch (hregClass(reg)) {
case HRcInt32:
r = hregNumber(reg);
vassert(r >= 0 && r < 16);
vex_printf("r%d", r);
return;
case HRcFlt64:
r = hregNumber(reg);
vassert(r >= 0 && r < 32);
vex_printf("d%d", r);
return;
case HRcFlt32:
r = hregNumber(reg);
vassert(r >= 0 && r < 32);
vex_printf("s%d", r);
return;
case HRcVec128:
r = hregNumber(reg);
vassert(r >= 0 && r < 16);
vex_printf("q%d", r);
return;
default:
vpanic("ppHRegARM");
}
}
HReg hregARM_R0 ( void ) { return mkHReg(0, HRcInt32, False); }
HReg hregARM_R1 ( void ) { return mkHReg(1, HRcInt32, False); }
HReg hregARM_R2 ( void ) { return mkHReg(2, HRcInt32, False); }
HReg hregARM_R3 ( void ) { return mkHReg(3, HRcInt32, False); }
HReg hregARM_R4 ( void ) { return mkHReg(4, HRcInt32, False); }
HReg hregARM_R5 ( void ) { return mkHReg(5, HRcInt32, False); }
HReg hregARM_R6 ( void ) { return mkHReg(6, HRcInt32, False); }
HReg hregARM_R7 ( void ) { return mkHReg(7, HRcInt32, False); }
HReg hregARM_R8 ( void ) { return mkHReg(8, HRcInt32, False); }
HReg hregARM_R9 ( void ) { return mkHReg(9, HRcInt32, False); }
HReg hregARM_R10 ( void ) { return mkHReg(10, HRcInt32, False); }
HReg hregARM_R11 ( void ) { return mkHReg(11, HRcInt32, False); }
HReg hregARM_R12 ( void ) { return mkHReg(12, HRcInt32, False); }
HReg hregARM_R13 ( void ) { return mkHReg(13, HRcInt32, False); }
HReg hregARM_R14 ( void ) { return mkHReg(14, HRcInt32, False); }
HReg hregARM_R15 ( void ) { return mkHReg(15, HRcInt32, False); }
HReg hregARM_D8 ( void ) { return mkHReg(8, HRcFlt64, False); }
HReg hregARM_D9 ( void ) { return mkHReg(9, HRcFlt64, False); }
HReg hregARM_D10 ( void ) { return mkHReg(10, HRcFlt64, False); }
HReg hregARM_D11 ( void ) { return mkHReg(11, HRcFlt64, False); }
HReg hregARM_D12 ( void ) { return mkHReg(12, HRcFlt64, False); }
HReg hregARM_S26 ( void ) { return mkHReg(26, HRcFlt32, False); }
HReg hregARM_S27 ( void ) { return mkHReg(27, HRcFlt32, False); }
HReg hregARM_S28 ( void ) { return mkHReg(28, HRcFlt32, False); }
HReg hregARM_S29 ( void ) { return mkHReg(29, HRcFlt32, False); }
HReg hregARM_S30 ( void ) { return mkHReg(30, HRcFlt32, False); }
HReg hregARM_Q8 ( void ) { return mkHReg(8, HRcVec128, False); }
HReg hregARM_Q9 ( void ) { return mkHReg(9, HRcVec128, False); }
HReg hregARM_Q10 ( void ) { return mkHReg(10, HRcVec128, False); }
HReg hregARM_Q11 ( void ) { return mkHReg(11, HRcVec128, False); }
HReg hregARM_Q12 ( void ) { return mkHReg(12, HRcVec128, False); }
HReg hregARM_Q13 ( void ) { return mkHReg(13, HRcVec128, False); }
HReg hregARM_Q14 ( void ) { return mkHReg(14, HRcVec128, False); }
HReg hregARM_Q15 ( void ) { return mkHReg(15, HRcVec128, False); }
void getAllocableRegs_ARM ( Int* nregs, HReg** arr )
{
Int i = 0;
*nregs = 26;
*arr = LibVEX_Alloc(*nregs * sizeof(HReg));
// callee-saved ones are listed first, since we prefer them
// if they're available
(*arr)[i++] = hregARM_R4();
(*arr)[i++] = hregARM_R5();
(*arr)[i++] = hregARM_R6();
(*arr)[i++] = hregARM_R7();
(*arr)[i++] = hregARM_R10();
(*arr)[i++] = hregARM_R11();
// otherwise we'll have to slum it out with caller-saved ones
(*arr)[i++] = hregARM_R0();
(*arr)[i++] = hregARM_R1();
(*arr)[i++] = hregARM_R2();
(*arr)[i++] = hregARM_R3();
(*arr)[i++] = hregARM_R9();
// FP registers. Note: these are all callee-save. Yay!
// Hence we don't need to mention them as trashed in
// getHRegUsage for ARMInstr_Call.
(*arr)[i++] = hregARM_D8();
(*arr)[i++] = hregARM_D9();
(*arr)[i++] = hregARM_D10();
(*arr)[i++] = hregARM_D11();
(*arr)[i++] = hregARM_D12();
(*arr)[i++] = hregARM_S26();
(*arr)[i++] = hregARM_S27();
(*arr)[i++] = hregARM_S28();
(*arr)[i++] = hregARM_S29();
(*arr)[i++] = hregARM_S30();
(*arr)[i++] = hregARM_Q8();
(*arr)[i++] = hregARM_Q9();
(*arr)[i++] = hregARM_Q10();
(*arr)[i++] = hregARM_Q11();
(*arr)[i++] = hregARM_Q12();
//(*arr)[i++] = hregARM_Q13();
//(*arr)[i++] = hregARM_Q14();
//(*arr)[i++] = hregARM_Q15();
// unavail: r8 as GSP
// r12 is used as a spill/reload temporary
// r13 as SP
// r14 as LR
// r15 as PC
//
// All in all, we have 11 allocatable integer registers:
// 0 1 2 3 4 5 6 7 9 10 11, with r8 dedicated as GSP
// and r12 dedicated as a spill temporary.
// 13 14 and 15 are not under the allocator's control.
//
// Hence for the allocatable registers we have:
//
// callee-saved: 4 5 6 7 (8) 9 10 11
// caller-saved: 0 1 2 3
// Note 9 is ambiguous: the base EABI does not give an e/r-saved
// designation for it, but the Linux instantiation of the ABI
// specifies it as callee-saved.
//
// If the set of available registers changes or if the e/r status
// changes, be sure to re-check/sync the definition of
// getHRegUsage for ARMInstr_Call too.
vassert(i == *nregs);
}
/* --------- Condition codes, ARM encoding. --------- */
const HChar* showARMCondCode ( ARMCondCode cond ) {
switch (cond) {
case ARMcc_EQ: return "eq";
case ARMcc_NE: return "ne";
case ARMcc_HS: return "hs";
case ARMcc_LO: return "lo";
case ARMcc_MI: return "mi";
case ARMcc_PL: return "pl";
case ARMcc_VS: return "vs";
case ARMcc_VC: return "vc";
case ARMcc_HI: return "hi";
case ARMcc_LS: return "ls";
case ARMcc_GE: return "ge";
case ARMcc_LT: return "lt";
case ARMcc_GT: return "gt";
case ARMcc_LE: return "le";
case ARMcc_AL: return "al"; // default
case ARMcc_NV: return "nv";
default: vpanic("showARMCondCode");
}
}
/* --------- Mem AModes: Addressing Mode 1 --------- */
ARMAMode1* ARMAMode1_RI ( HReg reg, Int simm13 ) {
ARMAMode1* am = LibVEX_Alloc(sizeof(ARMAMode1));
am->tag = ARMam1_RI;
am->ARMam1.RI.reg = reg;
am->ARMam1.RI.simm13 = simm13;
vassert(-4095 <= simm13 && simm13 <= 4095);
return am;
}
ARMAMode1* ARMAMode1_RRS ( HReg base, HReg index, UInt shift ) {
ARMAMode1* am = LibVEX_Alloc(sizeof(ARMAMode1));
am->tag = ARMam1_RRS;
am->ARMam1.RRS.base = base;
am->ARMam1.RRS.index = index;
am->ARMam1.RRS.shift = shift;
vassert(0 <= shift && shift <= 3);
return am;
}
void ppARMAMode1 ( ARMAMode1* am ) {
switch (am->tag) {
case ARMam1_RI:
vex_printf("%d(", am->ARMam1.RI.simm13);
ppHRegARM(am->ARMam1.RI.reg);
vex_printf(")");
break;
case ARMam1_RRS:
vex_printf("(");
ppHRegARM(am->ARMam1.RRS.base);
vex_printf(",");
ppHRegARM(am->ARMam1.RRS.index);
vex_printf(",%u)", am->ARMam1.RRS.shift);
break;
default:
vassert(0);
}
}
static void addRegUsage_ARMAMode1 ( HRegUsage* u, ARMAMode1* am ) {
switch (am->tag) {
case ARMam1_RI:
addHRegUse(u, HRmRead, am->ARMam1.RI.reg);
return;
case ARMam1_RRS:
// addHRegUse(u, HRmRead, am->ARMam1.RRS.base);
// addHRegUse(u, HRmRead, am->ARMam1.RRS.index);
// return;
default:
vpanic("addRegUsage_ARMAmode1");
}
}
static void mapRegs_ARMAMode1 ( HRegRemap* m, ARMAMode1* am ) {
switch (am->tag) {
case ARMam1_RI:
am->ARMam1.RI.reg = lookupHRegRemap(m, am->ARMam1.RI.reg);
return;
case ARMam1_RRS:
//am->ARMam1.RR.base =lookupHRegRemap(m, am->ARMam1.RR.base);
//am->ARMam1.RR.index = lookupHRegRemap(m, am->ARMam1.RR.index);
//return;
default:
vpanic("mapRegs_ARMAmode1");
}
}
/* --------- Mem AModes: Addressing Mode 2 --------- */
ARMAMode2* ARMAMode2_RI ( HReg reg, Int simm9 ) {
ARMAMode2* am = LibVEX_Alloc(sizeof(ARMAMode2));
am->tag = ARMam2_RI;
am->ARMam2.RI.reg = reg;
am->ARMam2.RI.simm9 = simm9;
vassert(-255 <= simm9 && simm9 <= 255);
return am;
}
ARMAMode2* ARMAMode2_RR ( HReg base, HReg index ) {
ARMAMode2* am = LibVEX_Alloc(sizeof(ARMAMode2));
am->tag = ARMam2_RR;
am->ARMam2.RR.base = base;
am->ARMam2.RR.index = index;
return am;
}
void ppARMAMode2 ( ARMAMode2* am ) {
switch (am->tag) {
case ARMam2_RI:
vex_printf("%d(", am->ARMam2.RI.simm9);
ppHRegARM(am->ARMam2.RI.reg);
vex_printf(")");
break;
case ARMam2_RR:
vex_printf("(");
ppHRegARM(am->ARMam2.RR.base);
vex_printf(",");
ppHRegARM(am->ARMam2.RR.index);
vex_printf(")");
break;
default:
vassert(0);
}
}
static void addRegUsage_ARMAMode2 ( HRegUsage* u, ARMAMode2* am ) {
switch (am->tag) {
case ARMam2_RI:
addHRegUse(u, HRmRead, am->ARMam2.RI.reg);
return;
case ARMam2_RR:
// addHRegUse(u, HRmRead, am->ARMam2.RR.base);
// addHRegUse(u, HRmRead, am->ARMam2.RR.index);
// return;
default:
vpanic("addRegUsage_ARMAmode2");
}
}
static void mapRegs_ARMAMode2 ( HRegRemap* m, ARMAMode2* am ) {
switch (am->tag) {
case ARMam2_RI:
am->ARMam2.RI.reg = lookupHRegRemap(m, am->ARMam2.RI.reg);
return;
case ARMam2_RR:
//am->ARMam2.RR.base =lookupHRegRemap(m, am->ARMam2.RR.base);
//am->ARMam2.RR.index = lookupHRegRemap(m, am->ARMam2.RR.index);
//return;
default:
vpanic("mapRegs_ARMAmode2");
}
}
/* --------- Mem AModes: Addressing Mode VFP --------- */
ARMAModeV* mkARMAModeV ( HReg reg, Int simm11 ) {
ARMAModeV* am = LibVEX_Alloc(sizeof(ARMAModeV));
vassert(simm11 >= -1020 && simm11 <= 1020);
vassert(0 == (simm11 & 3));
am->reg = reg;
am->simm11 = simm11;
return am;
}
void ppARMAModeV ( ARMAModeV* am ) {
vex_printf("%d(", am->simm11);
ppHRegARM(am->reg);
vex_printf(")");
}
static void addRegUsage_ARMAModeV ( HRegUsage* u, ARMAModeV* am ) {
addHRegUse(u, HRmRead, am->reg);
}
static void mapRegs_ARMAModeV ( HRegRemap* m, ARMAModeV* am ) {
am->reg = lookupHRegRemap(m, am->reg);
}
/* --------- Mem AModes: Addressing Mode Neon ------- */
ARMAModeN *mkARMAModeN_RR ( HReg rN, HReg rM ) {
ARMAModeN* am = LibVEX_Alloc(sizeof(ARMAModeN));
am->tag = ARMamN_RR;
am->ARMamN.RR.rN = rN;
am->ARMamN.RR.rM = rM;
return am;
}
ARMAModeN *mkARMAModeN_R ( HReg rN ) {
ARMAModeN* am = LibVEX_Alloc(sizeof(ARMAModeN));
am->tag = ARMamN_R;
am->ARMamN.R.rN = rN;
return am;
}
static void addRegUsage_ARMAModeN ( HRegUsage* u, ARMAModeN* am ) {
if (am->tag == ARMamN_R) {
addHRegUse(u, HRmRead, am->ARMamN.R.rN);
} else {
addHRegUse(u, HRmRead, am->ARMamN.RR.rN);
addHRegUse(u, HRmRead, am->ARMamN.RR.rM);
}
}
static void mapRegs_ARMAModeN ( HRegRemap* m, ARMAModeN* am ) {
if (am->tag == ARMamN_R) {
am->ARMamN.R.rN = lookupHRegRemap(m, am->ARMamN.R.rN);
} else {
am->ARMamN.RR.rN = lookupHRegRemap(m, am->ARMamN.RR.rN);
am->ARMamN.RR.rM = lookupHRegRemap(m, am->ARMamN.RR.rM);
}
}
void ppARMAModeN ( ARMAModeN* am ) {
vex_printf("[");
if (am->tag == ARMamN_R) {
ppHRegARM(am->ARMamN.R.rN);
} else {
ppHRegARM(am->ARMamN.RR.rN);
}
vex_printf("]");
if (am->tag == ARMamN_RR) {
vex_printf(", ");
ppHRegARM(am->ARMamN.RR.rM);
}
}
/* --------- Reg or imm-8x4 operands --------- */
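/* An imm-8x4 operand is the standard ARM data-processing immediate:
   an 8-bit value rotated right by twice a 4-bit amount.  ROR32 below
   is used only to display the decoded value. */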
static UInt ROR32 ( UInt x, UInt sh ) {
vassert(sh >= 0 && sh < 32);
if (sh == 0)
return x;
else
return (x << (32-sh)) | (x >> sh);
}
ARMRI84* ARMRI84_I84 ( UShort imm8, UShort imm4 ) {
ARMRI84* ri84 = LibVEX_Alloc(sizeof(ARMRI84));
ri84->tag = ARMri84_I84;
ri84->ARMri84.I84.imm8 = imm8;
ri84->ARMri84.I84.imm4 = imm4;
vassert(imm8 >= 0 && imm8 <= 255);
vassert(imm4 >= 0 && imm4 <= 15);
return ri84;
}
ARMRI84* ARMRI84_R ( HReg reg ) {
ARMRI84* ri84 = LibVEX_Alloc(sizeof(ARMRI84));
ri84->tag = ARMri84_R;
ri84->ARMri84.R.reg = reg;
return ri84;
}
void ppARMRI84 ( ARMRI84* ri84 ) {
switch (ri84->tag) {
case ARMri84_I84:
vex_printf("0x%x", ROR32(ri84->ARMri84.I84.imm8,
2 * ri84->ARMri84.I84.imm4));
break;
case ARMri84_R:
ppHRegARM(ri84->ARMri84.R.reg);
break;
default:
vassert(0);
}
}
static void addRegUsage_ARMRI84 ( HRegUsage* u, ARMRI84* ri84 ) {
switch (ri84->tag) {
case ARMri84_I84:
return;
case ARMri84_R:
addHRegUse(u, HRmRead, ri84->ARMri84.R.reg);
return;
default:
vpanic("addRegUsage_ARMRI84");
}
}
static void mapRegs_ARMRI84 ( HRegRemap* m, ARMRI84* ri84 ) {
switch (ri84->tag) {
case ARMri84_I84:
return;
case ARMri84_R:
ri84->ARMri84.R.reg = lookupHRegRemap(m, ri84->ARMri84.R.reg);
return;
default:
vpanic("mapRegs_ARMRI84");
}
}
/* --------- Reg or imm5 operands --------- */
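/* An imm5 operand is a shift amount in the range 1..31 (zero is
   rejected by the constructor below) or a register. */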
ARMRI5* ARMRI5_I5 ( UInt imm5 ) {
ARMRI5* ri5 = LibVEX_Alloc(sizeof(ARMRI5));
ri5->tag = ARMri5_I5;
ri5->ARMri5.I5.imm5 = imm5;
vassert(imm5 > 0 && imm5 <= 31); // zero is not allowed
return ri5;
}
ARMRI5* ARMRI5_R ( HReg reg ) {
ARMRI5* ri5 = LibVEX_Alloc(sizeof(ARMRI5));
ri5->tag = ARMri5_R;
ri5->ARMri5.R.reg = reg;
return ri5;
}
void ppARMRI5 ( ARMRI5* ri5 ) {
switch (ri5->tag) {
case ARMri5_I5:
vex_printf("%u", ri5->ARMri5.I5.imm5);
break;
case ARMri5_R:
ppHRegARM(ri5->ARMri5.R.reg);
break;
default:
vassert(0);
}
}
static void addRegUsage_ARMRI5 ( HRegUsage* u, ARMRI5* ri5 ) {
switch (ri5->tag) {
case ARMri5_I5:
return;
case ARMri5_R:
addHRegUse(u, HRmRead, ri5->ARMri5.R.reg);
return;
default:
vpanic("addRegUsage_ARMRI5");
}
}
static void mapRegs_ARMRI5 ( HRegRemap* m, ARMRI5* ri5 ) {
switch (ri5->tag) {
case ARMri5_I5:
return;
case ARMri5_R:
ri5->ARMri5.R.reg = lookupHRegRemap(m, ri5->ARMri5.R.reg);
return;
default:
vpanic("mapRegs_ARMRI5");
}
}
/* -------- Neon Immediate operand --------- */
ARMNImm* ARMNImm_TI ( UInt type, UInt imm8 ) {
ARMNImm* i = LibVEX_Alloc(sizeof(ARMNImm));
i->type = type;
i->imm8 = imm8;
return i;
}
ULong ARMNImm_to_Imm64 ( ARMNImm* imm ) {
int i, j;
ULong y, x = imm->imm8;
switch (imm->type) {
case 3:
x = x << 8; /* fallthrough */
case 2:
x = x << 8; /* fallthrough */
case 1:
x = x << 8; /* fallthrough */
case 0:
return (x << 32) | x;
case 5:
case 6:
if (imm->type == 5)
x = x << 8;
else
x = (x << 8) | x;
/* fallthrough */
case 4:
x = (x << 16) | x;
return (x << 32) | x;
case 8:
x = (x << 8) | 0xFF;
/* fallthrough */
case 7:
x = (x << 8) | 0xFF;
return (x << 32) | x;
case 9:
x = 0;
for (i = 7; i >= 0; i--) {
y = ((ULong)imm->imm8 >> i) & 1;
for (j = 0; j < 8; j++) {
x = (x << 1) | y;
}
}
return x;
case 10:
x |= (x & 0x80) << 5;
x |= (~x & 0x40) << 5;
x &= 0x187F; /* 0001 1000 0111 1111 */
x |= (x & 0x40) << 4;
x |= (x & 0x40) << 3;
x |= (x & 0x40) << 2;
x |= (x & 0x40) << 1;
x = x << 19;
x = (x << 32) | x;
return x;
default:
vpanic("ARMNImm_to_Imm64");
}
}
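/* Try to find a (type, imm8) encoding for the 64-bit value x.
   Returns NULL if x is not representable as a Neon immediate. */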
ARMNImm* Imm64_to_ARMNImm ( ULong x ) {
ARMNImm tmp;
if ((x & 0xFFFFFFFF) == (x >> 32)) {
if ((x & 0xFFFFFF00) == 0)
return ARMNImm_TI(0, x & 0xFF);
if ((x & 0xFFFF00FF) == 0)
return ARMNImm_TI(1, (x >> 8) & 0xFF);
if ((x & 0xFF00FFFF) == 0)
return ARMNImm_TI(2, (x >> 16) & 0xFF);
if ((x & 0x00FFFFFF) == 0)
return ARMNImm_TI(3, (x >> 24) & 0xFF);
if ((x & 0xFFFF00FF) == 0xFF)
return ARMNImm_TI(7, (x >> 8) & 0xFF);
if ((x & 0xFF00FFFF) == 0xFFFF)
return ARMNImm_TI(8, (x >> 16) & 0xFF);
if ((x & 0xFFFF) == ((x >> 16) & 0xFFFF)) {
if ((x & 0xFF00) == 0)
return ARMNImm_TI(4, x & 0xFF);
if ((x & 0x00FF) == 0)
return ARMNImm_TI(5, (x >> 8) & 0xFF);
if ((x & 0xFF) == ((x >> 8) & 0xFF))
return ARMNImm_TI(6, x & 0xFF);
}
if ((x & 0x7FFFF) == 0) {
tmp.type = 10;
tmp.imm8 = ((x >> 19) & 0x7F) | ((x >> 24) & 0x80);
if (ARMNImm_to_Imm64(&tmp) == x)
return ARMNImm_TI(tmp.type, tmp.imm8);
}
} else {
/* This can only be type 9. */
tmp.imm8 = (((x >> 56) & 1) << 7)
| (((x >> 48) & 1) << 6)
| (((x >> 40) & 1) << 5)
| (((x >> 32) & 1) << 4)
| (((x >> 24) & 1) << 3)
| (((x >> 16) & 1) << 2)
| (((x >> 8) & 1) << 1)
| (((x >> 0) & 1) << 0);
tmp.type = 9;
if (ARMNImm_to_Imm64 (&tmp) == x)
return ARMNImm_TI(tmp.type, tmp.imm8);
}
return NULL;
}
void ppARMNImm (ARMNImm* i) {
ULong x = ARMNImm_to_Imm64(i);
vex_printf("0x%llX%llX", x, x);
}
/* -- Register or scalar operand --- */
ARMNRS* mkARMNRS(ARMNRS_tag tag, HReg reg, UInt index)
{
ARMNRS *p = LibVEX_Alloc(sizeof(ARMNRS));
p->tag = tag;
p->reg = reg;
p->index = index;
return p;
}
void ppARMNRS(ARMNRS *p)
{
ppHRegARM(p->reg);
if (p->tag == ARMNRS_Scalar) {
vex_printf("[%d]", p->index);
}
}
/* --------- Instructions. --------- */
const HChar* showARMAluOp ( ARMAluOp op ) {
switch (op) {
case ARMalu_ADD: return "add";
case ARMalu_ADDS: return "adds";
case ARMalu_ADC: return "adc";
case ARMalu_SUB: return "sub";
case ARMalu_SUBS: return "subs";
case ARMalu_SBC: return "sbc";
case ARMalu_AND: return "and";
case ARMalu_BIC: return "bic";
case ARMalu_OR: return "orr";
case ARMalu_XOR: return "xor";
default: vpanic("showARMAluOp");
}
}
const HChar* showARMShiftOp ( ARMShiftOp op ) {
switch (op) {
case ARMsh_SHL: return "shl";
case ARMsh_SHR: return "shr";
case ARMsh_SAR: return "sar";
default: vpanic("showARMShiftOp");
}
}
const HChar* showARMUnaryOp ( ARMUnaryOp op ) {
switch (op) {
case ARMun_NEG: return "neg";
case ARMun_NOT: return "not";
case ARMun_CLZ: return "clz";
default: vpanic("showARMUnaryOp");
}
}
const HChar* showARMMulOp ( ARMMulOp op ) {
switch (op) {
case ARMmul_PLAIN: return "mul";
case ARMmul_ZX: return "umull";
case ARMmul_SX: return "smull";
default: vpanic("showARMMulOp");
}
}
const HChar* showARMVfpOp ( ARMVfpOp op ) {
switch (op) {
case ARMvfp_ADD: return "add";
case ARMvfp_SUB: return "sub";
case ARMvfp_MUL: return "mul";
case ARMvfp_DIV: return "div";
default: vpanic("showARMVfpOp");
}
}
const HChar* showARMVfpUnaryOp ( ARMVfpUnaryOp op ) {
switch (op) {
case ARMvfpu_COPY: return "cpy";
case ARMvfpu_NEG: return "neg";
case ARMvfpu_ABS: return "abs";
case ARMvfpu_SQRT: return "sqrt";
default: vpanic("showARMVfpUnaryOp");
}
}
const HChar* showARMNeonBinOp ( ARMNeonBinOp op ) {
switch (op) {
case ARMneon_VAND: return "vand";
case ARMneon_VORR: return "vorr";
case ARMneon_VXOR: return "veor";
case ARMneon_VADD: return "vadd";
case ARMneon_VRHADDS: return "vrhadd";
case ARMneon_VRHADDU: return "vrhadd";
case ARMneon_VADDFP: return "vadd";
case ARMneon_VPADDFP: return "vpadd";
case ARMneon_VABDFP: return "vabd";
case ARMneon_VSUB: return "vsub";
case ARMneon_VSUBFP: return "vsub";
case ARMneon_VMINU: return "vmin";
case ARMneon_VMINS: return "vmin";
case ARMneon_VMINF: return "vmin";
case ARMneon_VMAXU: return "vmax";
case ARMneon_VMAXS: return "vmax";
case ARMneon_VMAXF: return "vmax";
case ARMneon_VQADDU: return "vqadd";
case ARMneon_VQADDS: return "vqadd";
case ARMneon_VQSUBU: return "vqsub";
case ARMneon_VQSUBS: return "vqsub";
case ARMneon_VCGTU: return "vcgt";
case ARMneon_VCGTS: return "vcgt";
case ARMneon_VCGTF: return "vcgt";
case ARMneon_VCGEF: return "vcgt";
case ARMneon_VCGEU: return "vcge";
case ARMneon_VCGES: return "vcge";
case ARMneon_VCEQ: return "vceq";
case ARMneon_VCEQF: return "vceq";
case ARMneon_VPADD: return "vpadd";
case ARMneon_VPMINU: return "vpmin";
case ARMneon_VPMINS: return "vpmin";
case ARMneon_VPMINF: return "vpmin";
case ARMneon_VPMAXU: return "vpmax";
case ARMneon_VPMAXS: return "vpmax";
case ARMneon_VPMAXF: return "vpmax";
case ARMneon_VEXT: return "vext";
case ARMneon_VMUL: return "vmuli";
case ARMneon_VMULLU: return "vmull";
case ARMneon_VMULLS: return "vmull";
case ARMneon_VMULP: return "vmul";
case ARMneon_VMULFP: return "vmul";
case ARMneon_VMULLP: return "vmul";
case ARMneon_VQDMULH: return "vqdmulh";
case ARMneon_VQRDMULH: return "vqrdmulh";
case ARMneon_VQDMULL: return "vqdmull";
case ARMneon_VTBL: return "vtbl";
case ARMneon_VRECPS: return "vrecps";
case ARMneon_VRSQRTS: return "vrecps";
case ARMneon_INVALID: return "??invalid??";
/* ... */
default: vpanic("showARMNeonBinOp");
}
}
const HChar* showARMNeonBinOpDataType ( ARMNeonBinOp op ) {
switch (op) {
case ARMneon_VAND:
case ARMneon_VORR:
case ARMneon_VXOR:
return "";
case ARMneon_VADD:
case ARMneon_VSUB:
case ARMneon_VEXT:
case ARMneon_VMUL:
case ARMneon_VPADD:
case ARMneon_VTBL:
case ARMneon_VCEQ:
return ".i";
case ARMneon_VRHADDU:
case ARMneon_VMINU:
case ARMneon_VMAXU:
case ARMneon_VQADDU:
case ARMneon_VQSUBU:
case ARMneon_VCGTU:
case ARMneon_VCGEU:
case ARMneon_VMULLU:
case ARMneon_VPMINU:
case ARMneon_VPMAXU:
return ".u";
case ARMneon_VRHADDS:
case ARMneon_VMINS:
case ARMneon_VMAXS:
case ARMneon_VQADDS:
case ARMneon_VQSUBS:
case ARMneon_VCGTS:
case ARMneon_VCGES:
case ARMneon_VQDMULL:
case ARMneon_VMULLS:
case ARMneon_VPMINS:
case ARMneon_VPMAXS:
case ARMneon_VQDMULH:
case ARMneon_VQRDMULH:
return ".s";
case ARMneon_VMULP:
case ARMneon_VMULLP:
return ".p";
case ARMneon_VADDFP:
case ARMneon_VABDFP:
case ARMneon_VPADDFP:
case ARMneon_VSUBFP:
case ARMneon_VMULFP:
case ARMneon_VMINF:
case ARMneon_VMAXF:
case ARMneon_VPMINF:
case ARMneon_VPMAXF:
case ARMneon_VCGTF:
case ARMneon_VCGEF:
case ARMneon_VCEQF:
case ARMneon_VRECPS:
case ARMneon_VRSQRTS:
return ".f";
/* ... */
default: vpanic("showARMNeonBinOpDataType");
}
}
const HChar* showARMNeonUnOp ( ARMNeonUnOp op ) {
switch (op) {
case ARMneon_COPY: return "vmov";
case ARMneon_COPYLS: return "vmov";
case ARMneon_COPYLU: return "vmov";
case ARMneon_COPYN: return "vmov";
case ARMneon_COPYQNSS: return "vqmovn";
case ARMneon_COPYQNUS: return "vqmovun";
case ARMneon_COPYQNUU: return "vqmovn";
case ARMneon_NOT: return "vmvn";
case ARMneon_EQZ: return "vceq";
case ARMneon_CNT: return "vcnt";
case ARMneon_CLS: return "vcls";
case ARMneon_CLZ: return "vclz";
case ARMneon_DUP: return "vdup";
case ARMneon_PADDLS: return "vpaddl";
case ARMneon_PADDLU: return "vpaddl";
case ARMneon_VQSHLNSS: return "vqshl";
case ARMneon_VQSHLNUU: return "vqshl";
case ARMneon_VQSHLNUS: return "vqshlu";
case ARMneon_REV16: return "vrev16";
case ARMneon_REV32: return "vrev32";
case ARMneon_REV64: return "vrev64";
case ARMneon_VCVTFtoU: return "vcvt";
case ARMneon_VCVTFtoS: return "vcvt";
case ARMneon_VCVTUtoF: return "vcvt";
case ARMneon_VCVTStoF: return "vcvt";
case ARMneon_VCVTFtoFixedU: return "vcvt";
case ARMneon_VCVTFtoFixedS: return "vcvt";
case ARMneon_VCVTFixedUtoF: return "vcvt";
case ARMneon_VCVTFixedStoF: return "vcvt";
case ARMneon_VCVTF32toF16: return "vcvt";
case ARMneon_VCVTF16toF32: return "vcvt";
case ARMneon_VRECIP: return "vrecip";
case ARMneon_VRECIPF: return "vrecipf";
case ARMneon_VNEGF: return "vneg";
case ARMneon_ABS: return "vabs";
case ARMneon_VABSFP: return "vabsfp";
case ARMneon_VRSQRTEFP: return "vrsqrtefp";
case ARMneon_VRSQRTE: return "vrsqrte";
/* ... */
default: vpanic("showARMNeonUnOp");
}
}
const HChar* showARMNeonUnOpDataType ( ARMNeonUnOp op ) {
switch (op) {
case ARMneon_COPY:
case ARMneon_NOT:
return "";
case ARMneon_COPYN:
case ARMneon_EQZ:
case ARMneon_CNT:
case ARMneon_DUP:
case ARMneon_REV16:
case ARMneon_REV32:
case ARMneon_REV64:
return ".i";
case ARMneon_COPYLU:
case ARMneon_PADDLU:
case ARMneon_COPYQNUU:
case ARMneon_VQSHLNUU:
case ARMneon_VRECIP:
case ARMneon_VRSQRTE:
return ".u";
case ARMneon_CLS:
case ARMneon_CLZ:
case ARMneon_COPYLS:
case ARMneon_PADDLS:
case ARMneon_COPYQNSS:
case ARMneon_COPYQNUS:
case ARMneon_VQSHLNSS:
case ARMneon_VQSHLNUS:
case ARMneon_ABS:
return ".s";
case ARMneon_VRECIPF:
case ARMneon_VNEGF:
case ARMneon_VABSFP:
case ARMneon_VRSQRTEFP:
return ".f";
case ARMneon_VCVTFtoU: return ".u32.f32";
case ARMneon_VCVTFtoS: return ".s32.f32";
case ARMneon_VCVTUtoF: return ".f32.u32";
case ARMneon_VCVTStoF: return ".f32.s32";
case ARMneon_VCVTF16toF32: return ".f32.f16";
case ARMneon_VCVTF32toF16: return ".f16.f32";
case ARMneon_VCVTFtoFixedU: return ".u32.f32";
case ARMneon_VCVTFtoFixedS: return ".s32.f32";
case ARMneon_VCVTFixedUtoF: return ".f32.u32";
case ARMneon_VCVTFixedStoF: return ".f32.s32";
/* ... */
default: vpanic("showARMNeonUnOpDataType");
}
}
const HChar* showARMNeonUnOpS ( ARMNeonUnOpS op ) {
switch (op) {
case ARMneon_SETELEM: return "vmov";
case ARMneon_GETELEMU: return "vmov";
case ARMneon_GETELEMS: return "vmov";
case ARMneon_VDUP: return "vdup";
/* ... */
default: vpanic("showARMNeonUnarySOp");
}
}
const HChar* showARMNeonUnOpSDataType ( ARMNeonUnOpS op ) {
switch (op) {
case ARMneon_SETELEM:
case ARMneon_VDUP:
return ".i";
case ARMneon_GETELEMS:
return ".s";
case ARMneon_GETELEMU:
return ".u";
/* ... */
default: vpanic("showARMNeonUnarySOp");
}
}
const HChar* showARMNeonShiftOp ( ARMNeonShiftOp op ) {
switch (op) {
case ARMneon_VSHL: return "vshl";
case ARMneon_VSAL: return "vshl";
case ARMneon_VQSHL: return "vqshl";
case ARMneon_VQSAL: return "vqshl";
/* ... */
default: vpanic("showARMNeonShiftOp");
}
}
const HChar* showARMNeonShiftOpDataType ( ARMNeonShiftOp op ) {
switch (op) {
case ARMneon_VSHL:
case ARMneon_VQSHL:
return ".u";
case ARMneon_VSAL:
case ARMneon_VQSAL:
return ".s";
/* ... */
default: vpanic("showARMNeonShiftOpDataType");
}
}
const HChar* showARMNeonDualOp ( ARMNeonDualOp op ) {
switch (op) {
case ARMneon_TRN: return "vtrn";
case ARMneon_ZIP: return "vzip";
case ARMneon_UZP: return "vuzp";
/* ... */
default: vpanic("showARMNeonDualOp");
}
}
const HChar* showARMNeonDualOpDataType ( ARMNeonDualOp op ) {
switch (op) {
case ARMneon_TRN:
case ARMneon_ZIP:
case ARMneon_UZP:
return "i";
/* ... */
default: vpanic("showARMNeonDualOp");
}
}
static const HChar* showARMNeonDataSize_wrk ( UInt size )
{
switch (size) {
case 0: return "8";
case 1: return "16";
case 2: return "32";
case 3: return "64";
default: vpanic("showARMNeonDataSize");
}
}
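/* Pick the ".8"/".16"/".32"/".64" size suffix for an instruction.
   Bitwise ops get no suffix, VEXT is always ".8", and the VQSHLN*
   and VDUP cases decode the element size from the immediate-encoded
   shift/index rather than from a plain 0..3 size field. */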
static const HChar* showARMNeonDataSize ( const ARMInstr* i )
{
switch (i->tag) {
case ARMin_NBinary:
if (i->ARMin.NBinary.op == ARMneon_VEXT)
return "8";
if (i->ARMin.NBinary.op == ARMneon_VAND ||
i->ARMin.NBinary.op == ARMneon_VORR ||
i->ARMin.NBinary.op == ARMneon_VXOR)
return "";
return showARMNeonDataSize_wrk(i->ARMin.NBinary.size);
case ARMin_NUnary:
if (i->ARMin.NUnary.op == ARMneon_COPY ||
i->ARMin.NUnary.op == ARMneon_NOT ||
i->ARMin.NUnary.op == ARMneon_VCVTF32toF16||
i->ARMin.NUnary.op == ARMneon_VCVTF16toF32||
i->ARMin.NUnary.op == ARMneon_VCVTFtoFixedS ||
i->ARMin.NUnary.op == ARMneon_VCVTFtoFixedU ||
i->ARMin.NUnary.op == ARMneon_VCVTFixedStoF ||
i->ARMin.NUnary.op == ARMneon_VCVTFixedUtoF ||
i->ARMin.NUnary.op == ARMneon_VCVTFtoS ||
i->ARMin.NUnary.op == ARMneon_VCVTFtoU ||
i->ARMin.NUnary.op == ARMneon_VCVTStoF ||
i->ARMin.NUnary.op == ARMneon_VCVTUtoF)
return "";
if (i->ARMin.NUnary.op == ARMneon_VQSHLNSS ||
i->ARMin.NUnary.op == ARMneon_VQSHLNUU ||
i->ARMin.NUnary.op == ARMneon_VQSHLNUS) {
UInt size;
size = i->ARMin.NUnary.size;
if (size & 0x40)
return "64";
if (size & 0x20)
return "32";
if (size & 0x10)
return "16";
if (size & 0x08)
return "8";
vpanic("showARMNeonDataSize");
}
return showARMNeonDataSize_wrk(i->ARMin.NUnary.size);
case ARMin_NUnaryS:
if (i->ARMin.NUnaryS.op == ARMneon_VDUP) {
int size;
size = i->ARMin.NUnaryS.size;
if ((size & 1) == 1)
return "8";
if ((size & 3) == 2)
return "16";
if ((size & 7) == 4)
return "32";
vpanic("showARMNeonDataSize");
}
return showARMNeonDataSize_wrk(i->ARMin.NUnaryS.size);
case ARMin_NShift:
return showARMNeonDataSize_wrk(i->ARMin.NShift.size);
case ARMin_NDual:
return showARMNeonDataSize_wrk(i->ARMin.NDual.size);
default:
vpanic("showARMNeonDataSize");
}
}
ARMInstr* ARMInstr_Alu ( ARMAluOp op,
HReg dst, HReg argL, ARMRI84* argR ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_Alu;
i->ARMin.Alu.op = op;
i->ARMin.Alu.dst = dst;
i->ARMin.Alu.argL = argL;
i->ARMin.Alu.argR = argR;
return i;
}
ARMInstr* ARMInstr_Shift ( ARMShiftOp op,
HReg dst, HReg argL, ARMRI5* argR ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_Shift;
i->ARMin.Shift.op = op;
i->ARMin.Shift.dst = dst;
i->ARMin.Shift.argL = argL;
i->ARMin.Shift.argR = argR;
return i;
}
ARMInstr* ARMInstr_Unary ( ARMUnaryOp op, HReg dst, HReg src ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_Unary;
i->ARMin.Unary.op = op;
i->ARMin.Unary.dst = dst;
i->ARMin.Unary.src = src;
return i;
}
ARMInstr* ARMInstr_CmpOrTst ( Bool isCmp, HReg argL, ARMRI84* argR ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_CmpOrTst;
i->ARMin.CmpOrTst.isCmp = isCmp;
i->ARMin.CmpOrTst.argL = argL;
i->ARMin.CmpOrTst.argR = argR;
return i;
}
ARMInstr* ARMInstr_Mov ( HReg dst, ARMRI84* src ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_Mov;
i->ARMin.Mov.dst = dst;
i->ARMin.Mov.src = src;
return i;
}
ARMInstr* ARMInstr_Imm32 ( HReg dst, UInt imm32 ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_Imm32;
i->ARMin.Imm32.dst = dst;
i->ARMin.Imm32.imm32 = imm32;
return i;
}
ARMInstr* ARMInstr_LdSt32 ( ARMCondCode cc,
Bool isLoad, HReg rD, ARMAMode1* amode ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_LdSt32;
i->ARMin.LdSt32.cc = cc;
i->ARMin.LdSt32.isLoad = isLoad;
i->ARMin.LdSt32.rD = rD;
i->ARMin.LdSt32.amode = amode;
vassert(cc != ARMcc_NV);
return i;
}
ARMInstr* ARMInstr_LdSt16 ( ARMCondCode cc,
Bool isLoad, Bool signedLoad,
HReg rD, ARMAMode2* amode ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_LdSt16;
i->ARMin.LdSt16.cc = cc;
i->ARMin.LdSt16.isLoad = isLoad;
i->ARMin.LdSt16.signedLoad = signedLoad;
i->ARMin.LdSt16.rD = rD;
i->ARMin.LdSt16.amode = amode;
vassert(cc != ARMcc_NV);
return i;
}
ARMInstr* ARMInstr_LdSt8U ( ARMCondCode cc,
Bool isLoad, HReg rD, ARMAMode1* amode ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_LdSt8U;
i->ARMin.LdSt8U.cc = cc;
i->ARMin.LdSt8U.isLoad = isLoad;
i->ARMin.LdSt8U.rD = rD;
i->ARMin.LdSt8U.amode = amode;
vassert(cc != ARMcc_NV);
return i;
}
ARMInstr* ARMInstr_Ld8S ( ARMCondCode cc, HReg rD, ARMAMode2* amode ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_Ld8S;
i->ARMin.Ld8S.cc = cc;
i->ARMin.Ld8S.rD = rD;
i->ARMin.Ld8S.amode = amode;
vassert(cc != ARMcc_NV);
return i;
}
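/* The three X* instructions are conditional block exits: XDirect
   jumps to a known guest address (via the chain-me stubs, judging
   from the expansion in ppARMInstr), XIndir jumps to a guest
   address held in a register, and XAssisted returns control to the
   dispatcher with an IRJumpKind saying why. */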
ARMInstr* ARMInstr_XDirect ( Addr32 dstGA, ARMAMode1* amR15T,
ARMCondCode cond, Bool toFastEP ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_XDirect;
i->ARMin.XDirect.dstGA = dstGA;
i->ARMin.XDirect.amR15T = amR15T;
i->ARMin.XDirect.cond = cond;
i->ARMin.XDirect.toFastEP = toFastEP;
return i;
}
ARMInstr* ARMInstr_XIndir ( HReg dstGA, ARMAMode1* amR15T,
ARMCondCode cond ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_XIndir;
i->ARMin.XIndir.dstGA = dstGA;
i->ARMin.XIndir.amR15T = amR15T;
i->ARMin.XIndir.cond = cond;
return i;
}
ARMInstr* ARMInstr_XAssisted ( HReg dstGA, ARMAMode1* amR15T,
ARMCondCode cond, IRJumpKind jk ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_XAssisted;
i->ARMin.XAssisted.dstGA = dstGA;
i->ARMin.XAssisted.amR15T = amR15T;
i->ARMin.XAssisted.cond = cond;
i->ARMin.XAssisted.jk = jk;
return i;
}
ARMInstr* ARMInstr_CMov ( ARMCondCode cond, HReg dst, ARMRI84* src ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_CMov;
i->ARMin.CMov.cond = cond;
i->ARMin.CMov.dst = dst;
i->ARMin.CMov.src = src;
vassert(cond != ARMcc_AL);
return i;
}
ARMInstr* ARMInstr_Call ( ARMCondCode cond, HWord target, Int nArgRegs,
RetLoc rloc ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_Call;
i->ARMin.Call.cond = cond;
i->ARMin.Call.target = target;
i->ARMin.Call.nArgRegs = nArgRegs;
i->ARMin.Call.rloc = rloc;
vassert(is_sane_RetLoc(rloc));
return i;
}
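/* Mul uses fixed registers: the operands are r2 and r3, the result
   goes to r0, and r1:r0 for the widening forms -- see ppARMInstr
   and getRegUsage_ARMInstr below. */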
ARMInstr* ARMInstr_Mul ( ARMMulOp op ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_Mul;
i->ARMin.Mul.op = op;
return i;
}
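/* LdrEX/StrEX likewise use fixed registers: the address is in r4,
   the data in r2 (r3:r2 for the 8-byte case), and StrEX writes its
   success flag to r0. */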
ARMInstr* ARMInstr_LdrEX ( Int szB ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_LdrEX;
i->ARMin.LdrEX.szB = szB;
vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
return i;
}
ARMInstr* ARMInstr_StrEX ( Int szB ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_StrEX;
i->ARMin.StrEX.szB = szB;
vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
return i;
}
ARMInstr* ARMInstr_VLdStD ( Bool isLoad, HReg dD, ARMAModeV* am ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_VLdStD;
i->ARMin.VLdStD.isLoad = isLoad;
i->ARMin.VLdStD.dD = dD;
i->ARMin.VLdStD.amode = am;
return i;
}
ARMInstr* ARMInstr_VLdStS ( Bool isLoad, HReg fD, ARMAModeV* am ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_VLdStS;
i->ARMin.VLdStS.isLoad = isLoad;
i->ARMin.VLdStS.fD = fD;
i->ARMin.VLdStS.amode = am;
return i;
}
ARMInstr* ARMInstr_VAluD ( ARMVfpOp op, HReg dst, HReg argL, HReg argR ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_VAluD;
i->ARMin.VAluD.op = op;
i->ARMin.VAluD.dst = dst;
i->ARMin.VAluD.argL = argL;
i->ARMin.VAluD.argR = argR;
return i;
}
ARMInstr* ARMInstr_VAluS ( ARMVfpOp op, HReg dst, HReg argL, HReg argR ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_VAluS;
i->ARMin.VAluS.op = op;
i->ARMin.VAluS.dst = dst;
i->ARMin.VAluS.argL = argL;
i->ARMin.VAluS.argR = argR;
return i;
}
ARMInstr* ARMInstr_VUnaryD ( ARMVfpUnaryOp op, HReg dst, HReg src ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_VUnaryD;
i->ARMin.VUnaryD.op = op;
i->ARMin.VUnaryD.dst = dst;
i->ARMin.VUnaryD.src = src;
return i;
}
ARMInstr* ARMInstr_VUnaryS ( ARMVfpUnaryOp op, HReg dst, HReg src ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_VUnaryS;
i->ARMin.VUnaryS.op = op;
i->ARMin.VUnaryS.dst = dst;
i->ARMin.VUnaryS.src = src;
return i;
}
ARMInstr* ARMInstr_VCmpD ( HReg argL, HReg argR ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_VCmpD;
i->ARMin.VCmpD.argL = argL;
i->ARMin.VCmpD.argR = argR;
return i;
}
ARMInstr* ARMInstr_VCMovD ( ARMCondCode cond, HReg dst, HReg src ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_VCMovD;
i->ARMin.VCMovD.cond = cond;
i->ARMin.VCMovD.dst = dst;
i->ARMin.VCMovD.src = src;
vassert(cond != ARMcc_AL);
return i;
}
ARMInstr* ARMInstr_VCMovS ( ARMCondCode cond, HReg dst, HReg src ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_VCMovS;
i->ARMin.VCMovS.cond = cond;
i->ARMin.VCMovS.dst = dst;
i->ARMin.VCMovS.src = src;
vassert(cond != ARMcc_AL);
return i;
}
ARMInstr* ARMInstr_VCvtSD ( Bool sToD, HReg dst, HReg src ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_VCvtSD;
i->ARMin.VCvtSD.sToD = sToD;
i->ARMin.VCvtSD.dst = dst;
i->ARMin.VCvtSD.src = src;
return i;
}
ARMInstr* ARMInstr_VXferD ( Bool toD, HReg dD, HReg rHi, HReg rLo ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_VXferD;
i->ARMin.VXferD.toD = toD;
i->ARMin.VXferD.dD = dD;
i->ARMin.VXferD.rHi = rHi;
i->ARMin.VXferD.rLo = rLo;
return i;
}
ARMInstr* ARMInstr_VXferS ( Bool toS, HReg fD, HReg rLo ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_VXferS;
i->ARMin.VXferS.toS = toS;
i->ARMin.VXferS.fD = fD;
i->ARMin.VXferS.rLo = rLo;
return i;
}
ARMInstr* ARMInstr_VCvtID ( Bool iToD, Bool syned,
HReg dst, HReg src ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_VCvtID;
i->ARMin.VCvtID.iToD = iToD;
i->ARMin.VCvtID.syned = syned;
i->ARMin.VCvtID.dst = dst;
i->ARMin.VCvtID.src = src;
return i;
}
ARMInstr* ARMInstr_FPSCR ( Bool toFPSCR, HReg iReg ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_FPSCR;
i->ARMin.FPSCR.toFPSCR = toFPSCR;
i->ARMin.FPSCR.iReg = iReg;
return i;
}
ARMInstr* ARMInstr_MFence ( void ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_MFence;
return i;
}
ARMInstr* ARMInstr_CLREX( void ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_CLREX;
return i;
}
ARMInstr* ARMInstr_NLdStQ ( Bool isLoad, HReg dQ, ARMAModeN *amode ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_NLdStQ;
i->ARMin.NLdStQ.isLoad = isLoad;
i->ARMin.NLdStQ.dQ = dQ;
i->ARMin.NLdStQ.amode = amode;
return i;
}
ARMInstr* ARMInstr_NLdStD ( Bool isLoad, HReg dD, ARMAModeN *amode ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_NLdStD;
i->ARMin.NLdStD.isLoad = isLoad;
i->ARMin.NLdStD.dD = dD;
i->ARMin.NLdStD.amode = amode;
return i;
}
ARMInstr* ARMInstr_NUnary ( ARMNeonUnOp op, HReg dQ, HReg nQ,
UInt size, Bool Q ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_NUnary;
i->ARMin.NUnary.op = op;
i->ARMin.NUnary.src = nQ;
i->ARMin.NUnary.dst = dQ;
i->ARMin.NUnary.size = size;
i->ARMin.NUnary.Q = Q;
return i;
}
ARMInstr* ARMInstr_NUnaryS ( ARMNeonUnOpS op, ARMNRS* dst, ARMNRS* src,
UInt size, Bool Q ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_NUnaryS;
i->ARMin.NUnaryS.op = op;
i->ARMin.NUnaryS.src = src;
i->ARMin.NUnaryS.dst = dst;
i->ARMin.NUnaryS.size = size;
i->ARMin.NUnaryS.Q = Q;
return i;
}
ARMInstr* ARMInstr_NDual ( ARMNeonDualOp op, HReg nQ, HReg mQ,
UInt size, Bool Q ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_NDual;
i->ARMin.NDual.op = op;
i->ARMin.NDual.arg1 = nQ;
i->ARMin.NDual.arg2 = mQ;
i->ARMin.NDual.size = size;
i->ARMin.NDual.Q = Q;
return i;
}
ARMInstr* ARMInstr_NBinary ( ARMNeonBinOp op,
HReg dst, HReg argL, HReg argR,
UInt size, Bool Q ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_NBinary;
i->ARMin.NBinary.op = op;
i->ARMin.NBinary.argL = argL;
i->ARMin.NBinary.argR = argR;
i->ARMin.NBinary.dst = dst;
i->ARMin.NBinary.size = size;
i->ARMin.NBinary.Q = Q;
return i;
}
ARMInstr* ARMInstr_NeonImm (HReg dst, ARMNImm* imm ) {
ARMInstr *i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_NeonImm;
i->ARMin.NeonImm.dst = dst;
i->ARMin.NeonImm.imm = imm;
return i;
}
ARMInstr* ARMInstr_NCMovQ ( ARMCondCode cond, HReg dst, HReg src ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_NCMovQ;
i->ARMin.NCMovQ.cond = cond;
i->ARMin.NCMovQ.dst = dst;
i->ARMin.NCMovQ.src = src;
vassert(cond != ARMcc_AL);
return i;
}
ARMInstr* ARMInstr_NShift ( ARMNeonShiftOp op,
HReg dst, HReg argL, HReg argR,
UInt size, Bool Q ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_NShift;
i->ARMin.NShift.op = op;
i->ARMin.NShift.argL = argL;
i->ARMin.NShift.argR = argR;
i->ARMin.NShift.dst = dst;
i->ARMin.NShift.size = size;
i->ARMin.NShift.Q = Q;
return i;
}
ARMInstr* ARMInstr_NShl64 ( HReg dst, HReg src, UInt amt )
{
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_NShl64;
i->ARMin.NShl64.dst = dst;
i->ARMin.NShl64.src = src;
i->ARMin.NShl64.amt = amt;
vassert(amt >= 1 && amt <= 63);
return i;
}
/* Helper copy-pasted from isel.c */
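/* Decide whether u is representable as an imm-8x4, i.e. an 8-bit
   value rotated right by an even amount; if so, return the imm8 and
   imm4 pieces. */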
static Bool fitsIn8x4 ( UInt* u8, UInt* u4, UInt u )
{
UInt i;
for (i = 0; i < 16; i++) {
if (0 == (u & 0xFFFFFF00)) {
*u8 = u;
*u4 = i;
return True;
}
u = ROR32(u, 30);
}
vassert(i == 16);
return False;
}
ARMInstr* ARMInstr_Add32 ( HReg rD, HReg rN, UInt imm32 ) {
UInt u8, u4;
ARMInstr *i = LibVEX_Alloc(sizeof(ARMInstr));
/* Try to generate single ADD if possible */
if (fitsIn8x4(&u8, &u4, imm32)) {
i->tag = ARMin_Alu;
i->ARMin.Alu.op = ARMalu_ADD;
i->ARMin.Alu.dst = rD;
i->ARMin.Alu.argL = rN;
i->ARMin.Alu.argR = ARMRI84_I84(u8, u4);
} else {
i->tag = ARMin_Add32;
i->ARMin.Add32.rD = rD;
i->ARMin.Add32.rN = rN;
i->ARMin.Add32.imm32 = imm32;
}
return i;
}
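/* EvCheck decrements the event counter at amCounter and, when it
   goes negative, branches to the address loaded from amFailAddr --
   see the expansion printed by ppARMInstr. */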
ARMInstr* ARMInstr_EvCheck ( ARMAMode1* amCounter,
ARMAMode1* amFailAddr ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_EvCheck;
i->ARMin.EvCheck.amCounter = amCounter;
i->ARMin.EvCheck.amFailAddr = amFailAddr;
return i;
}
ARMInstr* ARMInstr_ProfInc ( void ) {
ARMInstr* i = LibVEX_Alloc(sizeof(ARMInstr));
i->tag = ARMin_ProfInc;
return i;
}
/* ... */
void ppARMInstr ( const ARMInstr* i ) {
switch (i->tag) {
case ARMin_Alu:
vex_printf("%-4s ", showARMAluOp(i->ARMin.Alu.op));
ppHRegARM(i->ARMin.Alu.dst);
vex_printf(", ");
ppHRegARM(i->ARMin.Alu.argL);
vex_printf(", ");
ppARMRI84(i->ARMin.Alu.argR);
return;
case ARMin_Shift:
vex_printf("%s ", showARMShiftOp(i->ARMin.Shift.op));
ppHRegARM(i->ARMin.Shift.dst);
vex_printf(", ");
ppHRegARM(i->ARMin.Shift.argL);
vex_printf(", ");
ppARMRI5(i->ARMin.Shift.argR);
return;
case ARMin_Unary:
vex_printf("%s ", showARMUnaryOp(i->ARMin.Unary.op));
ppHRegARM(i->ARMin.Unary.dst);
vex_printf(", ");
ppHRegARM(i->ARMin.Unary.src);
return;
case ARMin_CmpOrTst:
vex_printf("%s ", i->ARMin.CmpOrTst.isCmp ? "cmp" : "tst");
ppHRegARM(i->ARMin.CmpOrTst.argL);
vex_printf(", ");
ppARMRI84(i->ARMin.CmpOrTst.argR);
return;
case ARMin_Mov:
vex_printf("mov ");
ppHRegARM(i->ARMin.Mov.dst);
vex_printf(", ");
ppARMRI84(i->ARMin.Mov.src);
return;
case ARMin_Imm32:
vex_printf("imm ");
ppHRegARM(i->ARMin.Imm32.dst);
vex_printf(", 0x%x", i->ARMin.Imm32.imm32);
return;
case ARMin_LdSt32:
if (i->ARMin.LdSt32.isLoad) {
vex_printf("ldr%s ", i->ARMin.LdSt32.cc == ARMcc_AL ? " "
: showARMCondCode(i->ARMin.LdSt32.cc));
ppHRegARM(i->ARMin.LdSt32.rD);
vex_printf(", ");
ppARMAMode1(i->ARMin.LdSt32.amode);
} else {
vex_printf("str%s ", i->ARMin.LdSt32.cc == ARMcc_AL ? " "
: showARMCondCode(i->ARMin.LdSt32.cc));
ppARMAMode1(i->ARMin.LdSt32.amode);
vex_printf(", ");
ppHRegARM(i->ARMin.LdSt32.rD);
}
return;
case ARMin_LdSt16:
if (i->ARMin.LdSt16.isLoad) {
vex_printf("%s%s%s",
i->ARMin.LdSt16.signedLoad ? "ldrsh" : "ldrh",
i->ARMin.LdSt16.cc == ARMcc_AL ? " "
: showARMCondCode(i->ARMin.LdSt16.cc),
i->ARMin.LdSt16.signedLoad ? " " : " ");
ppHRegARM(i->ARMin.LdSt16.rD);
vex_printf(", ");
ppARMAMode2(i->ARMin.LdSt16.amode);
} else {
vex_printf("strh%s ",
i->ARMin.LdSt16.cc == ARMcc_AL ? " "
: showARMCondCode(i->ARMin.LdSt16.cc));
ppARMAMode2(i->ARMin.LdSt16.amode);
vex_printf(", ");
ppHRegARM(i->ARMin.LdSt16.rD);
}
return;
case ARMin_LdSt8U:
if (i->ARMin.LdSt8U.isLoad) {
vex_printf("ldrb%s ", i->ARMin.LdSt8U.cc == ARMcc_AL ? " "
: showARMCondCode(i->ARMin.LdSt8U.cc));
ppHRegARM(i->ARMin.LdSt8U.rD);
vex_printf(", ");
ppARMAMode1(i->ARMin.LdSt8U.amode);
} else {
vex_printf("strb%s ", i->ARMin.LdSt8U.cc == ARMcc_AL ? " "
: showARMCondCode(i->ARMin.LdSt8U.cc));
ppARMAMode1(i->ARMin.LdSt8U.amode);
vex_printf(", ");
ppHRegARM(i->ARMin.LdSt8U.rD);
}
return;
case ARMin_Ld8S:
vex_printf("ldrsb%s ", i->ARMin.Ld8S.cc == ARMcc_AL ? " "
: showARMCondCode(i->ARMin.Ld8S.cc));
ppARMAMode2(i->ARMin.Ld8S.amode);
vex_printf(", ");
ppHRegARM(i->ARMin.Ld8S.rD);
return;
case ARMin_XDirect:
vex_printf("(xDirect) ");
vex_printf("if (%%cpsr.%s) { ",
showARMCondCode(i->ARMin.XDirect.cond));
vex_printf("movw r12,0x%x; ",
(UInt)(i->ARMin.XDirect.dstGA & 0xFFFF));
vex_printf("movt r12,0x%x; ",
(UInt)((i->ARMin.XDirect.dstGA >> 16) & 0xFFFF));
vex_printf("str r12,");
ppARMAMode1(i->ARMin.XDirect.amR15T);
vex_printf("; movw r12,LO16($disp_cp_chain_me_to_%sEP); ",
i->ARMin.XDirect.toFastEP ? "fast" : "slow");
vex_printf("movt r12,HI16($disp_cp_chain_me_to_%sEP); ",
i->ARMin.XDirect.toFastEP ? "fast" : "slow");
vex_printf("blx r12 }");
return;
case ARMin_XIndir:
vex_printf("(xIndir) ");
vex_printf("if (%%cpsr.%s) { ",
showARMCondCode(i->ARMin.XIndir.cond));
vex_printf("str ");
ppHRegARM(i->ARMin.XIndir.dstGA);
vex_printf(",");
ppARMAMode1(i->ARMin.XIndir.amR15T);
vex_printf("; movw r12,LO16($disp_cp_xindir); ");
vex_printf("movt r12,HI16($disp_cp_xindir); ");
vex_printf("blx r12 }");
return;
case ARMin_XAssisted:
vex_printf("(xAssisted) ");
vex_printf("if (%%cpsr.%s) { ",
showARMCondCode(i->ARMin.XAssisted.cond));
vex_printf("str ");
ppHRegARM(i->ARMin.XAssisted.dstGA);
vex_printf(",");
ppARMAMode1(i->ARMin.XAssisted.amR15T);
vex_printf("movw r8,$IRJumpKind_to_TRCVAL(%d); ",
(Int)i->ARMin.XAssisted.jk);
vex_printf("movw r12,LO16($disp_cp_xassisted); ");
vex_printf("movt r12,HI16($disp_cp_xassisted); ");
vex_printf("blx r12 }");
return;
case ARMin_CMov:
vex_printf("mov%s ", showARMCondCode(i->ARMin.CMov.cond));
ppHRegARM(i->ARMin.CMov.dst);
vex_printf(", ");
ppARMRI84(i->ARMin.CMov.src);
return;
case ARMin_Call:
vex_printf("call%s ",
i->ARMin.Call.cond==ARMcc_AL
? "" : showARMCondCode(i->ARMin.Call.cond));
vex_printf("0x%lx [nArgRegs=%d, ",
i->ARMin.Call.target, i->ARMin.Call.nArgRegs);
ppRetLoc(i->ARMin.Call.rloc);
vex_printf("]");
return;
case ARMin_Mul:
vex_printf("%-5s ", showARMMulOp(i->ARMin.Mul.op));
if (i->ARMin.Mul.op == ARMmul_PLAIN) {
vex_printf("r0, r2, r3");
} else {
vex_printf("r1:r0, r2, r3");
}
return;
case ARMin_LdrEX: {
const HChar* sz = "";
switch (i->ARMin.LdrEX.szB) {
case 1: sz = "b"; break; case 2: sz = "h"; break;
case 8: sz = "d"; break; case 4: break;
default: vassert(0);
}
vex_printf("ldrex%s %sr2, [r4]",
sz, i->ARMin.LdrEX.szB == 8 ? "r3:" : "");
return;
}
case ARMin_StrEX: {
const HChar* sz = "";
switch (i->ARMin.StrEX.szB) {
case 1: sz = "b"; break; case 2: sz = "h"; break;
case 8: sz = "d"; break; case 4: break;
default: vassert(0);
}
vex_printf("strex%s r0, %sr2, [r4]",
sz, i->ARMin.StrEX.szB == 8 ? "r3:" : "");
return;
}
case ARMin_VLdStD:
if (i->ARMin.VLdStD.isLoad) {
vex_printf("fldd ");
ppHRegARM(i->ARMin.VLdStD.dD);
vex_printf(", ");
ppARMAModeV(i->ARMin.VLdStD.amode);
} else {
vex_printf("fstd ");
ppARMAModeV(i->ARMin.VLdStD.amode);
vex_printf(", ");
ppHRegARM(i->ARMin.VLdStD.dD);
}
return;
case ARMin_VLdStS:
if (i->ARMin.VLdStS.isLoad) {
vex_printf("flds ");
ppHRegARM(i->ARMin.VLdStS.fD);
vex_printf(", ");
ppARMAModeV(i->ARMin.VLdStS.amode);
} else {
vex_printf("fsts ");
ppARMAModeV(i->ARMin.VLdStS.amode);
vex_printf(", ");
ppHRegARM(i->ARMin.VLdStS.fD);
}
return;
case ARMin_VAluD:
vex_printf("f%-3sd ", showARMVfpOp(i->ARMin.VAluD.op));
ppHRegARM(i->ARMin.VAluD.dst);
vex_printf(", ");
ppHRegARM(i->ARMin.VAluD.argL);
vex_printf(", ");
ppHRegARM(i->ARMin.VAluD.argR);
return;
case ARMin_VAluS:
vex_printf("f%-3ss ", showARMVfpOp(i->ARMin.VAluS.op));
ppHRegARM(i->ARMin.VAluS.dst);
vex_printf(", ");
ppHRegARM(i->ARMin.VAluS.argL);
vex_printf(", ");
ppHRegARM(i->ARMin.VAluS.argR);
return;
case ARMin_VUnaryD:
vex_printf("f%-3sd ", showARMVfpUnaryOp(i->ARMin.VUnaryD.op));
ppHRegARM(i->ARMin.VUnaryD.dst);
vex_printf(", ");
ppHRegARM(i->ARMin.VUnaryD.src);
return;
case ARMin_VUnaryS:
vex_printf("f%-3ss ", showARMVfpUnaryOp(i->ARMin.VUnaryS.op));
ppHRegARM(i->ARMin.VUnaryS.dst);
vex_printf(", ");
ppHRegARM(i->ARMin.VUnaryS.src);
return;
case ARMin_VCmpD:
vex_printf("fcmpd ");
ppHRegARM(i->ARMin.VCmpD.argL);
vex_printf(", ");
ppHRegARM(i->ARMin.VCmpD.argR);
vex_printf(" ; fmstat");
return;
case ARMin_VCMovD:
vex_printf("fcpyd%s ", showARMCondCode(i->ARMin.VCMovD.cond));
ppHRegARM(i->ARMin.VCMovD.dst);
vex_printf(", ");
ppHRegARM(i->ARMin.VCMovD.src);
return;
case ARMin_VCMovS:
vex_printf("fcpys%s ", showARMCondCode(i->ARMin.VCMovS.cond));
ppHRegARM(i->ARMin.VCMovS.dst);
vex_printf(", ");
ppHRegARM(i->ARMin.VCMovS.src);
return;
case ARMin_VCvtSD:
vex_printf("fcvt%s ", i->ARMin.VCvtSD.sToD ? "ds" : "sd");
ppHRegARM(i->ARMin.VCvtSD.dst);
vex_printf(", ");
ppHRegARM(i->ARMin.VCvtSD.src);
return;
case ARMin_VXferD:
vex_printf("vmov ");
if (i->ARMin.VXferD.toD) {
ppHRegARM(i->ARMin.VXferD.dD);
vex_printf(", ");
ppHRegARM(i->ARMin.VXferD.rLo);
vex_printf(", ");
ppHRegARM(i->ARMin.VXferD.rHi);
} else {
ppHRegARM(i->ARMin.VXferD.rLo);
vex_printf(", ");
ppHRegARM(i->ARMin.VXferD.rHi);
vex_printf(", ");
ppHRegARM(i->ARMin.VXferD.dD);
}
return;
case ARMin_VXferS:
vex_printf("vmov ");
if (i->ARMin.VXferS.toS) {
ppHRegARM(i->ARMin.VXferS.fD);
vex_printf(", ");
ppHRegARM(i->ARMin.VXferS.rLo);
} else {
ppHRegARM(i->ARMin.VXferS.rLo);
vex_printf(", ");
ppHRegARM(i->ARMin.VXferS.fD);
}
return;
case ARMin_VCvtID: {
const HChar* nm = "?";
if (i->ARMin.VCvtID.iToD) {
nm = i->ARMin.VCvtID.syned ? "fsitod" : "fuitod";
} else {
nm = i->ARMin.VCvtID.syned ? "ftosid" : "ftouid";
}
vex_printf("%s ", nm);
ppHRegARM(i->ARMin.VCvtID.dst);
vex_printf(", ");
ppHRegARM(i->ARMin.VCvtID.src);
return;
}
case ARMin_FPSCR:
if (i->ARMin.FPSCR.toFPSCR) {
vex_printf("fmxr fpscr, ");
ppHRegARM(i->ARMin.FPSCR.iReg);
} else {
vex_printf("fmrx ");
ppHRegARM(i->ARMin.FPSCR.iReg);
vex_printf(", fpscr");
}
return;
case ARMin_MFence:
vex_printf("(mfence) dsb sy; dmb sy; isb");
return;
case ARMin_CLREX:
vex_printf("clrex");
return;
case ARMin_NLdStQ:
if (i->ARMin.NLdStQ.isLoad)
vex_printf("vld1.32 {");
else
vex_printf("vst1.32 {");
ppHRegARM(i->ARMin.NLdStQ.dQ);
vex_printf("} ");
ppARMAModeN(i->ARMin.NLdStQ.amode);
return;
case ARMin_NLdStD:
if (i->ARMin.NLdStD.isLoad)
vex_printf("vld1.32 {");
else
vex_printf("vst1.32 {");
ppHRegARM(i->ARMin.NLdStD.dD);
vex_printf("} ");
ppARMAModeN(i->ARMin.NLdStD.amode);
return;
case ARMin_NUnary:
vex_printf("%s%s%s ",
showARMNeonUnOp(i->ARMin.NUnary.op),
showARMNeonUnOpDataType(i->ARMin.NUnary.op),
showARMNeonDataSize(i));
ppHRegARM(i->ARMin.NUnary.dst);
vex_printf(", ");
ppHRegARM(i->ARMin.NUnary.src);
if (i->ARMin.NUnary.op == ARMneon_EQZ)
vex_printf(", #0");
if (i->ARMin.NUnary.op == ARMneon_VCVTFtoFixedS ||
i->ARMin.NUnary.op == ARMneon_VCVTFtoFixedU ||
i->ARMin.NUnary.op == ARMneon_VCVTFixedStoF ||
i->ARMin.NUnary.op == ARMneon_VCVTFixedUtoF) {
vex_printf(", #%d", i->ARMin.NUnary.size);
}
if (i->ARMin.NUnary.op == ARMneon_VQSHLNSS ||
i->ARMin.NUnary.op == ARMneon_VQSHLNUU ||
i->ARMin.NUnary.op == ARMneon_VQSHLNUS) {
UInt size;
size = i->ARMin.NUnary.size;
if (size & 0x40) {
vex_printf(", #%d", size - 64);
} else if (size & 0x20) {
vex_printf(", #%d", size - 32);
} else if (size & 0x10) {
vex_printf(", #%d", size - 16);
} else if (size & 0x08) {
vex_printf(", #%d", size - 8);
}
}
return;
case ARMin_NUnaryS:
vex_printf("%s%s%s ",
showARMNeonUnOpS(i->ARMin.NUnaryS.op),
showARMNeonUnOpSDataType(i->ARMin.NUnaryS.op),
showARMNeonDataSize(i));
ppARMNRS(i->ARMin.NUnaryS.dst);
vex_printf(", ");
ppARMNRS(i->ARMin.NUnaryS.src);
return;
case ARMin_NShift:
vex_printf("%s%s%s ",
showARMNeonShiftOp(i->ARMin.NShift.op),
showARMNeonShiftOpDataType(i->ARMin.NShift.op),
showARMNeonDataSize(i));
ppHRegARM(i->ARMin.NShift.dst);
vex_printf(", ");
ppHRegARM(i->ARMin.NShift.argL);
vex_printf(", ");
ppHRegARM(i->ARMin.NShift.argR);
return;
case ARMin_NShl64:
vex_printf("vshl.i64 ");
ppHRegARM(i->ARMin.NShl64.dst);
vex_printf(", ");
ppHRegARM(i->ARMin.NShl64.src);
vex_printf(", #%u", i->ARMin.NShl64.amt);
return;
case ARMin_NDual:
vex_printf("%s%s%s ",
showARMNeonDualOp(i->ARMin.NDual.op),
showARMNeonDualOpDataType(i->ARMin.NDual.op),
showARMNeonDataSize(i));
ppHRegARM(i->ARMin.NDual.arg1);
vex_printf(", ");
ppHRegARM(i->ARMin.NDual.arg2);
return;
case ARMin_NBinary:
vex_printf("%s%s%s",
showARMNeonBinOp(i->ARMin.NBinary.op),
showARMNeonBinOpDataType(i->ARMin.NBinary.op),
showARMNeonDataSize(i));
vex_printf(" ");
ppHRegARM(i->ARMin.NBinary.dst);
vex_printf(", ");
ppHRegARM(i->ARMin.NBinary.argL);
vex_printf(", ");
ppHRegARM(i->ARMin.NBinary.argR);
return;
case ARMin_NeonImm:
vex_printf("vmov ");
ppHRegARM(i->ARMin.NeonImm.dst);
vex_printf(", ");
ppARMNImm(i->ARMin.NeonImm.imm);
return;
case ARMin_NCMovQ:
vex_printf("vmov%s ", showARMCondCode(i->ARMin.NCMovQ.cond));
ppHRegARM(i->ARMin.NCMovQ.dst);
vex_printf(", ");
ppHRegARM(i->ARMin.NCMovQ.src);
return;
case ARMin_Add32:
vex_printf("add32 ");
ppHRegARM(i->ARMin.Add32.rD);
vex_printf(", ");
ppHRegARM(i->ARMin.Add32.rN);
vex_printf(", ");
vex_printf("%d", i->ARMin.Add32.imm32);
return;
case ARMin_EvCheck:
vex_printf("(evCheck) ldr r12,");
ppARMAMode1(i->ARMin.EvCheck.amCounter);
vex_printf("; subs r12,r12,$1; str r12,");
ppARMAMode1(i->ARMin.EvCheck.amCounter);
vex_printf("; bpl nofail; ldr r12,");
ppARMAMode1(i->ARMin.EvCheck.amFailAddr);
vex_printf("; bx r12; nofail:");
return;
case ARMin_ProfInc:
vex_printf("(profInc) movw r12,LO16($NotKnownYet); "
"movw r12,HI16($NotKnownYet); "
"ldr r11,[r12]; "
"adds r11,r11,$1; "
"str r11,[r12]; "
"ldr r11,[r12+4]; "
"adc r11,r11,$0; "
"str r11,[r12+4]");
return;
default:
vex_printf("ppARMInstr: unhandled case (tag %d)", (Int)i->tag);
vpanic("ppARMInstr(1)");
return;
}
}
/* --------- Helpers for register allocation. --------- */
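/* Tell the register allocator which registers each instruction
   reads and writes.  Conditional instructions that may only
   partially update their destination (CMov, conditional loads)
   must list it as read as well as written. */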
void getRegUsage_ARMInstr ( HRegUsage* u, const ARMInstr* i, Bool mode64 )
{
vassert(mode64 == False);
initHRegUsage(u);
switch (i->tag) {
case ARMin_Alu:
addHRegUse(u, HRmWrite, i->ARMin.Alu.dst);
addHRegUse(u, HRmRead, i->ARMin.Alu.argL);
addRegUsage_ARMRI84(u, i->ARMin.Alu.argR);
return;
case ARMin_Shift:
addHRegUse(u, HRmWrite, i->ARMin.Shift.dst);
addHRegUse(u, HRmRead, i->ARMin.Shift.argL);
addRegUsage_ARMRI5(u, i->ARMin.Shift.argR);
return;
case ARMin_Unary:
addHRegUse(u, HRmWrite, i->ARMin.Unary.dst);
addHRegUse(u, HRmRead, i->ARMin.Unary.src);
return;
case ARMin_CmpOrTst:
addHRegUse(u, HRmRead, i->ARMin.CmpOrTst.argL);
addRegUsage_ARMRI84(u, i->ARMin.CmpOrTst.argR);
return;
case ARMin_Mov:
addHRegUse(u, HRmWrite, i->ARMin.Mov.dst);
addRegUsage_ARMRI84(u, i->ARMin.Mov.src);
return;
case ARMin_Imm32:
addHRegUse(u, HRmWrite, i->ARMin.Imm32.dst);
return;
case ARMin_LdSt32:
addRegUsage_ARMAMode1(u, i->ARMin.LdSt32.amode);
if (i->ARMin.LdSt32.isLoad) {
addHRegUse(u, HRmWrite, i->ARMin.LdSt32.rD);
if (i->ARMin.LdSt32.cc != ARMcc_AL)
addHRegUse(u, HRmRead, i->ARMin.LdSt32.rD);
} else {
addHRegUse(u, HRmRead, i->ARMin.LdSt32.rD);
}
return;
case ARMin_LdSt16:
addRegUsage_ARMAMode2(u, i->ARMin.LdSt16.amode);
if (i->ARMin.LdSt16.isLoad) {
addHRegUse(u, HRmWrite, i->ARMin.LdSt16.rD);
if (i->ARMin.LdSt16.cc != ARMcc_AL)
addHRegUse(u, HRmRead, i->ARMin.LdSt16.rD);
} else {
addHRegUse(u, HRmRead, i->ARMin.LdSt16.rD);
}
return;
case ARMin_LdSt8U:
addRegUsage_ARMAMode1(u, i->ARMin.LdSt8U.amode);
if (i->ARMin.LdSt8U.isLoad) {
addHRegUse(u, HRmWrite, i->ARMin.LdSt8U.rD);
if (i->ARMin.LdSt8U.cc != ARMcc_AL)
addHRegUse(u, HRmRead, i->ARMin.LdSt8U.rD);
} else {
addHRegUse(u, HRmRead, i->ARMin.LdSt8U.rD);
}
return;
case ARMin_Ld8S:
addRegUsage_ARMAMode2(u, i->ARMin.Ld8S.amode);
addHRegUse(u, HRmWrite, i->ARMin.Ld8S.rD);
if (i->ARMin.Ld8S.cc != ARMcc_AL)
addHRegUse(u, HRmRead, i->ARMin.Ld8S.rD);
return;
/* XDirect/XIndir/XAssisted are also a bit subtle. They
conditionally exit the block. Hence we only need to list (1)
the registers that they read, and (2) the registers that they
write in the case where the block is not exited. (2) is
empty, hence only (1) is relevant here. */
case ARMin_XDirect:
addRegUsage_ARMAMode1(u, i->ARMin.XDirect.amR15T);
return;
case ARMin_XIndir:
addHRegUse(u, HRmRead, i->ARMin.XIndir.dstGA);
addRegUsage_ARMAMode1(u, i->ARMin.XIndir.amR15T);
return;
case ARMin_XAssisted:
addHRegUse(u, HRmRead, i->ARMin.XAssisted.dstGA);
addRegUsage_ARMAMode1(u, i->ARMin.XAssisted.amR15T);
return;
case ARMin_CMov:
addHRegUse(u, HRmWrite, i->ARMin.CMov.dst);
addHRegUse(u, HRmRead, i->ARMin.CMov.dst);
addRegUsage_ARMRI84(u, i->ARMin.CMov.src);
return;
case ARMin_Call:
/* logic and comments copied/modified from x86 back end */
/* This is a bit subtle. */
/* First off, claim it trashes all the caller-saved regs
which fall within the register allocator's jurisdiction.
These I believe to be r0,1,2,3. If it turns out that r9
is also caller-saved, then we'll have to add that here
too. */
addHRegUse(u, HRmWrite, hregARM_R0());
addHRegUse(u, HRmWrite, hregARM_R1());
addHRegUse(u, HRmWrite, hregARM_R2());
addHRegUse(u, HRmWrite, hregARM_R3());
/* Now we have to state any parameter-carrying registers
which might be read. This depends on nArgRegs. */
switch (i->ARMin.Call.nArgRegs) {
case 4: addHRegUse(u, HRmRead, hregARM_R3()); /*fallthru*/
case 3: addHRegUse(u, HRmRead, hregARM_R2()); /*fallthru*/
case 2: addHRegUse(u, HRmRead, hregARM_R1()); /*fallthru*/
case 1: addHRegUse(u, HRmRead, hregARM_R0()); break;
case 0: break;
default: vpanic("getRegUsage_ARM:Call:regparms");
}
/* Finally, there is the issue that the insn trashes a
register because the literal target address has to be
loaded into a register. Fortunately, for the nArgRegs=
0/1/2/3 case, we can use r0, r1, r2 or r3 respectively, so
this does not cause any further damage. For the
nArgRegs=4 case, we'll have to choose another register
arbitrarily since all the caller saved regs are used for
parameters, and so we might as well choose r11.
*/
if (i->ARMin.Call.nArgRegs == 4)
addHRegUse(u, HRmWrite, hregARM_R11());
/* Upshot of this is that the assembler really must observe
the here-stated convention of which register to use as an
address temporary, depending on nArgRegs: 0==r0,
1==r1, 2==r2, 3==r3, 4==r11 */
return;
case ARMin_Mul:
addHRegUse(u, HRmRead, hregARM_R2());
addHRegUse(u, HRmRead, hregARM_R3());
addHRegUse(u, HRmWrite, hregARM_R0());
if (i->ARMin.Mul.op != ARMmul_PLAIN)
addHRegUse(u, HRmWrite, hregARM_R1());
return;
case ARMin_LdrEX:
addHRegUse(u, HRmRead, hregARM_R4());
addHRegUse(u, HRmWrite, hregARM_R2());
if (i->ARMin.LdrEX.szB == 8)
addHRegUse(u, HRmWrite, hregARM_R3());
return;
case ARMin_StrEX:
addHRegUse(u, HRmRead, hregARM_R4());
addHRegUse(u, HRmWrite, hregARM_R0());
addHRegUse(u, HRmRead, hregARM_R2());
if (i->ARMin.StrEX.szB == 8)
addHRegUse(u, HRmRead, hregARM_R3());
return;
case ARMin_VLdStD:
addRegUsage_ARMAModeV(u, i->ARMin.VLdStD.amode);
if (i->ARMin.VLdStD.isLoad) {
addHRegUse(u, HRmWrite, i->ARMin.VLdStD.dD);
} else {
addHRegUse(u, HRmRead, i->ARMin.VLdStD.dD);
}
return;
case ARMin_VLdStS:
addRegUsage_ARMAModeV(u, i->ARMin.VLdStS.amode);
if (i->ARMin.VLdStS.isLoad) {
addHRegUse(u, HRmWrite, i->ARMin.VLdStS.fD);
} else {
addHRegUse(u, HRmRead, i->ARMin.VLdStS.fD);
}
return;
case ARMin_VAluD:
addHRegUse(u, HRmWrite, i->ARMin.VAluD.dst);
addHRegUse(u, HRmRead, i->ARMin.VAluD.argL);
addHRegUse(u, HRmRead, i->ARMin.VAluD.argR);
return;
case ARMin_VAluS:
addHRegUse(u, HRmWrite, i->ARMin.VAluS.dst);
addHRegUse(u, HRmRead, i->ARMin.VAluS.argL);
addHRegUse(u, HRmRead, i->ARMin.VAluS.argR);
return;
case ARMin_VUnaryD:
addHRegUse(u, HRmWrite, i->ARMin.VUnaryD.dst);
addHRegUse(u, HRmRead, i->ARMin.VUnaryD.src);
return;
case ARMin_VUnaryS:
addHRegUse(u, HRmWrite, i->ARMin.VUnaryS.dst);
addHRegUse(u, HRmRead, i->ARMin.VUnaryS.src);
return;
case ARMin_VCmpD:
addHRegUse(u, HRmRead, i->ARMin.VCmpD.argL);
addHRegUse(u, HRmRead, i->ARMin.VCmpD.argR);
return;
case ARMin_VCMovD:
addHRegUse(u, HRmWrite, i->ARMin.VCMovD.dst);
addHRegUse(u, HRmRead, i->ARMin.VCMovD.dst);
addHRegUse(u, HRmRead, i->ARMin.VCMovD.src);
return;
case ARMin_VCMovS:
addHRegUse(u, HRmWrite, i->ARMin.VCMovS.dst);
addHRegUse(u, HRmRead, i->ARMin.VCMovS.dst);
addHRegUse(u, HRmRead, i->ARMin.VCMovS.src);
return;
case ARMin_VCvtSD:
addHRegUse(u, HRmWrite, i->ARMin.VCvtSD.dst);
addHRegUse(u, HRmRead, i->ARMin.VCvtSD.src);
return;
case ARMin_VXferD:
if (i->ARMin.VXferD.toD) {
addHRegUse(u, HRmWrite, i->ARMin.VXferD.dD);
addHRegUse(u, HRmRead, i->ARMin.VXferD.rHi);
addHRegUse(u, HRmRead, i->ARMin.VXferD.rLo);
} else {
addHRegUse(u, HRmRead, i->ARMin.VXferD.dD);
addHRegUse(u, HRmWrite, i->ARMin.VXferD.rHi);
addHRegUse(u, HRmWrite, i->ARMin.VXferD.rLo);
}
return;
case ARMin_VXferS:
if (i->ARMin.VXferS.toS) {
addHRegUse(u, HRmWrite, i->ARMin.VXferS.fD);
addHRegUse(u, HRmRead, i->ARMin.VXferS.rLo);
} else {
addHRegUse(u, HRmRead, i->ARMin.VXferS.fD);
addHRegUse(u, HRmWrite, i->ARMin.VXferS.rLo);
}
return;
case ARMin_VCvtID:
addHRegUse(u, HRmWrite, i->ARMin.VCvtID.dst);
addHRegUse(u, HRmRead, i->ARMin.VCvtID.src);
return;
case ARMin_FPSCR:
if (i->ARMin.FPSCR.toFPSCR)
addHRegUse(u, HRmRead, i->ARMin.FPSCR.iReg);
else
addHRegUse(u, HRmWrite, i->ARMin.FPSCR.iReg);
return;
case ARMin_MFence:
return;
case ARMin_CLREX:
return;
case ARMin_NLdStQ:
if (i->ARMin.NLdStQ.isLoad)
addHRegUse(u, HRmWrite, i->ARMin.NLdStQ.dQ);
else
addHRegUse(u, HRmRead, i->ARMin.NLdStQ.dQ);
addRegUsage_ARMAModeN(u, i->ARMin.NLdStQ.amode);
return;
case ARMin_NLdStD:
if (i->ARMin.NLdStD.isLoad)
addHRegUse(u, HRmWrite, i->ARMin.NLdStD.dD);
else
addHRegUse(u, HRmRead, i->ARMin.NLdStD.dD);
addRegUsage_ARMAModeN(u, i->ARMin.NLdStD.amode);
return;
case ARMin_NUnary:
addHRegUse(u, HRmWrite, i->ARMin.NUnary.dst);
addHRegUse(u, HRmRead, i->ARMin.NUnary.src);
return;
case ARMin_NUnaryS:
addHRegUse(u, HRmWrite, i->ARMin.NUnaryS.dst->reg);
addHRegUse(u, HRmRead, i->ARMin.NUnaryS.src->reg);
return;
case ARMin_NShift:
addHRegUse(u, HRmWrite, i->ARMin.NShift.dst);
addHRegUse(u, HRmRead, i->ARMin.NShift.argL);
addHRegUse(u, HRmRead, i->ARMin.NShift.argR);
return;
case ARMin_NShl64:
addHRegUse(u, HRmWrite, i->ARMin.NShl64.dst);
addHRegUse(u, HRmRead, i->ARMin.NShl64.src);
return;
case ARMin_NDual:
addHRegUse(u, HRmWrite, i->ARMin.NDual.arg1);
addHRegUse(u, HRmWrite, i->ARMin.NDual.arg2);
addHRegUse(u, HRmRead, i->ARMin.NDual.arg1);
addHRegUse(u, HRmRead, i->ARMin.NDual.arg2);
return;
case ARMin_NBinary:
addHRegUse(u, HRmWrite, i->ARMin.NBinary.dst);
/* TODO: sometimes dst is also being read! */
// XXX fix this
addHRegUse(u, HRmRead, i->ARMin.NBinary.argL);
addHRegUse(u, HRmRead, i->ARMin.NBinary.argR);
return;
case ARMin_NeonImm:
addHRegUse(u, HRmWrite, i->ARMin.NeonImm.dst);
return;
case ARMin_NCMovQ:
addHRegUse(u, HRmWrite, i->ARMin.NCMovQ.dst);
addHRegUse(u, HRmRead, i->ARMin.NCMovQ.dst);
addHRegUse(u, HRmRead, i->ARMin.NCMovQ.src);
return;
case ARMin_Add32:
addHRegUse(u, HRmWrite, i->ARMin.Add32.rD);
addHRegUse(u, HRmRead, i->ARMin.Add32.rN);
return;
case ARMin_EvCheck:
/* We expect both amodes only to mention r8, so this is in