blob: e7c59f89b5965b5a52958833c57ad872bc428677 [file] [log] [blame]
/*---------------------------------------------------------------*/
/*--- begin host_mips_defs.c ---*/
/*---------------------------------------------------------------*/
/*
This file is part of Valgrind, a dynamic binary instrumentation
framework.
Copyright (C) 2010-2013 RT-RK
mips-valgrind@rt-rk.com
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307, USA.
The GNU General Public License is contained in the file COPYING.
*/
#include "libvex_basictypes.h"
#include "libvex.h"
#include "libvex_trc_values.h"
#include "main_util.h"
#include "host_generic_regs.h"
#include "host_mips_defs.h"
/* guest_COND offset. */
#define COND_OFFSET(__mode64) (__mode64 ? 612 : 448)
/* Register number for guest state pointer in host code. */
#define GuestSP 23
/*---------------- Registers ----------------*/
const RRegUniverse* getRRegUniverse_MIPS ( Bool mode64 )
{
/* Return the universe of real registers available to the register
   allocator for MIPS, in either 32- or 64-bit mode.  The order of the
   entries below is significant: the allocatable registers must come
   first, and the allocator's preference follows this order. */
/* The real-register universe is a big constant, so we just want to
initialise it once. rRegUniverse_MIPS_initted values: 0=not initted,
1=initted for 32-bit-mode, 2=initted for 64-bit-mode */
static RRegUniverse rRegUniverse_MIPS;
static UInt rRegUniverse_MIPS_initted = 0;
/* Handy shorthand, nothing more */
RRegUniverse* ru = &rRegUniverse_MIPS;
/* This isn't thread-safe. Sigh. */
UInt howNeeded = mode64 ? 2 : 1;
if (LIKELY(rRegUniverse_MIPS_initted == howNeeded))
return ru;
/* Cached copy is for the other mode (or not built yet); rebuild. */
RRegUniverse__init(ru);
/* Add the registers. The initial segment of this array must be
those available for allocation by reg-alloc, and those that
follow are not available for allocation. */
/* Integer registers available for allocation. */
ru->regs[ru->size++] = hregMIPS_GPR16(mode64);
ru->regs[ru->size++] = hregMIPS_GPR17(mode64);
ru->regs[ru->size++] = hregMIPS_GPR18(mode64);
ru->regs[ru->size++] = hregMIPS_GPR19(mode64);
ru->regs[ru->size++] = hregMIPS_GPR20(mode64);
ru->regs[ru->size++] = hregMIPS_GPR21(mode64);
ru->regs[ru->size++] = hregMIPS_GPR22(mode64);
ru->regs[ru->size++] = hregMIPS_GPR12(mode64);
ru->regs[ru->size++] = hregMIPS_GPR13(mode64);
ru->regs[ru->size++] = hregMIPS_GPR14(mode64);
ru->regs[ru->size++] = hregMIPS_GPR15(mode64);
ru->regs[ru->size++] = hregMIPS_GPR24(mode64);
/* Note GPR23 (s7) is deliberately absent here: it holds the guest
   state pointer (see GuestSP above) and is listed later among the
   non-allocatable registers. */
/* s7 (=guest_state) */
/* Floating-point registers available for allocation. */
ru->regs[ru->size++] = hregMIPS_F16(mode64);
ru->regs[ru->size++] = hregMIPS_F18(mode64);
ru->regs[ru->size++] = hregMIPS_F20(mode64);
ru->regs[ru->size++] = hregMIPS_F22(mode64);
ru->regs[ru->size++] = hregMIPS_F24(mode64);
ru->regs[ru->size++] = hregMIPS_F26(mode64);
ru->regs[ru->size++] = hregMIPS_F28(mode64);
ru->regs[ru->size++] = hregMIPS_F30(mode64);
if (!mode64) {
/* Fake double floating point */
ru->regs[ru->size++] = hregMIPS_D0(mode64);
ru->regs[ru->size++] = hregMIPS_D1(mode64);
ru->regs[ru->size++] = hregMIPS_D2(mode64);
ru->regs[ru->size++] = hregMIPS_D3(mode64);
ru->regs[ru->size++] = hregMIPS_D4(mode64);
ru->regs[ru->size++] = hregMIPS_D5(mode64);
ru->regs[ru->size++] = hregMIPS_D6(mode64);
ru->regs[ru->size++] = hregMIPS_D7(mode64);
}
/* Everything from here on is not allocatable. */
ru->allocable = ru->size;
/* And other regs, not available to the allocator. */
ru->regs[ru->size++] = hregMIPS_HI(mode64);
ru->regs[ru->size++] = hregMIPS_LO(mode64);
ru->regs[ru->size++] = hregMIPS_GPR0(mode64);
ru->regs[ru->size++] = hregMIPS_GPR1(mode64);
ru->regs[ru->size++] = hregMIPS_GPR2(mode64);
ru->regs[ru->size++] = hregMIPS_GPR3(mode64);
ru->regs[ru->size++] = hregMIPS_GPR4(mode64);
ru->regs[ru->size++] = hregMIPS_GPR5(mode64);
ru->regs[ru->size++] = hregMIPS_GPR6(mode64);
ru->regs[ru->size++] = hregMIPS_GPR7(mode64);
ru->regs[ru->size++] = hregMIPS_GPR8(mode64);
ru->regs[ru->size++] = hregMIPS_GPR9(mode64);
ru->regs[ru->size++] = hregMIPS_GPR10(mode64);
ru->regs[ru->size++] = hregMIPS_GPR11(mode64);
ru->regs[ru->size++] = hregMIPS_GPR23(mode64);
ru->regs[ru->size++] = hregMIPS_GPR25(mode64);
ru->regs[ru->size++] = hregMIPS_GPR29(mode64);
ru->regs[ru->size++] = hregMIPS_GPR31(mode64);
/* Remember which mode this universe was built for. */
rRegUniverse_MIPS_initted = howNeeded;
RRegUniverse__check_is_sane(ru);
return ru;
}
void ppHRegMIPS(HReg reg, Bool mode64)
{
   /* Print the assembler name of |reg|.  Virtual registers are printed
      generically; real registers are looked up in the tables below by
      their hardware encoding. */
   Int r;
   static const HChar *ireg32_names[35]
       = { "$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7",
           "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15",
           "$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23",
           "$24", "$25", "$26", "$27", "$28", "$29", "$30", "$31",
           "%32", "%33", "%34",
         };
   /* BUGFIX: entry 30 was "f30" (missing the '$' prefix); now "$f30",
      consistent with every other entry. */
   static const HChar *freg32_names[32]
       = { "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7",
           "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
           "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",
           "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31"
         };
   /* NOTE(review): only the first 16 of 32 entries are initialised; the
      remainder are NULL.  The HRcFlt64 case below asserts r < 32, so an
      encoding >= 16 would print a NULL pointer — confirm that double
      encodings never exceed 15. */
   static const HChar *freg64_names[32]
       = { "$d0", "$d1", "$d2", "$d3", "$d4", "$d5", "$d6", "$d7",
           "$d8", "$d9", "$d10", "$d11", "$d12", "$d13", "$d14", "$d15",
         };
   /* Be generic for all virtual regs. */
   if (hregIsVirtual(reg)) {
      ppHReg(reg);
      return;
   }
   /* But specific for real regs. */
   vassert(hregClass(reg) == HRcInt32 || hregClass(reg) == HRcInt64 ||
           hregClass(reg) == HRcFlt32 || hregClass(reg) == HRcFlt64);
   switch (hregClass(reg)) {
      case HRcInt32:
         r = hregEncoding(reg);
         vassert(r >= 0 && r < 32);
         vex_printf("%s", ireg32_names[r]);
         return;
      case HRcInt64:
         r = hregEncoding (reg);
         vassert (r >= 0 && r < 32);
         vex_printf ("%s", ireg32_names[r]);
         return;
      case HRcFlt32:
         r = hregEncoding(reg);
         vassert(r >= 0 && r < 32);
         vex_printf("%s", freg32_names[r]);
         return;
      case HRcFlt64:
         r = hregEncoding(reg);
         vassert(r >= 0 && r < 32);
         vex_printf("%s", freg64_names[r]);
         return;
      default:
         vpanic("ppHRegMIPS");
         break;
   }
   return;
}
/*----------------- Condition Codes ----------------------*/
const HChar *showMIPSCondCode(MIPSCondCode cond)
{
   /* Map a condition code to its printable mnemonic. */
   switch (cond) {
      case MIPScc_EQ: return "EQ";  /* equal */
      case MIPScc_NE: return "NEQ"; /* not equal */
      case MIPScc_HS: return "GE";  /* >=u (Greater Than or Equal) */
      case MIPScc_LO: return "LT";  /* <u (lower) */
      case MIPScc_MI: return "MI";  /* minus (negative) */
      case MIPScc_PL: return "PL";  /* plus (zero or +ve) */
      case MIPScc_VS: return "VS";  /* overflow */
      case MIPScc_VC: return "VC";  /* no overflow */
      case MIPScc_HI: return "HI";  /* >u (higher) */
      case MIPScc_LS: return "LS";  /* <=u (lower or same) */
      case MIPScc_GE: return "GE";  /* >=s (signed greater or equal) */
      case MIPScc_LT: return "LT";  /* <s (signed less than) */
      case MIPScc_GT: return "GT";  /* >s (signed greater) */
      case MIPScc_LE: return "LE";  /* <=s (signed less or equal) */
      case MIPScc_AL: return "AL";  /* always (unconditional) */
      case MIPScc_NV: return "NV";  /* never (unconditional) */
      default:
         vpanic("showMIPSCondCode");
   }
   return NULL; /* not reached */
}
const HChar *showMIPSFpOp(MIPSFpOp op)
{
   /* Map a floating-point op to its assembler mnemonic. */
   switch (op) {
      case Mfp_ADDD:    return "add.d";
      case Mfp_SUBD:    return "sub.d";
      case Mfp_MULD:    return "mul.d";
      case Mfp_DIVD:    return "div.d";
      case Mfp_MADDD:   return "madd.d";
      case Mfp_MSUBD:   return "msub.d";
      case Mfp_MADDS:   return "madd.s";
      case Mfp_MSUBS:   return "msub.s";
      case Mfp_ADDS:    return "add.s";
      case Mfp_SUBS:    return "sub.s";
      case Mfp_MULS:    return "mul.s";
      case Mfp_DIVS:    return "div.s";
      case Mfp_SQRTS:   return "sqrt.s";
      case Mfp_SQRTD:   return "sqrt.d";
      case Mfp_ABSS:    return "abs.s";
      case Mfp_ABSD:    return "abs.d";
      case Mfp_NEGS:    return "neg.s";
      case Mfp_NEGD:    return "neg.d";
      case Mfp_MOVS:    return "mov.s";
      case Mfp_MOVD:    return "mov.d";
      case Mfp_ROUNDWS: return "round.w.s";
      case Mfp_ROUNDWD: return "round.w.d";
      case Mfp_ROUNDLD: return "round.l.d";
      case Mfp_FLOORWS: return "floor.w.s";
      case Mfp_FLOORWD: return "floor.w.d";
      case Mfp_CVTDW:   return "cvt.d.w";
      case Mfp_CVTDL:   return "cvt.d.l";
      case Mfp_CVTDS:   return "cvt.d.s";
      case Mfp_CVTSD:   return "cvt.s.d";
      case Mfp_CVTSW:   return "cvt.s.w";
      case Mfp_CVTWS:   return "cvt.w.s";
      case Mfp_CVTWD:   return "cvt.w.d";
      case Mfp_CVTLD:   return "cvt.l.d";
      case Mfp_CVTLS:   return "cvt.l.s";
      case Mfp_TRUWD:   return "trunc.w.d";
      case Mfp_TRUWS:   return "trunc.w.s";
      case Mfp_TRULD:   return "trunc.l.d";
      case Mfp_TRULS:   return "trunc.l.s";
      case Mfp_CEILWS:  return "ceil.w.s";
      case Mfp_CEILWD:  return "ceil.w.d";
      case Mfp_CEILLS:  return "ceil.l.s";
      case Mfp_CEILLD:  return "ceil.l.d";
      case Mfp_CMP_UN:  return "c.un.d";
      case Mfp_CMP_EQ:  return "c.eq.d";
      case Mfp_CMP_LT:  return "c.lt.d";
      case Mfp_CMP_NGT: return "c.ngt.d";
      default:
         vex_printf("Unknown op: %d", op);
         vpanic("showMIPSFpOp");
   }
   return NULL; /* not reached */
}
/* Show move from/to fpr to/from gpr */
/* Show move from/to fpr to/from gpr */
const HChar* showMIPSFpGpMoveOp ( MIPSFpGpMoveOp op )
{
   switch (op) {
      case MFpGpMove_mfc1:  return "mfc1";
      case MFpGpMove_dmfc1: return "dmfc1";
      case MFpGpMove_mtc1:  return "mtc1";
      case MFpGpMove_dmtc1: return "dmtc1";
      default:
         vpanic("showMIPSFpGpMoveOp");
   }
   return NULL; /* not reached */
}
/* Show floating point move conditional */
/* Show floating point move conditional */
const HChar* showMIPSMoveCondOp ( MIPSMoveCondOp op )
{
   const HChar *ret;
   switch (op) {
      case MFpMoveCond_movns:
         ret = "movn.s";
         break;
      case MFpMoveCond_movnd:
         ret = "movn.d";
         break;
      case MMoveCond_movn:
         ret = "movn";
         break;
      default:
         /* BUGFIX: the panic message previously named a non-existent
            function ("showMIPSFpMoveCondOp"); report the real one. */
         vpanic("showMIPSMoveCondOp");
         break;
   }
   return ret;
}
/* --------- MIPSAMode: memory address expressions. --------- */
/* --------- MIPSAMode: memory address expressions. --------- */
/* Build an immediate+register address: idx(base). */
MIPSAMode *MIPSAMode_IR(Int idx, HReg base)
{
   MIPSAMode *amode = LibVEX_Alloc_inline(sizeof(MIPSAMode));
   amode->tag          = Mam_IR;
   amode->Mam.IR.index = idx;
   amode->Mam.IR.base  = base;
   return amode;
}
/* Build a register+register address: base indexed by idx. */
MIPSAMode *MIPSAMode_RR(HReg idx, HReg base)
{
   MIPSAMode *amode = LibVEX_Alloc_inline(sizeof(MIPSAMode));
   amode->tag          = Mam_RR;
   amode->Mam.RR.index = idx;
   amode->Mam.RR.base  = base;
   return amode;
}
/* Make a fresh deep copy of an address expression. */
MIPSAMode *dopyMIPSAMode(MIPSAMode * am)
{
   if (am->tag == Mam_IR)
      return MIPSAMode_IR(am->Mam.IR.index, am->Mam.IR.base);
   if (am->tag == Mam_RR)
      return MIPSAMode_RR(am->Mam.RR.index, am->Mam.RR.base);
   vpanic("dopyMIPSAMode");
   return NULL; /* not reached */
}
/* Return the address 4 bytes beyond |am| (used when a float value is
   accessed as two 4-byte halves).  Only the immediate form can be
   advanced. */
MIPSAMode *nextMIPSAModeFloat(MIPSAMode * am)
{
   if (am->tag == Mam_IR)
      return MIPSAMode_IR(am->Mam.IR.index + 4, am->Mam.IR.base);
   /* We can't do anything with the RR case (or anything else), so if
      it appears we simply have to give up. */
   vpanic("nextMIPSAModeFloat");
   return NULL; /* not reached */
}
/* Return the address 4 bytes beyond |am| (for the second half of an
   8-byte integer access).  Only the immediate form can be advanced. */
MIPSAMode *nextMIPSAModeInt(MIPSAMode * am)
{
   if (am->tag == Mam_IR)
      return MIPSAMode_IR(am->Mam.IR.index + 4, am->Mam.IR.base);
   /* We can't do anything with the RR case (or anything else), so if
      it appears we simply have to give up. */
   vpanic("nextMIPSAModeInt");
   return NULL; /* not reached */
}
/* Pretty-print an address expression: "imm(base)" or "base, index". */
void ppMIPSAMode(MIPSAMode * am, Bool mode64)
{
   if (am->tag == Mam_IR) {
      Int off = am->Mam.IR.index;
      if (off == 0)
         vex_printf("0(");
      else
         vex_printf("%d(", (Int) off);
      ppHRegMIPS(am->Mam.IR.base, mode64);
      vex_printf(")");
      return;
   }
   if (am->tag == Mam_RR) {
      ppHRegMIPS(am->Mam.RR.base, mode64);
      vex_printf(", ");
      ppHRegMIPS(am->Mam.RR.index, mode64);
      return;
   }
   vpanic("ppMIPSAMode");
}
/* Record the registers read by an address expression (both forms only
   ever read their registers). */
static void addRegUsage_MIPSAMode(HRegUsage * u, MIPSAMode * am)
{
   if (am->tag == Mam_IR) {
      addHRegUse(u, HRmRead, am->Mam.IR.base);
   } else if (am->tag == Mam_RR) {
      addHRegUse(u, HRmRead, am->Mam.RR.base);
      addHRegUse(u, HRmRead, am->Mam.RR.index);
   } else {
      vpanic("addRegUsage_MIPSAMode");
   }
}
/* Rewrite the registers inside an address expression through the
   virtual-to-real register map. */
static void mapRegs_MIPSAMode(HRegRemap * m, MIPSAMode * am)
{
   if (am->tag == Mam_IR) {
      am->Mam.IR.base = lookupHRegRemap(m, am->Mam.IR.base);
   } else if (am->tag == Mam_RR) {
      am->Mam.RR.base  = lookupHRegRemap(m, am->Mam.RR.base);
      am->Mam.RR.index = lookupHRegRemap(m, am->Mam.RR.index);
   } else {
      vpanic("mapRegs_MIPSAMode");
   }
}
/* --------- Operand, which can be a reg or a u16/s16. --------- */
/* --------- Operand, which can be a reg or a u16/s16. --------- */
/* Build an immediate operand.  If signed, the value must not be
   -32768, so that it can always be negated if required. */
MIPSRH *MIPSRH_Imm(Bool syned, UShort imm16)
{
   vassert(syned == True || syned == False);
   if (syned)
      vassert(imm16 != 0x8000);
   MIPSRH *rh = LibVEX_Alloc_inline(sizeof(MIPSRH));
   rh->tag           = Mrh_Imm;
   rh->Mrh.Imm.syned = syned;
   rh->Mrh.Imm.imm16 = imm16;
   return rh;
}
/* Build a register operand. */
MIPSRH *MIPSRH_Reg(HReg reg)
{
   MIPSRH *rh = LibVEX_Alloc_inline(sizeof(MIPSRH));
   rh->tag         = Mrh_Reg;
   rh->Mrh.Reg.reg = reg;
   return rh;
}
/* Pretty-print a reg-or-immediate operand. */
void ppMIPSRH(MIPSRH * op, Bool mode64)
{
   if (op->tag == Mrh_Imm) {
      /* Sign matters only for how the value is rendered. */
      if (op->Mrh.Imm.syned)
         vex_printf("%d", (Int) (Short) op->Mrh.Imm.imm16);
      else
         vex_printf("%u", (UInt) (UShort) op->Mrh.Imm.imm16);
   } else if (op->tag == Mrh_Reg) {
      ppHRegMIPS(op->Mrh.Reg.reg, mode64);
   } else {
      vpanic("ppMIPSRH");
   }
}
/* An MIPSRH can only be used in a "read" context (what would it mean
to write or modify a literal?) and so we enumerate its registers
accordingly. */
/* An MIPSRH can only be used in a "read" context (what would it mean
   to write or modify a literal?) and so we enumerate its registers
   accordingly. */
static void addRegUsage_MIPSRH(HRegUsage * u, MIPSRH * op)
{
   if (op->tag == Mrh_Imm)
      return; /* nothing to record for a literal */
   if (op->tag == Mrh_Reg) {
      addHRegUse(u, HRmRead, op->Mrh.Reg.reg);
      return;
   }
   vpanic("addRegUsage_MIPSRH");
}
/* Remap the register inside a reg-or-immediate operand; immediates are
   untouched. */
static void mapRegs_MIPSRH(HRegRemap * m, MIPSRH * op)
{
   if (op->tag == Mrh_Imm)
      return;
   if (op->tag == Mrh_Reg) {
      op->Mrh.Reg.reg = lookupHRegRemap(m, op->Mrh.Reg.reg);
      return;
   }
   vpanic("mapRegs_MIPSRH");
}
/* --------- Instructions. --------- */
/* --------- Instructions. --------- */
const HChar *showMIPSUnaryOp(MIPSUnaryOp op)
{
   /* Map a unary op to its mnemonic. */
   switch (op) {
      case Mun_CLO:  return "clo";
      case Mun_CLZ:  return "clz";
      case Mun_NOP:  return "nop";
      case Mun_DCLO: return "dclo";
      case Mun_DCLZ: return "dclz";
      default:
         vpanic("showMIPSUnaryOp");
   }
   return NULL; /* not reached */
}
/* Map an ALU op to its mnemonic; |immR| selects the immediate form
   where one exists. */
const HChar *showMIPSAluOp(MIPSAluOp op, Bool immR)
{
   switch (op) {
      case Malu_ADD:
         return immR ? "addiu" : "addu";
      case Malu_SUB:
         return "subu";
      case Malu_AND:
         return immR ? "andi" : "and";
      case Malu_OR:
         return immR ? "ori" : "or";
      case Malu_NOR:
         /* there's no nor with an immediate operand!? */
         vassert(immR == False);
         return "nor";
      case Malu_XOR:
         return immR ? "xori" : "xor";
      case Malu_DADD:
         return immR ? "daddi" : "dadd";
      case Malu_DSUB:
         return immR ? "dsubi" : "dsub";
      case Malu_SLT:
         return immR ? "slti" : "slt";
      default:
         vpanic("showMIPSAluOp");
   }
   return NULL; /* not reached */
}
/* Map a shift op to its mnemonic.  |immR| selects the immediate-shift
   form, |sz32| the 32-bit (vs doubleword) variant. */
const HChar *showMIPSShftOp(MIPSShftOp op, Bool immR, Bool sz32)
{
   switch (op) {
      case Mshft_SRA:
         if (immR)
            return sz32 ? "sra" : "dsra";
         return sz32 ? "srav" : "dsrav";
      case Mshft_SLL:
         if (immR)
            return sz32 ? "sll" : "dsll";
         return sz32 ? "sllv" : "dsllv";
      case Mshft_SRL:
         if (immR)
            return sz32 ? "srl" : "dsrl";
         return sz32 ? "srlv" : "dsrlv";
      default:
         vpanic("showMIPSShftOp");
   }
   return NULL; /* not reached */
}
/* Map a multiply-accumulate op to its mnemonic.  NOTE(review): the
   |variable| flag selects the non-"u" (signed) spelling — callers
   appear to pass the signedness flag here; confirm against call
   sites. */
const HChar *showMIPSMaccOp(MIPSMaccOp op, Bool variable)
{
   const HChar *ret;
   switch (op) {
      case Macc_ADD:
         ret = variable ? "madd" : "maddu";
         break;
      case Macc_SUB:
         ret = variable ? "msub" : "msubu";
         break;
      default:
         /* BUGFIX: the panic message previously read "showMIPSAccOp",
            which names no function in this file. */
         vpanic("showMIPSMaccOp");
         break;
   }
   return ret;
}
/* li: load an immediate into a register. */
MIPSInstr *MIPSInstr_LI(HReg dst, ULong imm)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag        = Min_LI;
   ins->Min.LI.dst = dst;
   ins->Min.LI.imm = imm;
   return ins;
}
/* ALU operation: dst = srcL <op> srcR (srcR may be reg or imm). */
MIPSInstr *MIPSInstr_Alu(MIPSAluOp op, HReg dst, HReg srcL, MIPSRH * srcR)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag          = Min_Alu;
   ins->Min.Alu.op   = op;
   ins->Min.Alu.dst  = dst;
   ins->Min.Alu.srcL = srcL;
   ins->Min.Alu.srcR = srcR;
   return ins;
}
/* Shift operation: dst = srcL shifted by srcR; sz32 selects the 32-bit
   (vs doubleword) variant. */
MIPSInstr *MIPSInstr_Shft(MIPSShftOp op, Bool sz32, HReg dst, HReg srcL,
                          MIPSRH * srcR)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag           = Min_Shft;
   ins->Min.Shft.op   = op;
   ins->Min.Shft.sz32 = sz32;
   ins->Min.Shft.dst  = dst;
   ins->Min.Shft.srcL = srcL;
   ins->Min.Shft.srcR = srcR;
   return ins;
}
/* Unary operation: dst = <op> src. */
MIPSInstr *MIPSInstr_Unary(MIPSUnaryOp op, HReg dst, HReg src)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag           = Min_Unary;
   ins->Min.Unary.op  = op;
   ins->Min.Unary.dst = dst;
   ins->Min.Unary.src = src;
   return ins;
}
/* Word compare: dst = (srcL <cond> srcR). */
MIPSInstr *MIPSInstr_Cmp(Bool syned, Bool sz32, HReg dst, HReg srcL, HReg srcR,
                         MIPSCondCode cond)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag           = Min_Cmp;
   ins->Min.Cmp.syned = syned;
   ins->Min.Cmp.sz32  = sz32;
   ins->Min.Cmp.dst   = dst;
   ins->Min.Cmp.srcL  = srcL;
   ins->Min.Cmp.srcR  = srcR;
   ins->Min.Cmp.cond  = cond;
   return ins;
}
/* multiply */
/* multiply */
MIPSInstr *MIPSInstr_Mul(Bool syned, Bool wid, Bool sz32, HReg dst, HReg srcL,
                         HReg srcR)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag              = Min_Mul;
   ins->Min.Mul.syned    = syned;
   ins->Min.Mul.widening = wid;   /* widen=True else False */
   ins->Min.Mul.sz32     = sz32;  /* True = 32 bits */
   ins->Min.Mul.dst      = dst;
   ins->Min.Mul.srcL     = srcL;
   ins->Min.Mul.srcR     = srcR;
   return ins;
}
/* msub */
/* msub: multiply-subtract into HI/LO (a Min_Macc with Macc_SUB). */
MIPSInstr *MIPSInstr_Msub(Bool syned, HReg srcL, HReg srcR)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag            = Min_Macc;
   ins->Min.Macc.op    = Macc_SUB;
   ins->Min.Macc.syned = syned;
   ins->Min.Macc.srcL  = srcL;
   ins->Min.Macc.srcR  = srcR;
   return ins;
}
/* madd */
/* madd: multiply-add into HI/LO (a Min_Macc with Macc_ADD). */
MIPSInstr *MIPSInstr_Madd(Bool syned, HReg srcL, HReg srcR)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag            = Min_Macc;
   ins->Min.Macc.op    = Macc_ADD;
   ins->Min.Macc.syned = syned;
   ins->Min.Macc.srcL  = srcL;
   ins->Min.Macc.srcR  = srcR;
   return ins;
}
/* div */
/* div: results land in HI/LO, hence no dst operand. */
MIPSInstr *MIPSInstr_Div(Bool syned, Bool sz32, HReg srcL, HReg srcR)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag           = Min_Div;
   ins->Min.Div.syned = syned;
   ins->Min.Div.sz32  = sz32;  /* True = 32 bits */
   ins->Min.Div.srcL  = srcL;
   ins->Min.Div.srcR  = srcR;
   return ins;
}
/* Conditional call to |target|.  |argiregs| is a bitmask of the GPRs
   carrying arguments. */
MIPSInstr *MIPSInstr_Call ( MIPSCondCode cond, Addr64 target, UInt argiregs,
                            HReg src, RetLoc rloc )
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag               = Min_Call;
   ins->Min.Call.cond     = cond;
   ins->Min.Call.target   = target;
   ins->Min.Call.argiregs = argiregs;
   ins->Min.Call.src      = src;
   ins->Min.Call.rloc     = rloc;
   /* Only $4 .. $7/$11 inclusive may be used as arg regs. */
   UInt okMask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7) | (1 << 8)
                 | (1 << 9) | (1 << 10) | (1 << 11);
   vassert(0 == (argiregs & ~okMask));
   vassert(is_sane_RetLoc(rloc));
   return ins;
}
/* Unconditional-call variant of MIPSInstr_Call.
   NOTE(review): .Min.Call.src is deliberately left unset here — confirm
   it is only consulted for conditional calls. */
MIPSInstr *MIPSInstr_CallAlways ( MIPSCondCode cond, Addr64 target,
                                  UInt argiregs, RetLoc rloc )
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag               = Min_Call;
   ins->Min.Call.cond     = cond;
   ins->Min.Call.target   = target;
   ins->Min.Call.argiregs = argiregs;
   ins->Min.Call.rloc     = rloc;
   /* Only $4 .. $7/$11 inclusive may be used as arg regs. */
   UInt okMask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7) | (1 << 8)
                 | (1 << 9) | (1 << 10) | (1 << 11);
   vassert(0 == (argiregs & ~okMask));
   vassert(is_sane_RetLoc(rloc));
   return ins;
}
/* Direct transfer to a known guest address |dstGA|, conditional on
   |cond|; |amPC| is where the guest PC is stored. */
MIPSInstr *MIPSInstr_XDirect ( Addr64 dstGA, MIPSAMode* amPC,
                               MIPSCondCode cond, Bool toFastEP ) {
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag                  = Min_XDirect;
   ins->Min.XDirect.dstGA    = dstGA;
   ins->Min.XDirect.amPC     = amPC;
   ins->Min.XDirect.cond     = cond;
   ins->Min.XDirect.toFastEP = toFastEP;
   return ins;
}
/* Indirect transfer: guest address comes from register |dstGA|. */
MIPSInstr *MIPSInstr_XIndir ( HReg dstGA, MIPSAMode* amPC,
                              MIPSCondCode cond ) {
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag              = Min_XIndir;
   ins->Min.XIndir.dstGA = dstGA;
   ins->Min.XIndir.amPC  = amPC;
   ins->Min.XIndir.cond  = cond;
   return ins;
}
/* Assisted transfer: like XIndir but routed through the dispatcher
   with jump-kind |jk|. */
MIPSInstr *MIPSInstr_XAssisted ( HReg dstGA, MIPSAMode* amPC,
                                 MIPSCondCode cond, IRJumpKind jk ) {
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag                 = Min_XAssisted;
   ins->Min.XAssisted.dstGA = dstGA;
   ins->Min.XAssisted.amPC  = amPC;
   ins->Min.XAssisted.cond  = cond;
   ins->Min.XAssisted.jk    = jk;
   return ins;
}
/* Load |sz| bytes from |src| into |dst|; 8-byte loads require 64-bit
   mode. */
MIPSInstr *MIPSInstr_Load(UChar sz, HReg dst, MIPSAMode * src, Bool mode64)
{
   vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
   if (sz == 8)
      vassert(mode64);
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag          = Min_Load;
   ins->Min.Load.sz  = sz;
   ins->Min.Load.src = src;
   ins->Min.Load.dst = dst;
   return ins;
}
/* Store |sz| bytes from |src| to |dst|; 8-byte stores require 64-bit
   mode. */
MIPSInstr *MIPSInstr_Store(UChar sz, MIPSAMode * dst, HReg src, Bool mode64)
{
   vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
   if (sz == 8)
      vassert(mode64);
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag           = Min_Store;
   ins->Min.Store.sz  = sz;
   ins->Min.Store.src = src;
   ins->Min.Store.dst = dst;
   return ins;
}
/* Load-linked (ll/lld); only word and doubleword sizes exist. */
MIPSInstr *MIPSInstr_LoadL(UChar sz, HReg dst, MIPSAMode * src, Bool mode64)
{
   vassert(sz == 4 || sz == 8);
   if (sz == 8)
      vassert(mode64);
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag           = Min_LoadL;
   ins->Min.LoadL.sz  = sz;
   ins->Min.LoadL.src = src;
   ins->Min.LoadL.dst = dst;
   return ins;
}
/* Compare-and-swap: atomically, old = *addr; if old == expd then
   *addr = data.  Emitted as an ll/sc sequence. */
MIPSInstr *MIPSInstr_Cas(UChar sz, HReg old, HReg addr,
                         HReg expd, HReg data, Bool mode64)
{
   vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
   if (sz == 8)
      vassert(mode64);
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag          = Min_Cas;
   ins->Min.Cas.sz   = sz;
   ins->Min.Cas.old  = old;
   ins->Min.Cas.addr = addr;
   ins->Min.Cas.expd = expd;
   ins->Min.Cas.data = data;
   return ins;
}
/* Store-conditional (sc/scd); only word and doubleword sizes exist. */
MIPSInstr *MIPSInstr_StoreC(UChar sz, MIPSAMode * dst, HReg src, Bool mode64)
{
   vassert(sz == 4 || sz == 8);
   if (sz == 8)
      vassert(mode64);
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag            = Min_StoreC;
   ins->Min.StoreC.sz  = sz;
   ins->Min.StoreC.src = src;
   ins->Min.StoreC.dst = dst;
   return ins;
}
/* mthi: move |src| into the HI special register. */
MIPSInstr *MIPSInstr_Mthi(HReg src)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag          = Min_Mthi;
   ins->Min.MtHL.src = src;
   return ins;
}
/* mtlo: move |src| into the LO special register. */
MIPSInstr *MIPSInstr_Mtlo(HReg src)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag          = Min_Mtlo;
   ins->Min.MtHL.src = src;
   return ins;
}
/* mfhi: move the HI special register into |dst|. */
MIPSInstr *MIPSInstr_Mfhi(HReg dst)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag          = Min_Mfhi;
   ins->Min.MfHL.dst = dst;
   return ins;
}
/* mflo: move the LO special register into |dst|. */
MIPSInstr *MIPSInstr_Mflo(HReg dst)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag          = Min_Mflo;
   ins->Min.MfHL.dst = dst;
   return ins;
}
/* Read/Write Link Register */
/* Read/Write Link Register */
MIPSInstr *MIPSInstr_RdWrLR(Bool wrLR, HReg gpr)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag             = Min_RdWrLR;
   ins->Min.RdWrLR.wrLR = wrLR;  /* True = write LR, False = read LR */
   ins->Min.RdWrLR.gpr  = gpr;
   return ins;
}
/* FP load/store of 4 or 8 bytes between |reg| and |addr|. */
MIPSInstr *MIPSInstr_FpLdSt(Bool isLoad, UChar sz, HReg reg, MIPSAMode * addr)
{
   vassert(sz == 4 || sz == 8);
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag               = Min_FpLdSt;
   ins->Min.FpLdSt.isLoad = isLoad;
   ins->Min.FpLdSt.sz     = sz;
   ins->Min.FpLdSt.reg    = reg;
   ins->Min.FpLdSt.addr   = addr;
   return ins;
}
/* FP unary operation: dst = <op> src. */
MIPSInstr *MIPSInstr_FpUnary(MIPSFpOp op, HReg dst, HReg src)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag             = Min_FpUnary;
   ins->Min.FpUnary.op  = op;
   ins->Min.FpUnary.dst = dst;
   ins->Min.FpUnary.src = src;
   return ins;
}
/* FP binary operation: dst = srcL <op> srcR. */
MIPSInstr *MIPSInstr_FpBinary(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag               = Min_FpBinary;
   ins->Min.FpBinary.op   = op;
   ins->Min.FpBinary.dst  = dst;
   ins->Min.FpBinary.srcL = srcL;
   ins->Min.FpBinary.srcR = srcR;
   return ins;
}
/* FP ternary operation (e.g. fused multiply-add forms). */
MIPSInstr *MIPSInstr_FpTernary ( MIPSFpOp op, HReg dst, HReg src1, HReg src2,
                                 HReg src3 )
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag                = Min_FpTernary;
   ins->Min.FpTernary.op   = op;
   ins->Min.FpTernary.dst  = dst;
   ins->Min.FpTernary.src1 = src1;
   ins->Min.FpTernary.src2 = src2;
   ins->Min.FpTernary.src3 = src3;
   return ins;
}
/* FP format conversion: dst = convert(src) per |op|. */
MIPSInstr *MIPSInstr_FpConvert(MIPSFpOp op, HReg dst, HReg src)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag               = Min_FpConvert;
   ins->Min.FpConvert.op  = op;
   ins->Min.FpConvert.dst = dst;
   ins->Min.FpConvert.src = src;
   return ins;
}
/* FP comparison: dst receives the result of comparing srcL, srcR. */
MIPSInstr *MIPSInstr_FpCompare(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag                = Min_FpCompare;
   ins->Min.FpCompare.op   = op;
   ins->Min.FpCompare.dst  = dst;
   ins->Min.FpCompare.srcL = srcL;
   ins->Min.FpCompare.srcR = srcR;
   return ins;
}
/* Move |src| into the FP control/status register. */
MIPSInstr *MIPSInstr_MtFCSR(HReg src)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag            = Min_MtFCSR;
   ins->Min.MtFCSR.src = src;
   return ins;
}
/* Move the FP control/status register into |dst|. */
MIPSInstr *MIPSInstr_MfFCSR(HReg dst)
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag            = Min_MfFCSR;
   ins->Min.MfFCSR.dst = dst;
   return ins;
}
/* Move between an FPR and a GPR (mfc1/mtc1 and doubleword forms). */
MIPSInstr *MIPSInstr_FpGpMove ( MIPSFpGpMoveOp op, HReg dst, HReg src )
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag              = Min_FpGpMove;
   ins->Min.FpGpMove.op  = op;
   ins->Min.FpGpMove.dst = dst;
   ins->Min.FpGpMove.src = src;
   return ins;
}
/* Conditional move: dst = src when |cond| register says so. */
MIPSInstr *MIPSInstr_MoveCond ( MIPSMoveCondOp op, HReg dst, HReg src,
                                HReg cond )
{
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag               = Min_MoveCond;
   ins->Min.MoveCond.op   = op;
   ins->Min.MoveCond.dst  = dst;
   ins->Min.MoveCond.src  = src;
   ins->Min.MoveCond.cond = cond;
   return ins;
}
/* Event check: decrement the counter at |amCounter|; on expiry jump to
   the address at |amFailAddr|. */
MIPSInstr *MIPSInstr_EvCheck ( MIPSAMode* amCounter,
                               MIPSAMode* amFailAddr ) {
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag                    = Min_EvCheck;
   ins->Min.EvCheck.amCounter  = amCounter;
   ins->Min.EvCheck.amFailAddr = amFailAddr;
   return ins;
}
/* Profile-counter increment; carries no operands. */
MIPSInstr* MIPSInstr_ProfInc ( void ) {
   MIPSInstr *ins = LibVEX_Alloc_inline(sizeof(MIPSInstr));
   ins->tag = Min_ProfInc;
   return ins;
}
/* -------- Pretty Print instructions ------------- */
/* -------- Pretty Print instructions ------------- */
/* Print "li <dst>,<imm>" with the immediate in full 64-bit hex. */
static void ppLoadImm(HReg dest, ULong value, Bool mode64)
{
   vex_printf("li ");
   ppHRegMIPS(dest, mode64);
   vex_printf(",0x%016llx", value);
}
void ppMIPSInstr(const MIPSInstr * i, Bool mode64)
{
switch (i->tag) {
case Min_LI:
ppLoadImm(i->Min.LI.dst, i->Min.LI.imm, mode64);
break;
case Min_Alu: {
HReg r_srcL = i->Min.Alu.srcL;
MIPSRH *rh_srcR = i->Min.Alu.srcR;
/* generic */
vex_printf("%s ", showMIPSAluOp(i->Min.Alu.op,
toBool(rh_srcR->tag == Mrh_Imm)));
ppHRegMIPS(i->Min.Alu.dst, mode64);
vex_printf(",");
ppHRegMIPS(r_srcL, mode64);
vex_printf(",");
ppMIPSRH(rh_srcR, mode64);
return;
}
case Min_Shft: {
HReg r_srcL = i->Min.Shft.srcL;
MIPSRH *rh_srcR = i->Min.Shft.srcR;
vex_printf("%s ", showMIPSShftOp(i->Min.Shft.op,
toBool(rh_srcR->tag == Mrh_Imm),
i->Min.Shft.sz32));
ppHRegMIPS(i->Min.Shft.dst, mode64);
vex_printf(",");
ppHRegMIPS(r_srcL, mode64);
vex_printf(",");
ppMIPSRH(rh_srcR, mode64);
return;
}
case Min_Unary: {
vex_printf("%s ", showMIPSUnaryOp(i->Min.Unary.op));
ppHRegMIPS(i->Min.Unary.dst, mode64);
vex_printf(",");
ppHRegMIPS(i->Min.Unary.src, mode64);
return;
}
case Min_Cmp: {
vex_printf("word_compare ");
ppHRegMIPS(i->Min.Cmp.dst, mode64);
vex_printf(" = %s ( ", showMIPSCondCode(i->Min.Cmp.cond));
ppHRegMIPS(i->Min.Cmp.srcL, mode64);
vex_printf(", ");
ppHRegMIPS(i->Min.Cmp.srcR, mode64);
vex_printf(" )");
return;
}
case Min_Mul: {
switch (i->Min.Mul.widening) {
case False:
vex_printf("mul ");
ppHRegMIPS(i->Min.Mul.dst, mode64);
vex_printf(", ");
ppHRegMIPS(i->Min.Mul.srcL, mode64);
vex_printf(", ");
ppHRegMIPS(i->Min.Mul.srcR, mode64);
return;
case True:
vex_printf("%s%s ", i->Min.Mul.sz32 ? "mult" : "dmult",
i->Min.Mul.syned ? "" : "u");
ppHRegMIPS(i->Min.Mul.dst, mode64);
vex_printf(", ");
ppHRegMIPS(i->Min.Mul.srcL, mode64);
vex_printf(", ");
ppHRegMIPS(i->Min.Mul.srcR, mode64);
return;
}
break;
}
case Min_Mthi: {
vex_printf("mthi ");
ppHRegMIPS(i->Min.MtHL.src, mode64);
return;
}
case Min_Mtlo: {
vex_printf("mtlo ");
ppHRegMIPS(i->Min.MtHL.src, mode64);
return;
}
case Min_Mfhi: {
vex_printf("mfhi ");
ppHRegMIPS(i->Min.MfHL.dst, mode64);
return;
}
case Min_Mflo: {
vex_printf("mflo ");
ppHRegMIPS(i->Min.MfHL.dst, mode64);
return;
}
case Min_Macc: {
vex_printf("%s ", showMIPSMaccOp(i->Min.Macc.op, i->Min.Macc.syned));
ppHRegMIPS(i->Min.Macc.srcL, mode64);
vex_printf(", ");
ppHRegMIPS(i->Min.Macc.srcR, mode64);
return;
}
case Min_Div: {
if (!i->Min.Div.sz32)
vex_printf("d");
vex_printf("div");
vex_printf("%s ", i->Min.Div.syned ? "s" : "u");
ppHRegMIPS(i->Min.Div.srcL, mode64);
vex_printf(", ");
ppHRegMIPS(i->Min.Div.srcR, mode64);
return;
}
case Min_Call: {
Int n;
vex_printf("call: ");
if (i->Min.Call.cond != MIPScc_AL) {
vex_printf("if (%s) ", showMIPSCondCode(i->Min.Call.cond));
}
vex_printf(" {");
if (!mode64)
vex_printf(" addiu $29, $29, -16");
ppLoadImm(hregMIPS_GPR25(mode64), i->Min.Call.target, mode64);
vex_printf(" ; jarl $31, $25; # args [");
for (n = 0; n < 32; n++) {
if (i->Min.Call.argiregs & (1 << n)) {
vex_printf("$%d", n);
if ((i->Min.Call.argiregs >> n) > 1)
vex_printf(",");
}
}
vex_printf("] nop; ");
if (!mode64)
vex_printf("addiu $29, $29, 16; ]");
break;
}
case Min_XDirect:
vex_printf("(xDirect) ");
vex_printf("if (guest_COND.%s) { ",
showMIPSCondCode(i->Min.XDirect.cond));
vex_printf("move $9, 0x%x,", (UInt)i->Min.XDirect.dstGA);
vex_printf("; sw $9, ");
ppMIPSAMode(i->Min.XDirect.amPC, mode64);
vex_printf("; move $9, $disp_cp_chain_me_to_%sEP; jalr $9; nop}",
i->Min.XDirect.toFastEP ? "fast" : "slow");
return;
case Min_XIndir:
vex_printf("(xIndir) ");
vex_printf("if (guest_COND.%s) { sw ",
showMIPSCondCode(i->Min.XIndir.cond));
ppHRegMIPS(i->Min.XIndir.dstGA, mode64);
vex_printf(", ");
ppMIPSAMode(i->Min.XIndir.amPC, mode64);
vex_printf("; move $9, $disp_indir; jalr $9; nop}");
return;
case Min_XAssisted:
vex_printf("(xAssisted) ");
vex_printf("if (guest_COND.%s) { ",
showMIPSCondCode(i->Min.XAssisted.cond));
vex_printf("sw ");
ppHRegMIPS(i->Min.XAssisted.dstGA, mode64);
vex_printf(", ");
ppMIPSAMode(i->Min.XAssisted.amPC, mode64);
vex_printf("; move $9, $IRJumpKind_to_TRCVAL(%d)",
(Int)i->Min.XAssisted.jk);
vex_printf("; move $9, $disp_assisted; jalr $9; nop; }");
return;
case Min_Load: {
Bool idxd = toBool(i->Min.Load.src->tag == Mam_RR);
UChar sz = i->Min.Load.sz;
HChar c_sz = sz == 1 ? 'b' : sz == 2 ? 'h' : sz == 4 ? 'w' : 'd';
vex_printf("l%c%s ", c_sz, idxd ? "x" : "");
ppHRegMIPS(i->Min.Load.dst, mode64);
vex_printf(",");
ppMIPSAMode(i->Min.Load.src, mode64);
return;
}
case Min_Store: {
UChar sz = i->Min.Store.sz;
Bool idxd = toBool(i->Min.Store.dst->tag == Mam_RR);
HChar c_sz = sz == 1 ? 'b' : sz == 2 ? 'h' : sz == 4 ? 'w' : 'd';
vex_printf("s%c%s ", c_sz, idxd ? "x" : "");
ppHRegMIPS(i->Min.Store.src, mode64);
vex_printf(",");
ppMIPSAMode(i->Min.Store.dst, mode64);
return;
}
case Min_LoadL: {
vex_printf("ll ");
ppHRegMIPS(i->Min.LoadL.dst, mode64);
vex_printf(",");
ppMIPSAMode(i->Min.LoadL.src, mode64);
return;
}
case Min_Cas: {
Bool sz8 = toBool(i->Min.Cas.sz == 8);
/*
* ll(d) old, 0(addr)
* bne old, expd, end
* nop
* (d)addiu old, old, 1
* sc(d) data, 0(addr)
* movn old, expd, data
* end:
*/
// ll(d) old, 0(addr)
vex_printf("cas: ");
vex_printf("%s ", sz8 ? "lld" : "ll");
ppHRegMIPS(i->Min.Cas.old , mode64);
vex_printf(", 0(");
ppHRegMIPS(i->Min.Cas.addr , mode64);
vex_printf(")\n");
vex_printf("bne ");
ppHRegMIPS(i->Min.Cas.old , mode64);
vex_printf(", ");
ppHRegMIPS(i->Min.Cas.expd , mode64);
vex_printf(", end\n");
vex_printf("nop\n");
vex_printf("%s ", sz8 ? "daddiu" : "addiu");
ppHRegMIPS(i->Min.Cas.old , mode64);
vex_printf(", ");
ppHRegMIPS(i->Min.Cas.old , mode64);
vex_printf(", 1\n");
vex_printf("%s ", sz8 ? "scd" : "sc");
ppHRegMIPS(i->Min.Cas.data , mode64);
vex_printf(", 0(");
ppHRegMIPS(i->Min.Cas.addr , mode64);
vex_printf(")\n");
vex_printf("movn ");
ppHRegMIPS(i->Min.Cas.old , mode64);
vex_printf(", ");
ppHRegMIPS(i->Min.Cas.expd , mode64);
vex_printf(", ");
ppHRegMIPS(i->Min.Cas.data , mode64);
vex_printf("\nend:");
return;
}
case Min_StoreC: {
vex_printf("sc ");
ppHRegMIPS(i->Min.StoreC.src, mode64);
vex_printf(",");
ppMIPSAMode(i->Min.StoreC.dst, mode64);
return;
}
case Min_RdWrLR: {
vex_printf("%s ", i->Min.RdWrLR.wrLR ? "mtlr" : "mflr");
ppHRegMIPS(i->Min.RdWrLR.gpr, mode64);
return;
}
case Min_FpUnary:
vex_printf("%s ", showMIPSFpOp(i->Min.FpUnary.op));
ppHRegMIPS(i->Min.FpUnary.dst, mode64);
vex_printf(",");
ppHRegMIPS(i->Min.FpUnary.src, mode64);
return;
case Min_FpBinary:
vex_printf("%s", showMIPSFpOp(i->Min.FpBinary.op));
ppHRegMIPS(i->Min.FpBinary.dst, mode64);
vex_printf(",");
ppHRegMIPS(i->Min.FpBinary.srcL, mode64);
vex_printf(",");
ppHRegMIPS(i->Min.FpBinary.srcR, mode64);
return;
case Min_FpTernary:
vex_printf("%s", showMIPSFpOp(i->Min.FpTernary.op));
ppHRegMIPS(i->Min.FpTernary.dst, mode64);
vex_printf(",");
ppHRegMIPS(i->Min.FpTernary.src1, mode64);
vex_printf(",");
ppHRegMIPS(i->Min.FpTernary.src2, mode64);
vex_printf(",");
ppHRegMIPS(i->Min.FpTernary.src3, mode64);
return;
case Min_FpConvert:
vex_printf("%s", showMIPSFpOp(i->Min.FpConvert.op));
ppHRegMIPS(i->Min.FpConvert.dst, mode64);
vex_printf(",");
ppHRegMIPS(i->Min.FpConvert.src, mode64);
return;
case Min_FpCompare:
vex_printf("%s ", showMIPSFpOp(i->Min.FpCompare.op));
ppHRegMIPS(i->Min.FpCompare.srcL, mode64);
vex_printf(",");
ppHRegMIPS(i->Min.FpCompare.srcR, mode64);
return;
case Min_FpMulAcc:
vex_printf("%s ", showMIPSFpOp(i->Min.FpMulAcc.op));
ppHRegMIPS(i->Min.FpMulAcc.dst, mode64);
vex_printf(",");
ppHRegMIPS(i->Min.FpMulAcc.srcML, mode64);
vex_printf(",");
ppHRegMIPS(i->Min.FpMulAcc.srcMR, mode64);
vex_printf(",");
ppHRegMIPS(i->Min.FpMulAcc.srcAcc, mode64);
return;
case Min_FpLdSt: {
if (i->Min.FpLdSt.sz == 4) {
if (i->Min.FpLdSt.isLoad) {
vex_printf("lwc1 ");
ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
vex_printf(",");
ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
} else {
vex_printf("swc1 ");
ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
vex_printf(",");
ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
}
} else if (i->Min.FpLdSt.sz == 8) {
if (i->Min.FpLdSt.isLoad) {
vex_printf("ldc1 ");
ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
vex_printf(",");
ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
} else {
vex_printf("sdc1 ");
ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
vex_printf(",");
ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
}
}
return;
}
case Min_MtFCSR: {
vex_printf("ctc1 ");
ppHRegMIPS(i->Min.MtFCSR.src, mode64);
vex_printf(", $31");
return;
}
case Min_MfFCSR: {
vex_printf("ctc1 ");
ppHRegMIPS(i->Min.MfFCSR.dst, mode64);
vex_printf(", $31");
return;
}
case Min_FpGpMove: {
vex_printf("%s ", showMIPSFpGpMoveOp(i->Min.FpGpMove.op));
ppHRegMIPS(i->Min.FpGpMove.dst, mode64);
vex_printf(", ");
ppHRegMIPS(i->Min.FpGpMove.src, mode64);
return;
}
case Min_MoveCond: {
vex_printf("%s", showMIPSMoveCondOp(i->Min.MoveCond.op));
ppHRegMIPS(i->Min.MoveCond.dst, mode64);
vex_printf(", ");
ppHRegMIPS(i->Min.MoveCond.src, mode64);
vex_printf(", ");
ppHRegMIPS(i->Min.MoveCond.cond, mode64);
return;
}
case Min_EvCheck:
vex_printf("(evCheck) lw $9, ");
ppMIPSAMode(i->Min.EvCheck.amCounter, mode64);
vex_printf("; addiu $9, $9, -1");
vex_printf("; sw $9, ");
ppMIPSAMode(i->Min.EvCheck.amCounter, mode64);
vex_printf("; bgez $t9, nofail; jalr *");
ppMIPSAMode(i->Min.EvCheck.amFailAddr, mode64);
vex_printf("; nofail:");
return;
case Min_ProfInc:
if (mode64)
vex_printf("(profInc) move $9, ($NotKnownYet); "
"ld $8, 0($9); "
"daddiu $8, $8, 1; "
"sd $8, 0($9); " );
else
vex_printf("(profInc) move $9, ($NotKnownYet); "
"lw $8, 0($9); "
"addiu $8, $8, 1; "
"sw $8, 0($9); "
"sltiu $1, $8, 1; "
"lw $8, 4($9); "
"addu $8, $8, $1; "
"sw $8, 4($9); " );
return;
default:
vpanic("ppMIPSInstr");
break;
}
}
/* --------- Helpers for register allocation. --------- */
/* Tell the register allocator how instruction |i| uses registers: which
   HRegs it reads, writes or modifies.  Results are accumulated in |u|. */
void getRegUsage_MIPSInstr(HRegUsage * u, const MIPSInstr * i, Bool mode64)
{
   initHRegUsage(u);
   switch (i->tag) {
      case Min_LI:
         addHRegUse(u, HRmWrite, i->Min.LI.dst);
         break;
      case Min_Alu:
         addHRegUse(u, HRmRead, i->Min.Alu.srcL);
         addRegUsage_MIPSRH(u, i->Min.Alu.srcR);
         addHRegUse(u, HRmWrite, i->Min.Alu.dst);
         return;
      case Min_Shft:
         addHRegUse(u, HRmRead, i->Min.Shft.srcL);
         addRegUsage_MIPSRH(u, i->Min.Shft.srcR);
         addHRegUse(u, HRmWrite, i->Min.Shft.dst);
         return;
      case Min_Cmp:
         addHRegUse(u, HRmRead, i->Min.Cmp.srcL);
         addHRegUse(u, HRmRead, i->Min.Cmp.srcR);
         addHRegUse(u, HRmWrite, i->Min.Cmp.dst);
         return;
      case Min_Unary:
         addHRegUse(u, HRmRead, i->Min.Unary.src);
         addHRegUse(u, HRmWrite, i->Min.Unary.dst);
         return;
      case Min_Mul:
         addHRegUse(u, HRmWrite, i->Min.Mul.dst);
         addHRegUse(u, HRmRead, i->Min.Mul.srcL);
         addHRegUse(u, HRmRead, i->Min.Mul.srcR);
         return;
      case Min_Mthi:
      case Min_Mtlo:
         /* Conservative: both HI and LO are claimed written, although each
            of the two insns actually targets only one of them. */
         addHRegUse(u, HRmWrite, hregMIPS_HI(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_LO(mode64));
         addHRegUse(u, HRmRead, i->Min.MtHL.src);
         return;
      case Min_Mfhi:
      case Min_Mflo:
         /* Likewise conservative: both HI and LO claimed read. */
         addHRegUse(u, HRmRead, hregMIPS_HI(mode64));
         addHRegUse(u, HRmRead, hregMIPS_LO(mode64));
         addHRegUse(u, HRmWrite, i->Min.MfHL.dst);
         return;
      case Min_MtFCSR:
         addHRegUse(u, HRmRead, i->Min.MtFCSR.src);
         return;
      case Min_MfFCSR:
         addHRegUse(u, HRmWrite, i->Min.MfFCSR.dst);
         return;
      case Min_Macc:
         /* Multiply-accumulate reads and writes the HI/LO accumulator. */
         addHRegUse(u, HRmModify, hregMIPS_HI(mode64));
         addHRegUse(u, HRmModify, hregMIPS_LO(mode64));
         addHRegUse(u, HRmRead, i->Min.Macc.srcL);
         addHRegUse(u, HRmRead, i->Min.Macc.srcR);
         return;
      case Min_Div:
         /* div places quotient in LO and remainder in HI. */
         addHRegUse(u, HRmWrite, hregMIPS_HI(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_LO(mode64));
         addHRegUse(u, HRmRead, i->Min.Div.srcL);
         addHRegUse(u, HRmRead, i->Min.Div.srcR);
         return;
      case Min_Call: {
         /* Logic and comments copied/modified from x86, ppc and arm back end.
            First off, claim it trashes all the caller-saved regs
            which fall within the register allocator's jurisdiction. */
         if (i->Min.Call.cond != MIPScc_AL)
            addHRegUse(u, HRmRead, i->Min.Call.src);
         UInt argir;
         addHRegUse(u, HRmWrite, hregMIPS_GPR1(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR2(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR3(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR4(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR5(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR6(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR7(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR8(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR9(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR10(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR11(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR12(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR13(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR14(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR15(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR24(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR25(mode64));
         addHRegUse(u, HRmWrite, hregMIPS_GPR31(mode64));
         /* Now we have to state any parameter-carrying registers
            which might be read. This depends on the argiregs field. */
         argir = i->Min.Call.argiregs;
         if (argir & (1<<11)) addHRegUse(u, HRmRead, hregMIPS_GPR11(mode64));
         if (argir & (1<<10)) addHRegUse(u, HRmRead, hregMIPS_GPR10(mode64));
         if (argir & (1<<9)) addHRegUse(u, HRmRead, hregMIPS_GPR9(mode64));
         if (argir & (1<<8)) addHRegUse(u, HRmRead, hregMIPS_GPR8(mode64));
         if (argir & (1<<7)) addHRegUse(u, HRmRead, hregMIPS_GPR7(mode64));
         if (argir & (1<<6)) addHRegUse(u, HRmRead, hregMIPS_GPR6(mode64));
         if (argir & (1<<5)) addHRegUse(u, HRmRead, hregMIPS_GPR5(mode64));
         if (argir & (1<<4)) addHRegUse(u, HRmRead, hregMIPS_GPR4(mode64));
         /* Only $4..$11 may carry arguments. */
         vassert(0 == (argir & ~((1 << 4) | (1 << 5) | (1 << 6)
                                 | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10)
                                 | (1 << 11))));
         return;
      }
      /* XDirect/XIndir/XAssisted are also a bit subtle. They
         conditionally exit the block. Hence we only need to list (1)
         the registers that they read, and (2) the registers that they
         write in the case where the block is not exited. (2) is
         empty, hence only (1) is relevant here. */
      case Min_XDirect:
         addRegUsage_MIPSAMode(u, i->Min.XDirect.amPC);
         return;
      case Min_XIndir:
         addHRegUse(u, HRmRead, i->Min.XIndir.dstGA);
         addRegUsage_MIPSAMode(u, i->Min.XIndir.amPC);
         return;
      case Min_XAssisted:
         addHRegUse(u, HRmRead, i->Min.XAssisted.dstGA);
         addRegUsage_MIPSAMode(u, i->Min.XAssisted.amPC);
         return;
      case Min_Load:
         addRegUsage_MIPSAMode(u, i->Min.Load.src);
         addHRegUse(u, HRmWrite, i->Min.Load.dst);
         return;
      case Min_Store:
         addHRegUse(u, HRmRead, i->Min.Store.src);
         addRegUsage_MIPSAMode(u, i->Min.Store.dst);
         return;
      case Min_LoadL:
         addRegUsage_MIPSAMode(u, i->Min.LoadL.src);
         addHRegUse(u, HRmWrite, i->Min.LoadL.dst);
         return;
      case Min_Cas:
         /* See the ll/sc expansion in ppMIPSInstr: |old| receives the
            loaded value, |data| is clobbered by sc's success flag. */
         addHRegUse(u, HRmWrite, i->Min.Cas.old);
         addHRegUse(u, HRmRead, i->Min.Cas.addr);
         addHRegUse(u, HRmRead, i->Min.Cas.expd);
         addHRegUse(u, HRmModify, i->Min.Cas.data);
         return;
      case Min_StoreC:
         /* sc writes its success/failure indication back into the source
            register, hence both Write and Read. */
         addHRegUse(u, HRmWrite, i->Min.StoreC.src);
         addHRegUse(u, HRmRead, i->Min.StoreC.src);
         addRegUsage_MIPSAMode(u, i->Min.StoreC.dst);
         return;
      case Min_RdWrLR:
         addHRegUse(u, (i->Min.RdWrLR.wrLR ? HRmRead : HRmWrite),
                        i->Min.RdWrLR.gpr);
         return;
      case Min_FpLdSt:
         if (i->Min.FpLdSt.sz == 4) {
            addHRegUse(u, (i->Min.FpLdSt.isLoad ? HRmWrite : HRmRead),
                           i->Min.FpLdSt.reg);
            addRegUsage_MIPSAMode(u, i->Min.FpLdSt.addr);
            return;
         } else if (i->Min.FpLdSt.sz == 8) {
            addHRegUse(u, (i->Min.FpLdSt.isLoad ? HRmWrite : HRmRead),
                           i->Min.FpLdSt.reg);
            addRegUsage_MIPSAMode(u, i->Min.FpLdSt.addr);
            return;
         }
         break;
      case Min_FpUnary:
         addHRegUse(u, HRmWrite, i->Min.FpUnary.dst);
         addHRegUse(u, HRmRead, i->Min.FpUnary.src);
         return;
      case Min_FpBinary:
         addHRegUse(u, HRmWrite, i->Min.FpBinary.dst);
         addHRegUse(u, HRmRead, i->Min.FpBinary.srcL);
         addHRegUse(u, HRmRead, i->Min.FpBinary.srcR);
         return;
      case Min_FpTernary:
         addHRegUse(u, HRmWrite, i->Min.FpTernary.dst);
         addHRegUse(u, HRmRead, i->Min.FpTernary.src1);
         addHRegUse(u, HRmRead, i->Min.FpTernary.src2);
         addHRegUse(u, HRmRead, i->Min.FpTernary.src3);
         return;
      case Min_FpConvert:
         addHRegUse(u, HRmWrite, i->Min.FpConvert.dst);
         addHRegUse(u, HRmRead, i->Min.FpConvert.src);
         return;
      case Min_FpCompare:
         addHRegUse(u, HRmWrite, i->Min.FpCompare.dst);
         addHRegUse(u, HRmRead, i->Min.FpCompare.srcL);
         addHRegUse(u, HRmRead, i->Min.FpCompare.srcR);
         return;
      case Min_FpGpMove:
         addHRegUse(u, HRmWrite, i->Min.FpGpMove.dst);
         addHRegUse(u, HRmRead, i->Min.FpGpMove.src);
         return;
      case Min_MoveCond:
         /* Conditional move: dst retains its old value when the condition
            is false, hence Modify rather than Write. */
         addHRegUse(u, HRmModify, i->Min.MoveCond.dst);
         addHRegUse(u, HRmRead, i->Min.MoveCond.src);
         addHRegUse(u, HRmRead, i->Min.MoveCond.cond);
         return;
      case Min_EvCheck:
         /* We expect both amodes only to mention the guest state pointer,
            so this is in fact pointless, since that register isn't
            allocatable, but anyway.. */
         addRegUsage_MIPSAMode(u, i->Min.EvCheck.amCounter);
         addRegUsage_MIPSAMode(u, i->Min.EvCheck.amFailAddr);
         return;
      case Min_ProfInc:
         /* does not use any registers known to RA.  (The emitted code
            uses $8/$9 -- see ppMIPSInstr -- which are fixed.) */
         return;
      default:
         ppMIPSInstr(i, mode64);
         vpanic("getRegUsage_MIPSInstr");
         break;
   }
}
/* local helper */
static void mapReg(HRegRemap * m, HReg * r)
{
   /* Rewrite *r in place through the vreg-to-rreg mapping |m|. */
   HReg mapped = lookupHRegRemap(m, *r);
   *r = mapped;
}
/* Apply the register allocator's vreg-to-rreg mapping |m| to every
   register mentioned by instruction |i|, rewriting it in place. */
void mapRegs_MIPSInstr(HRegRemap * m, MIPSInstr * i, Bool mode64)
{
   switch (i->tag) {
      case Min_LI:
         mapReg(m, &i->Min.LI.dst);
         break;
      case Min_Alu:
         mapReg(m, &i->Min.Alu.srcL);
         mapRegs_MIPSRH(m, i->Min.Alu.srcR);
         mapReg(m, &i->Min.Alu.dst);
         return;
      case Min_Shft:
         mapReg(m, &i->Min.Shft.srcL);
         mapRegs_MIPSRH(m, i->Min.Shft.srcR);
         mapReg(m, &i->Min.Shft.dst);
         return;
      case Min_Cmp:
         mapReg(m, &i->Min.Cmp.srcL);
         mapReg(m, &i->Min.Cmp.srcR);
         mapReg(m, &i->Min.Cmp.dst);
         return;
      case Min_Unary:
         mapReg(m, &i->Min.Unary.src);
         mapReg(m, &i->Min.Unary.dst);
         return;
      case Min_Mul:
         mapReg(m, &i->Min.Mul.dst);
         mapReg(m, &i->Min.Mul.srcL);
         mapReg(m, &i->Min.Mul.srcR);
         return;
      case Min_Mthi:
      case Min_Mtlo:
         /* HI/LO themselves are fixed, only the GPR operand is mapped. */
         mapReg(m, &i->Min.MtHL.src);
         return;
      case Min_Mfhi:
      case Min_Mflo:
         mapReg(m, &i->Min.MfHL.dst);
         return;
      case Min_Macc:
         mapReg(m, &i->Min.Macc.srcL);
         mapReg(m, &i->Min.Macc.srcR);
         return;
      case Min_Div:
         mapReg(m, &i->Min.Div.srcL);
         mapReg(m, &i->Min.Div.srcR);
         return;
      case Min_Call:
      {
         /* The condition source is the only allocatable register here,
            and only when the call is conditional. */
         if (i->Min.Call.cond != MIPScc_AL)
            mapReg(m, &i->Min.Call.src);
         return;
      }
      case Min_XDirect:
         mapRegs_MIPSAMode(m, i->Min.XDirect.amPC);
         return;
      case Min_XIndir:
         mapReg(m, &i->Min.XIndir.dstGA);
         mapRegs_MIPSAMode(m, i->Min.XIndir.amPC);
         return;
      case Min_XAssisted:
         mapReg(m, &i->Min.XAssisted.dstGA);
         mapRegs_MIPSAMode(m, i->Min.XAssisted.amPC);
         return;
      case Min_Load:
         mapRegs_MIPSAMode(m, i->Min.Load.src);
         mapReg(m, &i->Min.Load.dst);
         return;
      case Min_Store:
         mapReg(m, &i->Min.Store.src);
         mapRegs_MIPSAMode(m, i->Min.Store.dst);
         return;
      case Min_LoadL:
         mapRegs_MIPSAMode(m, i->Min.LoadL.src);
         mapReg(m, &i->Min.LoadL.dst);
         return;
      case Min_Cas:
         mapReg(m, &i->Min.Cas.old);
         mapReg(m, &i->Min.Cas.addr);
         mapReg(m, &i->Min.Cas.expd);
         mapReg(m, &i->Min.Cas.data);
         return;
      case Min_StoreC:
         mapReg(m, &i->Min.StoreC.src);
         mapRegs_MIPSAMode(m, i->Min.StoreC.dst);
         return;
      case Min_RdWrLR:
         mapReg(m, &i->Min.RdWrLR.gpr);
         return;
      case Min_FpLdSt:
         if (i->Min.FpLdSt.sz == 4) {
            mapReg(m, &i->Min.FpLdSt.reg);
            mapRegs_MIPSAMode(m, i->Min.FpLdSt.addr);
            return;
         } else if (i->Min.FpLdSt.sz == 8) {
            mapReg(m, &i->Min.FpLdSt.reg);
            mapRegs_MIPSAMode(m, i->Min.FpLdSt.addr);
            return;
         }
         break;
      case Min_FpUnary:
         mapReg(m, &i->Min.FpUnary.dst);
         mapReg(m, &i->Min.FpUnary.src);
         return;
      case Min_FpBinary:
         mapReg(m, &i->Min.FpBinary.dst);
         mapReg(m, &i->Min.FpBinary.srcL);
         mapReg(m, &i->Min.FpBinary.srcR);
         return;
      case Min_FpTernary:
         mapReg(m, &i->Min.FpTernary.dst);
         mapReg(m, &i->Min.FpTernary.src1);
         mapReg(m, &i->Min.FpTernary.src2);
         mapReg(m, &i->Min.FpTernary.src3);
         return;
      case Min_FpConvert:
         mapReg(m, &i->Min.FpConvert.dst);
         mapReg(m, &i->Min.FpConvert.src);
         return;
      case Min_FpCompare:
         mapReg(m, &i->Min.FpCompare.dst);
         mapReg(m, &i->Min.FpCompare.srcL);
         mapReg(m, &i->Min.FpCompare.srcR);
         return;
      case Min_MtFCSR:
         mapReg(m, &i->Min.MtFCSR.src);
         return;
      case Min_MfFCSR:
         mapReg(m, &i->Min.MfFCSR.dst);
         return;
      case Min_FpGpMove:
         mapReg(m, &i->Min.FpGpMove.dst);
         mapReg(m, &i->Min.FpGpMove.src);
         return;
      case Min_MoveCond:
         mapReg(m, &i->Min.MoveCond.dst);
         mapReg(m, &i->Min.MoveCond.src);
         mapReg(m, &i->Min.MoveCond.cond);
         return;
      case Min_EvCheck:
         /* We expect both amodes only to mention the guest state pointer,
            so this is in fact pointless, since that register isn't
            allocatable, but anyway.. */
         mapRegs_MIPSAMode(m, i->Min.EvCheck.amCounter);
         mapRegs_MIPSAMode(m, i->Min.EvCheck.amFailAddr);
         return;
      case Min_ProfInc:
         /* does not use any registers. */
         return;
      default:
         ppMIPSInstr(i, mode64);
         vpanic("mapRegs_MIPSInstr");
         break;
   }
}
/* Figure out if i represents a reg-reg move, and if so assign the
source and destination to *src and *dst. If in doubt say No. Used
by the register allocator to do move coalescing.
*/
Bool isMove_MIPSInstr(const MIPSInstr * i, HReg * src, HReg * dst)
{
   /* An integer reg-reg move appears as "or rd, rs, rs".  Anything
      else is not a coalescable move. */
   if (i->tag != Min_Alu)
      return False;

   if (i->Min.Alu.op != Malu_OR
       || i->Min.Alu.srcR->tag != Mrh_Reg
       || !sameHReg(i->Min.Alu.srcR->Mrh.Reg.reg, i->Min.Alu.srcL))
      return False;

   *src = i->Min.Alu.srcL;
   *dst = i->Min.Alu.dst;
   return True;
}
/* Generate mips spill/reload instructions under the direction of the
register allocator. */
void genSpill_MIPS( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2, HReg rreg,
                    Int offsetB, Bool mode64)
{
   /* Emit an instruction spilling real register |rreg| to the guest state
      area at byte offset |offsetB|.  Only *i1 is produced; *i2 is set to
      NULL so the caller sees a well-defined value. */
   MIPSAMode *am;
   vassert(offsetB >= 0);
   vassert(!hregIsVirtual(rreg));
   *i1 = *i2 = NULL;
   am = MIPSAMode_IR(offsetB, GuestStatePointer(mode64));
   switch (hregClass(rreg)) {
      case HRcInt64:
         vassert(mode64);
         *i1 = MIPSInstr_Store(8, am, rreg, mode64);
         break;
      case HRcInt32:
         vassert(!mode64);
         *i1 = MIPSInstr_Store(4, am, rreg, mode64);
         break;
      case HRcFlt32:
         vassert(!mode64);
         *i1 = MIPSInstr_FpLdSt(False /*Store */ , 4, rreg, am);
         break;
      case HRcFlt64:
         *i1 = MIPSInstr_FpLdSt(False /*Store */ , 8, rreg, am);
         break;
      default:
         ppHRegClass(hregClass(rreg));
         vpanic("genSpill_MIPS: unimplemented regclass");
         break;
   }
}
/* Emit an instruction reloading real register |rreg| from the guest state
   area at byte offset |offsetB|.  Mirrors genSpill_MIPS: only *i1 is
   produced; *i2 is set to NULL. */
void genReload_MIPS( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2, HReg rreg,
                     Int offsetB, Bool mode64)
{
   MIPSAMode *am;
   /* Consistency with genSpill_MIPS: check the offset, and initialise
      both OUT parameters.  Previously *i2 was left indeterminate here,
      even though genSpill_MIPS NULLs it. */
   vassert(offsetB >= 0);
   vassert(!hregIsVirtual(rreg));
   *i1 = *i2 = NULL;
   am = MIPSAMode_IR(offsetB, GuestStatePointer(mode64));
   switch (hregClass(rreg)) {
      case HRcInt64:
         vassert(mode64);
         *i1 = MIPSInstr_Load(8, rreg, am, mode64);
         break;
      case HRcInt32:
         vassert(!mode64);
         *i1 = MIPSInstr_Load(4, rreg, am, mode64);
         break;
      case HRcFlt32:
         /* NOTE(review): in mode64 a single-precision reg is reloaded with
            a 64-bit FP load -- presumably because 64-bit hosts keep singles
            in full-width FP registers; confirm against genSpill_MIPS, which
            asserts !mode64 for this class. */
         if (mode64)
            *i1 = MIPSInstr_FpLdSt(True /*Load */ , 8, rreg, am);
         else
            *i1 = MIPSInstr_FpLdSt(True /*Load */ , 4, rreg, am);
         break;
      case HRcFlt64:
         *i1 = MIPSInstr_FpLdSt(True /*Load */ , 8, rreg, am);
         break;
      default:
         ppHRegClass(hregClass(rreg));
         vpanic("genReload_MIPS: unimplemented regclass");
         break;
   }
}
/* --------- The mips assembler --------- */
/* Hardware encoding of integer register |r|.  |r| must be a real
   (non-virtual) register of the class appropriate to the mode. */
inline static UInt iregNo(HReg r, Bool mode64)
{
   vassert(hregClass(r) == (mode64 ? HRcInt64 : HRcInt32));
   vassert(!hregIsVirtual(r));
   UInt enc = hregEncoding(r);
   /* <= 32, not <= 31 -- presumably admits one pseudo encoding; confirm. */
   vassert(enc <= 32);
   return enc;
}
/* Hardware encoding of FP register |r|; |r| must be a real register.
   (|mode64| is accepted for signature symmetry but not consulted.) */
inline static UInt fregNo(HReg r, Bool mode64)
{
   vassert(!hregIsVirtual(r));
   UInt enc = hregEncoding(r);
   vassert(enc <= 31);
   return enc;
}
/* Hardware encoding of double-precision FP register |r|; must be real. */
inline static UInt dregNo(HReg r)
{
   vassert(!hregIsVirtual(r));
   UInt enc = hregEncoding(r);
   vassert(enc <= 31);
   return enc;
}
/* Emit 32bit instruction */
/* Emit 32bit instruction |w32| at |p| in host byte order; returns the
   advanced pointer.  NOTE(review): if neither _MIPSEL nor _MIPSEB is
   defined, nothing is emitted and p is returned unchanged -- presumably
   one is always defined when targeting MIPS; confirm in the build. */
static UChar *emit32(UChar * p, UInt w32)
{
#if defined (_MIPSEL)
   /* little-endian: least-significant byte first */
   *p++ = toUChar(w32 & 0x000000FF);
   *p++ = toUChar((w32 >> 8) & 0x000000FF);
   *p++ = toUChar((w32 >> 16) & 0x000000FF);
   *p++ = toUChar((w32 >> 24) & 0x000000FF);
#elif defined (_MIPSEB)
   /* big-endian: most-significant byte first */
   *p++ = toUChar((w32 >> 24) & 0x000000FF);
   *p++ = toUChar((w32 >> 16) & 0x000000FF);
   *p++ = toUChar((w32 >> 8) & 0x000000FF);
   *p++ = toUChar(w32 & 0x000000FF);
#endif
   return p;
}
/* Fetch an instruction */
/* Fetch an instruction previously emitted by emit32, reversing its
   byte order handling.  Returns 0 if neither endianness macro is set. */
static UInt fetch32 ( UChar* p )
{
   UInt w32 = 0;
#if defined (_MIPSEL)
   w32 |= ((0xFF & (UInt)p[0]) << 0);
   w32 |= ((0xFF & (UInt)p[1]) << 8);
   w32 |= ((0xFF & (UInt)p[2]) << 16);
   w32 |= ((0xFF & (UInt)p[3]) << 24);
#elif defined (_MIPSEB)
   w32 |= ((0xFF & (UInt)p[0]) << 24);
   w32 |= ((0xFF & (UInt)p[1]) << 16);
   w32 |= ((0xFF & (UInt)p[2]) << 8);
   w32 |= ((0xFF & (UInt)p[3]) << 0);
#endif
   return w32;
}
/* physical structure of mips instructions */
/* type I : opcode - 6 bits
rs - 5 bits
rt - 5 bits
immediate - 16 bits
*/
/* Assemble an I-type instruction: opc(6) | rs(5) | rt(5) | imm(16).
   Returns the advanced emit pointer. */
static UChar *mkFormI(UChar * p, UInt opc, UInt rs, UInt rt, UInt imm)
{
   vassert(opc < 0x40);
   vassert(rs < 0x20);
   vassert(rt < 0x20);
   UInt w = (opc << 26) | (rs << 21) | (rt << 16) | (imm & 0xFFFF);
   return emit32(p, w);
}
/* type R: opcode - 6 bits
rs - 5 bits
rt - 5 bits
rd - 5 bits
sa - 5 bits
func - 6 bits
*/
/* Assemble an R-type instruction:
   opc(6) | rs(5) | rt(5) | rd(5) | sa(5) | func(6). */
static UChar *mkFormR(UChar * p, UInt opc, UInt rs, UInt rt, UInt rd, UInt sa,
                      UInt func)
{
   if (rs >= 0x20)
      vex_printf("rs = %u\n", rs);  /* %u: rs is unsigned */
   UInt theInstr;
   vassert(opc < 0x40);
   vassert(rs < 0x20);
   vassert(rt < 0x20);
   vassert(rd < 0x20);
   vassert(sa < 0x20);
   /* func occupies only 6 bits.  The old mask of 0xFFFF would have let an
      oversized value spill into the sa/rd fields; 0x3F confines it. */
   func = func & 0x3F;
   theInstr = ((opc << 26) | (rs << 21) | (rt << 16) | (rd << 11) | (sa << 6) |
              (func));
   return emit32(p, theInstr);
}
/* Assemble a shift-format instruction:
   opc1(6) | rRS(5) | rRT(5) | rRD(5) | sa(5) | opc2(6).
   Only the low 5 bits of |sa| are encoded; callers may pass up to 0x3F
   (the high bit is presumably expressed via the opc2 variant -- e.g. the
   "...32" double-width shifts; confirm at call sites). */
static UChar *mkFormS(UChar * p, UInt opc1, UInt rRD, UInt rRS, UInt rRT,
                      UInt sa, UInt opc2)
{
   UInt theInstr;
   vassert(opc1 <= 0x3F);
   vassert(rRD < 0x20);
   vassert(rRS < 0x20);
   vassert(rRT < 0x20);
   vassert(opc2 <= 0x3F);
   /* sa is unsigned, so the old "sa >= 0 &&" was a tautology
      (-Wtype-limits); only the upper bound needs checking. */
   vassert(sa <= 0x3F);
   theInstr = ((opc1 << 26) | (rRS << 21) | (rRT << 16) | (rRD << 11) |
              ((sa & 0x1F) << 6) | (opc2));
   return emit32(p, theInstr);
}
/* Emit a load (opc1 < 40) or store (opc1 >= 40) of register |rSD| using
   immediate-displacement address mode |am|.  rSD values 33 and 34 are
   pseudo numbers for the HI and LO registers, which cannot be addressed
   directly and are staged through $24.
   NOTE(review): the opc1<40/>=40 split and the placement of the
   mfhi/mflo (before a load) and mthi/mtlo (after a store) look
   surprising -- e.g. 64-bit ld has opcode 55 and so takes the "store"
   branch; verify the HI/LO paths against the call sites. */
static UChar *doAMode_IR(UChar * p, UInt opc1, UInt rSD, MIPSAMode * am,
                         Bool mode64)
{
   UInt rA, idx, r_dst;
   vassert(am->tag == Mam_IR);
   vassert(am->Mam.IR.index < 0x10000);
   rA = iregNo(am->Mam.IR.base, mode64);
   idx = am->Mam.IR.index;
   /* HI/LO are staged through $24. */
   if (rSD == 33 || rSD == 34)
      r_dst = 24;
   else
      r_dst = rSD;
   if (opc1 < 40) {
      /* load */
      if (rSD == 33)
         /* mfhi */
         p = mkFormR(p, 0, 0, 0, r_dst, 0, 16);
      else if (rSD == 34)
         /* mflo */
         p = mkFormR(p, 0, 0, 0, r_dst, 0, 18);
   }
   p = mkFormI(p, opc1, rA, r_dst, idx);
   if (opc1 >= 40) {
      /* store */
      if (rSD == 33)
         /* mthi */
         p = mkFormR(p, 0, r_dst, 0, 0, 0, 17);
      else if (rSD == 34)
         /* mtlo */
         p = mkFormR(p, 0, r_dst, 0, 0, 0, 19);
   }
   return p;
}
/* Emit a load (opc1 < 40) or store (opc1 >= 40) of |rSD| using the
   reg+reg address mode |am|.  MIPS has no base+index form for these
   opcodes, so the base register is temporarily adjusted:
   addu/daddu rA,rA,rB; <memop> r_dst,0(rA); subu/dsubu rA,rA,rB.
   As in doAMode_IR, rSD 33/34 denote HI/LO, staged through $24;
   NOTE(review): the mf/mt placement looks suspect -- see doAMode_IR. */
static UChar *doAMode_RR(UChar * p, UInt opc1, UInt rSD, MIPSAMode * am,
                         Bool mode64)
{
   UInt rA, rB, r_dst;
   vassert(am->tag == Mam_RR);
   rA = iregNo(am->Mam.RR.base, mode64);
   rB = iregNo(am->Mam.RR.index, mode64);
   if (rSD == 33 || rSD == 34)
      r_dst = 24;
   else
      r_dst = rSD;
   if (opc1 < 40) {
      /* load */
      if (rSD == 33)
         /* mfhi */
         p = mkFormR(p, 0, 0, 0, r_dst, 0, 16);
      else if (rSD == 34)
         /* mflo */
         p = mkFormR(p, 0, 0, 0, r_dst, 0, 18);
   }
   if (mode64) {
      /* daddu rA, rA, rB$
         sd/ld r_dst, 0(rA)$
         dsubu rA, rA, rB */
      p = mkFormR(p, 0, rA, rB, rA, 0, 45);
      p = mkFormI(p, opc1, rA, r_dst, 0);
      p = mkFormR(p, 0, rA, rB, rA, 0, 47);
   } else {
      /* addu rA, rA, rB
         sw/lw r_dst, 0(rA)
         subu rA, rA, rB */
      p = mkFormR(p, 0, rA, rB, rA, 0, 33);
      p = mkFormI(p, opc1, rA, r_dst, 0);
      p = mkFormR(p, 0, rA, rB, rA, 0, 35);
   }
   if (opc1 >= 40) {
      /* store */
      if (rSD == 33)
         /* mthi */
         p = mkFormR(p, 0, r_dst, 0, 0, 0, 17);
      else if (rSD == 34)
         /* mtlo */
         p = mkFormR(p, 0, r_dst, 0, 0, 0, 19);
   }
   return p;
}
/* Load imm to r_dst */
/* Load imm to r_dst, using the shortest sequence that works:
   1 insn (16-bit signed), 2 insns (32-bit), or 6 insns (full 64-bit,
   mode64 only). */
static UChar *mkLoadImm(UChar * p, UInt r_dst, ULong imm, Bool mode64)
{
   if (!mode64) {
      vassert(r_dst < 0x20);
      /* Normalise: make the top 32 bits of imm a sign extension of the
         bottom 32, so the range tests below work for 32-bit values. */
      UInt u32 = (UInt) imm;
      Int s32 = (Int) u32;
      Long s64 = (Long) s32;
      imm = (ULong) s64;
   }
   if (imm >= 0xFFFFFFFFFFFF8000ULL || imm < 0x8000) {
      /* sign-extendable from 16 bits
         addiu r_dst, 0, imm  => li r_dst, imm */
      p = mkFormI(p, 9, 0, r_dst, imm & 0xFFFF);
   } else {
      if (imm >= 0xFFFFFFFF80000000ULL || imm < 0x80000000ULL) {
         /* sign-extendable from 32 bits
            addiu r_dst, r0, (imm >> 16) => lis r_dst, (imm >> 16)
            lui r_dst, (imm >> 16) */
         p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF);
         /* ori r_dst, r_dst, (imm & 0xFFFF) */
         p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
      } else {
         /* Full 64-bit constant: lui/ori the top 32 bits, then shift
            left 16 and ori twice more to fill in the bottom halves. */
         vassert(mode64);
         /* lui load in upper half of low word */
         p = mkFormI(p, 15, 0, r_dst, (imm >> 48) & 0xFFFF);
         /* ori */
         p = mkFormI(p, 13, r_dst, r_dst, (imm >> 32) & 0xFFFF);
         /* shift */
         p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
         /* ori */
         p = mkFormI(p, 13, r_dst, r_dst, (imm >> 16) & 0xFFFF);
         /* shift */
         p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
         /* ori */
         p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
      }
   }
   return p;
}
/* A simplified version of mkLoadImm that always generates 2 or 6
instructions (32 or 64 bits respectively) even if it could generate
fewer. This is needed for generating fixed sized patchable
sequences. */
/* A simplified version of mkLoadImm that always generates 2 or 6
   instructions (32 or 64 bits respectively) even if it could generate
   fewer.  This is needed for generating fixed sized patchable
   sequences; isLoadImm_EXACTLY2or6 must recognise exactly this code. */
static UChar* mkLoadImm_EXACTLY2or6 ( UChar* p,
                                      UInt r_dst, ULong imm, Bool mode64)
{
   vassert(r_dst < 0x20);
   if (!mode64) {
      /* 32-bit mode: force the top 32 bits of imm to be a sign
         extension of the bottom 32.  (Probably unnecessary.) */
      imm = (ULong)(Long)(Int)(UInt)imm;
      /* lui r_dst, (imm >> 16) */
      p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF);
      /* ori r_dst, r_dst, (imm & 0xFFFF) */
      p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
   } else {
      /* Full 64-bit immediate load: 6 (six!) insns --
         lui/ori the top halves, then shift-16 + ori, twice. */
      p = mkFormI(p, 15, 0, r_dst, (imm >> 48) & 0xFFFF);      /* lui */
      p = mkFormI(p, 13, r_dst, r_dst, (imm >> 32) & 0xFFFF);  /* ori */
      p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);              /* dsll 16 */
      p = mkFormI(p, 13, r_dst, r_dst, (imm >> 16) & 0xFFFF);  /* ori */
      p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);              /* dsll 16 */
      p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);          /* ori */
   }
   return p;
}
/* Checks whether the sequence of bytes at p was indeed created
by mkLoadImm_EXACTLY2or6 with the given parameters. */
/* Checks whether the sequence of bytes at p was indeed created
   by mkLoadImm_EXACTLY2or6 with the given parameters.  Works by
   re-assembling the expected sequence into a scratch buffer and
   comparing word by word via fetch32. */
static Bool isLoadImm_EXACTLY2or6 ( UChar* p_to_check,
                                    UInt r_dst, ULong imm, Bool mode64 )
{
   vassert(r_dst < 0x20);
   Bool ret;
   if (!mode64) {
      /* In 32-bit mode, make sure the top 32 bits of imm are a sign
         extension of the bottom 32 bits.  (Probably unnecessary.) */
      UInt u32 = (UInt)imm;
      Int s32 = (Int)u32;
      Long s64 = (Long)s32;
      imm = (ULong)s64;
   }
   if (!mode64) {
      UInt expect[2] = { 0, 0 };
      UChar* p = (UChar*)&expect[0];
      /* lui r_dst, (immi >> 16) */
      p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF);
      /* ori r_dst, r_dst, (imm & 0xFFFF) */
      p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
      vassert(p == (UChar*)&expect[2]);
      ret = fetch32(p_to_check + 0) == expect[0]
            && fetch32(p_to_check + 4) == expect[1];
   } else {
      UInt expect[6] = { 0, 0, 0, 0, 0, 0};
      UChar* p = (UChar*)&expect[0];
      /* lui load in upper half of low word */
      p = mkFormI(p, 15, 0, r_dst, (imm >> 48) & 0xFFFF);
      /* ori */
      p = mkFormI(p, 13, r_dst, r_dst, (imm >> 32) & 0xFFFF);
      /* shift */
      p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
      /* ori */
      p = mkFormI(p, 13, r_dst, r_dst, (imm >> 16) & 0xFFFF);
      /* shift */
      p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
      /* ori */
      p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
      vassert(p == (UChar*)&expect[6]);
      ret = fetch32(p_to_check + 0) == expect[0]
            && fetch32(p_to_check + 4) == expect[1]
            && fetch32(p_to_check + 8) == expect[2]
            && fetch32(p_to_check + 12) == expect[3]
            && fetch32(p_to_check + 16) == expect[4]
            && fetch32(p_to_check + 20) == expect[5];
   }
   return ret;
}
/* Generate a machine-word sized load or store. Simplified version of
the Min_Load and Min_Store cases below.
This will generate 32-bit load/store on MIPS32, and 64-bit load/store on
MIPS64 platforms.
*/
static UChar* do_load_or_store_machine_word ( UChar* p, Bool isLoad, UInt reg,
MIPSAMode* am, Bool mode64 )
{
if (isLoad) { /* load */
switch (am->tag) {
case Mam_IR:
if (mode64) {
vassert(0 == (am->Mam.IR.index & 3));
}
p = doAMode_IR(p, mode64 ? 55 : 35, reg, am, mode64);
break;
case Mam_RR:
/* we could handle this case, but we don't expect to ever
need to. */
vassert(0);