/*
* ARM translation
*
* Copyright (c) 2003 Fabrice Bellard
* Copyright (c) 2005-2007 CodeSourcery
* Copyright (c) 2007 OpenedHand, Ltd.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"
#ifdef CONFIG_TRACE
#include "trace.h"
#endif
#define GEN_HELPER 1
#include "helpers.h"
#define ENABLE_ARCH_5J 0
#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
/* internal defines */
typedef struct DisasContext {
target_ulong pc;
int is_jmp;
/* Nonzero if this instruction has been conditionally skipped. */
int condjmp;
/* The label that will be jumped to when the instruction is skipped. */
int condlabel;
/* Thumb-2 conditional execution bits. */
int condexec_mask;
int condexec_cond;
struct TranslationBlock *tb;
int singlestep_enabled;
int thumb;
int is_mem;
#if !defined(CONFIG_USER_ONLY)
int user;
#endif
} DisasContext;
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
#ifdef CONFIG_TRACE
#include "helpers.h"
#endif
/* These instructions trap after executing, so defer them until after the
conditional execution state has been updated. */
#define DISAS_WFI 4
#define DISAS_SWI 5
static TCGv cpu_env;
/* We reuse the same 64-bit temporaries for efficiency. */
static TCGv cpu_V0, cpu_V1, cpu_M0;
/* FIXME: These should be removed. */
static TCGv cpu_T[2];
static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d;
#define ICOUNT_TEMP cpu_T[0]
#include "gen-icount.h"
/* initialize TCG globals. */
void arm_translate_init(void)
{
cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
}
/* The code generator doesn't like lots of temporaries, so maintain our own
cache for reuse within a function. */
#define MAX_TEMPS 8
static int num_temps;
static TCGv temps[MAX_TEMPS];
/* Allocate a temporary variable. */
static TCGv new_tmp(void)
{
TCGv tmp;
if (num_temps == MAX_TEMPS)
abort();
if (GET_TCGV(temps[num_temps]))
return temps[num_temps++];
tmp = tcg_temp_new(TCG_TYPE_I32);
temps[num_temps++] = tmp;
return tmp;
}
/* Release a temporary variable. */
static void dead_tmp(TCGv tmp)
{
int i;
num_temps--;
i = num_temps;
if (GET_TCGV(temps[i]) == GET_TCGV(tmp))
return;
/* Shuffle this temp to the last slot. */
while (GET_TCGV(temps[i]) != GET_TCGV(tmp))
i--;
while (i < num_temps) {
temps[i] = temps[i + 1];
i++;
}
temps[i] = tmp;
}
static inline TCGv load_cpu_offset(int offset)
{
TCGv tmp = new_tmp();
tcg_gen_ld_i32(tmp, cpu_env, offset);
return tmp;
}
#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
static inline void store_cpu_offset(TCGv var, int offset)
{
tcg_gen_st_i32(var, cpu_env, offset);
dead_tmp(var);
}
#define store_cpu_field(var, name) \
store_cpu_offset(var, offsetof(CPUState, name))
/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
if (reg == 15) {
uint32_t addr;
/* normally, since we updated PC, we need only add one insn */
if (s->thumb)
addr = (long)s->pc + 2;
else
addr = (long)s->pc + 4;
tcg_gen_movi_i32(var, addr);
} else {
tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
}
}
/* Create a new temporary and set it to the value of a CPU register. */
static inline TCGv load_reg(DisasContext *s, int reg)
{
TCGv tmp = new_tmp();
load_reg_var(s, tmp, reg);
return tmp;
}
/* Set a CPU register. The source must be a temporary and will be
marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
if (reg == 15) {
tcg_gen_andi_i32(var, var, ~1);
s->is_jmp = DISAS_JUMP;
}
tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
dead_tmp(var);
}
/* Basic operations. */
#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
#define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0])
#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
#define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])
#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
#define gen_op_shll_T0_im(im) tcg_gen_shli_i32(cpu_T[0], cpu_T[0], im)
#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_sarl_T1_im(im) tcg_gen_sari_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im)
/* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
/* Set NZCV flags from the high 4 bits of var. */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
#ifdef CONFIG_TRACE
static void gen_traceTicks(int count)
{
TCGv t0 = new_tmp();
tcg_gen_movi_i32(t0, count);
gen_helper_traceTicks(t0);
dead_tmp(t0);
}
static void gen_traceBB(uint64_t bb_num, target_phys_addr_t tb)
{
#if HOST_LONG_BITS == 64
TCGv t0 = tcg_const_i64(bb_num);
TCGv t1 = tcg_const_i64(tb);
gen_helper_traceBB64(t0, t1);
tcg_temp_free(t1);
tcg_temp_free(t0);
#else
TCGv t0 = new_tmp();
TCGv t1 = new_tmp();
TCGv t2 = new_tmp();
tcg_gen_movi_i32(t0, (int32_t)(bb_num >> 32));
tcg_gen_movi_i32(t1, (int32_t)(bb_num));
tcg_gen_movi_i32(t2, (int32_t)tb);
gen_helper_traceBB32(t0, t1, t2);
dead_tmp(t2);
dead_tmp(t1);
dead_tmp(t0);
#endif
}
#endif /* CONFIG_TRACE */
static void gen_exception(int excp)
{
TCGv tmp = new_tmp();
tcg_gen_movi_i32(tmp, excp);
gen_helper_exception(tmp);
dead_tmp(tmp);
}
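/* Dual 16x16->32 signed multiply; leaves the product of the low halfwords
in a and the product of the high halfwords in b. */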
static void gen_smul_dual(TCGv a, TCGv b)
{
TCGv tmp1 = new_tmp();
TCGv tmp2 = new_tmp();
tcg_gen_ext16s_i32(tmp1, a);
tcg_gen_ext16s_i32(tmp2, b);
tcg_gen_mul_i32(tmp1, tmp1, tmp2);
dead_tmp(tmp2);
tcg_gen_sari_i32(a, a, 16);
tcg_gen_sari_i32(b, b, 16);
tcg_gen_mul_i32(b, b, a);
tcg_gen_mov_i32(a, tmp1);
dead_tmp(tmp1);
}
/* Byteswap each halfword. */
static void gen_rev16(TCGv var)
{
TCGv tmp = new_tmp();
tcg_gen_shri_i32(tmp, var, 8);
tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
tcg_gen_shli_i32(var, var, 8);
tcg_gen_andi_i32(var, var, 0xff00ff00);
tcg_gen_or_i32(var, var, tmp);
dead_tmp(tmp);
}
/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv var)
{
TCGv tmp = new_tmp();
tcg_gen_shri_i32(tmp, var, 8);
tcg_gen_andi_i32(tmp, tmp, 0x00ff);
tcg_gen_shli_i32(var, var, 8);
tcg_gen_ext8s_i32(var, var);
tcg_gen_or_i32(var, var, tmp);
dead_tmp(tmp);
}
/* Unsigned bitfield extract. */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
if (shift)
tcg_gen_shri_i32(var, var, shift);
tcg_gen_andi_i32(var, var, mask);
}
/* Signed bitfield extract. */
static void gen_sbfx(TCGv var, int shift, int width)
{
uint32_t signbit;
if (shift)
tcg_gen_sari_i32(var, var, shift);
if (shift + width < 32) {
signbit = 1u << (width - 1);
tcg_gen_andi_i32(var, var, (1u << width) - 1);
tcg_gen_xori_i32(var, var, signbit);
tcg_gen_subi_i32(var, var, signbit);
}
}
/* Bitfield insertion. Insert val into base. Clobbers base and val. */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
tcg_gen_andi_i32(val, val, mask);
tcg_gen_shli_i32(val, val, shift);
tcg_gen_andi_i32(base, base, ~(mask << shift));
tcg_gen_or_i32(dest, base, val);
}
/* Round the top 32 bits of a 64-bit value: add the rounding bit (bit 31 of
the low word a) to the high word b. */
static void gen_roundqd(TCGv a, TCGv b)
{
tcg_gen_shri_i32(a, a, 31);
tcg_gen_add_i32(a, a, b);
}
/* FIXME: Most targets have native widening multiplication.
It would be good to use that instead of a full wide multiply. */
/* 32x32->64 multiply. Marks inputs as dead. */
static TCGv gen_mulu_i64_i32(TCGv a, TCGv b)
{
TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
tcg_gen_extu_i32_i64(tmp1, a);
dead_tmp(a);
tcg_gen_extu_i32_i64(tmp2, b);
dead_tmp(b);
tcg_gen_mul_i64(tmp1, tmp1, tmp2);
return tmp1;
}
static TCGv gen_muls_i64_i32(TCGv a, TCGv b)
{
TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
tcg_gen_ext_i32_i64(tmp1, a);
dead_tmp(a);
tcg_gen_ext_i32_i64(tmp2, b);
dead_tmp(b);
tcg_gen_mul_i64(tmp1, tmp1, tmp2);
return tmp1;
}
/* Unsigned 32x32->64 multiply. */
static void gen_op_mull_T0_T1(void)
{
TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
tcg_gen_mul_i64(tmp1, tmp1, tmp2);
tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
tcg_gen_shri_i64(tmp1, tmp1, 32);
tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
}
/* Signed 32x32->64 multiply. */
static void gen_imull(TCGv a, TCGv b)
{
TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
tcg_gen_ext_i32_i64(tmp1, a);
tcg_gen_ext_i32_i64(tmp2, b);
tcg_gen_mul_i64(tmp1, tmp1, tmp2);
tcg_gen_trunc_i64_i32(a, tmp1);
tcg_gen_shri_i64(tmp1, tmp1, 32);
tcg_gen_trunc_i64_i32(b, tmp1);
}
#define gen_op_imull_T0_T1() gen_imull(cpu_T[0], cpu_T[1])
/* Swap low and high halfwords. */
static void gen_swap_half(TCGv var)
{
TCGv tmp = new_tmp();
tcg_gen_shri_i32(tmp, var, 16);
tcg_gen_shli_i32(var, var, 16);
tcg_gen_or_i32(var, var, tmp);
dead_tmp(tmp);
}
/* Dual 16-bit add. Result placed in t0; t1 is marked as dead.
tmp = (t0 ^ t1) & 0x8000;
t0 &= ~0x8000;
t1 &= ~0x8000;
t0 = (t0 + t1) ^ tmp;
*/
static void gen_add16(TCGv t0, TCGv t1)
{
TCGv tmp = new_tmp();
tcg_gen_xor_i32(tmp, t0, t1);
tcg_gen_andi_i32(tmp, tmp, 0x8000);
tcg_gen_andi_i32(t0, t0, ~0x8000);
tcg_gen_andi_i32(t1, t1, ~0x8000);
tcg_gen_add_i32(t0, t0, t1);
tcg_gen_xor_i32(t0, t0, tmp);
dead_tmp(tmp);
dead_tmp(t1);
}
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv var)
{
TCGv tmp = new_tmp();
tcg_gen_shri_i32(tmp, var, 31);
gen_set_CF(tmp);
dead_tmp(tmp);
}
/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv var)
{
tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}
/* T0 += T1 + CF. */
static void gen_adc_T0_T1(void)
{
TCGv tmp;
gen_op_addl_T0_T1();
tmp = load_cpu_field(CF);
tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
dead_tmp(tmp);
}
/* dest = t0 - t1 + CF - 1. */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
TCGv tmp;
tcg_gen_sub_i32(dest, t0, t1);
tmp = load_cpu_field(CF);
tcg_gen_add_i32(dest, dest, tmp);
tcg_gen_subi_i32(dest, dest, 1);
dead_tmp(tmp);
}
#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
/* dest = t0 & ~t1. Inputs are not clobbered. */
/* FIXME: Implement bic natively. */
static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
{
TCGv tmp = new_tmp();
tcg_gen_not_i32(tmp, t1);
tcg_gen_and_i32(dest, t0, tmp);
dead_tmp(tmp);
}
static inline void gen_op_bicl_T0_T1(void)
{
gen_op_notl_T1();
gen_op_andl_T0_T1();
}
/* FIXME: Implement this natively. */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
/* FIXME: Implement this natively. */
static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
{
TCGv tmp;
if (i == 0) {
tcg_gen_mov_i32(t0, t1);
return;
}
tmp = new_tmp();
tcg_gen_shri_i32(tmp, t1, i);
tcg_gen_shli_i32(t1, t1, 32 - i);
tcg_gen_or_i32(t0, t1, tmp);
dead_tmp(tmp);
}
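/* Copy bit "shift" of var (the shifter carry-out) into the C flag. */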
static void shifter_out_im(TCGv var, int shift)
{
TCGv tmp = new_tmp();
if (shift == 0) {
tcg_gen_andi_i32(tmp, var, 1);
} else {
tcg_gen_shri_i32(tmp, var, shift);
if (shift != 31)
tcg_gen_andi_i32(tmp, tmp, 1);
}
gen_set_CF(tmp);
dead_tmp(tmp);
}
/* Shift by immediate. Includes special handling for shift == 0. */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
switch (shiftop) {
case 0: /* LSL */
if (shift != 0) {
if (flags)
shifter_out_im(var, 32 - shift);
tcg_gen_shli_i32(var, var, shift);
}
break;
case 1: /* LSR */
if (shift == 0) {
if (flags) {
tcg_gen_shri_i32(var, var, 31);
gen_set_CF(var);
}
tcg_gen_movi_i32(var, 0);
} else {
if (flags)
shifter_out_im(var, shift - 1);
tcg_gen_shri_i32(var, var, shift);
}
break;
case 2: /* ASR */
if (shift == 0)
shift = 32;
if (flags)
shifter_out_im(var, shift - 1);
if (shift == 32)
shift = 31;
tcg_gen_sari_i32(var, var, shift);
break;
case 3: /* ROR/RRX */
if (shift != 0) {
if (flags)
shifter_out_im(var, shift - 1);
tcg_gen_rori_i32(var, var, shift);
break;
} else {
TCGv tmp = load_cpu_field(CF);
if (flags)
shifter_out_im(var, 0);
tcg_gen_shri_i32(var, var, 1);
tcg_gen_shli_i32(tmp, tmp, 31);
tcg_gen_or_i32(var, var, tmp);
dead_tmp(tmp);
}
}
}
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
TCGv shift, int flags)
{
if (flags) {
switch (shiftop) {
case 0: gen_helper_shl_cc(var, var, shift); break;
case 1: gen_helper_shr_cc(var, var, shift); break;
case 2: gen_helper_sar_cc(var, var, shift); break;
case 3: gen_helper_ror_cc(var, var, shift); break;
}
} else {
switch (shiftop) {
case 0: gen_helper_shl(var, var, shift); break;
case 1: gen_helper_shr(var, var, shift); break;
case 2: gen_helper_sar(var, var, shift); break;
case 3: gen_helper_ror(var, var, shift); break;
}
}
dead_tmp(shift);
}
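/* Call the parallel add/subtract helper selected by the op2 field;
pfx picks the signed/unsigned/saturating/halving variant. */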
#define PAS_OP(pfx) \
switch (op2) { \
case 0: gen_pas_helper(glue(pfx,add16)); break; \
case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
case 3: gen_pas_helper(glue(pfx,sub16)); break; \
case 4: gen_pas_helper(glue(pfx,add8)); break; \
case 7: gen_pas_helper(glue(pfx,sub8)); break; \
}
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
TCGv tmp;
switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
case 1:
tmp = tcg_temp_new(TCG_TYPE_PTR);
tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
PAS_OP(s)
break;
case 5:
tmp = tcg_temp_new(TCG_TYPE_PTR);
tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
PAS_OP(u)
break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
case 2:
PAS_OP(q);
break;
case 3:
PAS_OP(sh);
break;
case 6:
PAS_OP(uq);
break;
case 7:
PAS_OP(uh);
break;
#undef gen_pas_helper
}
}
#undef PAS_OP
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
#define PAS_OP(pfx) \
switch (op2) { \
case 0: gen_pas_helper(glue(pfx,add8)); break; \
case 1: gen_pas_helper(glue(pfx,add16)); break; \
case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
case 4: gen_pas_helper(glue(pfx,sub8)); break; \
case 5: gen_pas_helper(glue(pfx,sub16)); break; \
case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
}
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
TCGv tmp;
switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
case 0:
tmp = tcg_temp_new(TCG_TYPE_PTR);
tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
PAS_OP(s)
break;
case 4:
tmp = tcg_temp_new(TCG_TYPE_PTR);
tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
PAS_OP(u)
break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
case 1:
PAS_OP(q);
break;
case 2:
PAS_OP(sh);
break;
case 5:
PAS_OP(uq);
break;
case 6:
PAS_OP(uh);
break;
#undef gen_pas_helper
}
}
#undef PAS_OP
static void gen_test_cc(int cc, int label)
{
TCGv tmp;
TCGv tmp2;
int inv;
switch (cc) {
case 0: /* eq: Z */
tmp = load_cpu_field(ZF);
tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
break;
case 1: /* ne: !Z */
tmp = load_cpu_field(ZF);
tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
break;
case 2: /* cs: C */
tmp = load_cpu_field(CF);
tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
break;
case 3: /* cc: !C */
tmp = load_cpu_field(CF);
tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
break;
case 4: /* mi: N */
tmp = load_cpu_field(NF);
tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
break;
case 5: /* pl: !N */
tmp = load_cpu_field(NF);
tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
break;
case 6: /* vs: V */
tmp = load_cpu_field(VF);
tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
break;
case 7: /* vc: !V */
tmp = load_cpu_field(VF);
tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
break;
case 8: /* hi: C && !Z */
inv = gen_new_label();
tmp = load_cpu_field(CF);
tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
dead_tmp(tmp);
tmp = load_cpu_field(ZF);
tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
gen_set_label(inv);
break;
case 9: /* ls: !C || Z */
tmp = load_cpu_field(CF);
tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
dead_tmp(tmp);
tmp = load_cpu_field(ZF);
tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
break;
case 10: /* ge: N == V -> N ^ V == 0 */
tmp = load_cpu_field(VF);
tmp2 = load_cpu_field(NF);
tcg_gen_xor_i32(tmp, tmp, tmp2);
dead_tmp(tmp2);
tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
break;
case 11: /* lt: N != V -> N ^ V != 0 */
tmp = load_cpu_field(VF);
tmp2 = load_cpu_field(NF);
tcg_gen_xor_i32(tmp, tmp, tmp2);
dead_tmp(tmp2);
tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
break;
case 12: /* gt: !Z && N == V */
inv = gen_new_label();
tmp = load_cpu_field(ZF);
tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
dead_tmp(tmp);
tmp = load_cpu_field(VF);
tmp2 = load_cpu_field(NF);
tcg_gen_xor_i32(tmp, tmp, tmp2);
dead_tmp(tmp2);
tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
gen_set_label(inv);
break;
case 13: /* le: Z || N != V */
tmp = load_cpu_field(ZF);
tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
dead_tmp(tmp);
tmp = load_cpu_field(VF);
tmp2 = load_cpu_field(NF);
tcg_gen_xor_i32(tmp, tmp, tmp2);
dead_tmp(tmp2);
tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
break;
default:
fprintf(stderr, "Bad condition code 0x%x\n", cc);
abort();
}
dead_tmp(tmp);
}
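/* Indexed by the data processing opcode (insn bits 24:21): nonzero for
logical ops, whose S variants set only N and Z from the result. */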
const uint8_t table_logic_cc[16] = {
1, /* and */
1, /* xor */
0, /* sub */
0, /* rsb */
0, /* add */
0, /* adc */
0, /* sbc */
0, /* rsc */
1, /* andl */
1, /* xorl */
0, /* cmp */
0, /* cmn */
1, /* orr */
1, /* mov */
1, /* bic */
1, /* mvn */
};
/* Set PC and Thumb state from an immediate address. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
TCGv tmp;
s->is_jmp = DISAS_UPDATE;
tmp = new_tmp();
if (s->thumb != (addr & 1)) {
tcg_gen_movi_i32(tmp, addr & 1);
tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
}
tcg_gen_movi_i32(tmp, addr & ~1);
tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
dead_tmp(tmp);
}
/* Set PC and Thumb state from var. var is marked as dead. */
static inline void gen_bx(DisasContext *s, TCGv var)
{
TCGv tmp;
s->is_jmp = DISAS_UPDATE;
tmp = new_tmp();
tcg_gen_andi_i32(tmp, var, 1);
store_cpu_field(tmp, thumb);
tcg_gen_andi_i32(var, var, ~1);
store_cpu_field(var, regs[15]);
}
/* TODO: This should be removed. Use gen_bx instead. */
static inline void gen_bx_T0(DisasContext *s)
{
TCGv tmp = new_tmp();
tcg_gen_mov_i32(tmp, cpu_T[0]);
gen_bx(s, tmp);
}
#if defined(CONFIG_USER_ONLY)
#define gen_ldst(name, s) gen_op_##name##_raw()
#else
#define gen_ldst(name, s) do { \
s->is_mem = 1; \
if (IS_USER(s)) \
gen_op_##name##_user(); \
else \
gen_op_##name##_kernel(); \
} while (0)
#endif
static inline TCGv gen_ld8s(TCGv addr, int index)
{
TCGv tmp = new_tmp();
tcg_gen_qemu_ld8s(tmp, addr, index);
return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
TCGv tmp = new_tmp();
tcg_gen_qemu_ld8u(tmp, addr, index);
return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
TCGv tmp = new_tmp();
tcg_gen_qemu_ld16s(tmp, addr, index);
return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
TCGv tmp = new_tmp();
tcg_gen_qemu_ld16u(tmp, addr, index);
return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
TCGv tmp = new_tmp();
tcg_gen_qemu_ld32u(tmp, addr, index);
return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
tcg_gen_qemu_st8(val, addr, index);
dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
tcg_gen_qemu_st16(val, addr, index);
dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
tcg_gen_qemu_st32(val, addr, index);
dead_tmp(val);
}
static inline void gen_movl_T0_reg(DisasContext *s, int reg)
{
load_reg_var(s, cpu_T[0], reg);
}
static inline void gen_movl_T1_reg(DisasContext *s, int reg)
{
load_reg_var(s, cpu_T[1], reg);
}
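/* FIXME: cpu_T[] has only two entries, so this helper indexes past the
end of the array and must not be used as things stand. */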
static inline void gen_movl_T2_reg(DisasContext *s, int reg)
{
load_reg_var(s, cpu_T[2], reg);
}
static inline void gen_set_pc_im(uint32_t val)
{
TCGv tmp = new_tmp();
tcg_gen_movi_i32(tmp, val);
store_cpu_field(tmp, regs[15]);
}
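/* Store cpu_T[t] to CPU register reg, clearing bit 0 on writes to PC. */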
static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
TCGv tmp;
if (reg == 15) {
tmp = new_tmp();
tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
} else {
tmp = cpu_T[t];
}
tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
if (reg == 15) {
dead_tmp(tmp);
s->is_jmp = DISAS_JUMP;
}
}
static inline void gen_movl_reg_T0(DisasContext *s, int reg)
{
gen_movl_reg_TN(s, reg, 0);
}
static inline void gen_movl_reg_T1(DisasContext *s, int reg)
{
gen_movl_reg_TN(s, reg, 1);
}
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
gen_op_movl_T0_im(s->pc);
gen_movl_reg_T0(s, 15);
s->is_jmp = DISAS_UPDATE;
}
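/* Apply the addressing-mode offset of a load/store to var: bit 25 selects
an immediate versus a shifted register offset, bit 23 the direction. */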
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
TCGv var)
{
int val, rm, shift, shiftop;
TCGv offset;
if (!(insn & (1 << 25))) {
/* immediate */
val = insn & 0xfff;
if (!(insn & (1 << 23)))
val = -val;
if (val != 0)
tcg_gen_addi_i32(var, var, val);
} else {
/* shift/register */
rm = (insn) & 0xf;
shift = (insn >> 7) & 0x1f;
shiftop = (insn >> 5) & 3;
offset = load_reg(s, rm);
gen_arm_shift_im(offset, shiftop, shift, 0);
if (!(insn & (1 << 23)))
tcg_gen_sub_i32(var, var, offset);
else
tcg_gen_add_i32(var, var, offset);
dead_tmp(offset);
}
}
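/* Likewise for the halfword/doubleword transfer encodings, where bit 22
selects a split 8-bit immediate versus a register offset. */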
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
int extra, TCGv var)
{
int val, rm;
TCGv offset;
if (insn & (1 << 22)) {
/* immediate */
val = (insn & 0xf) | ((insn >> 4) & 0xf0);
if (!(insn & (1 << 23)))
val = -val;
val += extra;
if (val != 0)
tcg_gen_addi_i32(var, var, val);
} else {
/* register */
if (extra)
tcg_gen_addi_i32(var, var, extra);
rm = (insn) & 0xf;
offset = load_reg(s, rm);
if (!(insn & (1 << 23)))
tcg_gen_sub_i32(var, var, offset);
else
tcg_gen_add_i32(var, var, offset);
dead_tmp(offset);
}
}
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
if (dp) \
gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
else \
gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}
#define VFP_OP1(name) \
static inline void gen_vfp_##name(int dp, int arg) \
{ \
if (dp) \
gen_op_vfp_##name##d(arg); \
else \
gen_op_vfp_##name##s(arg); \
}
VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)
#undef VFP_OP2
static inline void gen_vfp_abs(int dp)
{
if (dp)
gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
else
gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}
static inline void gen_vfp_neg(int dp)
{
if (dp)
gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
else
gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}
static inline void gen_vfp_sqrt(int dp)
{
if (dp)
gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
else
gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}
static inline void gen_vfp_cmp(int dp)
{
if (dp)
gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
else
gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}
static inline void gen_vfp_cmpe(int dp)
{
if (dp)
gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
else
gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}
static inline void gen_vfp_F1_ld0(int dp)
{
if (dp)
tcg_gen_movi_i64(cpu_F1d, 0);
else
tcg_gen_movi_i32(cpu_F1s, 0);
}
static inline void gen_vfp_uito(int dp)
{
if (dp)
gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
else
gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}
static inline void gen_vfp_sito(int dp)
{
if (dp)
gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
else
gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}
static inline void gen_vfp_toui(int dp)
{
if (dp)
gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
else
gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}
static inline void gen_vfp_touiz(int dp)
{
if (dp)
gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
else
gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}
static inline void gen_vfp_tosi(int dp)
{
if (dp)
gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
else
gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}
static inline void gen_vfp_tosiz(int dp)
{
if (dp)
gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
else
gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
if (dp) \
gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
else \
gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
static inline void gen_vfp_ld(DisasContext *s, int dp)
{
if (dp)
tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
else
tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
}
static inline void gen_vfp_st(DisasContext *s, int dp)
{
if (dp)
tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
else
tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
}
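/* Byte offset of a VFP register within CPUARMState: a D register when dp
is set, otherwise the upper or lower half of the containing D register. */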
static inline long
vfp_reg_offset (int dp, int reg)
{
if (dp)
return offsetof(CPUARMState, vfp.regs[reg]);
else if (reg & 1) {
return offsetof(CPUARMState, vfp.regs[reg >> 1])
+ offsetof(CPU_DoubleU, l.upper);
} else {
return offsetof(CPUARMState, vfp.regs[reg >> 1])
+ offsetof(CPU_DoubleU, l.lower);
}
}
/* Return the offset of a 32-bit piece of a NEON register.
zero is the least significant end of the register. */
static inline long
neon_reg_offset (int reg, int n)
{
int sreg;
sreg = reg * 2 + n;
return vfp_reg_offset(0, sreg);
}
/* FIXME: Remove these. */
#define neon_T0 cpu_T[0]
#define neon_T1 cpu_T[1]
#define NEON_GET_REG(T, reg, n) \
tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
#define NEON_SET_REG(T, reg, n) \
tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
static TCGv neon_load_reg(int reg, int pass)
{
TCGv tmp = new_tmp();
tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
return tmp;
}
static void neon_store_reg(int reg, int pass, TCGv var)
{
tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
dead_tmp(var);
}
static inline void neon_load_reg64(TCGv var, int reg)
{
tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
static inline void neon_store_reg64(TCGv var, int reg)
{
tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
static inline void gen_mov_F0_vreg(int dp, int reg)
{
if (dp)
tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
else
tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
static inline void gen_mov_F1_vreg(int dp, int reg)
{
if (dp)
tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
else
tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}
static inline void gen_mov_vreg_F0(int dp, int reg)
{
if (dp)
tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
else
tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
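/* Bit 20 of a coprocessor instruction: set for loads and register reads
(LDC/MRC), clear for stores and register writes (STC/MCR). */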
#define ARM_CP_RW_BIT (1 << 20)
static inline void iwmmxt_load_reg(TCGv var, int reg)
{
tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
static inline void iwmmxt_store_reg(TCGv var, int reg)
{
tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
{
tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}
static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
{
tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}
static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
{
tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
iwmmxt_store_reg(cpu_M0, rn);
}
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
iwmmxt_load_reg(cpu_M0, rn);
}
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
iwmmxt_load_reg(cpu_V1, rn);
tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
iwmmxt_load_reg(cpu_V1, rn);
tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
iwmmxt_load_reg(cpu_V1, rn);
tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
iwmmxt_load_reg(cpu_V1, rn); \
gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
iwmmxt_load_reg(cpu_V1, rn); \
gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)
IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)
IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)
IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)
IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)
IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)
IWMMXT_OP(msadb)
IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
{
gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}
static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
{
gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}
static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
{
gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}
static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
{
iwmmxt_load_reg(cpu_V1, rn);
gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
}
static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
{
TCGv tmp = tcg_const_i32(shift);
gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
}
static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
{
tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
}
static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
{
tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
}
static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
{
tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
if (mask != ~0u)
tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
}
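/* Set the MUP bit of wCon, recording that a wRn data register was
updated. */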
static void gen_op_iwmmxt_set_mup(void)
{
TCGv tmp;
tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
tcg_gen_ori_i32(tmp, tmp, 2);
store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
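/* Set the CUP bit of wCon, recording that a control register was
updated. */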
static void gen_op_iwmmxt_set_cup(void)
{
TCGv tmp;
tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
tcg_gen_ori_i32(tmp, tmp, 1);
store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
static void gen_op_iwmmxt_setpsr_nz(void)
{
TCGv tmp = new_tmp();
gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
iwmmxt_load_reg(cpu_V1, rn);
tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
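/* Split 64-bit wRn into T0 (low word) and T1 (high word). */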
static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
{
iwmmxt_load_reg(cpu_V0, rn);
tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
}
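/* Combine T0 (low word) and T1 (high word) into 64-bit wRn. */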
static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
{
tcg_gen_extu_i32_i64(cpu_V0, cpu_T[0]);
tcg_gen_extu_i32_i64(cpu_V1, cpu_T[1]);
tcg_gen_shli_i64(cpu_V1, cpu_V1, 32);
tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
iwmmxt_store_reg(cpu_V0, rn);
}
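/* Compute the address of an iwMMXt load/store into T1, handling pre/post
indexing and base writeback. Returns nonzero for an invalid encoding. */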
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
{
int rd;
uint32_t offset;
rd = (insn >> 16) & 0xf;
gen_movl_T1_reg(s, rd);
offset = (insn & 0xff) << ((insn >> 7) & 2);
if (insn & (1 << 24)) {
/* Pre indexed */
if (insn & (1 << 23))
gen_op_addl_T1_im(offset);
else
gen_op_addl_T1_im(-offset);
if (insn & (1 << 21))
gen_movl_reg_T1(s, rd);
} else if (insn & (1 << 21)) {
/* Post indexed */
if (insn & (1 << 23))
gen_op_movl_T0_im(offset);
else
gen_op_movl_T0_im(-offset);
gen_op_addl_T0_T1();
gen_movl_reg_T0(s, rd);
} else if (!(insn & (1 << 23)))
return 1;
return 0;
}
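/* Load a shift amount into T0, from a wCGRn control register when bit 8
is set or from wRn otherwise, masked with mask. Returns nonzero for an
invalid encoding. */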
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
{
int rd = (insn >> 0) & 0xf;
if (insn & (1 << 8)) {
if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
return 1;
else
gen_op_iwmmxt_movl_T0_wCx(rd);
} else {
gen_iwmmxt_movl_T0_T1_wRn(rd);
}
gen_op_movl_T1_im(mask);
gen_op_andl_T0_T1();
return 0;
}
/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
(i.e. an undefined instruction). */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
int rd, wrd;
int rdhi, rdlo, rd0, rd1, i;
TCGv tmp;
if ((insn & 0x0e000e00) == 0x0c000000) {
if ((insn & 0x0fe00ff0) == 0x0c400000) {
wrd = insn & 0xf;
rdlo = (insn >> 12) & 0xf;
rdhi = (insn >> 16) & 0xf;
if (insn & ARM_CP_RW_BIT) { /* TMRRC */
gen_iwmmxt_movl_T0_T1_wRn(wrd);
gen_movl_reg_T0(s, rdlo);
gen_movl_reg_T1(s, rdhi);
} else { /* TMCRR */
gen_movl_T0_reg(s, rdlo);
gen_movl_T1_reg(s, rdhi);
gen_iwmmxt_movl_wRn_T0_T1(wrd);
gen_op_iwmmxt_set_mup();
}
return 0;
}
wrd = (insn >> 12) & 0xf;
if (gen_iwmmxt_address(s, insn))
return 1;
if (insn & ARM_CP_RW_BIT) {
if ((insn >> 28) == 0xf) { /* WLDRW wCx */
tmp = gen_ld32(cpu_T[1], IS_USER(s));
tcg_gen_mov_i32(cpu_T[0], tmp);
dead_tmp(tmp);
gen_op_iwmmxt_movl_wCx_T0(wrd);
} else {
i = 1;
if (insn & (1 << 8)) {
if (insn & (1 << 22)) { /* WLDRD */
tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
i = 0;
} else { /* WLDRW wRd */
tmp = gen_ld32(cpu_T[1], IS_USER(s));
}
} else {
if (insn & (1 << 22)) { /* WLDRH */
tmp = gen_ld16u(cpu_T[1], IS_USER(s));
} else { /* WLDRB */
tmp = gen_ld8u(cpu_T[1], IS_USER(s));
}
}
if (i) {
tcg_gen_extu_i32_i64(cpu_M0, tmp);
dead_tmp(tmp);
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
}
} else {
if ((insn >> 28) == 0xf) { /* WSTRW wCx */
gen_op_iwmmxt_movl_T0_wCx(wrd);
tmp = new_tmp();
tcg_gen_mov_i32(tmp, cpu_T[0]);
gen_st32(tmp, cpu_T[1], IS_USER(s));
} else {
gen_op_iwmmxt_movq_M0_wRn(wrd);
tmp = new_tmp();
if (insn & (1 << 8)) {
if (insn & (1 << 22)) { /* WSTRD */
dead_tmp(tmp);
tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
} else { /* WSTRW wRd */
tcg_gen_trunc_i64_i32(tmp, cpu_M0);
gen_st32(tmp, cpu_T[1], IS_USER(s));
}
} else {
if (insn & (1 << 22)) { /* WSTRH */
tcg_gen_trunc_i64_i32(tmp, cpu_M0);
gen_st16(tmp, cpu_T[1], IS_USER(s));
} else { /* WSTRB */
tcg_gen_trunc_i64_i32(tmp, cpu_M0);
gen_st8(tmp, cpu_T[1], IS_USER(s));
}
}
}
}
return 0;
}
if ((insn & 0x0f000000) != 0x0e000000)
return 1;
switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
case 0x000: /* WOR */
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 0) & 0xf;
rd1 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
gen_op_iwmmxt_orq_M0_wRn(rd1);
gen_op_iwmmxt_setpsr_nz();
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
break;
case 0x011: /* TMCR */
if (insn & 0xf)
return 1;
rd = (insn >> 12) & 0xf;
wrd = (insn >> 16) & 0xf;
switch (wrd) {
case ARM_IWMMXT_wCID:
case ARM_IWMMXT_wCASF:
break;
case ARM_IWMMXT_wCon:
gen_op_iwmmxt_set_cup();
/* Fall through. */
case ARM_IWMMXT_wCSSF:
gen_op_iwmmxt_movl_T0_wCx(wrd);
gen_movl_T1_reg(s, rd);
gen_op_bicl_T0_T1();
gen_op_iwmmxt_movl_wCx_T0(wrd);
break;
case ARM_IWMMXT_wCGR0:
case ARM_IWMMXT_wCGR1:
case ARM_IWMMXT_wCGR2:
case ARM_IWMMXT_wCGR3:
gen_op_iwmmxt_set_cup();
gen_movl_reg_T0(s, rd);
gen_op_iwmmxt_movl_wCx_T0(wrd);
break;
default:
return 1;
}
break;
case 0x100: /* WXOR */
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 0) & 0xf;
rd1 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
gen_op_iwmmxt_xorq_M0_wRn(rd1);
gen_op_iwmmxt_setpsr_nz();
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
break;
case 0x111: /* TMRC */
if (insn & 0xf)
return 1;
rd = (insn >> 12) & 0xf;
wrd = (insn >> 16) & 0xf;
gen_op_iwmmxt_movl_T0_wCx(wrd);
gen_movl_reg_T0(s, rd);
break;
case 0x300: /* WANDN */
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 0) & 0xf;
rd1 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
tcg_gen_not_i64(cpu_M0, cpu_M0);
gen_op_iwmmxt_andq_M0_wRn(rd1);
gen_op_iwmmxt_setpsr_nz();
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
break;
case 0x200: /* WAND */
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 0) & 0xf;
rd1 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
gen_op_iwmmxt_andq_M0_wRn(rd1);
gen_op_iwmmxt_setpsr_nz();
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
break;
case 0x810: case 0xa10: /* WMADD */
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 0) & 0xf;
rd1 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
if (insn & (1 << 21))
gen_op_iwmmxt_maddsq_M0_wRn(rd1);
else
gen_op_iwmmxt_madduq_M0_wRn(rd1);
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
break;
case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
rd1 = (insn >> 0) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
switch ((insn >> 22) & 3) {
case 0:
gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
break;
case 1:
gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
break;
case 2:
gen_op_iwmmxt_unpackll_M0_wRn(rd1);
break;
case 3:
return 1;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
break;
case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
rd1 = (insn >> 0) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
switch ((insn >> 22) & 3) {
case 0:
gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
break;
case 1:
gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
break;
case 2:
gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
break;
case 3:
return 1;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
break;
case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
rd1 = (insn >> 0) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
if (insn & (1 << 22))
gen_op_iwmmxt_sadw_M0_wRn(rd1);
else
gen_op_iwmmxt_sadb_M0_wRn(rd1);
if (!(insn & (1 << 20)))
gen_op_iwmmxt_addl_M0_wRn(wrd);
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
break;
case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
rd1 = (insn >> 0) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
if (insn & (1 << 21)) {
if (insn & (1 << 20))
gen_op_iwmmxt_mulshw_M0_wRn(rd1);
else
gen_op_iwmmxt_mulslw_M0_wRn(rd1);
} else {
if (insn & (1 << 20))
gen_op_iwmmxt_muluhw_M0_wRn(rd1);
else
gen_op_iwmmxt_mululw_M0_wRn(rd1);
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
break;
case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
rd1 = (insn >> 0) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
if (insn & (1 << 21))
gen_op_iwmmxt_macsw_M0_wRn(rd1);
else
gen_op_iwmmxt_macuw_M0_wRn(rd1);
if (!(insn & (1 << 20))) {
iwmmxt_load_reg(cpu_V1, wrd);
tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
break;
case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
rd1 = (insn >> 0) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
switch ((insn >> 22) & 3) {
case 0:
gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
break;
case 1:
gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
break;
case 2:
gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
break;
case 3:
return 1;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
break;
case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
rd1 = (insn >> 0) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
if (insn & (1 << 22)) {
if (insn & (1 << 20))
gen_op_iwmmxt_avgw1_M0_wRn(rd1);
else
gen_op_iwmmxt_avgw0_M0_wRn(rd1);
} else {
if (insn & (1 << 20))
gen_op_iwmmxt_avgb1_M0_wRn(rd1);
else
gen_op_iwmmxt_avgb0_M0_wRn(rd1);
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
break;
case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
rd1 = (insn >> 0) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
gen_op_movl_T1_im(7);
gen_op_andl_T0_T1();
gen_op_iwmmxt_align_M0_T0_wRn(rd1);
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
break;
case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
rd = (insn >> 12) & 0xf;
wrd = (insn >> 16) & 0xf;
gen_movl_T0_reg(s, rd);
gen_op_iwmmxt_movq_M0_wRn(wrd);
switch ((insn >> 6) & 3) {
case 0:
gen_op_movl_T1_im(0xff);
gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
break;
case 1:
gen_op_movl_T1_im(0xffff);
gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
break;
case 2:
gen_op_movl_T1_im(0xffffffff);
gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
break;
case 3:
return 1;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
break;
case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
rd = (insn >> 12) & 0xf;
wrd = (insn >> 16) & 0xf;
if (rd == 15)
return 1;
gen_op_iwmmxt_movq_M0_wRn(wrd);
switch ((insn >> 22) & 3) {
case 0:
if (insn & 8)
gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
else {
gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
}
break;
case 1:
if (insn & 8)
gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
else {
gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
}
break;
case 2:
gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
break;
case 3:
return 1;
}
gen_movl_reg_T0(s, rd);
break;
case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
if ((insn & 0x000ff008) != 0x0003f000)
return 1;
gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
switch ((insn >> 22) & 3) {
case 0:
gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
break;
case 1:
gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
break;
case 2:
gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
break;
case 3:
return 1;
}
gen_op_shll_T1_im(28);
gen_set_nzcv(cpu_T[1]);
break;
case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
rd = (insn >> 12) & 0xf;
wrd = (insn >> 16) & 0xf;
gen_movl_T0_reg(s, rd);
switch ((insn >> 6) & 3) {
case 0:
gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
break;
case 1:
gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
break;
case 2:
gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
break;
case 3:
return 1;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
break;
case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
if ((insn & 0x000ff00f) != 0x0003f000)
return 1;
gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
switch ((insn >> 22) & 3) {
case 0:
for (i = 0; i < 7; i ++) {
gen_op_shll_T1_im(4);
gen_op_andl_T0_T1();
}
break;
case 1:
for (i = 0; i < 3; i ++) {
gen_op_shll_T1_im(8);
gen_op_andl_T0_T1();
}
break;
case 2:
gen_op_shll_T1_im(16);
gen_op_andl_T0_T1();
break;
case 3:
return 1;
}
gen_set_nzcv(cpu_T[0]);
break;
case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
switch ((insn >> 22) & 3) {
case 0:
gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
break;
case 1:
gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
break;
case 2:
gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
break;
case 3:
return 1;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
break;
case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
if ((insn & 0x000ff00f) != 0x0003f000)
return 1;
gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
switch ((insn >> 22) & 3) {
case 0:
for (i = 0; i < 7; i ++) {
gen_op_shll_T1_im(4);
gen_op_orl_T0_T1();
}
break;
case 1:
for (i = 0; i < 3; i ++) {
gen_op_shll_T1_im(8);
gen_op_orl_T0_T1();
}
break;
case 2:
gen_op_shll_T1_im(16);
gen_op_orl_T0_T1();
break;
case 3:
return 1;
}
gen_set_nzcv(cpu_T[0]);
break;
case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
rd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
if ((insn & 0xf) != 0)
return 1;
gen_op_iwmmxt_movq_M0_wRn(rd0);
switch ((insn >> 22) & 3) {
case 0:
gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
break;
case 1:
gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
break;
case 2:
gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
break;
case 3:
return 1;
}
gen_movl_reg_T0(s, rd);
break;
case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
case 0x906: case 0xb06: case 0xd06: case 0xf06:
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
rd1 = (insn >> 0) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
switch ((insn >> 22) & 3) {
case 0:
if (insn & (1 << 21))
gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
else
gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
break;
case 1:
if (insn & (1 << 21))
gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
else
gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
break;
case 2:
if (insn & (1 << 21))
gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
else
gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
break;
case 3:
return 1;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
break;
case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
switch ((insn >> 22) & 3) {
case 0:
if (insn & (1 << 21))
gen_op_iwmmxt_unpacklsb_M0();
else
gen_op_iwmmxt_unpacklub_M0();
break;
case 1:
if (insn & (1 << 21))
gen_op_iwmmxt_unpacklsw_M0();
else
gen_op_iwmmxt_unpackluw_M0();
break;
case 2:
if (insn & (1 << 21))
gen_op_iwmmxt_unpacklsl_M0();
else
gen_op_iwmmxt_unpacklul_M0();
break;
case 3:
return 1;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
break;
case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
switch ((insn >> 22) & 3) {
case 0:
if (insn & (1 << 21))
gen_op_iwmmxt_unpackhsb_M0();
else
gen_op_iwmmxt_unpackhub_M0();
break;
case 1:
if (insn & (1 << 21))
gen_op_iwmmxt_unpackhsw_M0();
else
gen_op_iwmmxt_unpackhuw_M0();
break;
case 2:
if (insn & (1 << 21))
gen_op_iwmmxt_unpackhsl_M0();
else
gen_op_iwmmxt_unpackhul_M0();
break;
case 3:
return 1;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
break;
case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
case 0x214: case 0x614: case 0xa14: case 0xe14:
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
if (gen_iwmmxt_shift(insn, 0xff))
return 1;
switch ((insn >> 22) & 3) {
case 0:
return 1;
case 1:
gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
break;
case 2:
gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
break;
case 3:
gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
break;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
break;
case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
case 0x014: case 0x414: case 0x814: case 0xc14:
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
if (gen_iwmmxt_shift(insn, 0xff))
return 1;
switch ((insn >> 22) & 3) {
case 0:
return 1;
case 1:
gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
break;
case 2:
gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
break;
case 3:
gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
break;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
break;
case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
case 0x114: case 0x514: case 0x914: case 0xd14:
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
if (gen_iwmmxt_shift(insn, 0xff))
return 1;
switch ((insn >> 22) & 3) {
case 0:
return 1;
case 1:
gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
break;
case 2:
gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
break;
case 3:
gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
break;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
break;
case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
case 0x314: case 0x714: case 0xb14: case 0xf14:
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
switch ((insn >> 22) & 3) {
case 0:
return 1;
case 1:
if (gen_iwmmxt_shift(insn, 0xf))
return 1;
gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
break;
case 2:
if (gen_iwmmxt_shift(insn, 0x1f))
return 1;
gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
break;
case 3:
if (gen_iwmmxt_shift(insn, 0x3f))
return 1;
gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
break;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
break;
case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
case 0x916: case 0xb16: case 0xd16: case 0xf16:
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
rd1 = (insn >> 0) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
switch ((insn >> 22) & 3) {
case 0:
if (insn & (1 << 21))
gen_op_iwmmxt_minsb_M0_wRn(rd1);
else
gen_op_iwmmxt_minub_M0_wRn(rd1);
break;
case 1:
if (insn & (1 << 21))
gen_op_iwmmxt_minsw_M0_wRn(rd1);
else
gen_op_iwmmxt_minuw_M0_wRn(rd1);
break;
case 2:
if (insn & (1 << 21))
gen_op_iwmmxt_minsl_M0_wRn(rd1);
else
gen_op_iwmmxt_minul_M0_wRn(rd1);
break;
case 3:
return 1;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
break;
case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
case 0x816: case 0xa16: case 0xc16: case 0xe16:
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
rd1 = (insn >> 0) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
switch ((insn >> 22) & 3) {
case 0:
if (insn & (1 << 21))
gen_op_iwmmxt_maxsb_M0_wRn(rd1);
else
gen_op_iwmmxt_maxub_M0_wRn(rd1);
break;
case 1:
if (insn & (1 << 21))
gen_op_iwmmxt_maxsw_M0_wRn(rd1);
else
gen_op_iwmmxt_maxuw_M0_wRn(rd1);
break;
case 2:
if (insn & (1 << 21))
gen_op_iwmmxt_maxsl_M0_wRn(rd1);
else
gen_op_iwmmxt_maxul_M0_wRn(rd1);
break;
case 3:
return 1;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
break;
case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
case 0x402: case 0x502: case 0x602: case 0x702:
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
rd1 = (insn >> 0) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
gen_op_movl_T0_im((insn >> 20) & 3);
gen_op_iwmmxt_align_M0_T0_wRn(rd1);
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
break;
case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
case 0x41a: case 0x51a: case 0x61a: case 0x71a:
case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
rd1 = (insn >> 0) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
switch ((insn >> 20) & 0xf) {
case 0x0:
gen_op_iwmmxt_subnb_M0_wRn(rd1);
break;
case 0x1:
gen_op_iwmmxt_subub_M0_wRn(rd1);
break;
case 0x3:
gen_op_iwmmxt_subsb_M0_wRn(rd1);
break;
case 0x4:
gen_op_iwmmxt_subnw_M0_wRn(rd1);
break;
case 0x5:
gen_op_iwmmxt_subuw_M0_wRn(rd1);
break;
case 0x7:
gen_op_iwmmxt_subsw_M0_wRn(rd1);
break;
case 0x8:
gen_op_iwmmxt_subnl_M0_wRn(rd1);
break;
case 0x9:
gen_op_iwmmxt_subul_M0_wRn(rd1);
break;
case 0xb:
gen_op_iwmmxt_subsl_M0_wRn(rd1);
break;
default:
return 1;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
break;
case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
case 0x41e: case 0x51e: case 0x61e: case 0x71e:
case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
break;
case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
case 0x418: case 0x518: case 0x618: case 0x718:
case 0x818: case 0x918: case 0xa18: case 0xb18:
case 0xc18: case 0xd18: case 0xe18: case 0xf18:
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
rd1 = (insn >> 0) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
switch ((insn >> 20) & 0xf) {
case 0x0:
gen_op_iwmmxt_addnb_M0_wRn(rd1);
break;
case 0x1:
gen_op_iwmmxt_addub_M0_wRn(rd1);
break;
case 0x3:
gen_op_iwmmxt_addsb_M0_wRn(rd1);
break;
case 0x4:
gen_op_iwmmxt_addnw_M0_wRn(rd1);
break;
case 0x5:
gen_op_iwmmxt_adduw_M0_wRn(rd1);
break;
case 0x7:
gen_op_iwmmxt_addsw_M0_wRn(rd1);
break;
case 0x8:
gen_op_iwmmxt_addnl_M0_wRn(rd1);
break;
case 0x9:
gen_op_iwmmxt_addul_M0_wRn(rd1);
break;
case 0xb:
gen_op_iwmmxt_addsl_M0_wRn(rd1);
break;
default:
return 1;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
break;
case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
case 0x408: case 0x508: case 0x608: case 0x708:
case 0x808: case 0x908: case 0xa08: case 0xb08:
case 0xc08: case 0xd08: case 0xe08: case 0xf08:
wrd = (insn >> 12) & 0xf;
rd0 = (insn >> 16) & 0xf;
rd1 = (insn >> 0) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
if (!(insn & (1 << 20)))
return 1;
switch ((insn >> 22) & 3) {
case 0:
return 1;
case 1:
if (insn & (1 << 21))
gen_op_iwmmxt_packsw_M0_wRn(rd1);
else
gen_op_iwmmxt_packuw_M0_wRn(rd1);
break;
case 2:
if (insn & (1 << 21))
gen_op_iwmmxt_packsl_M0_wRn(rd1);
else
gen_op_iwmmxt_packul_M0_wRn(rd1);
break;
case 3:
if (insn & (1 << 21))
gen_op_iwmmxt_packsq_M0_wRn(rd1);
else
gen_op_iwmmxt_packuq_M0_wRn(rd1);
break;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
gen_op_iwmmxt_set_cup();
break;
case 0x201: case 0x203: case 0x205: case 0x207:
case 0x209: case 0x20b: case 0x20d: case 0x20f:
case 0x211: case 0x213: case 0x215: case 0x217:
case 0x219: case 0x21b: case 0x21d: case 0x21f:
wrd = (insn >> 5) & 0xf;
rd0 = (insn >> 12) & 0xf;
rd1 = (insn >> 0) & 0xf;
if (rd0 == 0xf || rd1 == 0xf)
return 1;
gen_op_iwmmxt_movq_M0_wRn(wrd);
switch ((insn >> 16) & 0xf) {
case 0x0: /* TMIA */
gen_movl_T0_reg(s, rd0);
gen_movl_T1_reg(s, rd1);
gen_op_iwmmxt_muladdsl_M0_T0_T1();
break;
case 0x8: /* TMIAPH */
gen_movl_T0_reg(s, rd0);
gen_movl_T1_reg(s, rd1);
gen_op_iwmmxt_muladdsw_M0_T0_T1();
break;
case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
gen_movl_T1_reg(s, rd0);
if (insn & (1 << 16))
gen_op_shrl_T1_im(16);
gen_op_movl_T0_T1();
gen_movl_T1_reg(s, rd1);
if (insn & (1 << 17))
gen_op_shrl_T1_im(16);
gen_op_iwmmxt_muladdswl_M0_T0_T1();
break;
default:
return 1;
}
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
break;
default:
return 1;
}
return 0;
}
/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
(i.e. an undefined instruction). */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
int acc, rd0, rd1, rdhi, rdlo;
if ((insn & 0x0ff00f10) == 0x0e200010) {
/* Multiply with Internal Accumulate Format */
rd0 = (insn >> 12) & 0xf;
rd1 = insn & 0xf;
acc = (insn >> 5) & 7;
if (acc != 0)
return 1;
switch ((insn >> 16) & 0xf) {
case 0x0: /* MIA */
gen_movl_T0_reg(s, rd0);
gen_movl_T1_reg(s, rd1);
gen_op_iwmmxt_muladdsl_M0_T0_T1();
break;
case 0x8: /* MIAPH */
gen_movl_T0_reg(s, rd0);
gen_movl_T1_reg(s, rd1);
gen_op_iwmmxt_muladdsw_M0_T0_T1();
break;
case 0xc: /* MIABB */
case 0xd: /* MIABT */
case 0xe: /* MIATB */
case 0xf: /* MIATT */
gen_movl_T1_reg(s, rd0);
if (insn & (1 << 16))
gen_op_shrl_T1_im(16);
gen_op_movl_T0_T1();
gen_movl_T1_reg(s, rd1);
if (insn & (1 << 17))
gen_op_shrl_T1_im(16);
gen_op_iwmmxt_muladdswl_M0_T0_T1();
break;
default:
return 1;
}
gen_op_iwmmxt_movq_wRn_M0(acc);
return 0;
}
if ((insn & 0x0fe00ff8) == 0x0c400000) {
/* Internal Accumulator Access Format */
rdhi = (insn >> 16) & 0xf;
rdlo = (insn >> 12) & 0xf;
acc = insn & 7;
if (acc != 0)
return 1;
if (insn & ARM_CP_RW_BIT)