blob: c2ce03578ad60501f9444286e3ee6f2201865fde [file] [log] [blame]
/*
* This file was generated automatically by gen-mterp.py for 'x86'.
*
* --> DO NOT EDIT <--
*/
/* File: x86/header.S */
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* 32-bit x86 definitions and declarations.
*/
/*
386 ABI general notes:
Caller save set:
eax, edx, ecx, st(0)-st(7)
Callee save set:
ebx, esi, edi, ebp
Return regs:
32-bit in eax
64-bit in edx:eax (low-order 32 in eax)
fp on top of fp stack st(0)
Parameters passed on stack, pushed right-to-left. On entry to target, first
parm is at 4(%esp). Traditional entry code is:
functEntry:
push %ebp # save old frame pointer
mov %esp,%ebp # establish new frame pointer (AT&T order: src,dst)
sub FrameSize,%esp # Allocate storage for spill, locals & outs
Once past the prologue, arguments are referenced at ((argno + 2)*4)(%ebp)
Alignment of stack not strictly required, but should be for performance. We'll
align frame sizes to 16-byte multiples.
If we're not doing variable stack allocation (alloca), the frame pointer can be
eliminated and all arg references adjusted to be esp relative.
Mterp notes:
Some key interpreter variables will be assigned to registers. Note that each
will also have an associated spill location (mostly useful for those assigned
to callee save registers).
nick reg purpose
rPC esi interpreted program counter, used for fetching instructions
rFP edi interpreted frame pointer, used for accessing locals and args
rINSTw bx first 16-bit code of current instruction
rINSTbl bl opcode portion of instruction word
rINSTbh bh high byte of inst word, usually contains src/tgt reg names
Notes:
o High order 16 bits of ebx must be zero on entry to handler
o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
o eax, edx and ecx are scratch, rINSTw/ebx sometimes scratch
o rPC may be clobbered across external calls by interpreter convention. Don't
forget to SPILL/UNSPILL it around call points
*/
#define rGLUE (%ebp)
#define rPC %esi
#define rFP %edi
#define rINST %ebx
#define rINSTw %bx
#define rINSTbh %bh
#define rINSTbl %bl
/* Frame diagram while executing dvmMterpStdRun, high to low addresses */
#define IN_ARG0 ( 12)
#define CALLER_RP ( 8)
#define PREV_FP ( 4)
#define rGLUE_SPILL ( 0) /* <- dvmMterpStdRun ebp */
/* Spill offsets relative to %ebp */
#define EDI_SPILL ( -4)
#define ESI_SPILL ( -8)
#define EBX_SPILL (-12) /* <- esp following dmMterpStdRun header */
#define rPC_SPILL (-16)
#define rFP_SPILL (-20)
#define rINST_SPILL (-24)
#define TMP_SPILL1 (-28)
#define TMP_SPILL2 (-32)
#define TMP_SPILL3 (-36)
#define LOCAL0_OFFSET (-40)
#define LOCAL1_OFFSET (-44)
#define LOCAL2_OFFSET (-48)
#define LOCAL3_OFFSET (-52)
/* Out Arg offsets, relative to %sp */
#define OUT_ARG4 ( 16)
#define OUT_ARG3 ( 12)
#define OUT_ARG2 ( 8)
#define OUT_ARG1 ( 4)
#define OUT_ARG0 ( 0) /* <- dvmMterpStdRun esp */
#define FRAME_SIZE 80
#define SPILL(reg) movl reg##,reg##_SPILL(%ebp)
#define UNSPILL(reg) movl reg##_SPILL(%ebp),reg
#define SPILL_TMP1(reg) movl reg,TMP_SPILL1(%ebp)
#define UNSPILL_TMP1(reg) movl TMP_SPILL1(%ebp),reg
#define SPILL_TMP2(reg) movl reg,TMP_SPILL2(%ebp)
#define UNSPILL_TMP2(reg) movl TMP_SPILL2(%ebp),reg
#define SPILL_TMP3(reg) movl reg,TMP_SPILL3(%ebp)
#define UNSPILL_TMP3(reg) movl TMP_SPILL3(%ebp),reg
/* save/restore the PC and/or FP from the glue struct */
.macro SAVE_PC_FP_TO_GLUE _reg
movl rGLUE,\_reg
movl rPC,offGlue_pc(\_reg)
movl rFP,offGlue_fp(\_reg)
.endm
.macro LOAD_PC_FP_FROM_GLUE
movl rGLUE,rFP
movl offGlue_pc(rFP),rPC
movl offGlue_fp(rFP),rFP
.endm
/* The interpreter assumes a properly aligned stack on entry, and
* will preserve 16-byte alignment.
*/
/*
* "export" the PC to the interpreted stack frame, f/b/o future exception
* objects. Must * be done *before* something calls dvmThrowException.
*
* In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
* fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
*
* It's okay to do this more than once.
*/
.macro EXPORT_PC
movl rPC, (-sizeofStackSaveArea + offStackSaveArea_currentPc)(rFP)
.endm
/*
* Given a frame pointer, find the stack save area.
*
* In C this is "((StackSaveArea*)(_fp) -1)".
*/
.macro SAVEAREA_FROM_FP _reg
leal -sizeofStackSaveArea(rFP), \_reg
.endm
/*
* Fetch the next instruction from rPC into rINSTw. Does not advance rPC.
*/
.macro FETCH_INST
movzwl (rPC),rINST
.endm
/*
* Fetch the opcode byte and zero-extend it into _reg. Must be used
* in conjunction with GOTO_NEXT_R
*/
.macro FETCH_INST_R _reg
movzbl (rPC),\_reg
.endm
/*
* Fetch the opcode byte at _count words offset from rPC and zero-extend
* it into _reg. Must be used in conjunction with GOTO_NEXT_R
*/
.macro FETCH_INST_OPCODE _count _reg
movzbl \_count*2(rPC),\_reg
.endm
/*
* Fetch the nth instruction word from rPC into rINSTw. Does not advance
* rPC, and _count is in words
*/
.macro FETCH_INST_WORD _count
movzwl \_count*2(rPC),rINST
.endm
/*
* Fetch instruction word indexed (used for branching).
* Index is in instruction word units.
*/
.macro FETCH_INST_INDEXED _reg
movzwl (rPC,\_reg,2),rINST
.endm
/*
* Advance rPC by instruction count
*/
.macro ADVANCE_PC _count
leal 2*\_count(rPC),rPC
.endm
/*
* Advance rPC by branch offset in register
*/
.macro ADVANCE_PC_INDEXED _reg
leal (rPC,\_reg,2),rPC
.endm
.macro GOTO_NEXT
movzx rINSTbl,%eax
movzbl rINSTbh,rINST
jmp *dvmAsmInstructionJmpTable(,%eax,4)
.endm
/*
* Version of GOTO_NEXT that assumes _reg preloaded with opcode.
* Should be paired with FETCH_INST_R
*/
.macro GOTO_NEXT_R _reg
movzbl 1(rPC),rINST
jmp *dvmAsmInstructionJmpTable(,\_reg,4)
.endm
/*
* Get/set the 32-bit value from a Dalvik register.
*/
.macro GET_VREG_R _reg _vreg
movl (rFP,\_vreg,4),\_reg
.endm
.macro SET_VREG _reg _vreg
movl \_reg,(rFP,\_vreg,4)
.endm
.macro GET_VREG_WORD _reg _vreg _offset
movl 4*(\_offset)(rFP,\_vreg,4),\_reg
.endm
.macro SET_VREG_WORD _reg _vreg _offset
movl \_reg,4*(\_offset)(rFP,\_vreg,4)
.endm
#if 1
#define rFinish %edx
/* Macros for x86-atom handlers */
/*
* Get the 32-bit value from a dalvik register.
*/
.macro GET_VREG _vreg
movl (rFP,\_vreg, 4), \_vreg
.endm
/*
* Fetch the next instruction from the specified offset. Advances rPC
* to point to the next instruction. "_count" is in 16-bit code units.
*
* This must come AFTER anything that can throw an exception, or the
* exception catch may miss. (This also implies that it must come after
* EXPORT_PC())
*/
.macro FETCH_ADVANCE_INST _count
add $(\_count*2), rPC
movzwl (rPC), rINST
.endm
/*
* Fetch the next instruction from an offset specified by _reg. Updates
* rPC to point to the next instruction. "_reg" must specify the distance
* in bytes, *not* 16-bit code units, and may be a signed value.
*/
.macro FETCH_ADVANCE_INST_RB _reg
addl \_reg, rPC
movzwl (rPC), rINST
.endm
/*
* Fetch a half-word code unit from an offset past the current PC. The
* "_count" value is in 16-bit code units. Does not advance rPC.
* For example, given instruction of format: AA|op BBBB, it
* fetches BBBB.
*/
.macro FETCH _count _reg
movzwl (\_count*2)(rPC), \_reg
.endm
/*
* Fetch a half-word code unit from an offset past the current PC. The
* "_count" value is in 16-bit code units. Does not advance rPC.
* This variant treats the value as signed.
*/
.macro FETCHs _count _reg
movswl (\_count*2)(rPC), \_reg
.endm
/*
* Fetch the first byte from an offset past the current PC. The
* "_count" value is in 16-bit code units. Does not advance rPC.
* For example, given instruction of format: AA|op CC|BB, it
* fetches BB.
*/
.macro FETCH_BB _count _reg
movzbl (\_count*2)(rPC), \_reg
.endm
/*
* Fetch the second byte from an offset past the current PC. The
* "_count" value is in 16-bit code units. Does not advance rPC.
* For example, given instruction of format: AA|op CC|BB, it
* fetches CC.
*/
.macro FETCH_CC _count _reg
movzbl (\_count*2 + 1)(rPC), \_reg
.endm
/*
* Fetch the second byte from an offset past the current PC. The
* "_count" value is in 16-bit code units. Does not advance rPC.
* This variant treats the value as signed.
*/
.macro FETCH_CCs _count _reg
movsbl (\_count*2 + 1)(rPC), \_reg
.endm
/*
* Fetch one byte from an offset past the current PC. Pass in the same
* "_count" as you would for FETCH, and an additional 0/1 indicating which
* byte of the halfword you want (lo/hi).
*/
.macro FETCH_B _reg _count _byte
movzbl (\_count*2+\_byte)(rPC), \_reg
.endm
/*
* Put the instruction's opcode field into the specified register.
*/
.macro GET_INST_OPCODE _reg
movzbl rINSTbl, \_reg
.endm
/*
* Begin executing the opcode in _reg.
*/
.macro GOTO_OPCODE _reg
shl $6, \_reg
addl $dvmAsmInstructionStart,\_reg
jmp *\_reg
.endm
/*
* Macros pair attempts to speed up FETCH_INST, GET_INST_OPCODE and GOTO_OPCODE
* by using a jump table. _rFinish should must be the same register for
* both macros.
*/
.macro FFETCH _rFinish
movzbl (rPC), \_rFinish
.endm
.macro FGETOP_JMPa _rFinish
movzbl 1(rPC), rINST
jmp *dvmAsmInstructionJmpTable(,\_rFinish, 4)
.endm
/*
* Macro pair attempts to speed up FETCH_INST, GET_INST_OPCODE and GOTO_OPCODE
* by using a jump table. _rFinish and _count should must be the same register for
* both macros.
*/
.macro FFETCH_ADV _count _rFinish
movzbl (\_count*2)(rPC), \_rFinish
.endm
.macro FGETOP_JMP _count _rFinish
movzbl (\_count*2 + 1)(rPC), rINST
addl $(\_count*2), rPC
jmp *dvmAsmInstructionJmpTable(,\_rFinish, 4)
.endm
.macro FGETOP_JMP2 _rFinish
movzbl 1(rPC), rINST
jmp *dvmAsmInstructionJmpTable(,\_rFinish, 4)
.endm
.macro OLD_JMP_1 _count _rFinish
movzbl (\_count*2)(rPC), \_rFinish
shl $6, \_rFinish
.endm
.macro OLD_JMP_2 _rFinish
addl $dvmAsmInstructionStart,\_rFinish
.endm
.macro OLD_JMP_3 _count
addl $(\_count*2), rPC
.endm
.macro OLD_JMP_4 _rFinish
movzbl 1(rPC), rINST
jmp *\_rFinish
.endm
.macro OLD_JMP_A_1 _reg _rFinish
movzbl (rPC, \_reg), \_rFinish
shl $6, \_rFinish
.endm
.macro OLD_JMP_A_2 _rFinish
addl $dvmAsmInstructionStart,\_rFinish
.endm
.macro OLD_JMP_A_3 _reg _rFinish
addl \_reg, rPC
movzbl 1(rPC, \_reg), rINST
jmp *\_rFinish
.endm
/*
* Macro pair attempts to speed up FETCH_INST, GET_INST_OPCODE and GOTO_OPCODE
* by using a jump table. _rFinish and _reg should must be the same register for
* both macros.
*/
.macro FFETCH_ADV_RB _reg _rFinish
movzbl (\_reg, rPC), \_rFinish
.endm
.macro FGETOP_RB_JMP _reg _rFinish
movzbl 1(\_reg, rPC), rINST
addl \_reg, rPC
jmp *dvmAsmInstructionJmpTable(,\_rFinish, 4)
.endm
/*
* Attempts to speed up FETCH_INST, GET_INST_OPCODE using
* a jump table. This macro should be called before FINISH_JMP where
* rFinish should be the same register containing the opcode value.
* This is an attempt to split up FINISH in order to reduce or remove
* potential stalls due to the wait for rFINISH.
*/
.macro FINISH_FETCH _rFinish
movzbl (rPC), \_rFinish
movzbl 1(rPC), rINST
.endm
/*
* Attempts to speed up FETCH_ADVANCE_INST, GET_INST_OPCODE using
* a jump table. This macro should be called before FINISH_JMP where
* rFinish should be the same register containing the opcode value.
* This is an attempt to split up FINISH in order to reduce or remove
* potential stalls due to the wait for rFINISH.
*/
.macro FINISH_FETCH_ADVANCE _count _rFinish
movzbl (\_count*2)(rPC), \_rFinish
movzbl (\_count*2 + 1)(rPC), rINST
addl $(\_count*2), rPC
.endm
/*
* Attempts to speed up FETCH_ADVANCE_INST_RB, GET_INST_OPCODE using
* a jump table. This macro should be called before FINISH_JMP where
* rFinish should be the same register containing the opcode value.
* This is an attempt to split up FINISH in order to reduce or remove
* potential stalls due to the wait for rFINISH.
*/
.macro FINISH_FETCH_ADVANCE_RB _reg _rFinish
movzbl (\_reg, rPC), \_rFinish
movzbl 1(\_reg, rPC), rINST
addl \_reg, rPC
.endm
/*
* Attempts to speed up GOTO_OPCODE using a jump table. This macro should
* be called after a FINISH_FETCH* instruction where rFinish should be the
* same register containing the opcode value. This is an attempt to split up
* FINISH in order to reduce or remove potential stalls due to the wait for rFINISH.
*/
.macro FINISH_JMP _rFinish
jmp *dvmAsmInstructionJmpTable(,\_rFinish, 4)
.endm
/*
* Attempts to speed up FETCH_INST, GET_INST_OPCODE, GOTO_OPCODE by using
* a jump table. Uses a single macro - but it should be faster if we
* split up the fetch for rFinish and the jump using rFinish.
*/
.macro FINISH_A
movzbl (rPC), rFinish
movzbl 1(rPC), rINST
jmp *dvmAsmInstructionJmpTable(,rFinish, 4)
.endm
/*
* Attempts to speed up FETCH_ADVANCE_INST, GET_INST_OPCODE,
* GOTO_OPCODE by using a jump table. Uses a single macro -
* but it should be faster if we split up the fetch for rFinish
* and the jump using rFinish.
*/
.macro FINISH _count
movzbl (\_count*2)(rPC), rFinish
movzbl (\_count*2 + 1)(rPC), rINST
addl $(\_count*2), rPC
jmp *dvmAsmInstructionJmpTable(,rFinish, 4)
.endm
/*
* Attempts to speed up FETCH_ADVANCE_INST_RB, GET_INST_OPCODE,
* GOTO_OPCODE by using a jump table. Uses a single macro -
* but it should be faster if we split up the fetch for rFinish
* and the jump using rFinish.
*/
.macro FINISH_RB _reg _rFinish
movzbl (\_reg, rPC), \_rFinish
movzbl 1(\_reg, rPC), rINST
addl \_reg, rPC
jmp *dvmAsmInstructionJmpTable(,\_rFinish, 4)
.endm
#define sReg0 LOCAL0_OFFSET(%ebp)
#define sReg1 LOCAL1_OFFSET(%ebp)
#define sReg2 LOCAL2_OFFSET(%ebp)
#define sReg3 LOCAL3_OFFSET(%ebp)
/*
* Hard coded helper values.
*/
.balign 16
.LdoubNeg:
.quad 0x8000000000000000
.L64bits:
.quad 0xFFFFFFFFFFFFFFFF
.LshiftMask2:
.quad 0x0000000000000000
.LshiftMask:
.quad 0x000000000000003F
.Lvalue64:
.quad 0x0000000000000040
.LvaluePosInfLong:
.quad 0x7FFFFFFFFFFFFFFF
.LvalueNegInfLong:
.quad 0x8000000000000000
.LvalueNanLong:
.quad 0x0000000000000000
.LintMin:
.long 0x80000000
.LintMax:
.long 0x7FFFFFFF
#endif
/*
* This is a #include, not a %include, because we want the C pre-processor
* to expand the macros into assembler assignment statements.
*/
#include "../common/asm-constants.h"
.global dvmAsmInstructionStart
.type dvmAsmInstructionStart, %function
dvmAsmInstructionStart = .L_OP_NOP
.text
/* ------------------------------ */
.balign 64
.L_OP_NOP: /* 0x00 */
/* File: x86/OP_NOP.S */
FETCH_INST_OPCODE 1 %edx
ADVANCE_PC 1
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_MOVE: /* 0x01 */
/* File: x86/OP_MOVE.S */
/* for move, move-object, long-to-int */
/* op vA, vB */
movzbl rINSTbl,%eax # eax<- BA
andb $0xf,%al # eax<- A
shrl $4,rINST # rINST<- B
GET_VREG_R %ecx rINST
FETCH_INST_OPCODE 1 %edx
ADVANCE_PC 1
SET_VREG %ecx %eax # fp[A]<-fp[B]
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_MOVE_FROM16: /* 0x02 */
/* File: x86/OP_MOVE_FROM16.S */
/* for: move/from16, move-object/from16 */
/* op vAA, vBBBB */
movzx rINSTbl,%eax # eax <= AA
movw 2(rPC),rINSTw # rINSTw <= BBBB
GET_VREG_R %ecx rINST # ecx<- fp[BBBB]
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
SET_VREG %ecx %eax # fp[AA]<- ecx]
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_MOVE_16: /* 0x03 */
/* File: x86/OP_MOVE_16.S */
/* for: move/16, move-object/16 */
/* op vAAAA, vBBBB */
movzwl 4(rPC),%ecx # ecx<- BBBB
movzwl 2(rPC),%eax # eax<- AAAA
GET_VREG_R %ecx %ecx
FETCH_INST_OPCODE 3 %edx
ADVANCE_PC 3
SET_VREG %ecx %eax
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_MOVE_WIDE: /* 0x04 */
/* File: x86/OP_MOVE_WIDE.S */
/* move-wide vA, vB */
/* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
movzbl rINSTbl,%ecx # ecx <- BA
sarl $4,rINST # rINST<- B
GET_VREG_WORD %eax rINST 0 # eax<- v[B+0]
GET_VREG_WORD rINST rINST 1 # rINST<- v[B+1]
andb $0xf,%cl # ecx <- A
FETCH_INST_OPCODE 1 %edx
SET_VREG_WORD rINST %ecx 1 # v[A+1]<- rINST
ADVANCE_PC 1
SET_VREG_WORD %eax %ecx 0 # v[A+0]<- eax
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_MOVE_WIDE_FROM16: /* 0x05 */
/* File: x86/OP_MOVE_WIDE_FROM16.S */
/* move-wide/from16 vAA, vBBBB */
/* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
movzwl 2(rPC),%ecx # ecx<- BBBB
movzbl rINSTbl,%eax # eax<- AAAA
GET_VREG_WORD rINST %ecx 0 # rINST<- v[BBBB+0]
GET_VREG_WORD %ecx %ecx 1 # ecx<- v[BBBB+1]
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
SET_VREG_WORD rINST %eax 0 # v[AAAA+0]<- rINST
SET_VREG_WORD %ecx %eax 1 # v[AAAA+1]<- eax
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_MOVE_WIDE_16: /* 0x06 */
/* File: x86/OP_MOVE_WIDE_16.S */
/* move-wide/16 vAAAA, vBBBB */
/* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
movzwl 4(rPC),%ecx # ecx<- BBBB
movzwl 2(rPC),%eax # eax<- AAAA
GET_VREG_WORD rINST %ecx 0 # rINSTw_WORD<- v[BBBB+0]
GET_VREG_WORD %ecx %ecx 1 # ecx<- v[BBBB+1]
FETCH_INST_OPCODE 3 %edx
SET_VREG_WORD rINST %eax 0 # v[AAAA+0]<- rINST
ADVANCE_PC 3
SET_VREG_WORD %ecx %eax 1 # v[AAAA+1]<- ecx
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_MOVE_OBJECT: /* 0x07 */
/* File: x86/OP_MOVE_OBJECT.S */
/* File: x86/OP_MOVE.S */
/* for move, move-object, long-to-int */
/* op vA, vB */
movzbl rINSTbl,%eax # eax<- BA
andb $0xf,%al # eax<- A
shrl $4,rINST # rINST<- B
GET_VREG_R %ecx rINST
FETCH_INST_OPCODE 1 %edx
ADVANCE_PC 1
SET_VREG %ecx %eax # fp[A]<-fp[B]
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */
/* File: x86/OP_MOVE_OBJECT_FROM16.S */
/* File: x86/OP_MOVE_FROM16.S */
/* for: move/from16, move-object/from16 */
/* op vAA, vBBBB */
movzx rINSTbl,%eax # eax <= AA
movw 2(rPC),rINSTw # rINSTw <= BBBB
GET_VREG_R %ecx rINST # ecx<- fp[BBBB]
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
SET_VREG %ecx %eax # fp[AA]<- ecx]
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_MOVE_OBJECT_16: /* 0x09 */
/* File: x86/OP_MOVE_OBJECT_16.S */
/* File: x86/OP_MOVE_16.S */
/* for: move/16, move-object/16 */
/* op vAAAA, vBBBB */
movzwl 4(rPC),%ecx # ecx<- BBBB
movzwl 2(rPC),%eax # eax<- AAAA
GET_VREG_R %ecx %ecx
FETCH_INST_OPCODE 3 %edx
ADVANCE_PC 3
SET_VREG %ecx %eax
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_MOVE_RESULT: /* 0x0a */
/* File: x86/OP_MOVE_RESULT.S */
/* for: move-result, move-result-object */
/* op vAA */
movl rGLUE,%eax # eax<- rGLUE
movzx rINSTbl,%ecx # ecx<- AA
movl offGlue_retval(%eax),%eax # eax<- glue->retval.l
FETCH_INST_OPCODE 1 %edx
ADVANCE_PC 1
SET_VREG %eax %ecx # fp[AA]<- retval.l
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_MOVE_RESULT_WIDE: /* 0x0b */
/* File: x86/OP_MOVE_RESULT_WIDE.S */
/* move-result-wide vAA */
movl rGLUE,%ecx
movl offGlue_retval(%ecx),%eax
movl 4+offGlue_retval(%ecx),%ecx
FETCH_INST_OPCODE 1 %edx
SET_VREG_WORD %eax rINST 0 # v[AA+0] <- eax
SET_VREG_WORD %ecx rINST 1 # v[AA+1] <- ecx
ADVANCE_PC 1
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */
/* File: x86/OP_MOVE_RESULT_OBJECT.S */
/* File: x86/OP_MOVE_RESULT.S */
/* for: move-result, move-result-object */
/* op vAA */
movl rGLUE,%eax # eax<- rGLUE
movzx rINSTbl,%ecx # ecx<- AA
movl offGlue_retval(%eax),%eax # eax<- glue->retval.l
FETCH_INST_OPCODE 1 %edx
ADVANCE_PC 1
SET_VREG %eax %ecx # fp[AA]<- retval.l
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_MOVE_EXCEPTION: /* 0x0d */
/* File: x86/OP_MOVE_EXCEPTION.S */
/* move-exception vAA */
movl rGLUE,%ecx
movl offGlue_self(%ecx),%ecx # ecx<- glue->self
movl offThread_exception(%ecx),%eax # eax<- dvmGetException bypass
SET_VREG %eax rINST # fp[AA]<- exception object
FETCH_INST_OPCODE 1 %edx
ADVANCE_PC 1
movl $0,offThread_exception(%ecx) # dvmClearException bypass
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_RETURN_VOID: /* 0x0e */
/* File: x86/OP_RETURN_VOID.S */
jmp common_returnFromMethod
/* ------------------------------ */
.balign 64
.L_OP_RETURN: /* 0x0f */
/* File: x86/OP_RETURN.S */
/*
* Return a 32-bit value. Copies the return value into the "glue"
* structure, then jumps to the return handler.
*
* for: return, return-object
*/
/* op vAA */
movl rGLUE,%ecx
GET_VREG_R %eax rINST # eax<- vAA
movl %eax,offGlue_retval(%ecx) # retval.i <- AA
jmp common_returnFromMethod
/* ------------------------------ */
.balign 64
.L_OP_RETURN_WIDE: /* 0x10 */
/* File: x86/OP_RETURN_WIDE.S */
/*
* Return a 64-bit value. Copies the return value into the "glue"
* structure, then jumps to the return handler.
*/
/* return-wide vAA */
movl rGLUE,%ecx
GET_VREG_WORD %eax rINST 0 # eax<- v[AA+0]
GET_VREG_WORD rINST rINST 1 # rINST<- v[AA+1]
movl %eax,offGlue_retval(%ecx)
movl rINST,4+offGlue_retval(%ecx)
jmp common_returnFromMethod
/* ------------------------------ */
.balign 64
.L_OP_RETURN_OBJECT: /* 0x11 */
/* File: x86/OP_RETURN_OBJECT.S */
/* File: x86/OP_RETURN.S */
/*
* Return a 32-bit value. Copies the return value into the "glue"
* structure, then jumps to the return handler.
*
* for: return, return-object
*/
/* op vAA */
movl rGLUE,%ecx
GET_VREG_R %eax rINST # eax<- vAA
movl %eax,offGlue_retval(%ecx) # retval.i <- AA
jmp common_returnFromMethod
/* ------------------------------ */
.balign 64
.L_OP_CONST_4: /* 0x12 */
/* File: x86/OP_CONST_4.S */
/* const/4 vA, #+B */
movsx rINSTbl,%eax # eax<-ssssssBx
movl $0xf,%ecx
andl %eax,%ecx # ecx<- A
FETCH_INST_OPCODE 1 %edx
ADVANCE_PC 1
sarl $4,%eax
SET_VREG %eax %ecx
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_CONST_16: /* 0x13 */
/* File: x86/OP_CONST_16.S */
/* const/16 vAA, #+BBBB */
movswl 2(rPC),%ecx # ecx<- ssssBBBB
movl rINST,%eax # eax<- AA
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
SET_VREG %ecx %eax # vAA<- ssssBBBB
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_CONST: /* 0x14 */
/* File: x86/OP_CONST.S */
/* const vAA, #+BBBBbbbb */
movl 2(rPC),%eax # grab all 32 bits at once
movl rINST,%ecx # ecx<- AA
FETCH_INST_OPCODE 3 %edx
ADVANCE_PC 3
SET_VREG %eax %ecx # vAA<- eax
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_CONST_HIGH16: /* 0x15 */
/* File: x86/OP_CONST_HIGH16.S */
/* const/high16 vAA, #+BBBB0000 */
movzwl 2(rPC),%eax # eax<- 0000BBBB
movl rINST,%ecx # ecx<- AA
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
sall $16,%eax # eax<- BBBB0000
SET_VREG %eax %ecx # vAA<- eax
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_CONST_WIDE_16: /* 0x16 */
/* File: x86/OP_CONST_WIDE_16.S */
/* const-wide/16 vAA, #+BBBB */
movswl 2(rPC),%eax # eax<- ssssBBBB
cltd # rPC:eax<- ssssssssssssBBBB
SET_VREG_WORD %edx rINST 1 # store msw
FETCH_INST_OPCODE 2 %edx
SET_VREG_WORD %eax rINST 0 # store lsw
ADVANCE_PC 2
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_CONST_WIDE_32: /* 0x17 */
/* File: x86/OP_CONST_WIDE_32.S */
/* const-wide/32 vAA, #+BBBBbbbb */
movl 2(rPC),%eax # eax<- BBBBbbbb
cltd # rPC:eax<- ssssssssssssBBBB
SET_VREG_WORD %edx rINST,1 # store msw
FETCH_INST_OPCODE 3 %edx
SET_VREG_WORD %eax rINST 0 # store lsw
ADVANCE_PC 3
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_CONST_WIDE: /* 0x18 */
/* File: x86/OP_CONST_WIDE.S */
/* const-wide vAA, #+HHHHhhhhBBBBbbbb */
movl 2(rPC),%eax # eax<- lsw
movzbl rINSTbl,%ecx # ecx<- AA
movl 6(rPC),rINST # rINST<- msw
leal (rFP,%ecx,4),%ecx # dst addr
movl rINST,4(%ecx)
FETCH_INST_OPCODE 5 %edx
movl %eax,(%ecx)
ADVANCE_PC 5
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_CONST_WIDE_HIGH16: /* 0x19 */
/* File: x86/OP_CONST_WIDE_HIGH16.S */
/* const-wide/high16 vAA, #+BBBB000000000000 */
movzwl 2(rPC),%eax # eax<- 0000BBBB
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
sall $16,%eax # eax<- BBBB0000
SET_VREG_WORD %eax rINST 1 # v[AA+1]<- eax
xorl %eax,%eax
SET_VREG_WORD %eax rINST 0 # v[AA+0]<- eax
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_CONST_STRING: /* 0x1a */
/* File: x86/OP_CONST_STRING.S */
/* const/string vAA, String@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- BBBB
movl offGlue_methodClassDex(%ecx),%ecx# ecx<- glue->methodClassDex
movl offDvmDex_pResStrings(%ecx),%ecx # ecx<- dvmDex->pResStrings
movl (%ecx,%eax,4),%eax # eax<- rResString[BBBB]
movl rINST,%ecx
FETCH_INST_OPCODE 2 %edx
testl %eax,%eax # resolved yet?
je .LOP_CONST_STRING_resolve
SET_VREG %eax %ecx # vAA<- rResString[BBBB]
ADVANCE_PC 2
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_CONST_STRING_JUMBO: /* 0x1b */
/* File: x86/OP_CONST_STRING_JUMBO.S */
/* const/string vAA, String@BBBBBBBB */
movl rGLUE,%ecx
movl 2(rPC),%eax # eax<- BBBBBBBB
movl offGlue_methodClassDex(%ecx),%ecx# ecx<- glue->methodClassDex
movl offDvmDex_pResStrings(%ecx),%ecx # ecx<- dvmDex->pResStrings
movl (%ecx,%eax,4),%eax # eax<- rResString[BBBB]
movl rINST,%ecx
FETCH_INST_OPCODE 3 %edx
testl %eax,%eax # resolved yet?
je .LOP_CONST_STRING_JUMBO_resolve
SET_VREG %eax %ecx # vAA<- rResString[BBBB]
ADVANCE_PC 3
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_CONST_CLASS: /* 0x1c */
/* File: x86/OP_CONST_CLASS.S */
/* const/class vAA, Class@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- BBBB
movl offGlue_methodClassDex(%ecx),%ecx# ecx<- glue->methodClassDex
movl offDvmDex_pResClasses(%ecx),%ecx # ecx<- dvmDex->pResClasses
movl (%ecx,%eax,4),%eax # eax<- rResClasses[BBBB]
movl rINST,%ecx
FETCH_INST_OPCODE 2 %edx
testl %eax,%eax # resolved yet?
je .LOP_CONST_CLASS_resolve
SET_VREG %eax %ecx # vAA<- rResClasses[BBBB]
ADVANCE_PC 2
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_MONITOR_ENTER: /* 0x1d */
/* File: x86/OP_MONITOR_ENTER.S */
/*
* Synchronize on an object.
*/
/* monitor-enter vAA */
movl rGLUE,%ecx
GET_VREG_R %eax rINST # eax<- vAA
movl offGlue_self(%ecx),%ecx # ecx<- glue->self
FETCH_INST_WORD 1
testl %eax,%eax # null object?
EXPORT_PC # need for precise GC, MONITOR_TRACKING
jne .LOP_MONITOR_ENTER_continue
jmp common_errNullObject
/* ------------------------------ */
.balign 64
.L_OP_MONITOR_EXIT: /* 0x1e */
/* File: x86/OP_MONITOR_EXIT.S */
/*
* Unlock an object.
*
* Exceptions that occur when unlocking a monitor need to appear as
* if they happened at the following instruction. See the Dalvik
* instruction spec.
*/
/* monitor-exit vAA */
GET_VREG_R %eax rINST
movl rGLUE,%ecx
EXPORT_PC
testl %eax,%eax # null object?
je .LOP_MONITOR_EXIT_errNullObject # go if so
movl offGlue_self(%ecx),%ecx # ecx<- glue->self
movl %eax,OUT_ARG1(%esp)
movl %ecx,OUT_ARG0(%esp)
jmp .LOP_MONITOR_EXIT_continue
/* ------------------------------ */
.balign 64
.L_OP_CHECK_CAST: /* 0x1f */
/* File: x86/OP_CHECK_CAST.S */
/*
* Check to see if a cast from one class to another is allowed.
*/
/* check-cast vAA, class@BBBB */
movl rGLUE,%ecx
GET_VREG_R rINST,rINST # rINST<- vAA (object)
movzwl 2(rPC),%eax # eax<- BBBB
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
testl rINST,rINST # is oject null?
movl offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
je .LOP_CHECK_CAST_okay # null obj, cast always succeeds
movl (%ecx,%eax,4),%eax # eax<- resolved class
movl offObject_clazz(rINST),%ecx # ecx<- obj->clazz
testl %eax,%eax # have we resolved this before?
je .LOP_CHECK_CAST_resolve # no, go do it now
.LOP_CHECK_CAST_resolved:
cmpl %eax,%ecx # same class (trivial success)?
jne .LOP_CHECK_CAST_fullcheck # no, do full check
.LOP_CHECK_CAST_okay:
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_INSTANCE_OF: /* 0x20 */
/* File: x86/OP_INSTANCE_OF.S */
/*
* Check to see if an object reference is an instance of a class.
*
* Most common situation is a non-null object, being compared against
* an already-resolved class.
*/
/* instance-of vA, vB, class@CCCC */
movl rINST,%eax # eax<- BA
sarl $4,%eax # eax<- B
GET_VREG_R %eax %eax # eax<- vB (obj)
movl rGLUE,%ecx
testl %eax,%eax # object null?
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
je .LOP_INSTANCE_OF_store # null obj, not instance, store it
movzwl 2(rPC),%edx # edx<- CCCC
movl offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
movl (%ecx,%edx,4),%ecx # ecx<- resolved class
movl offObject_clazz(%eax),%eax # eax<- obj->clazz
testl %ecx,%ecx # have we resolved this before?
je .LOP_INSTANCE_OF_resolve # not resolved, do it now
.LOP_INSTANCE_OF_resolved: # eax<- obj->clazz, ecx<- resolved class
cmpl %eax,%ecx # same class (trivial success)?
je .LOP_INSTANCE_OF_trivial # yes, trivial finish
jmp .LOP_INSTANCE_OF_fullcheck # no, do full check
/* ------------------------------ */
.balign 64
.L_OP_ARRAY_LENGTH: /* 0x21 */
/* File: x86/OP_ARRAY_LENGTH.S */
/*
* Return the length of an array.
*/
mov rINST,%eax # eax<- BA
sarl $4,rINST # rINST<- B
GET_VREG_R %ecx rINST # ecx<- vB (object ref)
andb $0xf,%al # eax<- A
testl %ecx,%ecx # is null?
je common_errNullObject
FETCH_INST_OPCODE 1 %edx
movl offArrayObject_length(%ecx),%ecx
ADVANCE_PC 1
SET_VREG %ecx %eax
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_NEW_INSTANCE: /* 0x22 */
/* File: x86/OP_NEW_INSTANCE.S */
/*
* Create a new instance of a class.
*/
/* new-instance vAA, class@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- BBBB
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
movl offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
EXPORT_PC
movl (%ecx,%eax,4),%ecx # ecx<- resolved class
testl %ecx,%ecx # resolved?
je .LOP_NEW_INSTANCE_resolve # no, go do it
.LOP_NEW_INSTANCE_resolved: # on entry, ecx<- class
cmpb $CLASS_INITIALIZED,offClassObject_status(%ecx)
je .LOP_NEW_INSTANCE_initialized
jmp .LOP_NEW_INSTANCE_needinit
/* ------------------------------ */
.balign 64
.L_OP_NEW_ARRAY: /* 0x23 */
/* File: x86/OP_NEW_ARRAY.S */
/*
* Allocate an array of objects, specified with the array class
* and a count.
*
* The verifier guarantees that this is an array class, so we don't
* check for it here.
*/
/* new-array vA, vB, class@CCCC */
movl rGLUE,%ecx
EXPORT_PC
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
movzwl 2(rPC),%eax # eax<- CCCC
movl offDvmDex_pResClasses(%ecx),%ecx # ecx<- pDvmDex->pResClasses
movl (%ecx,%eax,4),%ecx # ecx<- resolved class
movzbl rINSTbl,%eax
sarl $4,%eax # eax<- B
GET_VREG_R %eax %eax # eax<- vB (array length)
andb $0xf,rINSTbl # rINST<- A
testl %eax,%eax
js common_errNegativeArraySize # bail
testl %ecx,%ecx # already resolved?
jne .LOP_NEW_ARRAY_finish # yes, fast path
jmp .LOP_NEW_ARRAY_resolve # resolve now
/* ------------------------------ */
.balign 64
.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
/* File: x86/OP_FILLED_NEW_ARRAY.S */
/*
* Create a new array with elements filled from registers.
*
* for: filled-new-array, filled-new-array/range
*/
/* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
/* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
movl rGLUE,%eax
movl offGlue_methodClassDex(%eax),%eax # eax<- pDvmDex
movzwl 2(rPC),%ecx # ecx<- BBBB
movl offDvmDex_pResClasses(%eax),%eax # eax<- pDvmDex->pResClasses
movl (%eax,%ecx,4),%eax # eax<- resolved class
EXPORT_PC
testl %eax,%eax # already resolved?
jne .LOP_FILLED_NEW_ARRAY_continue # yes, continue
# less frequent path, so we'll redo some work
movl rGLUE,%eax
movl $0,OUT_ARG2(%esp) # arg2<- false
movl %ecx,OUT_ARG1(%esp) # arg1<- BBBB
movl offGlue_method(%eax),%eax # eax<- glue->method
jmp .LOP_FILLED_NEW_ARRAY_more
/* ------------------------------ */
.balign 64
.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
/* File: x86/OP_FILLED_NEW_ARRAY_RANGE.S */
/* File: x86/OP_FILLED_NEW_ARRAY.S */
/*
* Create a new array with elements filled from registers.
* (Same generated template as OP_FILLED_NEW_ARRAY, range flavor.)
*
* for: filled-new-array, filled-new-array/range
*/
/* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
/* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
movl rGLUE,%eax
movl offGlue_methodClassDex(%eax),%eax # eax<- pDvmDex
movzwl 2(rPC),%ecx # ecx<- BBBB
movl offDvmDex_pResClasses(%eax),%eax # eax<- pDvmDex->pResClasses
movl (%eax,%ecx,4),%eax # eax<- resolved class
EXPORT_PC # allocation/resolution can throw; publish dalvik PC
testl %eax,%eax # already resolved?
jne .LOP_FILLED_NEW_ARRAY_RANGE_continue # yes, continue (fast path, elsewhere)
# less frequent path, so we'll redo some work
movl rGLUE,%eax
movl $0,OUT_ARG2(%esp) # arg2<- false
movl %ecx,OUT_ARG1(%esp) # arg1<- BBBB (type ref, for class resolver)
movl offGlue_method(%eax),%eax # eax<- glue->method
jmp .LOP_FILLED_NEW_ARRAY_RANGE_more # resolve-and-retry stub, defined elsewhere
/* ------------------------------ */
.balign 64
.L_OP_FILL_ARRAY_DATA: /* 0x26 */
/* File: x86/OP_FILL_ARRAY_DATA.S */
/* fill-array-data vAA, +BBBBBBBB */
movl 2(rPC),%ecx # ecx<- BBBBbbbb (signed 32-bit code-unit offset)
leal (rPC,%ecx,2),%ecx # ecx<- PC + BBBBbbbb*2 (addr of data table)
GET_VREG_R %eax rINST # eax<- vAA (array object reference)
EXPORT_PC # helper may throw; publish dalvik PC
movl %eax,OUT_ARG0(%esp)
movl %ecx,OUT_ARG1(%esp)
call dvmInterpHandleFillArrayData # returns 0 on failure
FETCH_INST_OPCODE 3 %edx
testl %eax,%eax # exception thrown?
je common_exceptionThrown
ADVANCE_PC 3 # this insn is 3 code units wide
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_THROW: /* 0x27 */
/* File: x86/OP_THROW.S */
/*
* Throw an exception object in the current thread.
*/
/* throw vAA */
movl rGLUE,%ecx
EXPORT_PC # exception delivery needs a precise dalvik PC
GET_VREG_R %eax rINST # eax<- exception object
movl offGlue_self(%ecx),%ecx # ecx<- glue->self
testl %eax,%eax # null object?
je common_errNullObject # throwing null raises NPE instead
movl %eax,offThread_exception(%ecx) # thread->exception<- obj
jmp common_exceptionThrown # unwind path, defined elsewhere
/* ------------------------------ */
.balign 64
.L_OP_GOTO: /* 0x28 */
/* File: x86/OP_GOTO.S */
/*
* Unconditional branch, 8-bit offset.
*
* The branch distance is a signed code-unit offset, which we need to
* double to get a byte offset.
*/
/* goto +AA */
movsbl rINSTbl,rINST # rINST (ebx)<- ssssssAA, sign-extended
testl rINST,rINST # test for <0
js common_backwardBranch # backward branches take the out-of-line path
movl rINST,%eax
FETCH_INST_INDEXED %eax # fetch insn at rPC + eax code units
ADVANCE_PC_INDEXED %eax
GOTO_NEXT
/* ------------------------------ */
.balign 64
.L_OP_GOTO_16: /* 0x29 */
/* File: x86/OP_GOTO_16.S */
/*
* Unconditional branch, 16-bit offset.
*
* The branch distance is a signed code-unit offset
*/
/* goto/16 +AAAA */
movswl 2(rPC),rINST # rINST<- ssssAAAA, sign-extended
testl rINST,rINST # test for <0
js common_backwardBranch # backward branches take the out-of-line path
movl rINST,%eax
FETCH_INST_INDEXED %eax # fetch insn at rPC + eax code units
ADVANCE_PC_INDEXED %eax
GOTO_NEXT
/* ------------------------------ */
.balign 64
.L_OP_GOTO_32: /* 0x2a */
/* File: x86/OP_GOTO_32.S */
/*
* Unconditional branch, 32-bit offset.
*
* The branch distance is a signed code-unit offset.
*
* Unlike most opcodes, this one is allowed to branch to itself, so
* our "backward branch" test must be "<=0" instead of "<0".
*/
/* goto/32 AAAAAAAA */
movl 2(rPC),rINST # rINST<- AAAAAAAA
cmpl $0,rINST # test for <= 0 (cmp, not test: need both SF and ZF cases)
jle common_backwardBranch
movl rINST,%eax
FETCH_INST_INDEXED %eax # fetch insn at rPC + eax code units
ADVANCE_PC_INDEXED %eax
GOTO_NEXT
/* ------------------------------ */
.balign 64
.L_OP_PACKED_SWITCH: /* 0x2b */
/* File: x86/OP_PACKED_SWITCH.S */
/*
* Handle a packed-switch or sparse-switch instruction. In both cases
* we decode it and hand it off to a helper function.
*
* We don't really expect backward branches in a switch statement, but
* they're perfectly legal, so we check for them here.
*
* for: packed-switch, sparse-switch
*/
/* op vAA, +BBBB */
movl 2(rPC),%ecx # ecx<- BBBBbbbb (offset to switch data table)
GET_VREG_R %eax rINST # eax<- vAA (the value being switched on)
leal (rPC,%ecx,2),%ecx # ecx<- PC + BBBBbbbb*2
movl %eax,OUT_ARG1(%esp) # ARG1<- vAA
movl %ecx,OUT_ARG0(%esp) # ARG0<- switchData
call dvmInterpHandlePackedSwitch # returns code-unit branch offset
testl %eax,%eax
movl %eax,rINST # set up word offset (mov preserves flags)
jle common_backwardBranch # check on special actions
ADVANCE_PC_INDEXED rINST
FETCH_INST
GOTO_NEXT
/* ------------------------------ */
.balign 64
.L_OP_SPARSE_SWITCH: /* 0x2c */
/* File: x86/OP_SPARSE_SWITCH.S */
/* File: x86/OP_PACKED_SWITCH.S */
/*
* Handle a packed-switch or sparse-switch instruction. In both cases
* we decode it and hand it off to a helper function.
* (Same generated template as packed-switch; only the helper differs.)
*
* We don't really expect backward branches in a switch statement, but
* they're perfectly legal, so we check for them here.
*
* for: packed-switch, sparse-switch
*/
/* op vAA, +BBBB */
movl 2(rPC),%ecx # ecx<- BBBBbbbb (offset to switch data table)
GET_VREG_R %eax rINST # eax<- vAA (the value being switched on)
leal (rPC,%ecx,2),%ecx # ecx<- PC + BBBBbbbb*2
movl %eax,OUT_ARG1(%esp) # ARG1<- vAA
movl %ecx,OUT_ARG0(%esp) # ARG0<- switchData
call dvmInterpHandleSparseSwitch # returns code-unit branch offset
testl %eax,%eax
movl %eax,rINST # set up word offset (mov preserves flags)
jle common_backwardBranch # check on special actions
ADVANCE_PC_INDEXED rINST
FETCH_INST
GOTO_NEXT
/* ------------------------------ */
.balign 64
.L_OP_CMPL_FLOAT: /* 0x2d */
/* File: x86/OP_CMPL_FLOAT.S */
/* File: x86/OP_CMPG_DOUBLE.S */
/* float/double_cmp[gl] vAA, vBB, vCC */
movzbl 3(rPC),%eax # eax<- CC
movzbl 2(rPC),%ecx # ecx<- BB
.if 0 # generated template flag: 0 = float (flds), 1 = double (fldl)
fldl (rFP,%eax,4)
fldl (rFP,%ecx,4)
.else
flds (rFP,%eax,4) # st(1)<- vCC
flds (rFP,%ecx,4) # st(0)<- vBB
.endif
xorl %ecx,%ecx # ecx<- 0 (the "equal" result)
fucompp # z if equal, p set if NaN, c set if st0 < st1
fnstsw %ax # copy x87 status word to ax (clobbers low eax)
sahf # ah -> EFLAGS: C0->CF, C2->PF, C3->ZF
movl rINST,%eax # eax<- AA (safe: fnstsw result consumed by sahf)
FETCH_INST_OPCODE 2 %edx
jp .LOP_CMPL_FLOAT_isNaN # unordered; NaN result (-1 for cmpl) set elsewhere
je .LOP_CMPL_FLOAT_finish # equal: result 0
sbbl %ecx,%ecx # ecx<- CF ? -1 : 0 (vBB < vCC ? -1 : 0)
jb .LOP_CMPL_FLOAT_finish # below: result -1
incl %ecx # above: 0 -> 1
.LOP_CMPL_FLOAT_finish:
SET_VREG %ecx %eax # vAA<- result
ADVANCE_PC 2
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_CMPG_FLOAT: /* 0x2e */
/* File: x86/OP_CMPG_FLOAT.S */
/* File: x86/OP_CMPG_DOUBLE.S */
/* float/double_cmp[gl] vAA, vBB, vCC */
movzbl 3(rPC),%eax # eax<- CC
movzbl 2(rPC),%ecx # ecx<- BB
.if 0 # generated template flag: 0 = float (flds), 1 = double (fldl)
fldl (rFP,%eax,4)
fldl (rFP,%ecx,4)
.else
flds (rFP,%eax,4) # st(1)<- vCC
flds (rFP,%ecx,4) # st(0)<- vBB
.endif
xorl %ecx,%ecx # ecx<- 0 (the "equal" result)
fucompp # z if equal, p set if NaN, c set if st0 < st1
fnstsw %ax # copy x87 status word to ax (clobbers low eax)
sahf # ah -> EFLAGS: C0->CF, C2->PF, C3->ZF
movl rINST,%eax # eax<- AA (safe: fnstsw result consumed by sahf)
FETCH_INST_OPCODE 2 %edx
jp .LOP_CMPG_FLOAT_isNaN # unordered; NaN result (+1 for cmpg) set elsewhere
je .LOP_CMPG_FLOAT_finish # equal: result 0
sbbl %ecx,%ecx # ecx<- CF ? -1 : 0 (vBB < vCC ? -1 : 0)
jb .LOP_CMPG_FLOAT_finish # below: result -1
incl %ecx # above: 0 -> 1
.LOP_CMPG_FLOAT_finish:
SET_VREG %ecx %eax # vAA<- result
ADVANCE_PC 2
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_CMPL_DOUBLE: /* 0x2f */
/* File: x86/OP_CMPL_DOUBLE.S */
/* File: x86/OP_CMPG_DOUBLE.S */
/* float/double_cmp[gl] vAA, vBB, vCC */
movzbl 3(rPC),%eax # eax<- CC
movzbl 2(rPC),%ecx # ecx<- BB
.if 1 # generated template flag: 0 = float (flds), 1 = double (fldl)
fldl (rFP,%eax,4) # st(1)<- vCC/vCC+1 (64-bit)
fldl (rFP,%ecx,4) # st(0)<- vBB/vBB+1 (64-bit)
.else
flds (rFP,%eax,4)
flds (rFP,%ecx,4)
.endif
xorl %ecx,%ecx # ecx<- 0 (the "equal" result)
fucompp # z if equal, p set if NaN, c set if st0 < st1
fnstsw %ax # copy x87 status word to ax (clobbers low eax)
sahf # ah -> EFLAGS: C0->CF, C2->PF, C3->ZF
movl rINST,%eax # eax<- AA (safe: fnstsw result consumed by sahf)
FETCH_INST_OPCODE 2 %edx
jp .LOP_CMPL_DOUBLE_isNaN # unordered; NaN result (-1 for cmpl) set elsewhere
je .LOP_CMPL_DOUBLE_finish # equal: result 0
sbbl %ecx,%ecx # ecx<- CF ? -1 : 0 (vBB < vCC ? -1 : 0)
jb .LOP_CMPL_DOUBLE_finish # below: result -1
incl %ecx # above: 0 -> 1
.LOP_CMPL_DOUBLE_finish:
SET_VREG %ecx %eax # vAA<- result
ADVANCE_PC 2
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_CMPG_DOUBLE: /* 0x30 */
/* File: x86/OP_CMPG_DOUBLE.S */
/* float/double_cmp[gl] vAA, vBB, vCC */
movzbl 3(rPC),%eax # eax<- CC
movzbl 2(rPC),%ecx # ecx<- BB
.if 1 # generated template flag: 0 = float (flds), 1 = double (fldl)
fldl (rFP,%eax,4) # st(1)<- vCC/vCC+1 (64-bit)
fldl (rFP,%ecx,4) # st(0)<- vBB/vBB+1 (64-bit)
.else
flds (rFP,%eax,4)
flds (rFP,%ecx,4)
.endif
xorl %ecx,%ecx # ecx<- 0 (the "equal" result)
fucompp # z if equal, p set if NaN, c set if st0 < st1
fnstsw %ax # copy x87 status word to ax (clobbers low eax)
sahf # ah -> EFLAGS: C0->CF, C2->PF, C3->ZF
movl rINST,%eax # eax<- AA (safe: fnstsw result consumed by sahf)
FETCH_INST_OPCODE 2 %edx
jp .LOP_CMPG_DOUBLE_isNaN # unordered; NaN result (+1 for cmpg) set elsewhere
je .LOP_CMPG_DOUBLE_finish # equal: result 0
sbbl %ecx,%ecx # ecx<- CF ? -1 : 0 (vBB < vCC ? -1 : 0)
jb .LOP_CMPG_DOUBLE_finish # below: result -1
incl %ecx # above: 0 -> 1
.LOP_CMPG_DOUBLE_finish:
SET_VREG %ecx %eax # vAA<- result
ADVANCE_PC 2
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_CMP_LONG: /* 0x31 */
/* File: x86/OP_CMP_LONG.S */
/*
* Compare two 64-bit values. Puts 0, 1, or -1 into the destination
* register based on the results of the comparison.
*
* High words compare signed; if equal, low words compare unsigned.
* Result stores (.LOP_CMP_LONG_{smaller,bigger,finish}) live elsewhere.
*/
/* cmp-long vAA, vBB, vCC */
movzbl 2(rPC),%ecx # ecx<- BB
movzbl 3(rPC),%edx # edx<- CC
GET_VREG_WORD %eax %ecx,1 # eax<- v[BB+1] (high word)
GET_VREG_WORD %ecx %ecx 0 # ecx<- v[BB+0] (low word)
cmpl 4(rFP,%edx,4),%eax # signed compare of high words
jl .LOP_CMP_LONG_smaller
jg .LOP_CMP_LONG_bigger
sub (rFP,%edx,4),%ecx # high words equal: unsigned compare of low words
ja .LOP_CMP_LONG_bigger
jb .LOP_CMP_LONG_smaller
jmp .LOP_CMP_LONG_finish # equal: result 0
/* ------------------------------ */
.balign 64
.L_OP_IF_EQ: /* 0x32 */
/* File: x86/OP_IF_EQ.S */
/* File: x86/bincmp.S */
/*
* Generic two-operand compare-and-branch operation. Provide a "revcmp"
* fragment that specifies the *reverse* comparison to perform, e.g.
* for "if-le" you would use "gt".
*
* For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
*/
/* if-cmp vA, vB, +CCCC */
movzx rINSTbl,%ecx # ecx <- A+
andb $0xf,%cl # ecx <- A
GET_VREG_R %eax %ecx # eax <- vA
sarl $4,rINST # rINST<- B
cmpl (rFP,rINST,4),%eax # compare (vA, vB)
movswl 2(rPC),rINST # Get signed branch offset
movl $2,%eax # assume not taken (skip 2 code units)
jne 1f # "ne" is the reverse of if-eq: not taken
testl rINST,rINST
js common_backwardBranch # taken backward: out-of-line path
movl rINST,%eax # taken forward: eax<- branch offset
1:
FETCH_INST_INDEXED %eax
ADVANCE_PC_INDEXED %eax
GOTO_NEXT
/* ------------------------------ */
.balign 64
.L_OP_IF_NE: /* 0x33 */
/* File: x86/OP_IF_NE.S */
/* File: x86/bincmp.S */
/*
* Generic two-operand compare-and-branch operation. Provide a "revcmp"
* fragment that specifies the *reverse* comparison to perform, e.g.
* for "if-le" you would use "gt".
*
* For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
*/
/* if-cmp vA, vB, +CCCC */
movzx rINSTbl,%ecx # ecx <- A+
andb $0xf,%cl # ecx <- A
GET_VREG_R %eax %ecx # eax <- vA
sarl $4,rINST # rINST<- B
cmpl (rFP,rINST,4),%eax # compare (vA, vB)
movswl 2(rPC),rINST # Get signed branch offset
movl $2,%eax # assume not taken (skip 2 code units)
je 1f # "eq" is the reverse of if-ne: not taken
testl rINST,rINST
js common_backwardBranch # taken backward: out-of-line path
movl rINST,%eax # taken forward: eax<- branch offset
1:
FETCH_INST_INDEXED %eax
ADVANCE_PC_INDEXED %eax
GOTO_NEXT
/* ------------------------------ */
.balign 64
.L_OP_IF_LT: /* 0x34 */
/* File: x86/OP_IF_LT.S */
/* File: x86/bincmp.S */
/*
* Generic two-operand compare-and-branch operation. Provide a "revcmp"
* fragment that specifies the *reverse* comparison to perform, e.g.
* for "if-le" you would use "gt".
*
* For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
*/
/* if-cmp vA, vB, +CCCC */
movzx rINSTbl,%ecx # ecx <- A+
andb $0xf,%cl # ecx <- A
GET_VREG_R %eax %ecx # eax <- vA
sarl $4,rINST # rINST<- B
cmpl (rFP,rINST,4),%eax # compare (vA, vB)
movswl 2(rPC),rINST # Get signed branch offset
movl $2,%eax # assume not taken (skip 2 code units)
jge 1f # "ge" is the reverse of if-lt: not taken
testl rINST,rINST
js common_backwardBranch # taken backward: out-of-line path
movl rINST,%eax # taken forward: eax<- branch offset
1:
FETCH_INST_INDEXED %eax
ADVANCE_PC_INDEXED %eax
GOTO_NEXT
/* ------------------------------ */
.balign 64
.L_OP_IF_GE: /* 0x35 */
/* File: x86/OP_IF_GE.S */
/* File: x86/bincmp.S */
/*
* Generic two-operand compare-and-branch operation. Provide a "revcmp"
* fragment that specifies the *reverse* comparison to perform, e.g.
* for "if-le" you would use "gt".
*
* For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
*/
/* if-cmp vA, vB, +CCCC */
movzx rINSTbl,%ecx # ecx <- A+
andb $0xf,%cl # ecx <- A
GET_VREG_R %eax %ecx # eax <- vA
sarl $4,rINST # rINST<- B
cmpl (rFP,rINST,4),%eax # compare (vA, vB)
movswl 2(rPC),rINST # Get signed branch offset
movl $2,%eax # assume not taken (skip 2 code units)
jl 1f # "lt" is the reverse of if-ge: not taken
testl rINST,rINST
js common_backwardBranch # taken backward: out-of-line path
movl rINST,%eax # taken forward: eax<- branch offset
1:
FETCH_INST_INDEXED %eax
ADVANCE_PC_INDEXED %eax
GOTO_NEXT
/* ------------------------------ */
.balign 64
.L_OP_IF_GT: /* 0x36 */
/* File: x86/OP_IF_GT.S */
/* File: x86/bincmp.S */
/*
* Generic two-operand compare-and-branch operation. Provide a "revcmp"
* fragment that specifies the *reverse* comparison to perform, e.g.
* for "if-le" you would use "gt".
*
* For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
*/
/* if-cmp vA, vB, +CCCC */
movzx rINSTbl,%ecx # ecx <- A+
andb $0xf,%cl # ecx <- A
GET_VREG_R %eax %ecx # eax <- vA
sarl $4,rINST # rINST<- B
cmpl (rFP,rINST,4),%eax # compare (vA, vB)
movswl 2(rPC),rINST # Get signed branch offset
movl $2,%eax # assume not taken (skip 2 code units)
jle 1f # "le" is the reverse of if-gt: not taken
testl rINST,rINST
js common_backwardBranch # taken backward: out-of-line path
movl rINST,%eax # taken forward: eax<- branch offset
1:
FETCH_INST_INDEXED %eax
ADVANCE_PC_INDEXED %eax
GOTO_NEXT
/* ------------------------------ */
.balign 64
.L_OP_IF_LE: /* 0x37 */
/* File: x86/OP_IF_LE.S */
/* File: x86/bincmp.S */
/*
* Generic two-operand compare-and-branch operation. Provide a "revcmp"
* fragment that specifies the *reverse* comparison to perform, e.g.
* for "if-le" you would use "gt".
*
* For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
*/
/* if-cmp vA, vB, +CCCC */
movzx rINSTbl,%ecx # ecx <- A+
andb $0xf,%cl # ecx <- A
GET_VREG_R %eax %ecx # eax <- vA
sarl $4,rINST # rINST<- B
cmpl (rFP,rINST,4),%eax # compare (vA, vB)
movswl 2(rPC),rINST # Get signed branch offset
movl $2,%eax # assume not taken (skip 2 code units)
jg 1f # "gt" is the reverse of if-le: not taken
testl rINST,rINST
js common_backwardBranch # taken backward: out-of-line path
movl rINST,%eax # taken forward: eax<- branch offset
1:
FETCH_INST_INDEXED %eax
ADVANCE_PC_INDEXED %eax
GOTO_NEXT
/* ------------------------------ */
.balign 64
.L_OP_IF_EQZ: /* 0x38 */
/* File: x86/OP_IF_EQZ.S */
/* File: x86/zcmp.S */
/*
* Generic one-operand compare-and-branch operation. Provide a "revcmp"
* fragment that specifies the *reverse* comparison to perform, e.g.
* for "if-le" you would use "gt".
*
* for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
*/
/* if-cmp vAA, +BBBB */
cmpl $0,(rFP,rINST,4) # compare (vAA, 0)
movswl 2(rPC),rINST # fetch signed displacement
movl $2,%eax # assume branch not taken (skip 2 code units)
jne 1f # "ne" is the reverse of if-eqz: not taken
testl rINST,rINST
js common_backwardBranch # taken backward: out-of-line path
movl rINST,%eax # taken forward: eax<- branch offset
1:
FETCH_INST_INDEXED %eax
ADVANCE_PC_INDEXED %eax
GOTO_NEXT
/* ------------------------------ */
.balign 64
.L_OP_IF_NEZ: /* 0x39 */
/* File: x86/OP_IF_NEZ.S */
/* File: x86/zcmp.S */
/*
* Generic one-operand compare-and-branch operation. Provide a "revcmp"
* fragment that specifies the *reverse* comparison to perform, e.g.
* for "if-le" you would use "gt".
*
* for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
*/
/* if-cmp vAA, +BBBB */
cmpl $0,(rFP,rINST,4) # compare (vAA, 0)
movswl 2(rPC),rINST # fetch signed displacement
movl $2,%eax # assume branch not taken (skip 2 code units)
je 1f # "eq" is the reverse of if-nez: not taken
testl rINST,rINST
js common_backwardBranch # taken backward: out-of-line path
movl rINST,%eax # taken forward: eax<- branch offset
1:
FETCH_INST_INDEXED %eax
ADVANCE_PC_INDEXED %eax
GOTO_NEXT
/* ------------------------------ */
.balign 64
.L_OP_IF_LTZ: /* 0x3a */
/* File: x86/OP_IF_LTZ.S */
/* File: x86/zcmp.S */
/*
* Generic one-operand compare-and-branch operation. Provide a "revcmp"
* fragment that specifies the *reverse* comparison to perform, e.g.
* for "if-le" you would use "gt".
*
* for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
*/
/* if-cmp vAA, +BBBB */
cmpl $0,(rFP,rINST,4) # compare (vAA, 0)
movswl 2(rPC),rINST # fetch signed displacement
movl $2,%eax # assume branch not taken (skip 2 code units)
jge 1f # "ge" is the reverse of if-ltz: not taken
testl rINST,rINST
js common_backwardBranch # taken backward: out-of-line path
movl rINST,%eax # taken forward: eax<- branch offset
1:
FETCH_INST_INDEXED %eax
ADVANCE_PC_INDEXED %eax
GOTO_NEXT
/* ------------------------------ */
.balign 64
.L_OP_IF_GEZ: /* 0x3b */
/* File: x86/OP_IF_GEZ.S */
/* File: x86/zcmp.S */
/*
* Generic one-operand compare-and-branch operation. Provide a "revcmp"
* fragment that specifies the *reverse* comparison to perform, e.g.
* for "if-le" you would use "gt".
*
* for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
*/
/* if-cmp vAA, +BBBB */
cmpl $0,(rFP,rINST,4) # compare (vAA, 0)
movswl 2(rPC),rINST # fetch signed displacement
movl $2,%eax # assume branch not taken (skip 2 code units)
jl 1f # "lt" is the reverse of if-gez: not taken
testl rINST,rINST
js common_backwardBranch # taken backward: out-of-line path
movl rINST,%eax # taken forward: eax<- branch offset
1:
FETCH_INST_INDEXED %eax
ADVANCE_PC_INDEXED %eax
GOTO_NEXT
/* ------------------------------ */
.balign 64
.L_OP_IF_GTZ: /* 0x3c */
/* File: x86/OP_IF_GTZ.S */
/* File: x86/zcmp.S */
/*
* Generic one-operand compare-and-branch operation. Provide a "revcmp"
* fragment that specifies the *reverse* comparison to perform, e.g.
* for "if-le" you would use "gt".
*
* for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
*/
/* if-cmp vAA, +BBBB */
cmpl $0,(rFP,rINST,4) # compare (vAA, 0)
movswl 2(rPC),rINST # fetch signed displacement
movl $2,%eax # assume branch not taken (skip 2 code units)
jle 1f # "le" is the reverse of if-gtz: not taken
testl rINST,rINST
js common_backwardBranch # taken backward: out-of-line path
movl rINST,%eax # taken forward: eax<- branch offset
1:
FETCH_INST_INDEXED %eax
ADVANCE_PC_INDEXED %eax
GOTO_NEXT
/* ------------------------------ */
.balign 64
.L_OP_IF_LEZ: /* 0x3d */
/* File: x86/OP_IF_LEZ.S */
/* File: x86/zcmp.S */
/*
* Generic one-operand compare-and-branch operation. Provide a "revcmp"
* fragment that specifies the *reverse* comparison to perform, e.g.
* for "if-le" you would use "gt".
*
* for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
*/
/* if-cmp vAA, +BBBB */
cmpl $0,(rFP,rINST,4) # compare (vAA, 0)
movswl 2(rPC),rINST # fetch signed displacement
movl $2,%eax # assume branch not taken (skip 2 code units)
jg 1f # "gt" is the reverse of if-lez: not taken
testl rINST,rINST
js common_backwardBranch # taken backward: out-of-line path
movl rINST,%eax # taken forward: eax<- branch offset
1:
FETCH_INST_INDEXED %eax
ADVANCE_PC_INDEXED %eax
GOTO_NEXT
/* ------------------------------ */
.balign 64
.L_OP_UNUSED_3E: /* 0x3e */
/* File: x86/OP_UNUSED_3E.S */
/* File: x86/unused.S */
jmp common_abort # opcode not defined by the ISA: abort (handler elsewhere)
/* ------------------------------ */
.balign 64
.L_OP_UNUSED_3F: /* 0x3f */
/* File: x86/OP_UNUSED_3F.S */
/* File: x86/unused.S */
jmp common_abort # opcode not defined by the ISA: abort (handler elsewhere)
/* ------------------------------ */
.balign 64
.L_OP_UNUSED_40: /* 0x40 */
/* File: x86/OP_UNUSED_40.S */
/* File: x86/unused.S */
jmp common_abort # opcode not defined by the ISA: abort (handler elsewhere)
/* ------------------------------ */
.balign 64
.L_OP_UNUSED_41: /* 0x41 */
/* File: x86/OP_UNUSED_41.S */
/* File: x86/unused.S */
jmp common_abort # opcode not defined by the ISA: abort (handler elsewhere)
/* ------------------------------ */
.balign 64
.L_OP_UNUSED_42: /* 0x42 */
/* File: x86/OP_UNUSED_42.S */
/* File: x86/unused.S */
jmp common_abort # opcode not defined by the ISA: abort (handler elsewhere)
/* ------------------------------ */
.balign 64
.L_OP_UNUSED_43: /* 0x43 */
/* File: x86/OP_UNUSED_43.S */
/* File: x86/unused.S */
jmp common_abort # opcode not defined by the ISA: abort (handler elsewhere)
/* ------------------------------ */
.balign 64
.L_OP_AGET: /* 0x44 */
/* File: x86/OP_AGET.S */
/*
* Array get, 32 bits or less. vAA <- vBB[vCC].
*
* for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
*/
/* op vAA, vBB, vCC */
movzbl 2(rPC),%eax # eax<- BB
movzbl 3(rPC),%ecx # ecx<- CC
GET_VREG_R %eax %eax # eax<- vBB (array object)
GET_VREG_R %ecx %ecx # ecx<- vCC (requested index)
testl %eax,%eax # null array object?
je common_errNullObject # bail if so
cmpl offArrayObject_length(%eax),%ecx # unsigned: also catches negative index
jae common_errArrayIndex # index >= length, bail. Expects
# arrayObj in eax
# index in ecx
movl offArrayObject_contents(%eax,%ecx,4),%eax # 4-byte element load
.LOP_AGET_finish:
FETCH_INST_OPCODE 2 %edx
SET_VREG %eax rINST # vAA<- element
ADVANCE_PC 2
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_AGET_WIDE: /* 0x45 */
/* File: x86/OP_AGET_WIDE.S */
/*
* Array get, 64 bits. vAA <- vBB[vCC].
*
* The 64-bit load/store lives at .LOP_AGET_WIDE_finish (elsewhere).
*/
/* op vAA, vBB, vCC */
movzbl 2(rPC),%eax # eax<- BB
movzbl 3(rPC),%ecx # ecx<- CC
GET_VREG_R %eax %eax # eax<- vBB (array object)
GET_VREG_R %ecx %ecx # ecx<- vCC (requested index)
testl %eax,%eax # null array object?
je common_errNullObject # bail if so
cmpl offArrayObject_length(%eax),%ecx # unsigned: also catches negative index
jb .LOP_AGET_WIDE_finish # index < length, OK
jmp common_errArrayIndex # index >= length, bail. Expects
# arrayObj in eax
# index in ecx
/* ------------------------------ */
.balign 64
.L_OP_AGET_OBJECT: /* 0x46 */
/* File: x86/OP_AGET_OBJECT.S */
/* File: x86/OP_AGET.S */
/*
* Array get, 32 bits or less. vAA <- vBB[vCC].
*
* for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
*/
/* op vAA, vBB, vCC */
movzbl 2(rPC),%eax # eax<- BB
movzbl 3(rPC),%ecx # ecx<- CC
GET_VREG_R %eax %eax # eax<- vBB (array object)
GET_VREG_R %ecx %ecx # ecx<- vCC (requested index)
testl %eax,%eax # null array object?
je common_errNullObject # bail if so
cmpl offArrayObject_length(%eax),%ecx # unsigned: also catches negative index
jae common_errArrayIndex # index >= length, bail. Expects
# arrayObj in eax
# index in ecx
movl offArrayObject_contents(%eax,%ecx,4),%eax # 4-byte reference load
.LOP_AGET_OBJECT_finish:
FETCH_INST_OPCODE 2 %edx
SET_VREG %eax rINST # vAA<- element
ADVANCE_PC 2
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_AGET_BOOLEAN: /* 0x47 */
/* File: x86/OP_AGET_BOOLEAN.S */
/* File: x86/OP_AGET.S */
/*
* Array get, 32 bits or less. vAA <- vBB[vCC].
*
* for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
*/
/* op vAA, vBB, vCC */
movzbl 2(rPC),%eax # eax<- BB
movzbl 3(rPC),%ecx # ecx<- CC
GET_VREG_R %eax %eax # eax<- vBB (array object)
GET_VREG_R %ecx %ecx # ecx<- vCC (requested index)
testl %eax,%eax # null array object?
je common_errNullObject # bail if so
cmpl offArrayObject_length(%eax),%ecx # unsigned: also catches negative index
jae common_errArrayIndex # index >= length, bail. Expects
# arrayObj in eax
# index in ecx
movzbl offArrayObject_contents(%eax,%ecx,1),%eax # zero-extended byte load
.LOP_AGET_BOOLEAN_finish:
FETCH_INST_OPCODE 2 %edx
SET_VREG %eax rINST # vAA<- element
ADVANCE_PC 2
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_AGET_BYTE: /* 0x48 */
/* File: x86/OP_AGET_BYTE.S */
/* File: x86/OP_AGET.S */
/*
* Array get, 32 bits or less. vAA <- vBB[vCC].
*
* for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
*/
/* op vAA, vBB, vCC */
movzbl 2(rPC),%eax # eax<- BB
movzbl 3(rPC),%ecx # ecx<- CC
GET_VREG_R %eax %eax # eax<- vBB (array object)
GET_VREG_R %ecx %ecx # ecx<- vCC (requested index)
testl %eax,%eax # null array object?
je common_errNullObject # bail if so
cmpl offArrayObject_length(%eax),%ecx # unsigned: also catches negative index
jae common_errArrayIndex # index >= length, bail. Expects
# arrayObj in eax
# index in ecx
movsbl offArrayObject_contents(%eax,%ecx,1),%eax # sign-extended byte load
.LOP_AGET_BYTE_finish:
FETCH_INST_OPCODE 2 %edx
SET_VREG %eax rINST # vAA<- element
ADVANCE_PC 2
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_AGET_CHAR: /* 0x49 */
/* File: x86/OP_AGET_CHAR.S */
/* File: x86/OP_AGET.S */
/*
* Array get, 32 bits or less. vAA <- vBB[vCC].
*
* for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
*/
/* op vAA, vBB, vCC */
movzbl 2(rPC),%eax # eax<- BB
movzbl 3(rPC),%ecx # ecx<- CC
GET_VREG_R %eax %eax # eax<- vBB (array object)
GET_VREG_R %ecx %ecx # ecx<- vCC (requested index)
testl %eax,%eax # null array object?
je common_errNullObject # bail if so
cmpl offArrayObject_length(%eax),%ecx # unsigned: also catches negative index
jae common_errArrayIndex # index >= length, bail. Expects
# arrayObj in eax
# index in ecx
movzwl offArrayObject_contents(%eax,%ecx,2),%eax # zero-extended 16-bit load
.LOP_AGET_CHAR_finish:
FETCH_INST_OPCODE 2 %edx
SET_VREG %eax rINST # vAA<- element
ADVANCE_PC 2
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_AGET_SHORT: /* 0x4a */
/* File: x86/OP_AGET_SHORT.S */
/* File: x86/OP_AGET.S */
/*
* Array get, 32 bits or less. vAA <- vBB[vCC].
*
* for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
*/
/* op vAA, vBB, vCC */
movzbl 2(rPC),%eax # eax<- BB
movzbl 3(rPC),%ecx # ecx<- CC
GET_VREG_R %eax %eax # eax<- vBB (array object)
GET_VREG_R %ecx %ecx # ecx<- vCC (requested index)
testl %eax,%eax # null array object?
je common_errNullObject # bail if so
cmpl offArrayObject_length(%eax),%ecx # unsigned: also catches negative index
jae common_errArrayIndex # index >= length, bail. Expects
# arrayObj in eax
# index in ecx
movswl offArrayObject_contents(%eax,%ecx,2),%eax # sign-extended 16-bit load
.LOP_AGET_SHORT_finish:
FETCH_INST_OPCODE 2 %edx
SET_VREG %eax rINST # vAA<- element
ADVANCE_PC 2
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_APUT: /* 0x4b */
/* File: x86/OP_APUT.S */
/*
* Array put, 32 bits or less. vBB[vCC] <- vAA
*
* for: aput, aput-object, aput-boolean, aput-byte, aput-char, aput-short
*/
/* op vAA, vBB, vCC */
movzbl 2(rPC),%eax # eax<- BB
movzbl 3(rPC),%ecx # ecx<- CC
GET_VREG_R %eax %eax # eax<- vBB (array object)
GET_VREG_R %ecx %ecx # ecx<- vCC (requested index)
testl %eax,%eax # null array object?
je common_errNullObject # bail if so
cmpl offArrayObject_length(%eax),%ecx # unsigned: also catches negative index
jae common_errArrayIndex # index >= length, bail. Expects:
# arrayObj in eax
# index in ecx
leal offArrayObject_contents(%eax,%ecx,4),%eax # eax<- &vBB[vCC]
.LOP_APUT_finish:
GET_VREG_R %ecx rINST # ecx<- vAA
FETCH_INST_OPCODE 2 %edx
movl %ecx,(%eax) # 4-byte element store
ADVANCE_PC 2
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_APUT_WIDE: /* 0x4c */
/* File: x86/OP_APUT_WIDE.S */
/*
* Array put, 64 bits. vBB[vCC]<-vAA.
*
* The 64-bit store lives at .LOP_APUT_WIDE_finish (elsewhere).
*/
/* op vAA, vBB, vCC */
movzbl 2(rPC),%eax # eax<- BB
movzbl 3(rPC),%ecx # ecx<- CC
GET_VREG_R %eax %eax # eax<- vBB (array object)
GET_VREG_R %ecx %ecx # ecx<- vCC (requested index)
testl %eax,%eax # null array object?
je common_errNullObject # bail if so
cmpl offArrayObject_length(%eax),%ecx # unsigned: also catches negative index
jb .LOP_APUT_WIDE_finish # index < length, OK
jmp common_errArrayIndex # index >= length, bail. Expects:
# arrayObj in eax
# index in ecx
/* ------------------------------ */
.balign 64
.L_OP_APUT_OBJECT: /* 0x4d */
/* File: x86/OP_APUT_OBJECT.S */
/*
* Array put, 32 bits or less. vBB[vCC] <- vAA
*
* for: aput, aput-object, aput-boolean, aput-byte, aput-char, aput-short
*
* Reference stores need extra checks; those live at
* .LOP_APUT_OBJECT_continue (elsewhere), which is why vAA is
* preloaded into rINST here.
*/
/* op vAA, vBB, vCC */
movzbl 2(rPC),%eax # eax<- BB
movzbl 3(rPC),%ecx # ecx<- CC
GET_VREG_R %eax %eax # eax<- vBB (array object)
GET_VREG_R %ecx %ecx # ecx<- vCC (requested index)
GET_VREG_R rINST rINST # rINST<- vAA (object to store)
testl %eax,%eax # null array object?
je common_errNullObject # bail if so
cmpl offArrayObject_length(%eax),%ecx # unsigned: also catches negative index
jb .LOP_APUT_OBJECT_continue
jmp common_errArrayIndex # index >= length, bail. Expects
# arrayObj in eax
# index in ecx
/* ------------------------------ */
.balign 64
.L_OP_APUT_BOOLEAN: /* 0x4e */
/* File: x86/OP_APUT_BOOLEAN.S */
/* File: x86/OP_APUT.S */
/*
* Array put, 32 bits or less. vBB[vCC] <- vAA
*
* for: aput, aput-object, aput-boolean, aput-byte, aput-char, aput-short
*/
/* op vAA, vBB, vCC */
movzbl 2(rPC),%eax # eax<- BB
movzbl 3(rPC),%ecx # ecx<- CC
GET_VREG_R %eax %eax # eax<- vBB (array object)
GET_VREG_R %ecx %ecx # ecx<- vCC (requested index)
testl %eax,%eax # null array object?
je common_errNullObject # bail if so
cmpl offArrayObject_length(%eax),%ecx # unsigned: also catches negative index
jae common_errArrayIndex # index >= length, bail. Expects:
# arrayObj in eax
# index in ecx
leal offArrayObject_contents(%eax,%ecx,1),%eax # eax<- &vBB[vCC]
.LOP_APUT_BOOLEAN_finish:
GET_VREG_R %ecx rINST # ecx<- vAA
FETCH_INST_OPCODE 2 %edx
movb %cl,(%eax) # 1-byte element store
ADVANCE_PC 2
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_APUT_BYTE: /* 0x4f */
/* File: x86/OP_APUT_BYTE.S */
/* File: x86/OP_APUT.S */
/*
* Array put, 32 bits or less. vBB[vCC] <- vAA
*
* for: aput, aput-object, aput-boolean, aput-byte, aput-char, aput-short
*/
/* op vAA, vBB, vCC */
movzbl 2(rPC),%eax # eax<- BB
movzbl 3(rPC),%ecx # ecx<- CC
GET_VREG_R %eax %eax # eax<- vBB (array object)
GET_VREG_R %ecx %ecx # ecx<- vCC (requested index)
testl %eax,%eax # null array object?
je common_errNullObject # bail if so
cmpl offArrayObject_length(%eax),%ecx # unsigned: also catches negative index
jae common_errArrayIndex # index >= length, bail. Expects:
# arrayObj in eax
# index in ecx
leal offArrayObject_contents(%eax,%ecx,1),%eax # eax<- &vBB[vCC]
.LOP_APUT_BYTE_finish:
GET_VREG_R %ecx rINST # ecx<- vAA
FETCH_INST_OPCODE 2 %edx
movb %cl,(%eax) # 1-byte element store
ADVANCE_PC 2
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_APUT_CHAR: /* 0x50 */
/* File: x86/OP_APUT_CHAR.S */
/* File: x86/OP_APUT.S */
/*
* Array put, 32 bits or less. vBB[vCC] <- vAA
*
* for: aput, aput-object, aput-boolean, aput-byte, aput-char, aput-short
*/
/* op vAA, vBB, vCC */
movzbl 2(rPC),%eax # eax<- BB
movzbl 3(rPC),%ecx # ecx<- CC
GET_VREG_R %eax %eax # eax<- vBB (array object)
GET_VREG_R %ecx %ecx # ecx<- vCC (requested index)
testl %eax,%eax # null array object?
je common_errNullObject # bail if so
cmpl offArrayObject_length(%eax),%ecx # unsigned: also catches negative index
jae common_errArrayIndex # index >= length, bail. Expects:
# arrayObj in eax
# index in ecx
leal offArrayObject_contents(%eax,%ecx,2),%eax # eax<- &vBB[vCC]
.LOP_APUT_CHAR_finish:
GET_VREG_R %ecx rINST # ecx<- vAA
FETCH_INST_OPCODE 2 %edx
movw %cx,(%eax) # 2-byte element store
ADVANCE_PC 2
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_APUT_SHORT: /* 0x51 */
/* File: x86/OP_APUT_SHORT.S */
/* File: x86/OP_APUT.S */
/*
* Array put, 32 bits or less. vBB[vCC] <- vAA
*
* for: aput, aput-object, aput-boolean, aput-byte, aput-char, aput-short
*/
/* op vAA, vBB, vCC */
movzbl 2(rPC),%eax # eax<- BB
movzbl 3(rPC),%ecx # ecx<- CC
GET_VREG_R %eax %eax # eax<- vBB (array object)
GET_VREG_R %ecx %ecx # ecx<- vCC (requested index)
testl %eax,%eax # null array object?
je common_errNullObject # bail if so
cmpl offArrayObject_length(%eax),%ecx # unsigned: also catches negative index
jae common_errArrayIndex # index >= length, bail. Expects:
# arrayObj in eax
# index in ecx
leal offArrayObject_contents(%eax,%ecx,2),%eax # eax<- &vBB[vCC]
.LOP_APUT_SHORT_finish:
GET_VREG_R %ecx rINST # ecx<- vAA
FETCH_INST_OPCODE 2 %edx
movw %cx,(%eax) # 2-byte element store
ADVANCE_PC 2
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64
.L_OP_IGET: /* 0x52 */
/* File: x86/OP_IGET.S */
/*
* General 32-bit instance field get.
*
* Fast path jumps to .LOP_IGET_finish with the resolved InstField* in
* eax; otherwise field ref CCCC is staged and we tail into the resolve
* stub (both defined elsewhere).
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
/* op vA, vB, field@CCCC */
movl rGLUE,%ecx
movzwl 2(rPC),%edx # edx<- 0000CCCC
movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
movzbl rINSTbl,%ecx # ecx<- BA
sarl $4,%ecx # ecx<- B
movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
andb $0xf,rINSTbl # rINST<- A
GET_VREG_R %ecx %ecx # ecx<- fp[B], the object ptr
movl (%eax,%edx,4),%eax # resolved entry
testl %eax,%eax # is resolved entry null?
jne .LOP_IGET_finish # no, already resolved
movl %edx,OUT_ARG1(%esp) # needed by dvmResolveInstField
movl rGLUE,%edx
jmp .LOP_IGET_resolve
/* ------------------------------ */
.balign 64
.L_OP_IGET_WIDE: /* 0x53 */
/* File: x86/OP_IGET_WIDE.S */
/*
* 64-bit instance field get.
*
* Same shape as OP_IGET: fast path when the field is already resolved,
* else stage CCCC and jump to the resolve stub (elsewhere).
*/
/* op vA, vB, field@CCCC */
movl rGLUE,%ecx
movzwl 2(rPC),%edx # edx<- 0000CCCC
movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
movzbl rINSTbl,%ecx # ecx<- BA
sarl $4,%ecx # ecx<- B
movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
andb $0xf,rINSTbl # rINST<- A
GET_VREG_R %ecx %ecx # ecx<- fp[B], the object ptr
movl (%eax,%edx,4),%eax # resolved entry
testl %eax,%eax # is resolved entry null?
jne .LOP_IGET_WIDE_finish # no, already resolved
movl %edx,OUT_ARG1(%esp) # for dvmResolveInstField
movl rGLUE,%edx
jmp .LOP_IGET_WIDE_resolve
/* ------------------------------ */
.balign 64
.L_OP_IGET_OBJECT: /* 0x54 */
/* File: x86/OP_IGET_OBJECT.S */
/* File: x86/OP_IGET.S */
/*
* General 32-bit instance field get.
* (Same generated template as OP_IGET; labels differ.)
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
/* op vA, vB, field@CCCC */
movl rGLUE,%ecx
movzwl 2(rPC),%edx # edx<- 0000CCCC
movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
movzbl rINSTbl,%ecx # ecx<- BA
sarl $4,%ecx # ecx<- B
movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
andb $0xf,rINSTbl # rINST<- A
GET_VREG_R %ecx %ecx # ecx<- fp[B], the object ptr
movl (%eax,%edx,4),%eax # resolved entry
testl %eax,%eax # is resolved entry null?
jne .LOP_IGET_OBJECT_finish # no, already resolved
movl %edx,OUT_ARG1(%esp) # needed by dvmResolveInstField
movl rGLUE,%edx
jmp .LOP_IGET_OBJECT_resolve
/* ------------------------------ */
.balign 64
.L_OP_IGET_BOOLEAN: /* 0x55 */
/* File: x86/OP_IGET_BOOLEAN.S */
/* File: x86/OP_IGET.S */
/*
* General 32-bit instance field get.
* (Same generated template as OP_IGET; labels differ.)
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
/* op vA, vB, field@CCCC */
movl rGLUE,%ecx
movzwl 2(rPC),%edx # edx<- 0000CCCC
movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
movzbl rINSTbl,%ecx # ecx<- BA
sarl $4,%ecx # ecx<- B
movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
andb $0xf,rINSTbl # rINST<- A
GET_VREG_R %ecx %ecx # ecx<- fp[B], the object ptr
movl (%eax,%edx,4),%eax # resolved entry
testl %eax,%eax # is resolved entry null?
jne .LOP_IGET_BOOLEAN_finish # no, already resolved
movl %edx,OUT_ARG1(%esp) # needed by dvmResolveInstField
movl rGLUE,%edx
jmp .LOP_IGET_BOOLEAN_resolve
/* ------------------------------ */
.balign 64
.L_OP_IGET_BYTE: /* 0x56 */
/* File: x86/OP_IGET_BYTE.S */
/* File: x86/OP_IGET.S */
/*
* General 32-bit instance field get.
* (Same generated template as OP_IGET; labels differ.)
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
/* op vA, vB, field@CCCC */
movl rGLUE,%ecx
movzwl 2(rPC),%edx # edx<- 0000CCCC
movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
movzbl rINSTbl,%ecx # ecx<- BA
sarl $4,%ecx # ecx<- B
movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
andb $0xf,rINSTbl # rINST<- A
GET_VREG_R %ecx %ecx # ecx<- fp[B], the object ptr
movl (%eax,%edx,4),%eax # resolved entry
testl %eax,%eax # is resolved entry null?
jne .LOP_IGET_BYTE_finish # no, already resolved
movl %edx,OUT_ARG1(%esp) # needed by dvmResolveInstField
movl rGLUE,%edx
jmp .LOP_IGET_BYTE_resolve
/* ------------------------------ */
.balign 64
.L_OP_IGET_CHAR: /* 0x57 */
/* File: x86/OP_IGET_CHAR.S */
/* File: x86/OP_IGET.S */
/*
* General 32-bit instance field get.
* (Same generated template as OP_IGET; labels differ.)
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
/* op vA, vB, field@CCCC */
movl rGLUE,%ecx
movzwl 2(rPC),%edx # edx<- 0000CCCC
movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
movzbl rINSTbl,%ecx # ecx<- BA
sarl $4,%ecx # ecx<- B
movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
andb $0xf,rINSTbl # rINST<- A
GET_VREG_R %ecx %ecx # ecx<- fp[B], the object ptr
movl (%eax,%edx,4),%eax # resolved entry
testl %eax,%eax # is resolved entry null?
jne .LOP_IGET_CHAR_finish # no, already resolved
movl %edx,OUT_ARG1(%esp) # needed by dvmResolveInstField
movl rGLUE,%edx
jmp .LOP_IGET_CHAR_resolve
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_IGET_SHORT: /* 0x58 */
/* File: x86/OP_IGET_SHORT.S */
/* File: x86/OP_IGET.S */
/*
 * General 32-bit instance field get.
 *
 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
 *
 * Fast path (field already resolved) continues out of line at
 * .LOP_IGET_SHORT_finish; otherwise fall through to set up resolver args.
 */
/* op vA, vB, field@CCCC */
movl rGLUE,%ecx
movzwl 2(rPC),%edx # edx<- 0000CCCC, the field ref
movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
movzbl rINSTbl,%ecx # ecx<- BA
sarl $4,%ecx # ecx<- B
movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
andb $0xf,rINSTbl # rINST<- A
GET_VREG_R %ecx %ecx # ecx<- fp[B], the object ptr
movl (%eax,%edx,4),%eax # resolved entry
testl %eax,%eax # is resolved entry null?
jne .LOP_IGET_SHORT_finish # no, already resolved
movl %edx,OUT_ARG1(%esp) # needed by dvmResolveInstField
movl rGLUE,%edx # edx<- glue, for the resolve path
jmp .LOP_IGET_SHORT_resolve # resolve field, then do the get
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_IPUT: /* 0x59 */
/* File: x86/OP_IPUT.S */
/*
 * General 32-bit instance field put.
 *
 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
 *
 * Fast path (field already resolved) continues out of line at
 * .LOP_IPUT_finish; otherwise fall through to set up resolver args.
 */
/* op vA, vB, field@CCCC */
movl rGLUE,%ecx
movzwl 2(rPC),%edx # %edx<- 0000CCCC, the field ref
movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
movzbl rINSTbl,%ecx # ecx<- BA
sarl $4,%ecx # ecx<- B
movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
andb $0xf,rINSTbl # rINST<- A
GET_VREG_R %ecx %ecx # ecx<- fp[B], the object ptr
movl (%eax,%edx,4),%eax # resolved entry
testl %eax,%eax # is resolved entry null?
jne .LOP_IPUT_finish # no, already resolved
movl %edx,OUT_ARG1(%esp) # arg1<- field ref, for dvmResolveInstField
movl rGLUE,%edx # edx<- glue, for the resolve path
jmp .LOP_IPUT_resolve # resolve field, then do the put
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_IPUT_WIDE: /* 0x5a */
/* File: x86/OP_IPUT_WIDE.S */
/*
 * 64-bit instance field put (iput-wide).
 *
 * Fast path (field already resolved) continues out of line at
 * .LOP_IPUT_WIDE_finish; otherwise fall through to set up resolver args.
 */
/* op vA, vB, field@CCCC */
movl rGLUE,%ecx
movzwl 2(rPC),%edx # edx<- 0000CCCC, the field ref
movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
movzbl rINSTbl,%ecx # ecx<- BA
sarl $4,%ecx # ecx<- B
movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
andb $0xf,rINSTbl # rINST<- A
GET_VREG_R %ecx %ecx # ecx<- fp[B], the object ptr
movl (%eax,%edx,4),%eax # resolved entry
testl %eax,%eax # is resolved entry null?
jne .LOP_IPUT_WIDE_finish # no, already resolved
movl %edx,OUT_ARG1(%esp) # arg1<- field ref, for dvmResolveInstField
movl rGLUE,%edx # edx<- glue, for the resolve path
jmp .LOP_IPUT_WIDE_resolve # resolve field, then do the put
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_IPUT_OBJECT: /* 0x5b */
/* File: x86/OP_IPUT_OBJECT.S */
/*
 * Object field put.
 *
 * for: iput-object
 *
 * Fast path (field already resolved) continues out of line at
 * .LOP_IPUT_OBJECT_finish; otherwise fall through to set up resolver args.
 */
/* op vA, vB, field@CCCC */
movl rGLUE,%ecx
movzwl 2(rPC),%edx # edx<- 0000CCCC, the field ref
movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
movzbl rINSTbl,%ecx # ecx<- BA
sarl $4,%ecx # ecx<- B
movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
andb $0xf,rINSTbl # rINST<- A
GET_VREG_R %ecx %ecx # ecx<- fp[B], the object ptr
movl (%eax,%edx,4),%eax # resolved entry
testl %eax,%eax # is resolved entry null?
jne .LOP_IPUT_OBJECT_finish # no, already resolved
movl %edx,OUT_ARG1(%esp) # arg1<- field ref, for dvmResolveInstField
movl rGLUE,%edx # edx<- glue, for the resolve path
jmp .LOP_IPUT_OBJECT_resolve # resolve field, then do the put
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_IPUT_BOOLEAN: /* 0x5c */
/* File: x86/OP_IPUT_BOOLEAN.S */
/* File: x86/OP_IPUT.S */
/*
 * General 32-bit instance field put.
 *
 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
 *
 * Fast path (field already resolved) continues out of line at
 * .LOP_IPUT_BOOLEAN_finish; otherwise fall through to set up resolver args.
 */
/* op vA, vB, field@CCCC */
movl rGLUE,%ecx
movzwl 2(rPC),%edx # %edx<- 0000CCCC, the field ref
movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
movzbl rINSTbl,%ecx # ecx<- BA
sarl $4,%ecx # ecx<- B
movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
andb $0xf,rINSTbl # rINST<- A
GET_VREG_R %ecx %ecx # ecx<- fp[B], the object ptr
movl (%eax,%edx,4),%eax # resolved entry
testl %eax,%eax # is resolved entry null?
jne .LOP_IPUT_BOOLEAN_finish # no, already resolved
movl %edx,OUT_ARG1(%esp) # arg1<- field ref, for dvmResolveInstField
movl rGLUE,%edx # edx<- glue, for the resolve path
jmp .LOP_IPUT_BOOLEAN_resolve # resolve field, then do the put
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_IPUT_BYTE: /* 0x5d */
/* File: x86/OP_IPUT_BYTE.S */
/* File: x86/OP_IPUT.S */
/*
 * General 32-bit instance field put.
 *
 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
 *
 * Fast path (field already resolved) continues out of line at
 * .LOP_IPUT_BYTE_finish; otherwise fall through to set up resolver args.
 */
/* op vA, vB, field@CCCC */
movl rGLUE,%ecx
movzwl 2(rPC),%edx # %edx<- 0000CCCC, the field ref
movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
movzbl rINSTbl,%ecx # ecx<- BA
sarl $4,%ecx # ecx<- B
movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
andb $0xf,rINSTbl # rINST<- A
GET_VREG_R %ecx %ecx # ecx<- fp[B], the object ptr
movl (%eax,%edx,4),%eax # resolved entry
testl %eax,%eax # is resolved entry null?
jne .LOP_IPUT_BYTE_finish # no, already resolved
movl %edx,OUT_ARG1(%esp) # arg1<- field ref, for dvmResolveInstField
movl rGLUE,%edx # edx<- glue, for the resolve path
jmp .LOP_IPUT_BYTE_resolve # resolve field, then do the put
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_IPUT_CHAR: /* 0x5e */
/* File: x86/OP_IPUT_CHAR.S */
/* File: x86/OP_IPUT.S */
/*
 * General 32-bit instance field put.
 *
 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
 *
 * Fast path (field already resolved) continues out of line at
 * .LOP_IPUT_CHAR_finish; otherwise fall through to set up resolver args.
 */
/* op vA, vB, field@CCCC */
movl rGLUE,%ecx
movzwl 2(rPC),%edx # %edx<- 0000CCCC, the field ref
movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
movzbl rINSTbl,%ecx # ecx<- BA
sarl $4,%ecx # ecx<- B
movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
andb $0xf,rINSTbl # rINST<- A
GET_VREG_R %ecx %ecx # ecx<- fp[B], the object ptr
movl (%eax,%edx,4),%eax # resolved entry
testl %eax,%eax # is resolved entry null?
jne .LOP_IPUT_CHAR_finish # no, already resolved
movl %edx,OUT_ARG1(%esp) # arg1<- field ref, for dvmResolveInstField
movl rGLUE,%edx # edx<- glue, for the resolve path
jmp .LOP_IPUT_CHAR_resolve # resolve field, then do the put
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_IPUT_SHORT: /* 0x5f */
/* File: x86/OP_IPUT_SHORT.S */
/* File: x86/OP_IPUT.S */
/*
 * General 32-bit instance field put.
 *
 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
 *
 * Fast path (field already resolved) continues out of line at
 * .LOP_IPUT_SHORT_finish; otherwise fall through to set up resolver args.
 */
/* op vA, vB, field@CCCC */
movl rGLUE,%ecx
movzwl 2(rPC),%edx # %edx<- 0000CCCC, the field ref
movl offGlue_methodClassDex(%ecx),%eax # eax<- DvmDex
movzbl rINSTbl,%ecx # ecx<- BA
sarl $4,%ecx # ecx<- B
movl offDvmDex_pResFields(%eax),%eax # eax<- pDvmDex->pResFields
andb $0xf,rINSTbl # rINST<- A
GET_VREG_R %ecx %ecx # ecx<- fp[B], the object ptr
movl (%eax,%edx,4),%eax # resolved entry
testl %eax,%eax # is resolved entry null?
jne .LOP_IPUT_SHORT_finish # no, already resolved
movl %edx,OUT_ARG1(%esp) # arg1<- field ref, for dvmResolveInstField
movl rGLUE,%edx # edx<- glue, for the resolve path
jmp .LOP_IPUT_SHORT_resolve # resolve field, then do the put
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_SGET: /* 0x60 */
/* File: x86/OP_SGET.S */
/*
 * General 32-bit SGET handler.  Loads the 32-bit static field value
 * into vAA, then advances past this 2-unit instruction.
 *
 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
 */
/* op vAA, field@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- field ref BBBB
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
testl %eax,%eax # resolved entry null?
je .LOP_SGET_resolve # if not, make it so
.LOP_SGET_finish: # field ptr in eax; resolve path re-enters here
movl offStaticField_value(%eax),%eax # eax<- field value
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
SET_VREG %eax rINST # vAA<- value
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_SGET_WIDE: /* 0x61 */
/* File: x86/OP_SGET_WIDE.S */
/*
 * 64-bit SGET handler (sget-wide).  Loads both 32-bit halves of the
 * static field into the register pair vAA/vAA+1.
 */
/* sget-wide vAA, field@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- field ref BBBB
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
testl %eax,%eax # resolved entry null?
je .LOP_SGET_WIDE_resolve # if not, make it so
.LOP_SGET_WIDE_finish: # field ptr in eax; resolve path re-enters here
movl offStaticField_value(%eax),%ecx # ecx<- lsw
movl 4+offStaticField_value(%eax),%eax # eax<- msw
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
SET_VREG_WORD %ecx rINST 0 # v[AA+0]<- lsw
SET_VREG_WORD %eax rINST 1 # v[AA+1]<- msw
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_SGET_OBJECT: /* 0x62 */
/* File: x86/OP_SGET_OBJECT.S */
/* File: x86/OP_SGET.S */
/*
 * General 32-bit SGET handler.  Loads the 32-bit static field value
 * into vAA, then advances past this 2-unit instruction.
 *
 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
 */
/* op vAA, field@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- field ref BBBB
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
testl %eax,%eax # resolved entry null?
je .LOP_SGET_OBJECT_resolve # if not, make it so
.LOP_SGET_OBJECT_finish: # field ptr in eax; resolve path re-enters here
movl offStaticField_value(%eax),%eax # eax<- field value (object ref)
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
SET_VREG %eax rINST # vAA<- value
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_SGET_BOOLEAN: /* 0x63 */
/* File: x86/OP_SGET_BOOLEAN.S */
/* File: x86/OP_SGET.S */
/*
 * General 32-bit SGET handler.  Loads the 32-bit static field value
 * into vAA, then advances past this 2-unit instruction.
 *
 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
 */
/* op vAA, field@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- field ref BBBB
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
testl %eax,%eax # resolved entry null?
je .LOP_SGET_BOOLEAN_resolve # if not, make it so
.LOP_SGET_BOOLEAN_finish: # field ptr in eax; resolve path re-enters here
movl offStaticField_value(%eax),%eax # eax<- field value
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
SET_VREG %eax rINST # vAA<- value
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_SGET_BYTE: /* 0x64 */
/* File: x86/OP_SGET_BYTE.S */
/* File: x86/OP_SGET.S */
/*
 * General 32-bit SGET handler.  Loads the 32-bit static field value
 * into vAA, then advances past this 2-unit instruction.
 *
 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
 */
/* op vAA, field@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- field ref BBBB
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
testl %eax,%eax # resolved entry null?
je .LOP_SGET_BYTE_resolve # if not, make it so
.LOP_SGET_BYTE_finish: # field ptr in eax; resolve path re-enters here
movl offStaticField_value(%eax),%eax # eax<- field value
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
SET_VREG %eax rINST # vAA<- value
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_SGET_CHAR: /* 0x65 */
/* File: x86/OP_SGET_CHAR.S */
/* File: x86/OP_SGET.S */
/*
 * General 32-bit SGET handler.  Loads the 32-bit static field value
 * into vAA, then advances past this 2-unit instruction.
 *
 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
 */
/* op vAA, field@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- field ref BBBB
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
testl %eax,%eax # resolved entry null?
je .LOP_SGET_CHAR_resolve # if not, make it so
.LOP_SGET_CHAR_finish: # field ptr in eax; resolve path re-enters here
movl offStaticField_value(%eax),%eax # eax<- field value
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
SET_VREG %eax rINST # vAA<- value
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_SGET_SHORT: /* 0x66 */
/* File: x86/OP_SGET_SHORT.S */
/* File: x86/OP_SGET.S */
/*
 * General 32-bit SGET handler.  Loads the 32-bit static field value
 * into vAA, then advances past this 2-unit instruction.
 *
 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
 */
/* op vAA, field@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- field ref BBBB
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
testl %eax,%eax # resolved entry null?
je .LOP_SGET_SHORT_resolve # if not, make it so
.LOP_SGET_SHORT_finish: # field ptr in eax; resolve path re-enters here
movl offStaticField_value(%eax),%eax # eax<- field value
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
SET_VREG %eax rINST # vAA<- value
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_SPUT: /* 0x67 */
/* File: x86/OP_SPUT.S */
/*
 * General 32-bit SPUT handler.  Stores vAA into the static field,
 * then advances past this 2-unit instruction.
 *
 * for: sput, sput-boolean, sput-byte, sput-char, sput-short
 */
/* op vAA, field@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- field ref BBBB
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
testl %eax,%eax # resolved entry null?
je .LOP_SPUT_resolve # if not, make it so
.LOP_SPUT_finish: # field ptr in eax; resolve path re-enters here
GET_VREG_R %ecx rINST # ecx<- vAA
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
movl %ecx,offStaticField_value(%eax) # field value<- vAA
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_SPUT_WIDE: /* 0x68 */
/* File: x86/OP_SPUT_WIDE.S */
/*
 * 64-bit SPUT handler.  Stores the register pair vAA/vAA+1 into the
 * two 32-bit halves of the static field.
 *
 * for: sput-wide
 */
/* op vAA, field@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- field ref BBBB
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
testl %eax,%eax # resolved entry null?
je .LOP_SPUT_WIDE_resolve # if not, make it so
.LOP_SPUT_WIDE_finish: # field ptr in eax; resolve path re-enters here
GET_VREG_WORD %ecx rINST 0 # ecx<- lsw
GET_VREG_WORD rINST rINST 1 # rINST<- msw (rINST no longer needed as AA)
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
movl %ecx,offStaticField_value(%eax) # store lsw
movl rINST,4+offStaticField_value(%eax) # store msw
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_SPUT_OBJECT: /* 0x69 */
/* File: x86/OP_SPUT_OBJECT.S */
/*
 * SPUT object handler (sput-object).  The store itself happens in the
 * out-of-line continuation at .LOP_SPUT_OBJECT_continue.
 */
/* op vAA, field@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- field ref BBBB
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
movl (%ecx,%eax,4),%eax # eax<- resolved StaticField
testl %eax,%eax # resolved entry null?
je .LOP_SPUT_OBJECT_resolve # if not, make it so
.LOP_SPUT_OBJECT_finish: # field ptr in eax; resolve path re-enters here
movzbl rINSTbl,%ecx # ecx<- AA
GET_VREG_R %ecx %ecx # ecx<- vAA (object ref to store)
jmp .LOP_SPUT_OBJECT_continue # out-of-line: do the store
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_SPUT_BOOLEAN: /* 0x6a */
/* File: x86/OP_SPUT_BOOLEAN.S */
/* File: x86/OP_SPUT.S */
/*
 * General 32-bit SPUT handler.  Stores vAA into the static field,
 * then advances past this 2-unit instruction.
 *
 * for: sput, sput-boolean, sput-byte, sput-char, sput-short
 */
/* op vAA, field@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- field ref BBBB
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
testl %eax,%eax # resolved entry null?
je .LOP_SPUT_BOOLEAN_resolve # if not, make it so
.LOP_SPUT_BOOLEAN_finish: # field ptr in eax; resolve path re-enters here
GET_VREG_R %ecx rINST # ecx<- vAA
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
movl %ecx,offStaticField_value(%eax) # field value<- vAA
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_SPUT_BYTE: /* 0x6b */
/* File: x86/OP_SPUT_BYTE.S */
/* File: x86/OP_SPUT.S */
/*
 * General 32-bit SPUT handler.  Stores vAA into the static field,
 * then advances past this 2-unit instruction.
 *
 * for: sput, sput-boolean, sput-byte, sput-char, sput-short
 */
/* op vAA, field@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- field ref BBBB
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
testl %eax,%eax # resolved entry null?
je .LOP_SPUT_BYTE_resolve # if not, make it so
.LOP_SPUT_BYTE_finish: # field ptr in eax; resolve path re-enters here
GET_VREG_R %ecx rINST # ecx<- vAA
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
movl %ecx,offStaticField_value(%eax) # field value<- vAA
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_SPUT_CHAR: /* 0x6c */
/* File: x86/OP_SPUT_CHAR.S */
/* File: x86/OP_SPUT.S */
/*
 * General 32-bit SPUT handler.  Stores vAA into the static field,
 * then advances past this 2-unit instruction.
 *
 * for: sput, sput-boolean, sput-byte, sput-char, sput-short
 */
/* op vAA, field@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- field ref BBBB
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
testl %eax,%eax # resolved entry null?
je .LOP_SPUT_CHAR_resolve # if not, make it so
.LOP_SPUT_CHAR_finish: # field ptr in eax; resolve path re-enters here
GET_VREG_R %ecx rINST # ecx<- vAA
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
movl %ecx,offStaticField_value(%eax) # field value<- vAA
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_SPUT_SHORT: /* 0x6d */
/* File: x86/OP_SPUT_SHORT.S */
/* File: x86/OP_SPUT.S */
/*
 * General 32-bit SPUT handler.  Stores vAA into the static field,
 * then advances past this 2-unit instruction.
 *
 * for: sput, sput-boolean, sput-byte, sput-char, sput-short
 */
/* op vAA, field@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- field ref BBBB
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- DvmDex
movl offDvmDex_pResFields(%ecx),%ecx # ecx<- dvmDex->pResFields
movl (%ecx,%eax,4),%eax # eax<- resolved StaticField ptr
testl %eax,%eax # resolved entry null?
je .LOP_SPUT_SHORT_resolve # if not, make it so
.LOP_SPUT_SHORT_finish: # field ptr in eax; resolve path re-enters here
GET_VREG_R %ecx rINST # ecx<- vAA
FETCH_INST_OPCODE 2 %edx
ADVANCE_PC 2
movl %ecx,offStaticField_value(%eax) # field value<- vAA
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_INVOKE_VIRTUAL: /* 0x6e */
/* File: x86/OP_INVOKE_VIRTUAL.S */
/*
 * Handle a virtual method call.
 *
 * for: invoke-virtual, invoke-virtual/range
 *
 * EXPORT_PC is required because method resolution or the call itself
 * may throw.  Resolved fast path continues at .LOP_INVOKE_VIRTUAL_continue.
 */
/* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
/* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
movl rGLUE,%eax
movzwl 2(rPC),%ecx # ecx<- BBBB, the method ref
movl offGlue_methodClassDex(%eax),%eax # eax<- pDvmDex
EXPORT_PC
movl offDvmDex_pResMethods(%eax),%eax # eax<- pDvmDex->pResMethods
movl (%eax,%ecx,4),%eax # eax<- resolved baseMethod
testl %eax,%eax # already resolved?
jne .LOP_INVOKE_VIRTUAL_continue # yes, continue
movl rGLUE,%eax
movl %ecx,OUT_ARG1(%esp) # arg1<- ref
movl offGlue_method(%eax),%eax # eax<- glue->method
jmp .LOP_INVOKE_VIRTUAL_more # out-of-line: resolve, then invoke
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_INVOKE_SUPER: /* 0x6f */
/* File: x86/OP_INVOKE_SUPER.S */
/*
 * Handle a "super" method call.
 *
 * for: invoke-super, invoke-super/range
 *
 * Null-checks "this" before dispatch; EXPORT_PC because resolution or
 * the call may throw.
 */
/* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
/* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
movl rGLUE,rINST
movzwl 2(rPC),%eax # eax<- BBBB, the method ref
movl offGlue_methodClassDex(rINST),%ecx # ecx<- pDvmDex
EXPORT_PC
movl offDvmDex_pResMethods(%ecx),%ecx # ecx<- pDvmDex->pResMethods
movl (%ecx,%eax,4),%ecx # ecx<- resolved baseMethod
movl offGlue_method(rINST),%eax # eax<- method
movzwl 4(rPC),rINST # rINST<- GFED or CCCC
.if (!0)                                    # non-range form: extract first arg reg
andl $0xf,rINST # rINST<- D (or stays CCCC)
.endif
GET_VREG_R rINST rINST # rINST<- "this" ptr
testl rINST,rINST # null "this"?
je common_errNullObject # yes, throw
movl offMethod_clazz(%eax),%eax # eax<- method->clazz
testl %ecx,%ecx # already resolved?
jne .LOP_INVOKE_SUPER_continue # yes - go on
jmp .LOP_INVOKE_SUPER_resolve # out-of-line: resolve, then continue
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_INVOKE_DIRECT: /* 0x70 */
/* File: x86/OP_INVOKE_DIRECT.S */
/*
 * Handle a direct method call.
 *
 * (We could defer the "is 'this' pointer null" test to the common
 * method invocation code, and use a flag to indicate that static
 * calls don't count.  If we do this as part of copying the arguments
 * out we could avoiding loading the first arg twice.)
 *
 * for: invoke-direct, invoke-direct/range
 */
/* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
/* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- BBBB, the method ref
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
EXPORT_PC                                   # resolution / call may throw
movl offDvmDex_pResMethods(%ecx),%ecx # ecx<- pDvmDex->pResMethods
movzwl 4(rPC),%edx # edx<- GFED or CCCC
movl (%ecx,%eax,4),%eax # eax<- resolved methodToCall
.if (!0)                                    # non-range form: extract first arg reg
andl $0xf,%edx # edx<- D (or stays CCCC)
.endif
testl %eax,%eax # already resolved?
GET_VREG_R %ecx %edx # ecx<- "this" ptr
je .LOP_INVOKE_DIRECT_resolve # not resolved, do it now
.LOP_INVOKE_DIRECT_finish:
testl %ecx,%ecx # null "this"?
jne common_invokeMethodNoRange # no, continue on
jmp common_errNullObject # null "this": throw NPE
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_INVOKE_STATIC: /* 0x71 */
/* File: x86/OP_INVOKE_STATIC.S */
/*
 * Handle a static method call.  No "this" pointer, so no null check
 * on the fast path.
 *
 * for: invoke-static, invoke-static/range
 */
/* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
/* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- BBBB, the method ref
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
EXPORT_PC                                   # resolution / call may throw
movl offDvmDex_pResMethods(%ecx),%ecx # ecx<- pDvmDex->pResMethods
movl (%ecx,%eax,4),%eax # eax<- resolved methodToCall
testl %eax,%eax # already resolved?
jne common_invokeMethodNoRange # yes: invoke directly
movl rGLUE,%ecx # slow path: set up resolver args
movl offGlue_method(%ecx),%ecx # ecx<- glue->method
movzwl 2(rPC),%eax # eax<- method ref BBBB (reload)
movl offMethod_clazz(%ecx),%ecx# ecx<- method->clazz
movl %eax,OUT_ARG1(%esp) # arg1<- BBBB
movl %ecx,OUT_ARG0(%esp) # arg0<- clazz
jmp .LOP_INVOKE_STATIC_continue # out-of-line: resolve, then invoke
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_INVOKE_INTERFACE: /* 0x72 */
/* File: x86/OP_INVOKE_INTERFACE.S */
/*
 * Handle an interface method call.  Always goes through the resolver
 * continuation (interface dispatch needs the receiver's actual class).
 *
 * for: invoke-interface, invoke-interface/range
 */
/* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
/* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
movzwl 4(rPC),%eax # eax<- FEDC or CCCC
movl rGLUE,%ecx
.if (!0)                                    # non-range form: extract first arg reg
andl $0xf,%eax # eax<- C (or stays CCCC)
.endif
GET_VREG_R %eax %eax # eax<- "this"
EXPORT_PC                                   # resolution / call may throw
testl %eax,%eax # null this?
je common_errNullObject # yes, fail
movl offObject_clazz(%eax),%eax# eax<- thisPtr->clazz
movl %eax,OUT_ARG0(%esp) # arg0<- class
movl offGlue_methodClassDex(%ecx),%eax # eax<- methodClassDex
movl offGlue_method(%ecx),%ecx # ecx<- method
movl %eax,OUT_ARG3(%esp) # arg3<- dex
movzwl 2(rPC),%eax # eax<- BBBB, the method ref
movl %ecx,OUT_ARG2(%esp) # arg2<- method
movl %eax,OUT_ARG1(%esp) # arg1<- BBBB
jmp .LOP_INVOKE_INTERFACE_continue # out-of-line: find method, invoke
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_UNUSED_73: /* 0x73 */
/* File: x86/OP_UNUSED_73.S */
/* File: x86/unused.S */
# Opcode slot not assigned; reaching it is a fatal interpreter error.
jmp common_abort
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
/* File: x86/OP_INVOKE_VIRTUAL_RANGE.S */
/* File: x86/OP_INVOKE_VIRTUAL.S */
/*
 * Handle a virtual method call.
 *
 * for: invoke-virtual, invoke-virtual/range
 *
 * EXPORT_PC is required because method resolution or the call itself
 * may throw.  Resolved fast path continues at
 * .LOP_INVOKE_VIRTUAL_RANGE_continue.
 */
/* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
/* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
movl rGLUE,%eax
movzwl 2(rPC),%ecx # ecx<- BBBB, the method ref
movl offGlue_methodClassDex(%eax),%eax # eax<- pDvmDex
EXPORT_PC
movl offDvmDex_pResMethods(%eax),%eax # eax<- pDvmDex->pResMethods
movl (%eax,%ecx,4),%eax # eax<- resolved baseMethod
testl %eax,%eax # already resolved?
jne .LOP_INVOKE_VIRTUAL_RANGE_continue # yes, continue
movl rGLUE,%eax
movl %ecx,OUT_ARG1(%esp) # arg1<- ref
movl offGlue_method(%eax),%eax # eax<- glue->method
jmp .LOP_INVOKE_VIRTUAL_RANGE_more # out-of-line: resolve, then invoke
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */
/* File: x86/OP_INVOKE_SUPER_RANGE.S */
/* File: x86/OP_INVOKE_SUPER.S */
/*
 * Handle a "super" method call.
 *
 * for: invoke-super, invoke-super/range
 *
 * Null-checks "this" before dispatch; EXPORT_PC because resolution or
 * the call may throw.
 */
/* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
/* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
movl rGLUE,rINST
movzwl 2(rPC),%eax # eax<- BBBB, the method ref
movl offGlue_methodClassDex(rINST),%ecx # ecx<- pDvmDex
EXPORT_PC
movl offDvmDex_pResMethods(%ecx),%ecx # ecx<- pDvmDex->pResMethods
movl (%ecx,%eax,4),%ecx # ecx<- resolved baseMethod
movl offGlue_method(rINST),%eax # eax<- method
movzwl 4(rPC),rINST # rINST<- GFED or CCCC
.if (!1)                                    # range form: mask assembled out, keep CCCC
andl $0xf,rINST # rINST<- D (or stays CCCC)
.endif
GET_VREG_R rINST rINST # rINST<- "this" ptr
testl rINST,rINST # null "this"?
je common_errNullObject # yes, throw
movl offMethod_clazz(%eax),%eax # eax<- method->clazz
testl %ecx,%ecx # already resolved?
jne .LOP_INVOKE_SUPER_RANGE_continue # yes - go on
jmp .LOP_INVOKE_SUPER_RANGE_resolve # out-of-line: resolve, then continue
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
/* File: x86/OP_INVOKE_DIRECT_RANGE.S */
/* File: x86/OP_INVOKE_DIRECT.S */
/*
 * Handle a direct method call.
 *
 * (We could defer the "is 'this' pointer null" test to the common
 * method invocation code, and use a flag to indicate that static
 * calls don't count.  If we do this as part of copying the arguments
 * out we could avoiding loading the first arg twice.)
 *
 * for: invoke-direct, invoke-direct/range
 */
/* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
/* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- BBBB, the method ref
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
EXPORT_PC                                   # resolution / call may throw
movl offDvmDex_pResMethods(%ecx),%ecx # ecx<- pDvmDex->pResMethods
movzwl 4(rPC),%edx # edx<- GFED or CCCC
movl (%ecx,%eax,4),%eax # eax<- resolved methodToCall
.if (!1)                                    # range form: mask assembled out, keep CCCC
andl $0xf,%edx # edx<- D (or stays CCCC)
.endif
testl %eax,%eax # already resolved?
GET_VREG_R %ecx %edx # ecx<- "this" ptr
je .LOP_INVOKE_DIRECT_RANGE_resolve # not resolved, do it now
.LOP_INVOKE_DIRECT_RANGE_finish:
testl %ecx,%ecx # null "this"?
jne common_invokeMethodRange # no, continue on
jmp common_errNullObject # null "this": throw NPE
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */
/* File: x86/OP_INVOKE_STATIC_RANGE.S */
/* File: x86/OP_INVOKE_STATIC.S */
/*
 * Handle a static method call.  No "this" pointer, so no null check
 * on the fast path.
 *
 * for: invoke-static, invoke-static/range
 */
/* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
/* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
movl rGLUE,%ecx
movzwl 2(rPC),%eax # eax<- BBBB, the method ref
movl offGlue_methodClassDex(%ecx),%ecx # ecx<- pDvmDex
EXPORT_PC                                   # resolution / call may throw
movl offDvmDex_pResMethods(%ecx),%ecx # ecx<- pDvmDex->pResMethods
movl (%ecx,%eax,4),%eax # eax<- resolved methodToCall
testl %eax,%eax # already resolved?
jne common_invokeMethodRange # yes: invoke directly
movl rGLUE,%ecx # slow path: set up resolver args
movl offGlue_method(%ecx),%ecx # ecx<- glue->method
movzwl 2(rPC),%eax # eax<- method ref BBBB (reload)
movl offMethod_clazz(%ecx),%ecx# ecx<- method->clazz
movl %eax,OUT_ARG1(%esp) # arg1<- BBBB
movl %ecx,OUT_ARG0(%esp) # arg0<- clazz
jmp .LOP_INVOKE_STATIC_RANGE_continue # out-of-line: resolve, then invoke
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
/* File: x86/OP_INVOKE_INTERFACE_RANGE.S */
/* File: x86/OP_INVOKE_INTERFACE.S */
/*
 * Handle an interface method call.  Always goes through the resolver
 * continuation (interface dispatch needs the receiver's actual class).
 *
 * for: invoke-interface, invoke-interface/range
 */
/* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
/* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
movzwl 4(rPC),%eax # eax<- FEDC or CCCC
movl rGLUE,%ecx
.if (!1)                                    # range form: mask assembled out, keep CCCC
andl $0xf,%eax # eax<- C (or stays CCCC)
.endif
GET_VREG_R %eax %eax # eax<- "this"
EXPORT_PC                                   # resolution / call may throw
testl %eax,%eax # null this?
je common_errNullObject # yes, fail
movl offObject_clazz(%eax),%eax# eax<- thisPtr->clazz
movl %eax,OUT_ARG0(%esp) # arg0<- class
movl offGlue_methodClassDex(%ecx),%eax # eax<- methodClassDex
movl offGlue_method(%ecx),%ecx # ecx<- method
movl %eax,OUT_ARG3(%esp) # arg3<- dex
movzwl 2(rPC),%eax # eax<- BBBB, the method ref
movl %ecx,OUT_ARG2(%esp) # arg2<- method
movl %eax,OUT_ARG1(%esp) # arg1<- BBBB
jmp .LOP_INVOKE_INTERFACE_RANGE_continue # out-of-line: find method, invoke
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_UNUSED_79: /* 0x79 */
/* File: x86/OP_UNUSED_79.S */
/* File: x86/unused.S */
# Opcode slot not assigned; reaching it is a fatal interpreter error.
jmp common_abort
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_UNUSED_7A: /* 0x7a */
/* File: x86/OP_UNUSED_7A.S */
/* File: x86/unused.S */
# Opcode slot not assigned; reaching it is a fatal interpreter error.
jmp common_abort
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_NEG_INT: /* 0x7b */
/* File: x86/OP_NEG_INT.S */
/* File: x86/unop.S */
/*
 * Generic 32-bit unary operation.  Provide an "instr" line that
 * specifies an instruction that performs "result = op eax".
 * Here the template instruction is "negl %eax" (vA = -vB).
 */
/* unop vA, vB */
movzbl rINSTbl,%ecx # ecx<- A+ (low nibble = A, high nibble junk)
sarl $4,rINST # rINST<- B
GET_VREG_R %eax rINST # eax<- vB
andb $0xf,%cl # ecx<- A
FETCH_INST_OPCODE 1 %edx
ADVANCE_PC 1
negl %eax # the "op": two's-complement negate
SET_VREG %eax %ecx # vA<- result
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_NOT_INT: /* 0x7c */
/* File: x86/OP_NOT_INT.S */
/* File: x86/unop.S */
/*
 * Generic 32-bit unary operation.  Provide an "instr" line that
 * specifies an instruction that performs "result = op eax".
 * Here the template instruction is "notl %eax" (vA = ~vB).
 */
/* unop vA, vB */
movzbl rINSTbl,%ecx # ecx<- A+ (low nibble = A, high nibble junk)
sarl $4,rINST # rINST<- B
GET_VREG_R %eax rINST # eax<- vB
andb $0xf,%cl # ecx<- A
FETCH_INST_OPCODE 1 %edx
ADVANCE_PC 1
notl %eax # the "op": bitwise complement
SET_VREG %eax %ecx # vA<- result
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_NEG_LONG: /* 0x7d */
/* File: x86/OP_NEG_LONG.S */
# 64-bit two's-complement negate of the register pair vB/vB+1 into vA/vA+1.
/* unop vA, vB */
movzbl rINSTbl,%ecx # ecx<- BA
sarl $4,%ecx # ecx<- B
andb $0xf,rINSTbl # rINST<- A
GET_VREG_WORD %eax %ecx 0 # eax<- v[B+0] (lsw)
GET_VREG_WORD %ecx %ecx 1 # ecx<- v[B+1] (msw)
negl %eax # low = -low, sets carry if low != 0
adcl $0,%ecx # fold borrow into high word
negl %ecx # high = -(high + carry): 64-bit negate done
FETCH_INST_OPCODE 1 %edx
SET_VREG_WORD %eax rINST 0 # v[A+0]<- eax
SET_VREG_WORD %ecx rINST 1 # v[A+1]<- ecx
ADVANCE_PC 1
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_NOT_LONG: /* 0x7e */
/* File: x86/OP_NOT_LONG.S */
# 64-bit bitwise complement of the register pair vB/vB+1 into vA/vA+1.
/* unop vA, vB */
movzbl rINSTbl,%ecx # ecx<- BA
sarl $4,%ecx # ecx<- B
andb $0xf,rINSTbl # rINST<- A
GET_VREG_WORD %eax %ecx 0 # eax<- v[B+0] (lsw)
GET_VREG_WORD %ecx %ecx 1 # ecx<- v[B+1] (msw)
FETCH_INST_OPCODE 1 %edx
notl %eax # complement low word
notl %ecx # complement high word
SET_VREG_WORD %eax rINST 0 # v[A+0]<- eax
SET_VREG_WORD %ecx rINST 1 # v[A+1]<- ecx
ADVANCE_PC 1
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_NEG_FLOAT: /* 0x80 */
/* File: x86/OP_NEG_FLOAT.S */
/* File: x86/fpcvt.S */
/*
 * Generic 32-bit FP conversion operation (fpcvt template).
 * For neg-float the template "instr" is fchs: vA = -vB, via x87.
 */
/* unop vA, vB */
movzbl rINSTbl,%ecx # ecx<- A+ (low nibble = A, high nibble junk)
sarl $4,rINST # rINST<- B
flds (rFP,rINST,4) # %st0<- vB (single-precision load)
andb $0xf,%cl # ecx<- A
FETCH_INST_OPCODE 1 %edx
ADVANCE_PC 1
fchs # negate %st0
fstps (rFP,%ecx,4) # vA<- %st0 (single-precision store, pops)
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_NEG_DOUBLE: /* 0x81 */
/* File: x86/OP_NEG_DOUBLE.S */
/* File: x86/fpcvt.S */
/*
 * Generic FP conversion operation (fpcvt template, 64-bit variant here).
 * For neg-double the template "instr" is fchs: vA/vA+1 = -(vB/vB+1).
 */
/* unop vA, vB */
movzbl rINSTbl,%ecx # ecx<- A+ (low nibble = A, high nibble junk)
sarl $4,rINST # rINST<- B
fldl (rFP,rINST,4) # %st0<- vB (double-precision load)
andb $0xf,%cl # ecx<- A
FETCH_INST_OPCODE 1 %edx
ADVANCE_PC 1
fchs # negate %st0
fstpl (rFP,%ecx,4) # vA<- %st0 (double-precision store, pops)
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_INT_TO_LONG: /* 0x81 */
/* File: x86/OP_INT_TO_LONG.S */
# Sign-extend 32-bit vB into the 64-bit register pair vA/vA+1.
/* int to long vA, vB */
movzbl rINSTbl,%eax # eax<- +A (low nibble = A, high nibble = B)
sarl $4,%eax # eax<- B
GET_VREG_R %eax %eax # eax<- vB
andb $0xf,rINSTbl # rINST<- A
cltd # edx:eax<- sign-extended eax
SET_VREG_WORD %edx rINST 1 # v[A+1]<- edx (sign bits)
FETCH_INST_OPCODE 1 %edx # safe to clobber edx now
SET_VREG_WORD %eax rINST 0 # v[A+0]<- %eax
ADVANCE_PC 1
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_INT_TO_FLOAT: /* 0x82 */
/* File: x86/OP_INT_TO_FLOAT.S */
/* File: x86/fpcvt.S */
/*
 * Generic 32-bit FP conversion operation (fpcvt template):
 * int vB -> float vA via x87 integer load / single store.
 */
/* unop vA, vB */
movzbl rINSTbl,%ecx # ecx<- A+ (low nibble = A, high nibble junk)
sarl $4,rINST # rINST<- B
fildl (rFP,rINST,4) # %st0<- (float)(int32)vB
andb $0xf,%cl # ecx<- A
FETCH_INST_OPCODE 1 %edx
ADVANCE_PC 1
fstps (rFP,%ecx,4) # vA<- %st0 (single-precision store, pops)
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_INT_TO_DOUBLE: /* 0x83 */
/* File: x86/OP_INT_TO_DOUBLE.S */
/* File: x86/fpcvt.S */
/*
 * Generic FP conversion operation (fpcvt template):
 * int vB -> double vA/vA+1 via x87 integer load / double store.
 */
/* unop vA, vB */
movzbl rINSTbl,%ecx # ecx<- A+ (low nibble = A, high nibble junk)
sarl $4,rINST # rINST<- B
fildl (rFP,rINST,4) # %st0<- (double)(int32)vB
andb $0xf,%cl # ecx<- A
FETCH_INST_OPCODE 1 %edx
ADVANCE_PC 1
fstpl (rFP,%ecx,4) # vA<- %st0 (double-precision store, pops)
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_LONG_TO_INT: /* 0x84 */
/* File: x86/OP_LONG_TO_INT.S */
/* we ignore the high word, making this equivalent to a 32-bit reg move */
/* File: x86/OP_MOVE.S */
/* for move, move-object, long-to-int */
/* op vA, vB */
movzbl rINSTbl,%eax # eax<- BA
andb $0xf,%al # eax<- A
shrl $4,rINST # rINST<- B
GET_VREG_R %ecx rINST # ecx<- fp[B] (low word only)
FETCH_INST_OPCODE 1 %edx
ADVANCE_PC 1
SET_VREG %ecx %eax # fp[A]<-fp[B]
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_LONG_TO_FLOAT: /* 0x85 */
/* File: x86/OP_LONG_TO_FLOAT.S */
/* File: x86/fpcvt.S */
/*
 * Generic FP conversion operation (fpcvt template):
 * long vB/vB+1 -> float vA via x87 64-bit integer load / single store.
 */
/* unop vA, vB */
movzbl rINSTbl,%ecx # ecx<- A+ (low nibble = A, high nibble junk)
sarl $4,rINST # rINST<- B
fildll (rFP,rINST,4) # %st0<- (float)(int64)vB (64-bit int load)
andb $0xf,%cl # ecx<- A
FETCH_INST_OPCODE 1 %edx
ADVANCE_PC 1
fstps (rFP,%ecx,4) # vA<- %st0 (single-precision store, pops)
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_LONG_TO_DOUBLE: /* 0x86 */
/* File: x86/OP_LONG_TO_DOUBLE.S */
/* File: x86/fpcvt.S */
/*
 * Generic FP conversion operation (fpcvt template):
 * long vB/vB+1 -> double vA/vA+1 via x87 64-bit integer load / double store.
 */
/* unop vA, vB */
movzbl rINSTbl,%ecx # ecx<- A+ (low nibble = A, high nibble junk)
sarl $4,rINST # rINST<- B
fildll (rFP,rINST,4) # %st0<- (double)(int64)vB (64-bit int load)
andb $0xf,%cl # ecx<- A
FETCH_INST_OPCODE 1 %edx
ADVANCE_PC 1
fstpl (rFP,%ecx,4) # vA<- %st0 (double-precision store, pops)
GOTO_NEXT_R %edx
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_FLOAT_TO_INT: /* 0x87 */
/* File: x86/OP_FLOAT_TO_INT.S */
/* File: x86/cvtfp_int.S */
/* On fp to int conversions, Java requires that
 * if the result > maxint, it should be clamped to maxint.  If it is less
 * than minint, it should be clamped to minint.  If it is a nan, the result
 * should be zero.  Further, the rounding mode is to truncate.  This model
 * differs from what is delivered normally via the x86 fpu, so we have
 * to play some games: temporarily force the FPU rounding control to
 * "truncate", convert, then restore the caller's control word.  The
 * NaN/overflow clamping is handled in the out-of-line continuation.
 */
/* float/double to int/long vA, vB */
movzbl rINSTbl,%ecx # ecx<- A+ (low nibble = A, high nibble junk)
sarl $4,rINST # rINST<- B
.if 0                                       # source is float here, not double
fldl (rFP,rINST,4) # %st0<- vB
.else
flds (rFP,rINST,4) # %st0<- vB (single-precision)
.endif
ftst # set FPU flags (presumably examined by the continuation — NaN check)
fnstcw LOCAL0_OFFSET(%ebp) # remember original rounding mode
movzwl LOCAL0_OFFSET(%ebp),%eax
movb $0xc,%ah # RC bits (11b) = round toward zero (truncate)
movw %ax,LOCAL0_OFFSET+2(%ebp)
fldcw LOCAL0_OFFSET+2(%ebp) # set "to zero" rounding mode
FETCH_INST_OPCODE 1 %edx
andb $0xf,%cl # ecx<- A
.if 0                                       # destination is int here, not long
fistpll (rFP,%ecx,4) # convert and store
.else
fistpl (rFP,%ecx,4) # convert to int32 and store (pops)
.endif
fldcw LOCAL0_OFFSET(%ebp) # restore previous rounding mode
jmp .LOP_FLOAT_TO_INT_continue # out-of-line: clamp/NaN fixup, dispatch
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_FLOAT_TO_LONG: /* 0x88 */
/* File: x86/OP_FLOAT_TO_LONG.S */
/* File: x86/cvtfp_int.S */
/* On fp to int conversions, Java requires that
 * if the result > maxint, it should be clamped to maxint.  If it is less
 * than minint, it should be clamped to minint.  If it is a nan, the result
 * should be zero.  Further, the rounding mode is to truncate.  This model
 * differs from what is delivered normally via the x86 fpu, so we have
 * to play some games: temporarily force the FPU rounding control to
 * "truncate", convert, then restore the caller's control word.  The
 * NaN/overflow clamping is handled in the out-of-line continuation.
 */
/* float/double to int/long vA, vB */
movzbl rINSTbl,%ecx # ecx<- A+ (low nibble = A, high nibble junk)
sarl $4,rINST # rINST<- B
.if 0                                       # source is float here, not double
fldl (rFP,rINST,4) # %st0<- vB
.else
flds (rFP,rINST,4) # %st0<- vB (single-precision)
.endif
ftst # set FPU flags (presumably examined by the continuation — NaN check)
fnstcw LOCAL0_OFFSET(%ebp) # remember original rounding mode
movzwl LOCAL0_OFFSET(%ebp),%eax
movb $0xc,%ah # RC bits (11b) = round toward zero (truncate)
movw %ax,LOCAL0_OFFSET+2(%ebp)
fldcw LOCAL0_OFFSET+2(%ebp) # set "to zero" rounding mode
FETCH_INST_OPCODE 1 %edx
andb $0xf,%cl # ecx<- A
.if 1                                       # destination is long here
fistpll (rFP,%ecx,4) # convert to int64 and store (pops)
.else
fistpl (rFP,%ecx,4) # convert and store
.endif
fldcw LOCAL0_OFFSET(%ebp) # restore previous rounding mode
jmp .LOP_FLOAT_TO_LONG_continue # out-of-line: clamp/NaN fixup, dispatch
/* ------------------------------ */
.balign 64                                  # align handler to its 64-byte slot
.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */
/* File: x86/OP_FLOAT_TO_DOUBLE.S */
/* File: x86/fpcvt.S */
/*
 * Generic FP conversion operation (fpcvt template):
 * float vB -> double vA/vA+1 (widen via x87 load/store).
 */
/* unop vA, vB */
movzbl rINSTbl,%ecx # ecx<- A+ (low nibble = A, high nibble junk)
sarl $4,rINST # rINST<- B
flds (rFP,rINST,4) # %st0<- vB (single-precision load)
andb $0xf,%cl # ecx<- A
FETCH_INST_OPCODE 1 %edx
ADVANCE_PC 1
fstpl (rFP,%ecx,4) # vA<- %st0 (double-precision store, pops)
GOTO_NEXT_R %edx