Add source code skeletons for x86 work. No actual JIT'ng yet.
Change-Id: Ic94a916e777e9bc5163cf205899daf9c18dcafe1
diff --git a/vm/Dvm.mk b/vm/Dvm.mk
index 85fc598..39d8b1b 100644
--- a/vm/Dvm.mk
+++ b/vm/Dvm.mk
@@ -294,6 +294,14 @@
arch/$(dvm_arch_variant)/Hints386ABI.c \
mterp/out/InterpC-$(dvm_arch_variant).c \
mterp/out/InterpAsm-$(dvm_arch_variant).S
+ ifeq ($(WITH_JIT),true)
+ LOCAL_SRC_FILES += \
+ compiler/codegen/x86/Assemble.c \
+ compiler/codegen/x86/ArchUtility.c \
+ compiler/codegen/x86/ia32/Codegen.c \
+ compiler/codegen/x86/ia32/CallingConvention.S \
+ compiler/template/out/CompilerTemplateAsm-ia32.S
+ endif
endif
endif
diff --git a/vm/compiler/Compiler.h b/vm/compiler/Compiler.h
index 739d517..6dd9cbd 100644
--- a/vm/compiler/Compiler.h
+++ b/vm/compiler/Compiler.h
@@ -83,7 +83,7 @@
DALVIK_JIT_THUMB,
DALVIK_JIT_THUMB2,
DALVIK_JIT_THUMB2EE,
- DALVIK_JIT_X86
+ DALVIK_JIT_IA32
} JitInstructionSetType;
/* Description of a compiled trace. */
diff --git a/vm/compiler/codegen/x86/ArchUtility.c b/vm/compiler/codegen/x86/ArchUtility.c
new file mode 100644
index 0000000..6d07b62
--- /dev/null
+++ b/vm/compiler/codegen/x86/ArchUtility.c
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "../../CompilerInternals.h"
+#include "libdex/OpCodeNames.h"
+#include "X86LIR.h"
+
+/* Dump instructions and constant pool contents */
+void dvmCompilerCodegenDump(CompilationUnit *cUnit)
+{
+}
diff --git a/vm/compiler/codegen/x86/Assemble.c b/vm/compiler/codegen/x86/Assemble.c
new file mode 100644
index 0000000..fcc4974
--- /dev/null
+++ b/vm/compiler/codegen/x86/Assemble.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dalvik.h"
+#include "libdex/OpCode.h"
+#include "libdex/OpCodeNames.h"
+
+#include "../../CompilerInternals.h"
+#include "X86LIR.h"
+#include "Codegen.h"
+#include <unistd.h> /* for cacheflush */
+#include <sys/mman.h> /* for protection change */
+
+#define MAX_ASSEMBLER_RETRIES 10
+
+
+/* Track the number of times that the code cache is patched */
+#if defined(WITH_JIT_TUNING)
+#define UPDATE_CODE_CACHE_PATCHES() (gDvmJit.codeCachePatches++)
+#else
+#define UPDATE_CODE_CACHE_PATCHES()
+#endif
+
+/*
+ * FIXME - redo for x86
+ *
+ * Translation layout in the code cache. Note that the codeAddress pointer
+ * in JitTable will point directly to the code body (field codeAddress). The
+ * chain cell offset codeAddress - 2, and (if present) executionCount is at
+ * codeAddress - 6.
+ *
+ * +----------------------------+
+ * | Execution count | -> [Optional] 4 bytes
+ * +----------------------------+
+ * +--| Offset to chain cell counts| -> 2 bytes
+ * | +----------------------------+
+ * | | Code body | -> Start address for translation
+ * | | | variable in 2-byte chunks
+ * | . . (JitTable's codeAddress points here)
+ * | . .
+ * | | |
+ * | +----------------------------+
+ * | | Chaining Cells | -> 12/16 bytes each, must be 4 byte aligned
+ * | . .
+ * | . .
+ * | | |
+ * | +----------------------------+
+ * | | Gap for large switch stmt | -> # cases >= MAX_CHAINED_SWITCH_CASES
+ * | +----------------------------+
+ * +->| Chaining cell counts | -> 8 bytes, chain cell counts by type
+ * +----------------------------+
+ * | Trace description | -> variable sized
+ * . .
+ * | |
+ * +----------------------------+
+ * | Literal pool | -> 4-byte aligned, variable size
+ * . .
+ * . .
+ * | |
+ * +----------------------------+
+ *
+ * Go over each instruction in the list and calculate the offset from the top
+ * before sending them off to the assembler. If out-of-range branch distance is
+ * seen rearrange the instructions a bit to correct it.
+ */
+void dvmCompilerAssembleLIR(CompilationUnit *cUnit, JitTranslationInfo *info)
+{
+}
+
+/*
+ * Perform translation chain operation.
+ */
+void* dvmJitChain(void* tgtAddr, u4* branchAddr)
+{
+ return 0;
+}
+
+/*
+ * This method is called from the invoke templates for virtual and interface
+ * methods to speculatively setup a chain to the callee. The templates are
+ * written in assembly and have setup method, cell, and clazz at r0, r2, and
+ * r3 respectively, so there is an unused argument in the list. Upon return one
+ * of the following three results may happen:
+ * 1) Chain is not setup because the callee is native. Reset the rechain
+ * count to a big number so that it will take a long time before the next
+ * rechain attempt to happen.
+ * 2) Chain is not setup because the callee has not been created yet. Reset
+ * the rechain count to a small number and retry in the near future.
+ * 3) Ask all other threads to stop before patching this chaining cell.
+ * This is required because another thread may have passed the class check
+ * but hasn't reached the chaining cell yet to follow the chain. If we
+ * patch the content before halting the other thread, there could be a
+ * small window for race conditions to happen that it may follow the new
+ * but wrong chain to invoke a different method.
+ */
+const Method *dvmJitToPatchPredictedChain(const Method *method,
+ InterpState *interpState,
+ PredictedChainingCell *cell,
+ const ClassObject *clazz)
+{
+ return 0;
+}
+
+/*
+ * Patch the inline cache content based on the content passed from the work
+ * order.
+ */
+void dvmCompilerPatchInlineCache(void)
+{
+}
+
+/*
+ * Unchain a trace given the starting address of the translation
+ * in the code cache. Refer to the diagram in dvmCompilerAssembleLIR.
+ * Returns the address following the last cell unchained. Note that
+ * the incoming codeAddr is a thumb code address, and therefore has
+ * the low bit set.
+ */
+u4* dvmJitUnchain(void* codeAddr)
+{
+ return 0;
+}
+
+/* Unchain all translation in the cache. */
+void dvmJitUnchainAll()
+{
+}
+
+/* Create a copy of the trace descriptor of an existing compilation */
+JitTraceDescription *dvmCopyTraceDescriptor(const u2 *pc,
+ const JitEntry *knownEntry)
+{
+ return 0;
+}
+
+/* Sort the trace profile counts and dump them */
+void dvmCompilerSortAndPrintTraceProfiles()
+{
+}
diff --git a/vm/compiler/codegen/x86/CalloutHelper.h b/vm/compiler/codegen/x86/CalloutHelper.h
new file mode 100644
index 0000000..3229a26
--- /dev/null
+++ b/vm/compiler/codegen/x86/CalloutHelper.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dalvik.h"
+
+#ifndef _DALVIK_VM_COMPILER_CODEGEN_X86_CALLOUT_HELPER_H
+#define _DALVIK_VM_COMPILER_CODEGEN_X86_CALLOUT_HELPER_H
+
+/*
+ * Declare/comment prototypes of all native callout functions invoked by the
+ * JIT'ed code here and use the LOAD_FUNC_ADDR macro to load the address into
+ * a register. In this way we have a centralized place to find out all native
+ * helper functions and we can grep for LOAD_FUNC_ADDR to find out all the
+ * callsites.
+ */
+
+/* Load a statically compiled function address as a constant */
+#define LOAD_FUNC_ADDR(cUnit, reg, addr) loadConstant(cUnit, reg, addr)
+
+/* Originally declared in Sync.h */
+bool dvmUnlockObject(struct Thread* self, struct Object* obj); //OP_MONITOR_EXIT
+
+/* Originally declared in oo/TypeCheck.h */
+bool dvmCanPutArrayElement(const ClassObject* elemClass, // OP_APUT_OBJECT
+ const ClassObject* arrayClass);
+int dvmInstanceofNonTrivial(const ClassObject* instance, // OP_CHECK_CAST &&
+ const ClassObject* clazz); // OP_INSTANCE_OF
+
+/* Originally declared in oo/Array.h */
+ArrayObject* dvmAllocArrayByClass(ClassObject* arrayClass, // OP_NEW_ARRAY
+ size_t length, int allocFlags);
+
+/* Originally declared in interp/InterpDefs.h */
+bool dvmInterpHandleFillArrayData(ArrayObject* arrayObject,// OP_FILL_ARRAY_DATA
+ const u2* arrayData);
+
+/* Originally declared in alloc/Alloc.h */
+Object* dvmAllocObject(ClassObject* clazz, int flags); // OP_NEW_INSTANCE
+
+/*
+ * Functions declared in gDvmInlineOpsTable[] are used for
+ * OP_EXECUTE_INLINE & OP_EXECUTE_INLINE_RANGE.
+ *
+ * org_apache_harmony_dalvik_NativeTestTarget_emptyInlineMethod
+ * javaLangString_charAt
+ * javaLangString_compareTo
+ * javaLangString_equals
+ * javaLangString_indexOf_I
+ * javaLangString_indexOf_II
+ * javaLangString_length
+ * javaLangMath_abs_int
+ * javaLangMath_abs_long
+ * javaLangMath_abs_float
+ * javaLangMath_abs_double
+ * javaLangMath_min_int
+ * javaLangMath_max_int
+ * javaLangMath_sqrt
+ * javaLangMath_cos
+ * javaLangMath_sin
+ */
+double sqrt(double x); // INLINE_MATH_SQRT
+
+#endif /* _DALVIK_VM_COMPILER_CODEGEN_X86_CALLOUT_HELPER_H */
diff --git a/vm/compiler/codegen/x86/Codegen.h b/vm/compiler/codegen/x86/Codegen.h
new file mode 100644
index 0000000..1ccdad3
--- /dev/null
+++ b/vm/compiler/codegen/x86/Codegen.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file contains register allocation support and is intended to be
+ * included by:
+ *
+ * Codegen-$(TARGET_ARCH_VARIANT).c
+ *
+ */
+
+#include "compiler/CompilerIR.h"
+#include "CalloutHelper.h"
diff --git a/vm/compiler/codegen/x86/CodegenDriver.c b/vm/compiler/codegen/x86/CodegenDriver.c
new file mode 100644
index 0000000..026a0b5
--- /dev/null
+++ b/vm/compiler/codegen/x86/CodegenDriver.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file contains codegen and support common to all supported
+ * X86 variants. It is included by:
+ *
+ * Codegen-$(TARGET_ARCH_VARIANT).c
+ *
+ * which combines this common code with specific support found in the
+ * applicable directory below this one.
+ */
+
+static int opcodeCoverage[256];
+static intptr_t templateEntryOffsets[TEMPLATE_LAST_MARK];
+
+/*
+ * The following are the first-level codegen routines that analyze the format
+ * of each bytecode then either dispatch special purpose codegen routines
+ * or produce corresponding Thumb instructions directly.
+ */
+
+#if 0
+static bool handleFmt10t_Fmt20t_Fmt30t(CompilationUnit *cUnit, MIR *mir,
+ BasicBlock *bb, X86LIR *labelList)
+{
+ /* For OP_GOTO, OP_GOTO_16, and OP_GOTO_32 */
+ return true;
+}
+
+static bool handleFmt10x(CompilationUnit *cUnit, MIR *mir)
+{
+ return true;
+}
+
+static bool handleFmt11n_Fmt31i(CompilationUnit *cUnit, MIR *mir)
+{
+ return true;
+}
+
+static bool handleFmt21h(CompilationUnit *cUnit, MIR *mir)
+{
+ return true;
+}
+
+static bool handleFmt20bc(CompilationUnit *cUnit, MIR *mir)
+{
+ return true;
+}
+
+static bool handleFmt21c_Fmt31c(CompilationUnit *cUnit, MIR *mir)
+{
+ return true;
+}
+
+static bool handleFmt11x(CompilationUnit *cUnit, MIR *mir)
+{
+ return true;
+}
+
+static bool handleFmt12x(CompilationUnit *cUnit, MIR *mir)
+{
+ return true;
+}
+
+static bool handleFmt21s(CompilationUnit *cUnit, MIR *mir)
+{
+ return true;
+}
+
+static bool handleFmt21t(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
+ X86LIR *labelList)
+{
+ return true;
+}
+
+static bool handleFmt22b_Fmt22s(CompilationUnit *cUnit, MIR *mir)
+{
+ return true;
+}
+
+static bool handleFmt22c(CompilationUnit *cUnit, MIR *mir)
+{
+ return true;
+}
+
+static bool handleFmt22cs(CompilationUnit *cUnit, MIR *mir)
+{
+ return true;
+}
+
+static bool handleFmt22t(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
+ X86LIR *labelList)
+{
+ return true;
+}
+
+static bool handleFmt22x_Fmt32x(CompilationUnit *cUnit, MIR *mir)
+{
+ return true;
+}
+
+static bool handleFmt23x(CompilationUnit *cUnit, MIR *mir)
+{
+ return true;
+}
+
+static bool handleFmt31t(CompilationUnit *cUnit, MIR *mir)
+{
+ return true;
+}
+
+static bool handleFmt35c_3rc(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
+ X86LIR *labelList)
+{
+ return true;
+}
+
+static bool handleFmt35ms_3rms(CompilationUnit *cUnit, MIR *mir,
+ BasicBlock *bb, X86LIR *labelList)
+{
+ return true;
+}
+
+/*
+ * NOTE: Handles both range and non-range versions (arguments
+ * have already been normalized by this point).
+ */
+static bool handleExecuteInline(CompilationUnit *cUnit, MIR *mir)
+{
+ return true;
+}
+
+static bool handleFmt51l(CompilationUnit *cUnit, MIR *mir)
+{
+ return true;
+}
+#endif
+
+
+void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
+{
+}
+
+/* Accept the work and start compiling */
+bool dvmCompilerDoWork(CompilerWorkOrder *work)
+{
+ bool res;
+
+ if (gDvmJit.codeCacheFull) {
+ return false;
+ }
+
+ switch (work->kind) {
+ case kWorkOrderTrace:
+ /* Start compilation with maximally allowed trace length */
+ res = dvmCompileTrace(work->info, JIT_MAX_TRACE_LEN, &work->result,
+ work->bailPtr, 0 /* no hints */);
+ break;
+ case kWorkOrderTraceDebug: {
+ bool oldPrintMe = gDvmJit.printMe;
+ gDvmJit.printMe = true;
+ /* Start compilation with maximally allowed trace length */
+ res = dvmCompileTrace(work->info, JIT_MAX_TRACE_LEN, &work->result,
+ work->bailPtr, 0 /* no hints */);
+ gDvmJit.printMe = oldPrintMe;
+ break;
+ }
+ default:
+ res = false;
+ LOGE("Jit: unknown work order type");
+ assert(0); // Bail if debug build, discard otherwise
+ }
+ return res;
+}
+
+/* Architectural-specific debugging helpers go here */
+void dvmCompilerArchDump(void)
+{
+ /* Print compiled opcode in this VM instance */
+ int i, start, streak;
+ char buf[1024];
+
+ streak = i = 0;
+ buf[0] = 0;
+ while (opcodeCoverage[i] == 0 && i < 256) {
+ i++;
+ }
+ if (i == 256) {
+ return;
+ }
+ for (start = i++, streak = 1; i < 256; i++) {
+ if (opcodeCoverage[i]) {
+ streak++;
+ } else {
+ if (streak == 1) {
+ sprintf(buf+strlen(buf), "%x,", start);
+ } else {
+ sprintf(buf+strlen(buf), "%x-%x,", start, start + streak - 1);
+ }
+ streak = 0;
+ while (opcodeCoverage[i] == 0 && i < 256) {
+ i++;
+ }
+ if (i < 256) {
+ streak = 1;
+ start = i;
+ }
+ }
+ }
+ if (streak) {
+ if (streak == 1) {
+ sprintf(buf+strlen(buf), "%x", start);
+ } else {
+ sprintf(buf+strlen(buf), "%x-%x", start, start + streak - 1);
+ }
+ }
+ if (strlen(buf)) {
+ LOGD("dalvik.vm.jit.op = %s", buf);
+ }
+}
+
+/* Common initialization routine for an architecture family */
+bool dvmCompilerArchInit()
+{
+ return dvmCompilerArchVariantInit();
+}
+
+void *dvmCompilerGetInterpretTemplate()
+{
+ return (void*) ((int)gDvmJit.codeCache +
+ templateEntryOffsets[TEMPLATE_INTERPRET]);
+}
+
+void dvmCompilerInitializeRegAlloc(CompilationUnit *cUnit)
+{
+}
diff --git a/vm/compiler/codegen/x86/X86LIR.h b/vm/compiler/codegen/x86/X86LIR.h
new file mode 100644
index 0000000..969c371
--- /dev/null
+++ b/vm/compiler/codegen/x86/X86LIR.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Dalvik.h"
+#include "compiler/CompilerInternals.h"
+
+#ifndef _DALVIK_VM_COMPILER_CODEGEN_X86_X86LIR_H
+#define _DALVIK_VM_COMPILER_CODEGEN_X86_X86LIR_H
+
+/*
+ * For both JIT & interpreter:
+ * esi is Dalvik FP
+ * ebp is native FP
+ * esp is native SP
+ *
+ * For interpreter:
+ * edx is Dalvik PC (rPC)
+ * ebx is rINST
+ *
+ * For JIT:
+ * eax, edx, ecx are scratch & caller-save
+ * ebx, edi are scratch & callee-save
+ *
+ * Calling conventions:
+ * 32-bit return in eax
+ * 64-bit return in edx:eax
+ * fp on top of fp stack st(0)
+ * Parameters passed on stack, pushed left to right
+ * On entry to target, first parm is at 4(%esp).
+ * For performance, we'll maintain 16-byte stack alignment
+ *
+ * When transitioning from code cache to interp:
+ * materialize Dalvik PC of target in rPC/%edx
+ * Preload rINST/%ebx such that high 24 bits are zero and
+ * bl contains the non-opcode 8-bits of the 16-bit Dalvik
+ * instruction at (rPC)
+ */
+
+/* Keys for target-specific scheduling and other optimizations here */
+typedef enum X86TargetOptHints {
+ kMaxHoistDistance,
+} X86TargetOptHints;
+
+ /*
+ * Data structure tracking the mapping between a Dalvik register (pair) and a
+ * native register (pair). The idea is to reuse the previously loaded value
+ * if possible, otherwise to keep the value in a native register as long as
+ * possible.
+ */
+typedef struct RegisterInfo {
+ int reg; // Reg number
+ bool inUse; // Has it been allocated?
+ bool pair; // Part of a register pair?
+ int partner; // If pair, other reg of pair
+ bool live; // Is there an associated SSA name?
+ bool dirty; // If live, is it dirty?
+ int sReg; // Name of live value
+ struct LIR *defStart; // Starting inst in last def sequence
+ struct LIR *defEnd; // Ending inst in last def sequence
+} RegisterInfo;
+
+typedef struct RegisterPool {
+ BitVector *nullCheckedRegs; // Track which registers have been null-checked
+ int numCoreTemps;
+ RegisterInfo *coreTemps;
+ int nextCoreTemp;
+ int numFPTemps;
+ RegisterInfo *FPTemps;
+ int nextFPTemp;
+ int numCoreRegs;
+ RegisterInfo *coreRegs;
+ int numFPRegs;
+ RegisterInfo *FPRegs;
+} RegisterPool;
+
+typedef enum OpSize {
+ kWord,
+ kLong,
+ kSingle,
+ kDouble,
+ kUnsignedHalf,
+ kSignedHalf,
+ kUnsignedByte,
+ kSignedByte,
+} OpSize;
+
+typedef enum OpKind {
+ kOpMov,
+ kOpMvn,
+ kOpCmp,
+ kOpLsl,
+ kOpLsr,
+ kOpAsr,
+ kOpRor,
+ kOpNot,
+ kOpAnd,
+ kOpOr,
+ kOpXor,
+ kOpNeg,
+ kOpAdd,
+ kOpAdc,
+ kOpSub,
+ kOpSbc,
+ kOpRsub,
+ kOpMul,
+ kOpDiv,
+ kOpRem,
+ kOpBic,
+ kOpCmn,
+ kOpTst,
+ kOpBkpt,
+ kOpBlx,
+ kOpPush,
+ kOpPop,
+ kOp2Char,
+ kOp2Short,
+ kOp2Byte,
+ kOpCondBr,
+ kOpUncondBr,
+} OpKind;
+
+typedef struct X86LIR {
+ LIR generic;
+ //X86OpCode opCode;
+ int operands[4]; // [0..3] = [dest, src1, src2, extra]
+ bool isNop; // LIR is optimized away
+ bool branchInsertSV;// mark for insertion of branch before this instruction,
+ // used to identify mem ops for self verification mode
+ int age; // default is 0, set lazily by the optimizer
+ int aliasInfo; // For Dalvik register access & litpool disambiguation
+ u8 useMask; // Resource mask for use
+ u8 defMask; // Resource mask for def
+} X86LIR;
+
+/* Utility macros to traverse the LIR/X86LIR list */
+#define NEXT_LIR(lir) ((X86LIR *) lir->generic.next)
+#define PREV_LIR(lir) ((X86LIR *) lir->generic.prev)
+
+#define NEXT_LIR_LVALUE(lir) (lir)->generic.next
+#define PREV_LIR_LVALUE(lir) (lir)->generic.prev
+
+#define CHAIN_CELL_OFFSET_TAG 0xcdab
+
+#define CHAIN_CELL_NORMAL_SIZE 12
+#define CHAIN_CELL_PREDICTED_SIZE 16
+
+#endif /* _DALVIK_VM_COMPILER_CODEGEN_X86_X86LIR_H */
diff --git a/vm/compiler/codegen/x86/ia32/ArchVariant.c b/vm/compiler/codegen/x86/ia32/ArchVariant.c
new file mode 100644
index 0000000..2c0dfcb
--- /dev/null
+++ b/vm/compiler/codegen/x86/ia32/ArchVariant.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file is included by Codegen-x86.c, and implements architecture
+ * variant-specific code.
+ */
+
+/*
+ * Determine the initial instruction set to be used for this trace.
+ * Later components may decide to change this.
+ */
+JitInstructionSetType dvmCompilerInstructionSet(void)
+{
+ return DALVIK_JIT_IA32;
+}
+
+/* Architecture-specific initializations and checks go here */
+bool dvmCompilerArchVariantInit(void)
+{
+ /* First, declare dvmCompiler_TEMPLATE_XXX for each template */
+#define JIT_TEMPLATE(X) extern void dvmCompiler_TEMPLATE_##X();
+#include "../../../template/ia32/TemplateOpList.h"
+#undef JIT_TEMPLATE
+
+ int i = 0;
+ extern void dvmCompilerTemplateStart(void);
+
+ /*
+ * Then, populate the templateEntryOffsets array with the offsets from the
+ * dvmCompilerTemplateStart symbol for each template.
+ */
+#define JIT_TEMPLATE(X) templateEntryOffsets[i++] = \
+ (intptr_t) dvmCompiler_TEMPLATE_##X - (intptr_t) dvmCompilerTemplateStart;
+#include "../../../template/ia32/TemplateOpList.h"
+#undef JIT_TEMPLATE
+
+ /* Target-specific configuration */
+ gDvmJit.jitTableSize = 1 << 9; // 512
+ gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
+ gDvmJit.threshold = 200;
+ gDvmJit.codeCacheSize = 512*1024;
+
+#if defined(WITH_SELF_VERIFICATION)
+ /* Force into blocking mode */
+ gDvmJit.blockingMode = true;
+ gDvm.nativeDebuggerActive = true;
+#endif
+
+ /* Codegen-specific assumptions */
+ assert(offsetof(ClassObject, vtable) < 128 &&
+ (offsetof(ClassObject, vtable) & 0x3) == 0);
+ assert(offsetof(ArrayObject, length) < 128 &&
+ (offsetof(ArrayObject, length) & 0x3) == 0);
+ assert(offsetof(ArrayObject, contents) < 256);
+
+ /* Up to 5 args are pushed on top of FP - sizeofStackSaveArea */
+ assert(sizeof(StackSaveArea) < 236);
+
+ /*
+ * EA is calculated by doing "Rn + imm5 << 2", make sure that the last
+ * offset from the struct is less than 128.
+ */
+ assert((offsetof(InterpState, jitToInterpEntries) +
+ sizeof(struct JitToInterpEntries)) <= 128);
+ return true;
+}
+
+int dvmCompilerTargetOptHint(int key)
+{
+ int res;
+ switch (key) {
+ case kMaxHoistDistance:
+ res = 2;
+ break;
+ default:
+ LOGE("Unknown target optimization hint key: %d",key);
+ res = 0;
+ }
+ return res;
+}
+
+void dvmCompilerGenMemBarrier(CompilationUnit *cUnit)
+{
+}
diff --git a/vm/compiler/codegen/x86/ia32/ArchVariant.h b/vm/compiler/codegen/x86/ia32/ArchVariant.h
new file mode 100644
index 0000000..ac4293f
--- /dev/null
+++ b/vm/compiler/codegen/x86/ia32/ArchVariant.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _DALVIK_VM_COMPILER_CODEGEN_X86_IA32_ARCHVARIANT_H
+#define _DALVIK_VM_COMPILER_CODEGEN_X86_IA32_ARCHVARIANT_H
+
+/* Create the TemplateOpcode enum */
+#define JIT_TEMPLATE(X) TEMPLATE_##X,
+typedef enum {
+#include "../../../template/ia32/TemplateOpList.h"
+/*
+ * For example,
+ * TEMPLATE_CMP_LONG,
+ * TEMPLATE_RETURN,
+ * ...
+ */
+ TEMPLATE_LAST_MARK,
+} TemplateOpCode;
+#undef JIT_TEMPLATE
+
+#endif /* _DALVIK_VM_COMPILER_CODEGEN_X86_IA32_ARCHVARIANT_H */
diff --git a/vm/compiler/codegen/x86/ia32/CallingConvention.S b/vm/compiler/codegen/x86/ia32/CallingConvention.S
new file mode 100644
index 0000000..cc4187a
--- /dev/null
+++ b/vm/compiler/codegen/x86/ia32/CallingConvention.S
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Save & restore for callee-save FP registers.
+ * On entry:
+ * tos : pointer to save area of JIT_CALLEE_SAVE_WORD_SIZE
+ */
+ .text
+ .align 2
+ .global dvmJitCalleeSave
+ .type dvmJitCalleeSave, %function
+dvmJitCalleeSave:
+ ret
+
+ .global dvmJitCalleeRestore
+ .type dvmJitCalleeRestore, %function
+dvmJitCalleeRestore:
+ ret
diff --git a/vm/compiler/codegen/x86/ia32/Codegen.c b/vm/compiler/codegen/x86/ia32/Codegen.c
new file mode 100644
index 0000000..e86bacc
--- /dev/null
+++ b/vm/compiler/codegen/x86/ia32/Codegen.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define _CODEGEN_C
+#define _IA32
+
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+#include "libdex/OpCode.h"
+#include "libdex/OpCodeNames.h"
+#include "compiler/CompilerInternals.h"
+#include "compiler/codegen/x86/X86LIR.h"
+#include "mterp/common/FindInterface.h"
+//#include "compiler/codegen/x86/Ralloc.h"
+#include "compiler/codegen/x86/Codegen.h"
+#include "compiler/Loop.h"
+#include "ArchVariant.h"
+
+/* Architectural independent building blocks */
+//#include "../CodegenCommon.c"
+
+/* Architectural independent building blocks */
+//#include "../Thumb/Factory.c"
+/* Factory utilities dependent on arch-specific features */
+//#include "../CodegenFactory.c"
+
+/* ia32 register allocation */
+//#include "../ia32/Ralloc.c"
+
+/* MIR2LIR dispatcher and architectural independent codegen routines */
+#include "../CodegenDriver.c"
+
+/* Architecture manifest */
+#include "ArchVariant.c"
diff --git a/vm/compiler/template/config-ia32 b/vm/compiler/template/config-ia32
new file mode 100644
index 0000000..5709017
--- /dev/null
+++ b/vm/compiler/template/config-ia32
@@ -0,0 +1,45 @@
+# Copyright (C) 2010 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for ia32 architecture targets.
+#
+
+# file header and basic definitions
+#import c/header.c
+import ia32/header.S
+
+# C pre-processor defines for stub C instructions
+#import cstubs/stubdefs.c
+
+# highly-platform-specific defs
+import ia32/platform.S
+
+# common defs for the C helpers; include this before the instruction handlers
+#import c/opcommon.c
+
+# opcode list; argument to op-start is default directory
+op-start ia32
+
+op-end
+
+# "helper" code for C; include if you use any of the C stubs (this generates
+# object code, so it's normally excluded)
+##import c/gotoTargets.c
+
+# end of defs; include this when cstubs/stubdefs.c is included
+#import cstubs/enddefs.c
+
+# common subroutines for asm
+import ia32/footer.S
diff --git a/vm/compiler/template/ia32/TEMPLATE_INTERPRET.S b/vm/compiler/template/ia32/TEMPLATE_INTERPRET.S
new file mode 100644
index 0000000..4c98917
--- /dev/null
+++ b/vm/compiler/template/ia32/TEMPLATE_INTERPRET.S
@@ -0,0 +1,27 @@
+ /*
+ * TODO: figure out how best to do this on x86, as we don't have
+ * an lr equivalent and probably don't want to push.
+ *
+ * This handler transfers control to the interpreter without performing
+ * any lookups. It may be called either as part of a normal chaining
+ * operation, or from the transition code in header.S. We distinguish
+ * the two cases by looking at the link register. If called from a
+ * translation chain, it will point to the chaining Dalvik PC -3.
+ * On entry:
+ * lr - if NULL:
+ * r1 - the Dalvik PC to begin interpretation.
+ * else
+ * [lr, #3] contains Dalvik PC to begin interpretation
+ * rGLUE - pointer to interpState
+ * rFP - Dalvik frame pointer
+ *
+ *cmp lr, #0
+ *ldrne r1,[lr, #3]
+ *ldr r2, .LinterpPunt
+ *mov r0, r1 @ set Dalvik PC
+ *bx r2
+ *@ doesn't return
+ */
+
+.LinterpPunt:
+ .long dvmJitToInterpPunt
diff --git a/vm/compiler/template/ia32/TemplateOpList.h b/vm/compiler/template/ia32/TemplateOpList.h
new file mode 100644
index 0000000..a5000da
--- /dev/null
+++ b/vm/compiler/template/ia32/TemplateOpList.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Dalvik opcode list that uses additional templates to complete JIT execution.
+ */
+#ifndef JIT_TEMPLATE
+#define JIT_TEMPLATE(X)
+#endif
+
+JIT_TEMPLATE(INTERPRET)
diff --git a/vm/compiler/template/ia32/footer.S b/vm/compiler/template/ia32/footer.S
new file mode 100644
index 0000000..1b1a1ae
--- /dev/null
+++ b/vm/compiler/template/ia32/footer.S
@@ -0,0 +1,21 @@
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 4
+/*
+ * FIXME - need a cacheflush for x86
+ */
+ .global cacheflush
+cacheflush:
+ movl $$0xdeadf0f0, %eax
+ call *%eax
+
+
+ .global dmvCompilerTemplateEnd
+dmvCompilerTemplateEnd:
+
+#endif /* WITH_JIT */
diff --git a/vm/compiler/template/ia32/header.S b/vm/compiler/template/ia32/header.S
new file mode 100644
index 0000000..57f5a5b
--- /dev/null
+++ b/vm/compiler/template/ia32/header.S
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(WITH_JIT)
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "../../../mterp/common/asm-constants.h"
diff --git a/vm/compiler/template/ia32/platform.S b/vm/compiler/template/ia32/platform.S
new file mode 100644
index 0000000..a84e62d
--- /dev/null
+++ b/vm/compiler/template/ia32/platform.S
@@ -0,0 +1,7 @@
+/*
+ * ===========================================================================
+ * CPU-version-specific defines and utility
+ * ===========================================================================
+ */
+
+
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-ia32.S b/vm/compiler/template/out/CompilerTemplateAsm-ia32.S
new file mode 100644
index 0000000..6ccb067
--- /dev/null
+++ b/vm/compiler/template/out/CompilerTemplateAsm-ia32.S
@@ -0,0 +1,104 @@
+/*
+ * This file was generated automatically by gen-template.py for 'ia32'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: ia32/header.S */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(WITH_JIT)
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "../../../mterp/common/asm-constants.h"
+
+/* File: ia32/platform.S */
+/*
+ * ===========================================================================
+ * CPU-version-specific defines and utility
+ * ===========================================================================
+ */
+
+
+
+
+ .global dvmCompilerTemplateStart
+ .type dvmCompilerTemplateStart, %function
+ .text
+
+dvmCompilerTemplateStart:
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INTERPRET
+dvmCompiler_TEMPLATE_INTERPRET:
+/* File: ia32/TEMPLATE_INTERPRET.S */
+ /*
+ * TODO: figure out how best to do this on x86, as we don't have
+ * an lr equivalent and probably don't want to push.
+ *
+ * This handler transfers control to the interpreter without performing
+ * any lookups. It may be called either as part of a normal chaining
+ * operation, or from the transition code in header.S. We distinguish
+ * the two cases by looking at the link register. If called from a
+ * translation chain, it will point to the chaining Dalvik PC -3.
+ * On entry:
+ * lr - if NULL:
+ * r1 - the Dalvik PC to begin interpretation.
+ * else
+ * [lr, #3] contains Dalvik PC to begin interpretation
+ * rGLUE - pointer to interpState
+ * rFP - Dalvik frame pointer
+ *
+ *cmp lr, #0
+ *ldrne r1,[lr, #3]
+ *ldr r2, .LinterpPunt
+ *mov r0, r1 @ set Dalvik PC
+ *bx r2
+ *@ doesn't return
+ */
+
+.LinterpPunt:
+ .long dvmJitToInterpPunt
+
+ .size dvmCompilerTemplateStart, .-dvmCompilerTemplateStart
+/* File: ia32/footer.S */
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 4
+/*
+ * FIXME - need a cacheflush for x86
+ */
+ .global cacheflush
+cacheflush:
+ movl $0xdeadf0f0, %eax
+ call *%eax
+
+
+ .global dmvCompilerTemplateEnd
+dmvCompilerTemplateEnd:
+
+#endif /* WITH_JIT */
+
diff --git a/vm/compiler/template/rebuild.sh b/vm/compiler/template/rebuild.sh
index 8c47dd7..f04d097 100755
--- a/vm/compiler/template/rebuild.sh
+++ b/vm/compiler/template/rebuild.sh
@@ -19,4 +19,4 @@
# generated as part of the build.
#
set -e
-for arch in armv5te armv5te-vfp armv7-a armv7-a-neon; do TARGET_ARCH_EXT=$arch make -f Makefile-template; done
+for arch in ia32 armv5te armv5te-vfp armv7-a armv7-a-neon; do TARGET_ARCH_EXT=$arch make -f Makefile-template; done
diff --git a/vm/mterp/cstubs/stubdefs.c b/vm/mterp/cstubs/stubdefs.c
index bf870c6..9911ce1 100644
--- a/vm/mterp/cstubs/stubdefs.c
+++ b/vm/mterp/cstubs/stubdefs.c
@@ -14,12 +14,15 @@
#define GOTO_TARGET_DECL(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+/* (void)xxx to quiet unused variable compiler warnings. */
#define GOTO_TARGET(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) { \
u2 ref, vsrc1, vsrc2, vdst; \
u2 inst = FETCH(0); \
const Method* methodToCall; \
- StackSaveArea* debugSaveArea;
+ StackSaveArea* debugSaveArea; \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst; \
+ (void)methodToCall; (void)debugSaveArea;
#define GOTO_TARGET_END }
@@ -43,11 +46,13 @@
* Opcode handler framing macros. Here, each opcode is a separate function
* that takes a "glue" argument and returns void. We can't declare
* these "static" because they may be called from an assembly stub.
+ * (void)xxx to quiet unused variable compiler warnings.
*/
#define HANDLE_OPCODE(_op) \
void dvmMterp_##_op(MterpGlue* glue) { \
u2 ref, vsrc1, vsrc2, vdst; \
- u2 inst = FETCH(0);
+ u2 inst = FETCH(0); \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
#define OP_END }
diff --git a/vm/mterp/out/InterpAsm-x86.S b/vm/mterp/out/InterpAsm-x86.S
index 82fd5fb..2325708 100644
--- a/vm/mterp/out/InterpAsm-x86.S
+++ b/vm/mterp/out/InterpAsm-x86.S
@@ -8908,6 +8908,29 @@
* Common subroutines and data.
*/
+#if defined(WITH_JIT)
+/*
+ * Placeholder entries for x86 JIT
+ */
+ .global dvmJitToInterpPunt
+dvmJitToInterpPunt:
+ .global dvmJitToInterpSingleStep
+dvmJitToInterpSingleStep:
+ .global dvmJitToInterpNoChainNoProfile
+dvmJitToInterpNoChainNoProfile:
+ .global dvmJitToInterpTraceSelectNoChain
+dvmJitToInterpTraceSelectNoChain:
+ .global dvmJitToInterpTraceSelect
+dvmJitToInterpTraceSelect:
+ .global dvmJitToInterpBackwardBranch
+dvmJitToInterpBackwardBranch:
+ .global dvmJitToInterpNormal
+dvmJitToInterpNormal:
+ .global dvmJitToInterpNoChain
+dvmJitToInterpNoChain:
+ jmp common_abort
+#endif
+
/*
* Common code when a backwards branch is taken
*
diff --git a/vm/mterp/out/InterpC-allstubs.c b/vm/mterp/out/InterpC-allstubs.c
index 6c1d935..b3e42c5 100644
--- a/vm/mterp/out/InterpC-allstubs.c
+++ b/vm/mterp/out/InterpC-allstubs.c
@@ -428,12 +428,15 @@
#define GOTO_TARGET_DECL(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+/* (void)xxx to quiet unused variable compiler warnings. */
#define GOTO_TARGET(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) { \
u2 ref, vsrc1, vsrc2, vdst; \
u2 inst = FETCH(0); \
const Method* methodToCall; \
- StackSaveArea* debugSaveArea;
+ StackSaveArea* debugSaveArea; \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst; \
+ (void)methodToCall; (void)debugSaveArea;
#define GOTO_TARGET_END }
@@ -457,11 +460,13 @@
* Opcode handler framing macros. Here, each opcode is a separate function
* that takes a "glue" argument and returns void. We can't declare
* these "static" because they may be called from an assembly stub.
+ * (void)xxx to quiet unused variable compiler warnings.
*/
#define HANDLE_OPCODE(_op) \
void dvmMterp_##_op(MterpGlue* glue) { \
u2 ref, vsrc1, vsrc2, vdst; \
- u2 inst = FETCH(0);
+ u2 inst = FETCH(0); \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
#define OP_END }
diff --git a/vm/mterp/out/InterpC-armv4t.c b/vm/mterp/out/InterpC-armv4t.c
index 2e7716b..6184760 100644
--- a/vm/mterp/out/InterpC-armv4t.c
+++ b/vm/mterp/out/InterpC-armv4t.c
@@ -428,12 +428,15 @@
#define GOTO_TARGET_DECL(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+/* (void)xxx to quiet unused variable compiler warnings. */
#define GOTO_TARGET(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) { \
u2 ref, vsrc1, vsrc2, vdst; \
u2 inst = FETCH(0); \
const Method* methodToCall; \
- StackSaveArea* debugSaveArea;
+ StackSaveArea* debugSaveArea; \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst; \
+ (void)methodToCall; (void)debugSaveArea;
#define GOTO_TARGET_END }
@@ -457,11 +460,13 @@
* Opcode handler framing macros. Here, each opcode is a separate function
* that takes a "glue" argument and returns void. We can't declare
* these "static" because they may be called from an assembly stub.
+ * (void)xxx to quiet unused variable compiler warnings.
*/
#define HANDLE_OPCODE(_op) \
void dvmMterp_##_op(MterpGlue* glue) { \
u2 ref, vsrc1, vsrc2, vdst; \
- u2 inst = FETCH(0);
+ u2 inst = FETCH(0); \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
#define OP_END }
diff --git a/vm/mterp/out/InterpC-armv5te-vfp.c b/vm/mterp/out/InterpC-armv5te-vfp.c
index 48b8dbd..b07eeed 100644
--- a/vm/mterp/out/InterpC-armv5te-vfp.c
+++ b/vm/mterp/out/InterpC-armv5te-vfp.c
@@ -428,12 +428,15 @@
#define GOTO_TARGET_DECL(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+/* (void)xxx to quiet unused variable compiler warnings. */
#define GOTO_TARGET(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) { \
u2 ref, vsrc1, vsrc2, vdst; \
u2 inst = FETCH(0); \
const Method* methodToCall; \
- StackSaveArea* debugSaveArea;
+ StackSaveArea* debugSaveArea; \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst; \
+ (void)methodToCall; (void)debugSaveArea;
#define GOTO_TARGET_END }
@@ -457,11 +460,13 @@
* Opcode handler framing macros. Here, each opcode is a separate function
* that takes a "glue" argument and returns void. We can't declare
* these "static" because they may be called from an assembly stub.
+ * (void)xxx to quiet unused variable compiler warnings.
*/
#define HANDLE_OPCODE(_op) \
void dvmMterp_##_op(MterpGlue* glue) { \
u2 ref, vsrc1, vsrc2, vdst; \
- u2 inst = FETCH(0);
+ u2 inst = FETCH(0); \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
#define OP_END }
diff --git a/vm/mterp/out/InterpC-armv5te.c b/vm/mterp/out/InterpC-armv5te.c
index dfadf21..40679ac 100644
--- a/vm/mterp/out/InterpC-armv5te.c
+++ b/vm/mterp/out/InterpC-armv5te.c
@@ -428,12 +428,15 @@
#define GOTO_TARGET_DECL(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+/* (void)xxx to quiet unused variable compiler warnings. */
#define GOTO_TARGET(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) { \
u2 ref, vsrc1, vsrc2, vdst; \
u2 inst = FETCH(0); \
const Method* methodToCall; \
- StackSaveArea* debugSaveArea;
+ StackSaveArea* debugSaveArea; \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst; \
+ (void)methodToCall; (void)debugSaveArea;
#define GOTO_TARGET_END }
@@ -457,11 +460,13 @@
* Opcode handler framing macros. Here, each opcode is a separate function
* that takes a "glue" argument and returns void. We can't declare
* these "static" because they may be called from an assembly stub.
+ * (void)xxx to quiet unused variable compiler warnings.
*/
#define HANDLE_OPCODE(_op) \
void dvmMterp_##_op(MterpGlue* glue) { \
u2 ref, vsrc1, vsrc2, vdst; \
- u2 inst = FETCH(0);
+ u2 inst = FETCH(0); \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
#define OP_END }
diff --git a/vm/mterp/out/InterpC-armv7-a-neon.c b/vm/mterp/out/InterpC-armv7-a-neon.c
index adccb2d..d3f653e 100644
--- a/vm/mterp/out/InterpC-armv7-a-neon.c
+++ b/vm/mterp/out/InterpC-armv7-a-neon.c
@@ -428,12 +428,15 @@
#define GOTO_TARGET_DECL(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+/* (void)xxx to quiet unused variable compiler warnings. */
#define GOTO_TARGET(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) { \
u2 ref, vsrc1, vsrc2, vdst; \
u2 inst = FETCH(0); \
const Method* methodToCall; \
- StackSaveArea* debugSaveArea;
+ StackSaveArea* debugSaveArea; \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst; \
+ (void)methodToCall; (void)debugSaveArea;
#define GOTO_TARGET_END }
@@ -457,11 +460,13 @@
* Opcode handler framing macros. Here, each opcode is a separate function
* that takes a "glue" argument and returns void. We can't declare
* these "static" because they may be called from an assembly stub.
+ * (void)xxx to quiet unused variable compiler warnings.
*/
#define HANDLE_OPCODE(_op) \
void dvmMterp_##_op(MterpGlue* glue) { \
u2 ref, vsrc1, vsrc2, vdst; \
- u2 inst = FETCH(0);
+ u2 inst = FETCH(0); \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
#define OP_END }
diff --git a/vm/mterp/out/InterpC-armv7-a.c b/vm/mterp/out/InterpC-armv7-a.c
index d40fd7c..832f090 100644
--- a/vm/mterp/out/InterpC-armv7-a.c
+++ b/vm/mterp/out/InterpC-armv7-a.c
@@ -428,12 +428,15 @@
#define GOTO_TARGET_DECL(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+/* (void)xxx to quiet unused variable compiler warnings. */
#define GOTO_TARGET(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) { \
u2 ref, vsrc1, vsrc2, vdst; \
u2 inst = FETCH(0); \
const Method* methodToCall; \
- StackSaveArea* debugSaveArea;
+ StackSaveArea* debugSaveArea; \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst; \
+ (void)methodToCall; (void)debugSaveArea;
#define GOTO_TARGET_END }
@@ -457,11 +460,13 @@
* Opcode handler framing macros. Here, each opcode is a separate function
* that takes a "glue" argument and returns void. We can't declare
* these "static" because they may be called from an assembly stub.
+ * (void)xxx to quiet unused variable compiler warnings.
*/
#define HANDLE_OPCODE(_op) \
void dvmMterp_##_op(MterpGlue* glue) { \
u2 ref, vsrc1, vsrc2, vdst; \
- u2 inst = FETCH(0);
+ u2 inst = FETCH(0); \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
#define OP_END }
diff --git a/vm/mterp/out/InterpC-x86-atom.c b/vm/mterp/out/InterpC-x86-atom.c
index dfcc8ad..d192ddf 100644
--- a/vm/mterp/out/InterpC-x86-atom.c
+++ b/vm/mterp/out/InterpC-x86-atom.c
@@ -428,12 +428,15 @@
#define GOTO_TARGET_DECL(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+/* (void)xxx to quiet unused variable compiler warnings. */
#define GOTO_TARGET(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) { \
u2 ref, vsrc1, vsrc2, vdst; \
u2 inst = FETCH(0); \
const Method* methodToCall; \
- StackSaveArea* debugSaveArea;
+ StackSaveArea* debugSaveArea; \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst; \
+ (void)methodToCall; (void)debugSaveArea;
#define GOTO_TARGET_END }
@@ -457,11 +460,13 @@
* Opcode handler framing macros. Here, each opcode is a separate function
* that takes a "glue" argument and returns void. We can't declare
* these "static" because they may be called from an assembly stub.
+ * (void)xxx to quiet unused variable compiler warnings.
*/
#define HANDLE_OPCODE(_op) \
void dvmMterp_##_op(MterpGlue* glue) { \
u2 ref, vsrc1, vsrc2, vdst; \
- u2 inst = FETCH(0);
+ u2 inst = FETCH(0); \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
#define OP_END }
diff --git a/vm/mterp/out/InterpC-x86.c b/vm/mterp/out/InterpC-x86.c
index 146f4eb..dc1e06d 100644
--- a/vm/mterp/out/InterpC-x86.c
+++ b/vm/mterp/out/InterpC-x86.c
@@ -428,12 +428,15 @@
#define GOTO_TARGET_DECL(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);
+/* (void)xxx to quiet unused variable compiler warnings. */
#define GOTO_TARGET(_target, ...) \
void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) { \
u2 ref, vsrc1, vsrc2, vdst; \
u2 inst = FETCH(0); \
const Method* methodToCall; \
- StackSaveArea* debugSaveArea;
+ StackSaveArea* debugSaveArea; \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst; \
+ (void)methodToCall; (void)debugSaveArea;
#define GOTO_TARGET_END }
@@ -457,11 +460,13 @@
* Opcode handler framing macros. Here, each opcode is a separate function
* that takes a "glue" argument and returns void. We can't declare
* these "static" because they may be called from an assembly stub.
+ * (void)xxx to quiet unused variable compiler warnings.
*/
#define HANDLE_OPCODE(_op) \
void dvmMterp_##_op(MterpGlue* glue) { \
u2 ref, vsrc1, vsrc2, vdst; \
- u2 inst = FETCH(0);
+ u2 inst = FETCH(0); \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
#define OP_END }
diff --git a/vm/mterp/x86/footer.S b/vm/mterp/x86/footer.S
index d43a662..d276183 100644
--- a/vm/mterp/x86/footer.S
+++ b/vm/mterp/x86/footer.S
@@ -17,6 +17,29 @@
* Common subroutines and data.
*/
+#if defined(WITH_JIT)
+/*
+ * Placeholder entries for x86 JIT
+ */
+ .global dvmJitToInterpPunt
+dvmJitToInterpPunt:
+ .global dvmJitToInterpSingleStep
+dvmJitToInterpSingleStep:
+ .global dvmJitToInterpNoChainNoProfile
+dvmJitToInterpNoChainNoProfile:
+ .global dvmJitToInterpTraceSelectNoChain
+dvmJitToInterpTraceSelectNoChain:
+ .global dvmJitToInterpTraceSelect
+dvmJitToInterpTraceSelect:
+ .global dvmJitToInterpBackwardBranch
+dvmJitToInterpBackwardBranch:
+ .global dvmJitToInterpNormal
+dvmJitToInterpNormal:
+ .global dvmJitToInterpNoChain
+dvmJitToInterpNoChain:
+ jmp common_abort
+#endif
+
/*
* Common code when a backwards branch is taken
*