Merge "Throw an exception in JIT'ed code if dvmFindInterfaceMethodInCache returns NULL" into froyo
diff --git a/vm/Globals.h b/vm/Globals.h
index e27d5d8..fb2518d 100644
--- a/vm/Globals.h
+++ b/vm/Globals.h
@@ -740,6 +740,9 @@
int invokePolymorphic;
int invokeNative;
int returnOp;
+ int icPatchFast;
+ int icPatchQueued;
+ int icPatchDropped;
u8 jitTime;
/* Compiled code cache */
diff --git a/vm/compiler/Compiler.c b/vm/compiler/Compiler.c
index 6cb1c77..6a58d8b 100644
--- a/vm/compiler/Compiler.c
+++ b/vm/compiler/Compiler.c
@@ -340,6 +340,7 @@
#if defined(WITH_JIT_TUNING)
/* Track method-level compilation statistics */
gDvmJit.methodStatsTable = dvmHashTableCreate(32, NULL);
+ gDvm.verboseShutdown = true;
#endif
dvmUnlockMutex(&gDvmJit.compilerLock);
diff --git a/vm/compiler/codegen/arm/Assemble.c b/vm/compiler/codegen/arm/Assemble.c
index 493f95e..1951e07 100644
--- a/vm/compiler/codegen/arm/Assemble.c
+++ b/vm/compiler/codegen/arm/Assemble.c
@@ -1403,31 +1403,54 @@
* Attempt to enqueue a work order to patch an inline cache for a predicted
* chaining cell for virtual/interface calls.
*/
-bool inlineCachePatchEnqueue(PredictedChainingCell *cellAddr,
- PredictedChainingCell *newContent)
+static bool inlineCachePatchEnqueue(PredictedChainingCell *cellAddr,
+ PredictedChainingCell *newContent)
{
bool result = true;
+ /*
+ * Make sure only one thread gets here since updating the cell (ie the fast
+ * path) and queueing the request (ie the queued path) have to be done
+ * in an atomic fashion.
+ */
dvmLockMutex(&gDvmJit.compilerICPatchLock);
+ /* Fast path for uninitialized chaining cell */
if (cellAddr->clazz == NULL &&
cellAddr->branch == PREDICTED_CHAIN_BX_PAIR_INIT) {
+ cellAddr->method = newContent->method;
+ cellAddr->branch = newContent->branch;
+ cellAddr->counter = newContent->counter;
/*
* The update order matters - make sure clazz is updated last since it
* will bring the uninitialized chaining cell to life.
*/
- cellAddr->method = newContent->method;
- cellAddr->branch = newContent->branch;
- cellAddr->counter = newContent->counter;
+ MEM_BARRIER();
cellAddr->clazz = newContent->clazz;
cacheflush((intptr_t) cellAddr, (intptr_t) (cellAddr+1), 0);
+#if defined(WITH_JIT_TUNING)
+ gDvmJit.icPatchFast++;
+#endif
}
+ /*
+ * Otherwise the patch request will be queued and handled in the next
+ * GC cycle. At that time all other mutator threads are suspended so
+ * there will be no partial update in the inline cache state.
+ */
else if (gDvmJit.compilerICPatchIndex < COMPILER_IC_PATCH_QUEUE_SIZE) {
int index = gDvmJit.compilerICPatchIndex++;
gDvmJit.compilerICPatchQueue[index].cellAddr = cellAddr;
gDvmJit.compilerICPatchQueue[index].cellContent = *newContent;
- } else {
+#if defined(WITH_JIT_TUNING)
+ gDvmJit.icPatchQueued++;
+#endif
+ }
+ /* Queue is full - just drop this patch request */
+ else {
result = false;
+#if defined(WITH_JIT_TUNING)
+ gDvmJit.icPatchDropped++;
+#endif
}
dvmUnlockMutex(&gDvmJit.compilerICPatchLock);
diff --git a/vm/interp/Jit.c b/vm/interp/Jit.c
index 6ec5584..f475773 100644
--- a/vm/interp/Jit.c
+++ b/vm/interp/Jit.c
@@ -456,12 +456,17 @@
LOGD("JIT: Lookups: %d hits, %d misses; %d normal, %d punt",
gDvmJit.addrLookupsFound, gDvmJit.addrLookupsNotFound,
gDvmJit.normalExit, gDvmJit.puntExit);
+
LOGD("JIT: noChainExit: %d IC miss, %d interp callsite, "
"%d switch overflow",
gDvmJit.noChainExit[kInlineCacheMiss],
gDvmJit.noChainExit[kCallsiteInterpreted],
gDvmJit.noChainExit[kSwitchOverflow]);
+ LOGD("JIT: ICPatch: %d fast, %d queued; %d dropped",
+ gDvmJit.icPatchFast, gDvmJit.icPatchQueued,
+ gDvmJit.icPatchDropped);
+
LOGD("JIT: Invoke: %d mono, %d poly, %d native, %d return",
gDvmJit.invokeMonomorphic, gDvmJit.invokePolymorphic,
gDvmJit.invokeNative, gDvmJit.returnOp);