Merge "Don't duplicate annotation set ref lists when merging."
diff --git a/tests/080-oom-throw/expected.txt b/tests/080-oom-throw/expected.txt
index 811f68c..73cc0d8 100644
--- a/tests/080-oom-throw/expected.txt
+++ b/tests/080-oom-throw/expected.txt
@@ -1,2 +1,2 @@
-Array allocation failed
-Instance allocation failed
+NEW_ARRAY correctly threw OOME
+NEW_INSTANCE correctly threw OOME
diff --git a/tests/080-oom-throw/src/Main.java b/tests/080-oom-throw/src/Main.java
index 3d75f3d..3ffe2f3 100644
--- a/tests/080-oom-throw/src/Main.java
+++ b/tests/080-oom-throw/src/Main.java
@@ -16,64 +16,61 @@
 
 public class Main {
     static class ArrayMemEater {
-        static int blowup(char[][] holder, int size) {
-            int i = 0;
+        static boolean sawOome;
+
+        static void blowup(char[][] holder) {
             try {
-                for ( ; i < size; i++)
-                    holder[i] = new char[128];
+                for (int i = 0; i < holder.length; ++i) {
+                    holder[i] = new char[128 * 1024];
+                }
             } catch (OutOfMemoryError oome) {
-                return i;
+                ArrayMemEater.sawOome = true;
             }
-
-            return size;
-        }
-
-        static void confuseCompilerOptimization(char[][] holder) {
         }
     }
 
     static class InstanceMemEater {
+        static boolean sawOome;
+
         InstanceMemEater next;
-        double d1, d2, d3, d4, d5, d6, d7, d8;
+        double d1, d2, d3, d4, d5, d6, d7, d8; // Bloat this object so we fill the heap faster.
 
-        static InstanceMemEater blowup() {
-            InstanceMemEater memEater;
+        static InstanceMemEater allocate() {
             try {
-                memEater = new InstanceMemEater();
+                return new InstanceMemEater();
             } catch (OutOfMemoryError e) {
-                memEater = null;
+                InstanceMemEater.sawOome = true;
+                return null;
             }
-            return memEater;
         }
 
-        static void confuseCompilerOptimization(InstanceMemEater memEater) {
+        static void confuseCompilerOptimization(InstanceMemEater instance) {
         }
     }
 
-    static void triggerArrayOOM() {
-        int size = 1 * 1024 * 1024;
-        char[][] holder = new char[size][];
-
-        int count = ArrayMemEater.blowup(holder, size);
-        ArrayMemEater.confuseCompilerOptimization(holder);
-        if (count < size) {
-            System.out.println("Array allocation failed");
-        }
+    static boolean triggerArrayOOM() {
+        ArrayMemEater.blowup(new char[1 * 1024 * 1024][]);
+        return ArrayMemEater.sawOome;
     }
 
-    static void triggerInstanceOOM() {
-        InstanceMemEater memEater = InstanceMemEater.blowup();
+    static boolean triggerInstanceOOM() {
+        InstanceMemEater memEater = InstanceMemEater.allocate();
         InstanceMemEater lastMemEater = memEater;
         do {
-            lastMemEater.next = InstanceMemEater.blowup();
+            lastMemEater.next = InstanceMemEater.allocate();
             lastMemEater = lastMemEater.next;
         } while (lastMemEater != null);
         memEater.confuseCompilerOptimization(memEater);
-        System.out.println("Instance allocation failed");
+        return InstanceMemEater.sawOome;
     }
 
     public static void main(String[] args) {
-        triggerArrayOOM();
-        triggerInstanceOOM();
+        if (triggerArrayOOM()) {
+            System.out.println("NEW_ARRAY correctly threw OOME");
+        }
+
+        if (triggerInstanceOOM()) {
+            System.out.println("NEW_INSTANCE correctly threw OOME");
+        }
     }
 }
diff --git a/tests/302-float-conversion/expected.txt b/tests/302-float-conversion/expected.txt
new file mode 100644
index 0000000..6939a5c
--- /dev/null
+++ b/tests/302-float-conversion/expected.txt
@@ -0,0 +1 @@
+Result is as expected
diff --git a/tests/302-float-conversion/info.txt b/tests/302-float-conversion/info.txt
new file mode 100644
index 0000000..2b8bc21
--- /dev/null
+++ b/tests/302-float-conversion/info.txt
@@ -0,0 +1,4 @@
+Tests whether constant conversions of double values to long values are
+properly handled by the VM. For example, x86 systems using the x87 stack
+should not overflow under constant conversions.
+
diff --git a/tests/302-float-conversion/src/Main.java b/tests/302-float-conversion/src/Main.java
new file mode 100644
index 0000000..dc512c5
--- /dev/null
+++ b/tests/302-float-conversion/src/Main.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+    static final long NUM_ITERATIONS = 50000;
+    static volatile double negInfinity = Double.NEGATIVE_INFINITY;
+
+    public static void main(String args[]) {
+
+        long sumInf = 0;
+        long sumRes = 0;
+
+        for (long i = 0 ; i < NUM_ITERATIONS ; i++) {
+            //Every second iteration, sumInf becomes 0
+            sumInf += (long) negInfinity;
+
+            //Do some extra work so compilers do not treat this
+            //loop as dead code
+            if (sumInf == Long.MIN_VALUE) {
+                sumRes++;
+            }
+        }
+
+        if (sumRes == NUM_ITERATIONS / 2) {
+            System.out.println("Result is as expected");
+        } else {
+            System.out.println("Conversions failed over " + NUM_ITERATIONS + " iterations");
+        }
+    }
+}
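
The test's arithmetic relies on two facts: converting negative infinity to long yields Long.MIN_VALUE, and adding Long.MIN_VALUE to itself wraps around to 0 in two's complement, so sumInf alternates between MIN_VALUE and 0 and sumRes ends up at exactly NUM_ITERATIONS / 2. A minimal C++ illustration of that wraparound (unsigned arithmetic is used here because signed overflow is undefined in C++; this is a sketch for the reader, not part of the change):

    #include <cassert>
    #include <cstdint>

    int main() {
        // In Java, (long) Double.NEGATIVE_INFINITY saturates to the most negative long.
        const int64_t minLong = INT64_MIN;                 // -2^63

        // Java long addition wraps modulo 2^64; model that with uint64_t here.
        uint64_t sum = 0;
        sum += static_cast<uint64_t>(minLong);             // first iteration: sum == 2^63
        sum += static_cast<uint64_t>(minLong);             // second iteration: wraps to 0
        assert(sum == 0);

        return 0;
    }
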
diff --git a/vm/SignalCatcher.cpp b/vm/SignalCatcher.cpp
index a4beb6b..d4302aa 100644
--- a/vm/SignalCatcher.cpp
+++ b/vm/SignalCatcher.cpp
@@ -191,7 +191,7 @@
             ALOGE("Unable to open stack trace file '%s': %s",
                 gDvm.stackTraceFile, strerror(errno));
         } else {
-            ssize_t actual = write(fd, traceBuf, traceLen);
+            ssize_t actual = TEMP_FAILURE_RETRY(write(fd, traceBuf, traceLen));
             if (actual != (ssize_t) traceLen) {
                 ALOGE("Failed to write stack traces to %s (%d of %zd): %s",
                     gDvm.stackTraceFile, (int) actual, traceLen,
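
TEMP_FAILURE_RETRY (a macro from <unistd.h> on bionic/glibc) re-issues a system call that fails with EINTR, so a stray signal no longer turns into a spuriously short trace write. A rough sketch of the behaviour being relied on here, not the actual macro definition:

    #include <cerrno>
    #include <unistd.h>

    // Sketch of an EINTR-retrying write, i.e. what wrapping write() in
    // TEMP_FAILURE_RETRY buys: restart the call when a signal interrupts it.
    // Short writes (rc >= 0 but < count) are still possible and still have
    // to be checked by the caller, as the surrounding code does.
    static ssize_t writeRetryingOnEintr(int fd, const void* buf, size_t count) {
        ssize_t rc;
        do {
            rc = write(fd, buf, count);
        } while (rc == -1 && errno == EINTR);
        return rc;
    }
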
diff --git a/vm/Thread.cpp b/vm/Thread.cpp
index 5ca055f..7c8395e 100644
--- a/vm/Thread.cpp
+++ b/vm/Thread.cpp
@@ -1309,52 +1309,12 @@
          * so use OutOfMemoryError.
          */
 
-#if HAVE_ANDROID_OS
-        struct mallinfo malloc_info;
-        malloc_info = mallinfo();
-        ALOGE("Native heap free: %zd of %zd bytes", malloc_info.fordblks, malloc_info.uordblks);
-#endif
-
-        size_t thread_count = 0;
-        DIR* d = opendir("/proc/self/task");
-        if (d != NULL) {
-            dirent* entry = NULL;
-            while ((entry = readdir(d)) != NULL) {
-                char* end;
-                strtol(entry->d_name, &end, 10);
-                if (!*end) {
-                    ++thread_count;
-                }
-            }
-            closedir(d);
-        }
-
-        ALOGE("pthread_create (%d threads) failed: %s", thread_count, strerror(cc));
-
-        // Super-verbose output to help track down http://b/8470684.
-        size_t map_count = 0;
-        FILE* fp = fopen("/proc/self/maps", "r");
-        if (fp != NULL) {
-            char buf[1024];
-            while (fgets(buf, sizeof(buf), fp) != NULL) {
-                ALOGE("/proc/self/maps: %s", buf);
-                ++map_count;
-            }
-            fclose(fp);
-        }
-
         dvmSetFieldObject(threadObj, gDvm.offJavaLangThread_vmThread, NULL);
 
+        ALOGE("pthread_create (stack size %d bytes) failed: %s", stackSize, strerror(cc));
         dvmThrowExceptionFmt(gDvm.exOutOfMemoryError,
-                             "pthread_create (%d threads, %d map entries, "
-#if HAVE_ANDROID_OS
-                             "%zd free of %zd native heap bytes"
-#endif
-                             ") failed: %s", thread_count, map_count,
-#if HAVE_ANDROID_OS
-                             malloc_info.fordblks, malloc_info.uordblks,
-#endif
-                             strerror(cc));
+                             "pthread_create (stack size %d bytes) failed: %s",
+                             stackSize, strerror(cc));
         goto fail;
     }
 
@@ -3309,6 +3269,36 @@
     }
 }
 
+static bool shouldShowNativeStack(Thread* thread) {
+    // In native code somewhere in the VM? That's interesting.
+    if (thread->status == THREAD_VMWAIT) {
+        return true;
+    }
+
+    // In an Object.wait variant? That's not interesting.
+    if (thread->status == THREAD_TIMED_WAIT || thread->status == THREAD_WAIT) {
+        return false;
+    }
+
+    // The Signal Catcher thread? That's not interesting.
+    if (thread->status == THREAD_RUNNING) {
+        return false;
+    }
+
+    // In some other native method? That's interesting.
+    // We don't just check THREAD_NATIVE because native methods will be in
+    // state THREAD_SUSPENDED if they're calling back into the VM, or THREAD_MONITOR
+    // if they're blocked on a monitor, or one of the thread-startup states if
+    // they're early enough in their life cycle (http://b/7432159).
+    u4* fp = thread->interpSave.curFrame;
+    if (fp == NULL) {
+        // The thread has no managed frames, so native frames are all there is.
+        return true;
+    }
+    const Method* currentMethod = SAVEAREA_FROM_FP(fp)->method;
+    return currentMethod != NULL && dvmIsNativeMethod(currentMethod);
+}
+
 /*
  * Print information about the specified thread.
  *
@@ -3387,16 +3377,7 @@
 
     dumpSchedStat(target, thread->systemTid);
 
-    /*
-     * Grab the native stack, if possible.
-     *
-     * The native thread is still running, even if the Dalvik side is
-     * suspended.  This means the thread can move itself out of NATIVE state
-     * while we're in here, shifting to SUSPENDED after a brief moment at
-     * RUNNING.  At that point the native stack isn't all that interesting,
-     * though, so if we fail to dump it there's little lost.
-     */
-    if (thread->status == THREAD_NATIVE || thread->status == THREAD_VMWAIT) {
+    if (shouldShowNativeStack(thread)) {
         dvmDumpNativeStack(target, thread->systemTid);
     }
 
diff --git a/vm/alloc/HeapSource.cpp b/vm/alloc/HeapSource.cpp
index ee40af82..93cdd2f 100644
--- a/vm/alloc/HeapSource.cpp
+++ b/vm/alloc/HeapSource.cpp
@@ -457,6 +457,14 @@
             dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
         }
 
+        // Many JDWP requests cause allocation. We can't take the heap lock and wait to
+        // transition to runnable so we can start a GC if a debugger is connected, because
+        // we don't know that the JDWP thread isn't about to allocate and require the
+        // heap lock itself, leading to deadlock. http://b/8191824.
+        if (gDvm.debuggerConnected) {
+            continue;
+        }
+
         dvmLockHeap();
         /*
          * Another thread may have started a concurrent garbage
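
The new check sidesteps a hold-one-lock-and-wait deadlock: the collector would take the heap lock and then block, while a JDWP request that allocates can block on that same heap lock, as the comment in the hunk explains. A schematic of the resulting daemon loop, with made-up names rather than Dalvik's real locking primitives:

    #include <atomic>
    #include <mutex>

    // Illustrative only: skip the background-collection step while a debugger
    // is attached, because starting one would take gcLock and then wait on a
    // thread that may itself be blocked trying to acquire gcLock.
    static std::atomic<bool> debuggerConnected{false};
    static std::atomic<bool> shuttingDown{false};
    static std::mutex gcLock;

    static void waitForGcRequest() { /* hypothetical: block until a GC is requested */ }
    static void runConcurrentCollection() { /* hypothetical collection entry point */ }

    static void gcDaemonLoop() {
        while (!shuttingDown.load()) {
            waitForGcRequest();
            if (debuggerConnected.load()) {
                continue;                        // avoid the lock-order deadlock
            }
            std::lock_guard<std::mutex> guard(gcLock);
            runConcurrentCollection();
        }
    }
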
diff --git a/vm/compiler/Compiler.cpp b/vm/compiler/Compiler.cpp
index cdd62cc..188027f 100644
--- a/vm/compiler/Compiler.cpp
+++ b/vm/compiler/Compiler.cpp
@@ -448,7 +448,7 @@
     pJitProfTable = (unsigned char *)malloc(JIT_PROF_SIZE);
     if (!pJitProfTable) {
         ALOGE("jit prof table allocation failed");
-        free(pJitProfTable);
+        free(pJitTable);
         dvmUnlockMutex(&gDvmJit.tableLock);
         goto fail;
     }
@@ -464,6 +464,8 @@
                              calloc(1, sizeof(*pJitTraceProfCounters));
     if (!pJitTraceProfCounters) {
         ALOGE("jit trace prof counters allocation failed");
+        free(pJitTable);
+        free(pJitProfTable);
         dvmUnlockMutex(&gDvmJit.tableLock);
         goto fail;
     }
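
Both hunks fix the same cleanup-on-failure pattern: when a later allocation in the sequence fails, the code must free the tables that were already allocated (pJitTable, then pJitProfTable), not the pointer that just came back NULL. In isolation the pattern looks like this (illustrative names and sizes, not the real JIT table setup):

    #include <cstdlib>

    // Sketch of freeing earlier allocations when a later one fails.
    bool initThreeTables(void** tableOut, void** profOut, void** countersOut) {
        void* table = std::malloc(4096);
        if (table == nullptr) {
            return false;                      // nothing allocated yet
        }
        void* prof = std::malloc(2048);
        if (prof == nullptr) {
            std::free(table);                  // free what succeeded, not 'prof'
            return false;
        }
        void* counters = std::calloc(1, 1024);
        if (counters == nullptr) {
            std::free(table);                  // unwind both earlier allocations
            std::free(prof);
            return false;
        }
        *tableOut = table;
        *profOut = prof;
        *countersOut = counters;
        return true;
    }
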
diff --git a/vm/compiler/codegen/x86/CodegenInterface.cpp b/vm/compiler/codegen/x86/CodegenInterface.cpp
index e7dd8af..46f0979 100644
--- a/vm/compiler/codegen/x86/CodegenInterface.cpp
+++ b/vm/compiler/codegen/x86/CodegenInterface.cpp
@@ -70,6 +70,9 @@
     gDvmJit.codeCacheSize = 512*1024;
     gDvmJit.optLevel = kJitOptLevelO1;
 
+    //Disable Method-JIT
+    gDvmJit.disableOpt |= (1 << kMethodJit);
+
 #if defined(WITH_SELF_VERIFICATION)
     /* Force into blocking mode */
     gDvmJit.blockingMode = true;
@@ -324,7 +327,7 @@
         cellAddr->clazz = newContent->clazz;
         //cacheflush((intptr_t) cellAddr, (intptr_t) (cellAddr+1), 0);
 #endif
-#if defined(IA_JIT_TUNING)
+#if defined(WITH_JIT_TUNING)
         gDvmJit.icPatchInit++;
 #endif
         COMPILER_TRACE_CHAINING(
@@ -717,6 +720,12 @@
 #ifndef PREDICTED_CHAINING
     //assume rPC for callee->insns in %ebx
     scratchRegs[0] = PhysicalReg_EAX;
+#if defined(WITH_JIT_TUNING)
+    /* Predicted chaining is not enabled. Fall back to interpreter and
+     * indicate that predicted chaining was not done.
+     */
+    move_imm_to_reg(OpndSize_32, kInlineCacheMiss, PhysicalReg_EDX, true);
+#endif
     call_dvmJitToInterpTraceSelectNoChain();
 #else
     /* make sure section for predicited chaining cell is 4-byte aligned */
diff --git a/vm/compiler/codegen/x86/LowerAlu.cpp b/vm/compiler/codegen/x86/LowerAlu.cpp
index 2231bac..c8c4d66 100644
--- a/vm/compiler/codegen/x86/LowerAlu.cpp
+++ b/vm/compiler/codegen/x86/LowerAlu.cpp
@@ -291,56 +291,74 @@
         load_fp_stack_VR(OpndSize_32, vB); //flds
     }
 
-    load_fp_stack_global_data_API("valuePosInfLong", OpndSize_64);
+    //Check if it is the special Negative Infinity value
     load_fp_stack_global_data_API("valueNegInfLong", OpndSize_64);
-
-    //ST(0) ST(1) ST(2) --> LintMin LintMax value
-    compare_fp_stack(true, 2, false/*isDouble*/); //ST(2)
-    //ST(0) ST(1) --> LintMax value
+    //Stack status: ST(0) ST(1) --> LlongMin value
+    compare_fp_stack(true, 1, false/*isDouble*/); // Pops ST(1)
     conditional_jump(Condition_AE, ".float_to_long_negInf", true);
     rememberState(1);
-    compare_fp_stack(true, 1, false/*isDouble*/); //ST(1)
+
+    //Check if it is the special Positive Infinity value
+    load_fp_stack_global_data_API("valuePosInfLong", OpndSize_64);
+    //Stack status: ST(0) ST(1) --> LlongMax value
+    compare_fp_stack(true, 1, false/*isDouble*/); // Pops ST(1)
     rememberState(2);
-    //ST(0) --> value
     conditional_jump(Condition_C, ".float_to_long_nanInf", true);
-    //fnstcw, orw, fldcw, xorw
+
+    //Normal Case
+    //We want to truncate toward zero for the conversion: rounding control bits 11 (mask 0xC00)
     load_effective_addr(-2, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
     store_fpu_cw(false/*checkException*/, 0, PhysicalReg_ESP, true);
+    //Change control word to rounding mode 11:
     alu_binary_imm_mem(OpndSize_16, or_opc, 0xc00, 0, PhysicalReg_ESP, true);
+    //Load the modified control word (truncation enabled)
     load_fpu_cw(0, PhysicalReg_ESP, true);
+    //Undo the change in the in-memory copy so the original can be restored below
     alu_binary_imm_mem(OpndSize_16, xor_opc, 0xc00, 0, PhysicalReg_ESP, true);
+    //Perform the actual conversion
     store_int_fp_stack_VR(true/*pop*/, OpndSize_64, vA); //fistpll
-    //fldcw
+    // Restore the original control word
     load_fpu_cw(0, PhysicalReg_ESP, true);
     load_effective_addr(2, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
     rememberState(3);
+    /* NOTE: We do not need to pop the original value we pushed, since the
+     * fistpll above (store_int_fp_stack_VR with pop) already removes it
+     * from the FP stack on this path.
+     */
     unconditional_jump(".float_to_long_okay", true);
+
+    //We can be here for positive infinity or NaN. Check parity bit
     insertLabel(".float_to_long_nanInf", true);
     conditional_jump(Condition_NP, ".float_to_long_posInf", true);
-    //fstpl??
     goToState(2);
-
+    //Save corresponding Long NaN value
     load_global_data_API("valueNanLong", OpndSize_64, 1, false);
-
     set_virtual_reg(vA, OpndSize_64, 1, false);
     transferToState(3);
+    //Pop out the original value we pushed
+    compare_fp_stack(true, 0, false/*isDouble*/); //ST(0)
     unconditional_jump(".float_to_long_okay", true);
-    insertLabel(".float_to_long_posInf", true);
-    //fstpl
-    goToState(2);
 
+    insertLabel(".float_to_long_posInf", true);
+    goToState(2);
+    //Save corresponding Long Positive Infinity value
     load_global_data_API("valuePosInfLong", OpndSize_64, 2, false);
     set_virtual_reg(vA, OpndSize_64, 2, false);
     transferToState(3);
+    //Pop out the original value we pushed
+    compare_fp_stack(true, 0, false/*isDouble*/); //ST(0)
     unconditional_jump(".float_to_long_okay", true);
+
     insertLabel(".float_to_long_negInf", true);
     //fstpl
-    //fstpl
     goToState(1);
-
+    //Load corresponding Long Negative Infinity value
     load_global_data_API("valueNegInfLong", OpndSize_64, 3, false);
     set_virtual_reg(vA, OpndSize_64, 3, false);
     transferToState(3);
+    //Pop out the original value we pushed
+    compare_fp_stack(true, 0, false/*isDouble*/); //ST(0)
+
     insertLabel(".float_to_long_okay", true);
     return 0;
 }
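
For reference, the conversion semantics this lowering implements (and that 302-float-conversion exercises) are: NaN converts to 0, values at or beyond the long range saturate, and everything else truncates toward zero, which is what forcing the x87 rounding-control bits to 11 achieves. A plain C++ restatement of those rules, as a sketch rather than the emitted code:

    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Reference behaviour for the double-to-long conversion: the JIT sequence
    // above gets the truncation from the x87 control word and handles NaN and
    // the out-of-range cases on explicit side paths.
    static int64_t dalvikDoubleToLong(double v) {
        const int64_t kMax = std::numeric_limits<int64_t>::max();
        const int64_t kMin = std::numeric_limits<int64_t>::min();
        if (std::isnan(v)) {
            return 0;                               // mirrors the .float_to_long_nanInf path
        }
        if (v >= static_cast<double>(kMax)) {       // 2^63 and above
            return kMax;                            // mirrors the .float_to_long_posInf path
        }
        if (v <= static_cast<double>(kMin)) {       // -2^63 and below
            return kMin;                            // mirrors the .float_to_long_negInf path
        }
        return static_cast<int64_t>(v);             // in range: truncate toward zero
    }
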
diff --git a/vm/compiler/codegen/x86/LowerGetPut.cpp b/vm/compiler/codegen/x86/LowerGetPut.cpp
index c87b174..be519b1 100644
--- a/vm/compiler/codegen/x86/LowerGetPut.cpp
+++ b/vm/compiler/codegen/x86/LowerGetPut.cpp
@@ -668,7 +668,17 @@
     void *fieldPtr = (void*)
         (currentMethod->clazz->pDvmDex->pResFields[tmp]);
 #endif
-    assert(fieldPtr != NULL);
+
+    /* Usually, fieldPtr should not be null: the interpreter should have
+     * resolved it before we get here, or not allowed this opcode into a
+     * trace. However, we can be in a loop trace where this opcode was
+     * picked up by exhaustTrace. Returning -1 here terminates loop formation
+     * and falls back to a normal trace, which will not contain this opcode.
+     */
+    if (!fieldPtr) {
+        return -1;
+    }
+
     move_imm_to_reg(OpndSize_32, (int)fieldPtr, PhysicalReg_EAX, true);
     if(flag == SGET) {
         move_mem_to_reg(OpndSize_32, offStaticField_value, PhysicalReg_EAX, true, 7, false); //access field
diff --git a/vm/compiler/codegen/x86/LowerInvoke.cpp b/vm/compiler/codegen/x86/LowerInvoke.cpp
index 3d02190..10bc197 100644
--- a/vm/compiler/codegen/x86/LowerInvoke.cpp
+++ b/vm/compiler/codegen/x86/LowerInvoke.cpp
@@ -833,6 +833,12 @@
         if(callNoChain) {
             scratchRegs[0] = PhysicalReg_EAX;
             load_effective_addr(8, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
+#if defined(WITH_JIT_TUNING)
+            /* Predicted chaining failed. Fall back to interpreter and indicate
+             * inline cache miss.
+             */
+            move_imm_to_reg(OpndSize_32, kInlineCacheMiss, PhysicalReg_EDX, true);
+#endif
             call_dvmJitToInterpTraceSelectNoChain(); //input: rPC in %ebx
         } else {
             //jump to the stub at (%esp)
@@ -906,6 +912,11 @@
         //move rPC by 6 (3 bytecode units for INVOKE)
         alu_binary_imm_reg(OpndSize_32, add_opc, 6, PhysicalReg_EBX, true);
         scratchRegs[0] = PhysicalReg_EAX;
+#if defined(WITH_JIT_TUNING)
+        /* Return address not in code cache. Indicate that we are continuing
+         * with the interpreter. */
+        move_imm_to_reg(OpndSize_32, kCallsiteInterpreted, PhysicalReg_EDX, true);
+#endif
         call_dvmJitToInterpTraceSelectNoChain(); //rPC in %ebx
     }
     return;
diff --git a/vm/compiler/codegen/x86/LowerJump.cpp b/vm/compiler/codegen/x86/LowerJump.cpp
index 2b10d6b..d4b0df3 100644
--- a/vm/compiler/codegen/x86/LowerJump.cpp
+++ b/vm/compiler/codegen/x86/LowerJump.cpp
@@ -1163,6 +1163,13 @@
     //get rPC, %eax has the relative PC offset
     alu_binary_imm_reg(OpndSize_32, add_opc, (int)rPC, PhysicalReg_EAX, true);
     scratchRegs[0] = PhysicalReg_SCRATCH_2;
+#if defined(WITH_JIT_TUNING)
+    /* Fall back to the interpreter after resolving the address of the switch
+     * target. Indicate kSwitchOverflow. Note: this is not an actual overflow;
+     * it just lets us count how many times we return from a switch.
+     */
+    move_imm_to_mem(OpndSize_32, kSwitchOverflow, 0, PhysicalReg_ESP, true);
+#endif
     jumpToInterpNoChain();
     rPC += 3;
     return 0;
@@ -1220,6 +1227,13 @@
     //get rPC, %eax has the relative PC offset
     alu_binary_imm_reg(OpndSize_32, add_opc, (int)rPC, PhysicalReg_EAX, true);
     scratchRegs[0] = PhysicalReg_SCRATCH_2;
+#if defined(WITH_JIT_TUNING)
+    /* Fall back to the interpreter after resolving the address of the switch
+     * target. Indicate kSwitchOverflow. Note: this is not an actual overflow;
+     * it just lets us count how many times we return from a switch.
+     */
+    move_imm_to_mem(OpndSize_32, kSwitchOverflow, 0, PhysicalReg_ESP, true);
+#endif
     jumpToInterpNoChain();
     rPC += 3;
     return 0;
diff --git a/vm/compiler/codegen/x86/LowerReturn.cpp b/vm/compiler/codegen/x86/LowerReturn.cpp
index 928c05c..294d6b5 100644
--- a/vm/compiler/codegen/x86/LowerReturn.cpp
+++ b/vm/compiler/codegen/x86/LowerReturn.cpp
@@ -95,7 +95,11 @@
     typedef void (*vmHelper)(int);
     vmHelper funcPtr = dvmJitToInterpNoChainNoProfile; //%eax is the input
     move_imm_to_reg(OpndSize_32, (int)funcPtr, C_SCRATCH_1, isScratchPhysical);
-
+#if defined(WITH_JIT_TUNING)
+    /* Return address not in code cache. Indicate that we are continuing
+     * with the interpreter. */
+    move_imm_to_mem(OpndSize_32, kCallsiteInterpreted, 0, PhysicalReg_ESP, true);
+#endif
     unconditional_jump_reg(C_SCRATCH_1, isScratchPhysical);
     touchEax();
     return 0;
diff --git a/vm/compiler/template/armv5te/TEMPLATE_MUL_LONG.S b/vm/compiler/template/armv5te/TEMPLATE_MUL_LONG.S
index 8a9b115..6652b71 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_MUL_LONG.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_MUL_LONG.S
@@ -22,7 +22,6 @@
     mul     ip, r2, r1                  @  ip<- ZxW
     umull   r9, r10, r2, r0             @  r9/r10 <- ZxX
     mla     r2, r0, r3, ip              @  r2<- YxX + (ZxW)
-    add     r10, r2, r10                @  r10<- r10 + low(ZxW + (YxX))
-    mov     r0,r9
-    mov     r1,r10
+    mov     r0, r9
+    add     r1, r2, r10                 @  r1<- r10 + low(ZxW + (YxX))
     bx      lr
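
This template (and its copies in the generated CompilerTemplateAsm files below) computes a 64x64->64 multiply from 32-bit halves: the umull supplies the full low-by-low product and the mul/mla pair supplies the cross terms, which only ever land in the high word; the rewritten tail just writes the sum directly into r1 instead of going through r10. The same decomposition in C++, purely as an illustration:

    #include <cstdint>

    // For a = aHi:aLo and b = bHi:bLo, the low 64 bits of a*b are
    // aLo*bLo + ((aLo*bHi + aHi*bLo) << 32); any carry out of the cross
    // terms falls outside the 64-bit result and can be ignored.
    static uint64_t mulLong(uint64_t a, uint64_t b) {
        const uint32_t aLo = static_cast<uint32_t>(a);
        const uint32_t aHi = static_cast<uint32_t>(a >> 32);
        const uint32_t bLo = static_cast<uint32_t>(b);
        const uint32_t bHi = static_cast<uint32_t>(b >> 32);

        uint64_t low = static_cast<uint64_t>(aLo) * bLo;    // umull: full 64-bit product
        uint32_t cross = aLo * bHi + aHi * bLo;             // mul + mla: truncated cross terms
        return low + (static_cast<uint64_t>(cross) << 32);  // final add into the high word
    }
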
diff --git a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
index b10afcf..e8e2d52 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
@@ -17,12 +17,12 @@
     ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
     ldrb    r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
     ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
 #if !defined(WITH_SELF_VERIFICATION)
     ldr     r9,  [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
 #else
     mov     r9, #0                      @ disable chaining
 #endif
-    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                         @ r2<- method we're returning to
     cmp     r2, #0                      @ break frame?
 #if !defined(WITH_SELF_VERIFICATION)
diff --git a/vm/compiler/template/armv5te/TEMPLATE_STRING_INDEXOF.S b/vm/compiler/template/armv5te/TEMPLATE_STRING_INDEXOF.S
index bdfdf28..d970372 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_STRING_INDEXOF.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_STRING_INDEXOF.S
@@ -15,22 +15,23 @@
      *    r2:   Starting offset in string data
      */
 
+    ldr    r3, [r0, #STRING_FIELDOFF_VALUE]
     ldr    r7, [r0, #STRING_FIELDOFF_OFFSET]
     ldr    r8, [r0, #STRING_FIELDOFF_COUNT]
-    ldr    r0, [r0, #STRING_FIELDOFF_VALUE]
+
 
     /*
      * At this point, we have:
-     *    r0: object pointer
      *    r1: char to match
      *    r2: starting offset
+     *    r3: object pointer (final result -> r0)
      *    r7: offset
      *    r8: string length
      */
 
      /* Build pointer to start of string data */
-     add   r0, #16
-     add   r0, r0, r7, lsl #1
+     add   r3, #16
+     add   r0, r3, r7, lsl #1
 
      /* Save a copy of starting data in r7 */
      mov   r7, r0
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
index 27319e7..7ba1596 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
@@ -178,12 +178,12 @@
     ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
     ldrb    r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
     ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
 #if !defined(WITH_SELF_VERIFICATION)
     ldr     r9,  [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
 #else
     mov     r9, #0                      @ disable chaining
 #endif
-    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                         @ r2<- method we're returning to
     cmp     r2, #0                      @ break frame?
 #if !defined(WITH_SELF_VERIFICATION)
@@ -520,9 +520,8 @@
     mul     ip, r2, r1                  @  ip<- ZxW
     umull   r9, r10, r2, r0             @  r9/r10 <- ZxX
     mla     r2, r0, r3, ip              @  r2<- YxX + (ZxW)
-    add     r10, r2, r10                @  r10<- r10 + low(ZxW + (YxX))
-    mov     r0,r9
-    mov     r1,r10
+    mov     r0, r9
+    add     r1, r2, r10                 @  r1<- r10 + low(ZxW + (YxX))
     bx      lr
 
 /* ------------------------------ */
@@ -1265,22 +1264,23 @@
      *    r2:   Starting offset in string data
      */
 
+    ldr    r3, [r0, #STRING_FIELDOFF_VALUE]
     ldr    r7, [r0, #STRING_FIELDOFF_OFFSET]
     ldr    r8, [r0, #STRING_FIELDOFF_COUNT]
-    ldr    r0, [r0, #STRING_FIELDOFF_VALUE]
+
 
     /*
      * At this point, we have:
-     *    r0: object pointer
      *    r1: char to match
      *    r2: starting offset
+     *    r3: object pointer (final result -> r0)
      *    r7: offset
      *    r8: string length
      */
 
      /* Build pointer to start of string data */
-     add   r0, #16
-     add   r0, r0, r7, lsl #1
+     add   r3, #16
+     add   r0, r3, r7, lsl #1
 
      /* Save a copy of starting data in r7 */
      mov   r7, r0
@@ -1516,12 +1516,12 @@
     ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
     ldrb    r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
     ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
 #if !defined(WITH_SELF_VERIFICATION)
     ldr     r9,  [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
 #else
     mov     r9, #0                      @ disable chaining
 #endif
-    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                         @ r2<- method we're returning to
     cmp     r2, #0                      @ break frame?
 #if !defined(WITH_SELF_VERIFICATION)
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
index 68f6441..7d67595 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
@@ -178,12 +178,12 @@
     ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
     ldrb    r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
     ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
 #if !defined(WITH_SELF_VERIFICATION)
     ldr     r9,  [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
 #else
     mov     r9, #0                      @ disable chaining
 #endif
-    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                         @ r2<- method we're returning to
     cmp     r2, #0                      @ break frame?
 #if !defined(WITH_SELF_VERIFICATION)
@@ -732,9 +732,8 @@
     mul     ip, r2, r1                  @  ip<- ZxW
     umull   r9, r10, r2, r0             @  r9/r10 <- ZxX
     mla     r2, r0, r3, ip              @  r2<- YxX + (ZxW)
-    add     r10, r2, r10                @  r10<- r10 + low(ZxW + (YxX))
-    mov     r0,r9
-    mov     r1,r10
+    mov     r0, r9
+    add     r1, r2, r10                 @  r1<- r10 + low(ZxW + (YxX))
     bx      lr
 
 /* ------------------------------ */
@@ -996,22 +995,23 @@
      *    r2:   Starting offset in string data
      */
 
+    ldr    r3, [r0, #STRING_FIELDOFF_VALUE]
     ldr    r7, [r0, #STRING_FIELDOFF_OFFSET]
     ldr    r8, [r0, #STRING_FIELDOFF_COUNT]
-    ldr    r0, [r0, #STRING_FIELDOFF_VALUE]
+
 
     /*
      * At this point, we have:
-     *    r0: object pointer
      *    r1: char to match
      *    r2: starting offset
+     *    r3: object pointer (final result -> r0)
      *    r7: offset
      *    r8: string length
      */
 
      /* Build pointer to start of string data */
-     add   r0, #16
-     add   r0, r0, r7, lsl #1
+     add   r3, #16
+     add   r0, r3, r7, lsl #1
 
      /* Save a copy of starting data in r7 */
      mov   r7, r0
@@ -1247,12 +1247,12 @@
     ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
     ldrb    r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
     ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
 #if !defined(WITH_SELF_VERIFICATION)
     ldr     r9,  [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
 #else
     mov     r9, #0                      @ disable chaining
 #endif
-    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                         @ r2<- method we're returning to
     cmp     r2, #0                      @ break frame?
 #if !defined(WITH_SELF_VERIFICATION)
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
index 7573bd8..0dfdd87 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
@@ -178,12 +178,12 @@
     ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
     ldrb    r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
     ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
 #if !defined(WITH_SELF_VERIFICATION)
     ldr     r9,  [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
 #else
     mov     r9, #0                      @ disable chaining
 #endif
-    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                         @ r2<- method we're returning to
     cmp     r2, #0                      @ break frame?
 #if !defined(WITH_SELF_VERIFICATION)
@@ -520,9 +520,8 @@
     mul     ip, r2, r1                  @  ip<- ZxW
     umull   r9, r10, r2, r0             @  r9/r10 <- ZxX
     mla     r2, r0, r3, ip              @  r2<- YxX + (ZxW)
-    add     r10, r2, r10                @  r10<- r10 + low(ZxW + (YxX))
-    mov     r0,r9
-    mov     r1,r10
+    mov     r0, r9
+    add     r1, r2, r10                 @  r1<- r10 + low(ZxW + (YxX))
     bx      lr
 
 /* ------------------------------ */
@@ -1265,22 +1264,23 @@
      *    r2:   Starting offset in string data
      */
 
+    ldr    r3, [r0, #STRING_FIELDOFF_VALUE]
     ldr    r7, [r0, #STRING_FIELDOFF_OFFSET]
     ldr    r8, [r0, #STRING_FIELDOFF_COUNT]
-    ldr    r0, [r0, #STRING_FIELDOFF_VALUE]
+
 
     /*
      * At this point, we have:
-     *    r0: object pointer
      *    r1: char to match
      *    r2: starting offset
+     *    r3: object pointer (final result -> r0)
      *    r7: offset
      *    r8: string length
      */
 
      /* Build pointer to start of string data */
-     add   r0, #16
-     add   r0, r0, r7, lsl #1
+     add   r3, #16
+     add   r0, r3, r7, lsl #1
 
      /* Save a copy of starting data in r7 */
      mov   r7, r0
@@ -1516,12 +1516,12 @@
     ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
     ldrb    r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
     ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
 #if !defined(WITH_SELF_VERIFICATION)
     ldr     r9,  [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
 #else
     mov     r9, #0                      @ disable chaining
 #endif
-    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                         @ r2<- method we're returning to
     cmp     r2, #0                      @ break frame?
 #if !defined(WITH_SELF_VERIFICATION)
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
index fd21a0e..7a4fa2c 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
@@ -178,12 +178,12 @@
     ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
     ldrb    r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
     ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
 #if !defined(WITH_SELF_VERIFICATION)
     ldr     r9,  [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
 #else
     mov     r9, #0                      @ disable chaining
 #endif
-    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                         @ r2<- method we're returning to
     cmp     r2, #0                      @ break frame?
 #if !defined(WITH_SELF_VERIFICATION)
@@ -520,9 +520,8 @@
     mul     ip, r2, r1                  @  ip<- ZxW
     umull   r9, r10, r2, r0             @  r9/r10 <- ZxX
     mla     r2, r0, r3, ip              @  r2<- YxX + (ZxW)
-    add     r10, r2, r10                @  r10<- r10 + low(ZxW + (YxX))
-    mov     r0,r9
-    mov     r1,r10
+    mov     r0, r9
+    add     r1, r2, r10                 @  r1<- r10 + low(ZxW + (YxX))
     bx      lr
 
 /* ------------------------------ */
@@ -1265,22 +1264,23 @@
      *    r2:   Starting offset in string data
      */
 
+    ldr    r3, [r0, #STRING_FIELDOFF_VALUE]
     ldr    r7, [r0, #STRING_FIELDOFF_OFFSET]
     ldr    r8, [r0, #STRING_FIELDOFF_COUNT]
-    ldr    r0, [r0, #STRING_FIELDOFF_VALUE]
+
 
     /*
      * At this point, we have:
-     *    r0: object pointer
      *    r1: char to match
      *    r2: starting offset
+     *    r3: object pointer (final result -> r0)
      *    r7: offset
      *    r8: string length
      */
 
      /* Build pointer to start of string data */
-     add   r0, #16
-     add   r0, r0, r7, lsl #1
+     add   r3, #16
+     add   r0, r3, r7, lsl #1
 
      /* Save a copy of starting data in r7 */
      mov   r7, r0
@@ -1516,12 +1516,12 @@
     ldr     r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
     ldrb    r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
     ldr     rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
 #if !defined(WITH_SELF_VERIFICATION)
     ldr     r9,  [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
 #else
     mov     r9, #0                      @ disable chaining
 #endif
-    ldr     r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                         @ r2<- method we're returning to
     cmp     r2, #0                      @ break frame?
 #if !defined(WITH_SELF_VERIFICATION)
diff --git a/vm/jdwp/JdwpAdb.cpp b/vm/jdwp/JdwpAdb.cpp
index 87db1d2..8fb5391 100644
--- a/vm/jdwp/JdwpAdb.cpp
+++ b/vm/jdwp/JdwpAdb.cpp
@@ -345,7 +345,7 @@
 
     if (netState->wakeFds[1] >= 0) {
         ALOGV("+++ writing to wakePipe");
-        write(netState->wakeFds[1], "", 1);
+        TEMP_FAILURE_RETRY(write(netState->wakeFds[1], "", 1));
     }
 }
 
@@ -629,8 +629,8 @@
         }
 
         errno = 0;
-        cc = write(netState->clientSock, netState->inputBuffer,
-                kMagicHandshakeLen);
+        cc = TEMP_FAILURE_RETRY(write(netState->clientSock, netState->inputBuffer,
+                                      kMagicHandshakeLen));
         if (cc != kMagicHandshakeLen) {
             ALOGE("Failed writing handshake bytes: %s (%d of %d)",
                 strerror(errno), cc, (int) kMagicHandshakeLen);
diff --git a/vm/jdwp/JdwpHandler.cpp b/vm/jdwp/JdwpHandler.cpp
index 5ce432c..9126584 100644
--- a/vm/jdwp/JdwpHandler.cpp
+++ b/vm/jdwp/JdwpHandler.cpp
@@ -376,6 +376,7 @@
     ALOGV("  Req to create string '%s'", str);
 
     ObjectId stringId = dvmDbgCreateString(str);
+    free(str);
     if (stringId == 0)
         return ERR_OUT_OF_MEMORY;
 
diff --git a/vm/jdwp/JdwpMain.cpp b/vm/jdwp/JdwpMain.cpp
index 90e4c45..55e278d 100644
--- a/vm/jdwp/JdwpMain.cpp
+++ b/vm/jdwp/JdwpMain.cpp
@@ -45,8 +45,8 @@
 ssize_t JdwpNetStateBase::writePacket(ExpandBuf* pReply)
 {
     dvmDbgLockMutex(&socketLock);
-    ssize_t cc = write(clientSock, expandBufGetBuffer(pReply),
-            expandBufGetLength(pReply));
+    ssize_t cc = TEMP_FAILURE_RETRY(write(clientSock, expandBufGetBuffer(pReply),
+                                          expandBufGetLength(pReply)));
     dvmDbgUnlockMutex(&socketLock);
 
     return cc;
@@ -59,7 +59,7 @@
     int iovcnt)
 {
     dvmDbgLockMutex(&socketLock);
-    ssize_t actual = writev(clientSock, iov, iovcnt);
+    ssize_t actual = TEMP_FAILURE_RETRY(writev(clientSock, iov, iovcnt));
     dvmDbgUnlockMutex(&socketLock);
 
     return actual;
diff --git a/vm/jdwp/JdwpSocket.cpp b/vm/jdwp/JdwpSocket.cpp
index ad0a287..eaea607 100644
--- a/vm/jdwp/JdwpSocket.cpp
+++ b/vm/jdwp/JdwpSocket.cpp
@@ -226,7 +226,7 @@
     /* if we might be sitting in select, kick us loose */
     if (netState->wakePipe[1] >= 0) {
         ALOGV("+++ writing to wakePipe");
-        (void) write(netState->wakePipe[1], "", 1);
+        TEMP_FAILURE_RETRY(write(netState->wakePipe[1], "", 1));
     }
 }
 static void netShutdownExtern(JdwpState* state)
@@ -789,8 +789,8 @@
         }
 
         errno = 0;
-        cc = write(netState->clientSock, netState->inputBuffer,
-                kMagicHandshakeLen);
+        cc = TEMP_FAILURE_RETRY(write(netState->clientSock, netState->inputBuffer,
+                                      kMagicHandshakeLen));
         if (cc != kMagicHandshakeLen) {
             ALOGE("Failed writing handshake bytes: %s (%d of %d)",
                 strerror(errno), cc, (int) kMagicHandshakeLen);
diff --git a/vm/mterp/out/InterpAsm-x86.S b/vm/mterp/out/InterpAsm-x86.S
index 760e674..c87f306 100644
--- a/vm/mterp/out/InterpAsm-x86.S
+++ b/vm/mterp/out/InterpAsm-x86.S
@@ -15754,7 +15754,9 @@
  */
 dvmJitToInterpNoChainNoProfile:
 #if defined(WITH_JIT_TUNING)
+    SPILL_TMP1(%eax)
     call   dvmBumpNoChain
+    UNSPILL_TMP1(%eax)
 #endif
     movl   %eax, rPC
     movl   rSELF, %eax
@@ -15782,6 +15784,7 @@
     .global dvmJitToInterpTraceSelectNoChain
 dvmJitToInterpTraceSelectNoChain:
 #if defined(WITH_JIT_TUNING)
+    movl   %edx, OUT_ARG0(%esp)
     call   dvmBumpNoChain
 #endif
     movl   %ebx, rPC
@@ -15895,6 +15898,11 @@
     .global dvmJitToInterpNoChain
 dvmJitToInterpNoChain:
 dvmJitToInterpNoChain: #rPC in eax
+#if defined(WITH_JIT_TUNING)
+    SPILL_TMP1(%eax)
+    call   dvmBumpNoChain
+    UNSPILL_TMP1(%eax)
+#endif
     ## TODO, need to clean up stack manipulation ... this isn't signal safe and
     ## doesn't use the calling conventions of header.S
     movl        %eax, rPC
diff --git a/vm/mterp/x86/footer.S b/vm/mterp/x86/footer.S
index 3b5c79e..054dc11 100644
--- a/vm/mterp/x86/footer.S
+++ b/vm/mterp/x86/footer.S
@@ -77,7 +77,9 @@
  */
 dvmJitToInterpNoChainNoProfile:
 #if defined(WITH_JIT_TUNING)
+    SPILL_TMP1(%eax)
     call   dvmBumpNoChain
+    UNSPILL_TMP1(%eax)
 #endif
     movl   %eax, rPC
     movl   rSELF, %eax
@@ -105,6 +107,7 @@
     .global dvmJitToInterpTraceSelectNoChain
 dvmJitToInterpTraceSelectNoChain:
 #if defined(WITH_JIT_TUNING)
+    movl   %edx, OUT_ARG0(%esp)
     call   dvmBumpNoChain
 #endif
     movl   %ebx, rPC
@@ -218,6 +221,11 @@
     .global dvmJitToInterpNoChain
 dvmJitToInterpNoChain:
 dvmJitToInterpNoChain: #rPC in eax
+#if defined(WITH_JIT_TUNING)
+    SPILL_TMP1(%eax)
+    call   dvmBumpNoChain
+    UNSPILL_TMP1(%eax)
+#endif
     ## TODO, need to clean up stack manipulation ... this isn't signal safe and
     ## doesn't use the calling conventions of header.S
     movl        %eax, rPC
diff --git a/vm/native/dalvik_system_DexFile.cpp b/vm/native/dalvik_system_DexFile.cpp
index f328a19..69cb71d 100644
--- a/vm/native/dalvik_system_DexFile.cpp
+++ b/vm/native/dalvik_system_DexFile.cpp
@@ -232,6 +232,7 @@
         free(sourceName);
     }
 
+    free(outputName);
     RETURN_PTR(pDexOrJar);
 }