Stamp out some x86/host mode warnings

Nuked some void* cast warnings and moved cacheflush into a target-specific
utility wrapper.
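
The wrapper shape, per the hunks below: one prototype in CompilerUtility.h
with a per-target implementation (this sketch just restates what the diff
adds, no new names):

    /* arm/ArchUtility.c: forward to the kernel cacheflush() syscall */
    int dvmCompilerCacheFlush(long start, long end, long flags)
    {
        return cacheflush(start, end, flags);
    }

    /* x86/ArchUtility.c: no explicit cache flush needed, so a no-op */
    int dvmCompilerCacheFlush(long start, long end, long flags)
    {
        return 0;
    }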

Change-Id: I36c841288b9ec7e03c0cb29b2e89db344f36fad1
diff --git a/vm/compiler/Compiler.c b/vm/compiler/Compiler.c
index fe42f4c..c8ff62e 100644
--- a/vm/compiler/Compiler.c
+++ b/vm/compiler/Compiler.c
@@ -178,8 +178,8 @@
     gDvmJit.codeCacheByteUsed = templateSize;
 
     /* Only flush the part in the code cache that is being used now */
-    cacheflush((intptr_t) gDvmJit.codeCache,
-               (intptr_t) gDvmJit.codeCache + templateSize, 0);
+    dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
+                          (intptr_t) gDvmJit.codeCache + templateSize, 0);
 
     int result = mprotect(gDvmJit.codeCache, gDvmJit.codeCacheSize,
                           PROTECT_CODE_CACHE_ATTRS);
@@ -281,8 +281,9 @@
     memset((char *) gDvmJit.codeCache + gDvmJit.templateSize,
            0,
            gDvmJit.codeCacheByteUsed - gDvmJit.templateSize);
-    cacheflush((intptr_t) gDvmJit.codeCache,
-               (intptr_t) gDvmJit.codeCache + gDvmJit.codeCacheByteUsed, 0);
+    dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
+                          (intptr_t) gDvmJit.codeCache +
+                          gDvmJit.codeCacheByteUsed, 0);
 
     PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
 
diff --git a/vm/compiler/CompilerUtility.h b/vm/compiler/CompilerUtility.h
index d3f2d6a..3e65a2e 100644
--- a/vm/compiler/CompilerUtility.h
+++ b/vm/compiler/CompilerUtility.h
@@ -73,5 +73,7 @@
 void dvmDumpBlockBitVector(const GrowableList *blocks, char *msg,
                            const BitVector *bv, int length);
 void dvmGetBlockName(struct BasicBlock *bb, char *name);
+int dvmCompilerCacheFlush(long start, long end, long flags);
+
 
 #endif /* _DALVIK_COMPILER_UTILITY */
diff --git a/vm/compiler/codegen/arm/ArchUtility.c b/vm/compiler/codegen/arm/ArchUtility.c
index 8682c1a..95b96c4 100644
--- a/vm/compiler/codegen/arm/ArchUtility.c
+++ b/vm/compiler/codegen/arm/ArchUtility.c
@@ -385,3 +385,9 @@
              armLIR->operands[0]);
     }
 }
+
+/* Target-specific cache flushing */
+int dvmCompilerCacheFlush(long start, long end, long flags)
+{
+    return cacheflush(start, end, flags);
+}
diff --git a/vm/compiler/codegen/arm/Assemble.c b/vm/compiler/codegen/arm/Assemble.c
index 16e8e15..4154387 100644
--- a/vm/compiler/codegen/arm/Assemble.c
+++ b/vm/compiler/codegen/arm/Assemble.c
@@ -20,7 +20,6 @@
 #include "../../CompilerInternals.h"
 #include "ArmLIR.h"
 #include "Codegen.h"
-#include <unistd.h>             /* for cacheflush */
 #include <sys/mman.h>           /* for protection change */
 
 #define MAX_ASSEMBLER_RETRIES 10
@@ -1353,8 +1352,8 @@
     installDataContent(cUnit);
 
     /* Flush dcache and invalidate the icache to maintain coherence */
-    cacheflush((long)cUnit->baseAddr,
-               (long)((char *) cUnit->baseAddr + offset), 0);
+    dvmCompilerCacheFlush((long)cUnit->baseAddr,
+                          (long)((char *) cUnit->baseAddr + offset), 0);
     UPDATE_CODE_CACHE_PATCHES();
 
     PROTECT_CODE_CACHE(cUnit->baseAddr, offset);
@@ -1449,7 +1448,7 @@
         UNPROTECT_CODE_CACHE(branchAddr, sizeof(*branchAddr));
 
         *branchAddr = newInst;
-        cacheflush((long)branchAddr, (long)branchAddr + 4, 0);
+        dvmCompilerCacheFlush((long)branchAddr, (long)branchAddr + 4, 0);
         UPDATE_CODE_CACHE_PATCHES();
 
         PROTECT_CODE_CACHE(branchAddr, sizeof(*branchAddr));
@@ -1489,7 +1488,7 @@
          */
         android_atomic_release_store((int32_t)newContent->clazz,
             (volatile int32_t *)(void *)&cellAddr->clazz);
-        cacheflush((intptr_t) cellAddr, (intptr_t) (cellAddr+1), 0);
+        dvmCompilerCacheFlush((intptr_t) cellAddr, (intptr_t) (cellAddr+1), 0);
         UPDATE_CODE_CACHE_PATCHES();
 
         PROTECT_CODE_CACHE(cellAddr, sizeof(*cellAddr));
@@ -1681,7 +1680,7 @@
     }
 
     /* Then synchronize the I/D cache */
-    cacheflush((long) minAddr, (long) (maxAddr+1), 0);
+    dvmCompilerCacheFlush((long) minAddr, (long) (maxAddr+1), 0);
     UPDATE_CODE_CACHE_PATCHES();
 
     PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
@@ -1802,7 +1801,7 @@
                     highAddress = lastAddress;
             }
         }
-        cacheflush((long)lowAddress, (long)highAddress, 0);
+        dvmCompilerCacheFlush((long)lowAddress, (long)highAddress, 0);
         UPDATE_CODE_CACHE_PATCHES();
 
         PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
diff --git a/vm/compiler/codegen/x86/ArchUtility.c b/vm/compiler/codegen/x86/ArchUtility.c
index 171c3b5..f7c48d6 100644
--- a/vm/compiler/codegen/x86/ArchUtility.c
+++ b/vm/compiler/codegen/x86/ArchUtility.c
@@ -22,3 +22,9 @@
 void dvmCompilerCodegenDump(CompilationUnit *cUnit)
 {
 }
+
+/* Target-specific cache flushing (not needed for x86) */
+int dvmCompilerCacheFlush(long start, long end, long flags)
+{
+    return 0;
+}
diff --git a/vm/compiler/codegen/x86/Assemble.c b/vm/compiler/codegen/x86/Assemble.c
index 31264ce..3c0b3c7 100644
--- a/vm/compiler/codegen/x86/Assemble.c
+++ b/vm/compiler/codegen/x86/Assemble.c
@@ -20,7 +20,6 @@
 #include "../../CompilerInternals.h"
 #include "X86LIR.h"
 #include "Codegen.h"
-#include <unistd.h>             /* for cacheflush */
 #include <sys/mman.h>           /* for protection change */
 
 #define MAX_ASSEMBLER_RETRIES 10
diff --git a/vm/compiler/codegen/x86/CodegenDriver.c b/vm/compiler/codegen/x86/CodegenDriver.c
index 4a5d481..4f31563 100644
--- a/vm/compiler/codegen/x86/CodegenDriver.c
+++ b/vm/compiler/codegen/x86/CodegenDriver.c
@@ -35,6 +35,7 @@
 static int opcodeCoverage[kNumPackedOpcodes];
 static intptr_t templateEntryOffsets[TEMPLATE_LAST_MARK];
 
+#if 0   // Avoid compiler warnings while x86 codegen is disabled during development
 /*
  * Bail to the interpreter.  Will not return to this trace.
  * On entry, rPC must be set correctly.
@@ -79,6 +80,7 @@
     storeWordDisp(cUnit, rESP, OUT_ARG0, rECX);
     opReg(cUnit, kOpCall, rEAX);
 }
+#endif
 
 /*
  * The following are the first-level codegen routines that analyze the format
@@ -211,6 +213,7 @@
 /* Accept the work and start compiling */
 bool dvmCompilerDoWork(CompilerWorkOrder *work)
 {
+    JitTraceDescription *desc;
     bool res;
 
     if (gDvmJit.codeCacheFull) {
@@ -220,14 +223,16 @@
     switch (work->kind) {
         case kWorkOrderTrace:
             /* Start compilation with maximally allowed trace length */
-            res = dvmCompileTrace(work->info, JIT_MAX_TRACE_LEN, &work->result,
+            desc = (JitTraceDescription *)work->info;
+            res = dvmCompileTrace(desc, JIT_MAX_TRACE_LEN, &work->result,
                                   work->bailPtr, 0 /* no hints */);
             break;
         case kWorkOrderTraceDebug: {
             bool oldPrintMe = gDvmJit.printMe;
             gDvmJit.printMe = true;
             /* Start compilation with maximally allowed trace length */
-            res = dvmCompileTrace(work->info, JIT_MAX_TRACE_LEN, &work->result,
+            desc = (JitTraceDescription *)work->info;
+            res = dvmCompileTrace(desc, JIT_MAX_TRACE_LEN, &work->result,
                                   work->bailPtr, 0 /* no hints */);
             gDvmJit.printMe = oldPrintMe;
             break;
diff --git a/vm/compiler/template/ia32/footer.S b/vm/compiler/template/ia32/footer.S
index d11af69..d350c77 100644
--- a/vm/compiler/template/ia32/footer.S
+++ b/vm/compiler/template/ia32/footer.S
@@ -6,14 +6,6 @@
 
     .text
     .align  4
-/*
- * FIXME - verify that we don't need an explicit cache flush
- * for x86.
- */
-    .global cacheflush
-cacheflush:
-    ret
-
 
     .global dmvCompilerTemplateEnd
 dmvCompilerTemplateEnd:
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
index e1d0524..8efbcaa 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
@@ -108,17 +108,6 @@
  * ===========================================================================
  */
 
-/*
- * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
- * Jump to subroutine.
- *
- * May modify IP and LR.
- */
-.macro  LDR_PC_LR source
-    mov     lr, pc
-    ldr     pc, \source
-.endm
-
 
     .global dvmCompilerTemplateStart
     .type   dvmCompilerTemplateStart, %function
@@ -181,7 +170,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve live registers
     mov     r0, r6
     @ r0=rGlue
-    LDR_PC_LR ".LdvmFastJavaMethodTraceExit"
+    mov     lr, pc
+    ldr     pc, .LdvmFastJavaMethodTraceExit
     ldmfd   sp!, {r0-r2,lr}             @ restore live registers
 #endif
     SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
@@ -285,7 +275,8 @@
     stmfd   sp!, {r0-r3}                    @ preserve r0-r3
     mov     r1, r6
     @ r0=methodToCall, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                    @ restore r0-r3
 #endif
 
@@ -344,7 +335,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
     mov     r1, r6
     @ r0=methodToCall, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r2,lr}             @ restore registers
 #endif
 
@@ -468,7 +460,8 @@
     mov     r0, r2
     mov     r1, r6
     @ r0=JNIMethod, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
 #endif
 
@@ -477,7 +470,8 @@
 #if defined(WITH_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}                @ restore r2 and r6
     @ r0=JNIMethod, r1=rGlue
-    LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
 #endif
     @ native return; r9=self, r10=newSaveArea
     @ equivalent to dvmPopJniLocals
@@ -1511,15 +1505,18 @@
     stmfd   sp!, {r0-r3}
     mov     r0, r2
     mov     r1, r6
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}
 #endif
 
-    LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+    mov     lr, pc
+    ldr     pc, [r2, #offMethod_nativeFunc]
 
 #if defined(WITH_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}
-    LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
 #endif
     @ Refresh Jit's on/off status
     ldr     r3, [rGLUE, #offGlue_ppJitProfTable]
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
index 5a47750..0df3ae6 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
@@ -108,17 +108,6 @@
  * ===========================================================================
  */
 
-/*
- * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
- * Jump to subroutine.
- *
- * May modify IP and LR.
- */
-.macro  LDR_PC_LR source
-    mov     lr, pc
-    ldr     pc, \source
-.endm
-
 
     .global dvmCompilerTemplateStart
     .type   dvmCompilerTemplateStart, %function
@@ -181,7 +170,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve live registers
     mov     r0, r6
     @ r0=rGlue
-    LDR_PC_LR ".LdvmFastJavaMethodTraceExit"
+    mov     lr, pc
+    ldr     pc, .LdvmFastJavaMethodTraceExit
     ldmfd   sp!, {r0-r2,lr}             @ restore live registers
 #endif
     SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
@@ -285,7 +275,8 @@
     stmfd   sp!, {r0-r3}                    @ preserve r0-r3
     mov     r1, r6
     @ r0=methodToCall, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                    @ restore r0-r3
 #endif
 
@@ -344,7 +335,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
     mov     r1, r6
     @ r0=methodToCall, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r2,lr}             @ restore registers
 #endif
 
@@ -468,7 +460,8 @@
     mov     r0, r2
     mov     r1, r6
     @ r0=JNIMethod, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
 #endif
 
@@ -477,7 +470,8 @@
 #if defined(WITH_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}                @ restore r2 and r6
     @ r0=JNIMethod, r1=rGlue
-    LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
 #endif
     @ native return; r9=self, r10=newSaveArea
     @ equivalent to dvmPopJniLocals
@@ -527,7 +521,8 @@
     /* op vAA, vBB, vCC */
     push    {r0-r3}                     @ save operands
     mov     r11, lr                     @ save return address
-    LDR_PC_LR ".L__aeabi_cdcmple"       @ PIC way of "bl __aeabi_cdcmple"
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cdcmple       @ PIC way of "bl __aeabi_cdcmple"
     bhi     .LTEMPLATE_CMPG_DOUBLE_gt_or_nan       @ C set and Z clear, disambiguate
     mvncc   r0, #0                      @ (less than) r1<- -1
     moveq   r0, #0                      @ (equal) r1<- 0, trumps less than
@@ -540,7 +535,8 @@
 .LTEMPLATE_CMPG_DOUBLE_gt_or_nan:
     pop     {r2-r3}                     @ restore operands in reverse order
     pop     {r0-r1}                     @ restore operands in reverse order
-    LDR_PC_LR ".L__aeabi_cdcmple"       @ r0<- Z set if eq, C clear if <
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cdcmple       @ r0<- Z set if eq, C clear if <
     movcc   r0, #1                      @ (greater than) r1<- 1
     bxcc    r11
     mov     r0, #1                            @ r1<- 1 or -1 for NaN
@@ -569,7 +565,8 @@
     /* op vAA, vBB, vCC */
     push    {r0-r3}                     @ save operands
     mov     r11, lr                     @ save return address
-    LDR_PC_LR ".L__aeabi_cdcmple"       @ PIC way of "bl __aeabi_cdcmple"
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cdcmple       @ PIC way of "bl __aeabi_cdcmple"
     bhi     .LTEMPLATE_CMPL_DOUBLE_gt_or_nan       @ C set and Z clear, disambiguate
     mvncc   r0, #0                      @ (less than) r1<- -1
     moveq   r0, #0                      @ (equal) r1<- 0, trumps less than
@@ -582,7 +579,8 @@
 .LTEMPLATE_CMPL_DOUBLE_gt_or_nan:
     pop     {r2-r3}                     @ restore operands in reverse order
     pop     {r0-r1}                     @ restore operands in reverse order
-    LDR_PC_LR ".L__aeabi_cdcmple"       @ r0<- Z set if eq, C clear if <
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cdcmple       @ r0<- Z set if eq, C clear if <
     movcc   r0, #1                      @ (greater than) r1<- 1
     bxcc    r11
     mvn     r0, #0                            @ r1<- 1 or -1 for NaN
@@ -631,7 +629,8 @@
     mov     r9, r0                      @ Save copies - we may need to redo
     mov     r10, r1
     mov     r11, lr                     @ save return address
-    LDR_PC_LR ".L__aeabi_cfcmple"       @ cmp <=: C clear if <, Z set if eq
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cfcmple       @ cmp <=: C clear if <, Z set if eq
     bhi     .LTEMPLATE_CMPG_FLOAT_gt_or_nan       @ C set and Z clear, disambiguate
     mvncc   r0, #0                      @ (less than) r0<- -1
     moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
@@ -642,7 +641,8 @@
 .LTEMPLATE_CMPG_FLOAT_gt_or_nan:
     mov     r0, r10                     @ restore in reverse order
     mov     r1, r9
-    LDR_PC_LR ".L__aeabi_cfcmple"       @ r0<- Z set if eq, C clear if <
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cfcmple       @ r0<- Z set if eq, C clear if <
     movcc   r0, #1                      @ (greater than) r1<- 1
     bxcc    r11
     mov     r0, #1                            @ r1<- 1 or -1 for NaN
@@ -691,7 +691,8 @@
     mov     r9, r0                      @ Save copies - we may need to redo
     mov     r10, r1
     mov     r11, lr                     @ save return address
-    LDR_PC_LR ".L__aeabi_cfcmple"       @ cmp <=: C clear if <, Z set if eq
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cfcmple       @ cmp <=: C clear if <, Z set if eq
     bhi     .LTEMPLATE_CMPL_FLOAT_gt_or_nan       @ C set and Z clear, disambiguate
     mvncc   r0, #0                      @ (less than) r0<- -1
     moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
@@ -702,7 +703,8 @@
 .LTEMPLATE_CMPL_FLOAT_gt_or_nan:
     mov     r0, r10                     @ restore in reverse order
     mov     r1, r9
-    LDR_PC_LR ".L__aeabi_cfcmple"       @ r0<- Z set if eq, C clear if <
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cfcmple       @ r0<- Z set if eq, C clear if <
     movcc   r0, #1                      @ (greater than) r1<- 1
     bxcc    r11
     mvn     r0, #0                            @ r1<- 1 or -1 for NaN
@@ -1234,15 +1236,18 @@
     stmfd   sp!, {r0-r3}
     mov     r0, r2
     mov     r1, r6
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}
 #endif
 
-    LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+    mov     lr, pc
+    ldr     pc, [r2, #offMethod_nativeFunc]
 
 #if defined(WITH_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}
-    LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
 #endif
     @ Refresh Jit's on/off status
     ldr     r3, [rGLUE, #offGlue_ppJitProfTable]
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
index 9fb8892..ee3f8cb 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
@@ -108,17 +108,6 @@
  * ===========================================================================
  */
 
-/*
- * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
- * Jump to subroutine.
- *
- * May modify IP and LR.
- */
-.macro  LDR_PC_LR source
-    mov     lr, pc
-    ldr     pc, \source
-.endm
-
 
     .global dvmCompilerTemplateStart
     .type   dvmCompilerTemplateStart, %function
@@ -181,7 +170,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve live registers
     mov     r0, r6
     @ r0=rGlue
-    LDR_PC_LR ".LdvmFastJavaMethodTraceExit"
+    mov     lr, pc
+    ldr     pc, .LdvmFastJavaMethodTraceExit
     ldmfd   sp!, {r0-r2,lr}             @ restore live registers
 #endif
     SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
@@ -285,7 +275,8 @@
     stmfd   sp!, {r0-r3}                    @ preserve r0-r3
     mov     r1, r6
     @ r0=methodToCall, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                    @ restore r0-r3
 #endif
 
@@ -344,7 +335,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
     mov     r1, r6
     @ r0=methodToCall, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r2,lr}             @ restore registers
 #endif
 
@@ -468,7 +460,8 @@
     mov     r0, r2
     mov     r1, r6
     @ r0=JNIMethod, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
 #endif
 
@@ -477,7 +470,8 @@
 #if defined(WITH_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}                @ restore r2 and r6
     @ r0=JNIMethod, r1=rGlue
-    LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
 #endif
     @ native return; r9=self, r10=newSaveArea
     @ equivalent to dvmPopJniLocals
@@ -1511,15 +1505,18 @@
     stmfd   sp!, {r0-r3}
     mov     r0, r2
     mov     r1, r6
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}
 #endif
 
-    LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+    mov     lr, pc
+    ldr     pc, [r2, #offMethod_nativeFunc]
 
 #if defined(WITH_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}
-    LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
 #endif
     @ Refresh Jit's on/off status
     ldr     r3, [rGLUE, #offGlue_ppJitProfTable]
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
index 6d40d60..3875f5a 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
@@ -108,17 +108,6 @@
  * ===========================================================================
  */
 
-/*
- * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
- * Jump to subroutine.
- *
- * May modify IP and LR.
- */
-.macro  LDR_PC_LR source
-    mov     lr, pc
-    ldr     pc, \source
-.endm
-
 
     .global dvmCompilerTemplateStart
     .type   dvmCompilerTemplateStart, %function
@@ -181,7 +170,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve live registers
     mov     r0, r6
     @ r0=rGlue
-    LDR_PC_LR ".LdvmFastJavaMethodTraceExit"
+    mov     lr, pc
+    ldr     pc, .LdvmFastJavaMethodTraceExit
     ldmfd   sp!, {r0-r2,lr}             @ restore live registers
 #endif
     SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
@@ -285,7 +275,8 @@
     stmfd   sp!, {r0-r3}                    @ preserve r0-r3
     mov     r1, r6
     @ r0=methodToCall, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                    @ restore r0-r3
 #endif
 
@@ -344,7 +335,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
     mov     r1, r6
     @ r0=methodToCall, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r2,lr}             @ restore registers
 #endif
 
@@ -468,7 +460,8 @@
     mov     r0, r2
     mov     r1, r6
     @ r0=JNIMethod, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
 #endif
 
@@ -477,7 +470,8 @@
 #if defined(WITH_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}                @ restore r2 and r6
     @ r0=JNIMethod, r1=rGlue
-    LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
 #endif
     @ native return; r9=self, r10=newSaveArea
     @ equivalent to dvmPopJniLocals
@@ -1511,15 +1505,18 @@
     stmfd   sp!, {r0-r3}
     mov     r0, r2
     mov     r1, r6
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}
 #endif
 
-    LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+    mov     lr, pc
+    ldr     pc, [r2, #offMethod_nativeFunc]
 
 #if defined(WITH_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}
-    LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
 #endif
     @ Refresh Jit's on/off status
     ldr     r3, [rGLUE, #offGlue_ppJitProfTable]
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-ia32.S b/vm/compiler/template/out/CompilerTemplateAsm-ia32.S
index 1256ee4..ae548e4 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-ia32.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-ia32.S
@@ -97,14 +97,6 @@
 
     .text
     .align  4
-/*
- * FIXME - verify that we don't need an explicit cache flush
- * for x86.
- */
-    .global cacheflush
-cacheflush:
-    ret
-
 
     .global dmvCompilerTemplateEnd
 dmvCompilerTemplateEnd: