Remove junk from platform.S now that armv4t is gone.

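Now that the armv4t backend is gone, the LDR_PC, LDR_PC_LR, and
LDMFD_PC macros in the various platform.S files no longer buy us
anything, so expand them inline at each call site and delete the
definitions. The macros only existed so that an armv4t build could
substitute sequences that were legal there (the removed comments note
that these forms are "not allowed pre-ARMv5"); on ARMv5TE and later
the direct forms are fine.

For reference, a sketch of what the subroutine-call expansion does
(illustration only, not part of the patch; .Ltarget is a made-up
literal-pool label):

    mov     lr, pc          @ pc reads as <this insn>+8 in ARM state, so
                            @ lr <- address of the insn after the ldr
    ldr     pc, .Ltarget    @ indirect jump via a literal-pool entry;
                            @ together these act like "bl" for PIC or
                            @ indirect targets, and the callee returns
                            @ with "bx lr"

Similarly, LDMFD_PC "r4-r10,fp" becomes ldmfd sp!, {r4-r10,fp,pc},
which pops the saved return address directly into pc.
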
Change-Id: I30079aacc753c89cfc3a3f64bd900a0cc858d65f
diff --git a/vm/compiler/template/armv5te-vfp/platform.S b/vm/compiler/template/armv5te-vfp/platform.S
index 880e875..e0666a5 100644
--- a/vm/compiler/template/armv5te-vfp/platform.S
+++ b/vm/compiler/template/armv5te-vfp/platform.S
@@ -3,14 +3,3 @@
  *  CPU-version-specific defines and utility
  * ===========================================================================
  */
-
-/*
- * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
- * Jump to subroutine.
- *
- * May modify IP and LR.
- */
-.macro  LDR_PC_LR source
-    mov     lr, pc
-    ldr     pc, \source
-.endm
diff --git a/vm/compiler/template/armv5te/TEMPLATE_CMPL_DOUBLE.S b/vm/compiler/template/armv5te/TEMPLATE_CMPL_DOUBLE.S
index 15988f6..4fd5a71 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_CMPL_DOUBLE.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_CMPL_DOUBLE.S
@@ -16,7 +16,8 @@
     /* op vAA, vBB, vCC */
     push    {r0-r3}                     @ save operands
     mov     r11, lr                     @ save return address
-    LDR_PC_LR ".L__aeabi_cdcmple"       @ PIC way of "bl __aeabi_cdcmple"
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cdcmple       @ PIC way of "bl __aeabi_cdcmple"
     bhi     .L${opcode}_gt_or_nan       @ C set and Z clear, disambiguate
     mvncc   r0, #0                      @ (less than) r1<- -1
     moveq   r0, #0                      @ (equal) r1<- 0, trumps less than
@@ -29,7 +30,8 @@
 .L${opcode}_gt_or_nan:
     pop     {r2-r3}                     @ restore operands in reverse order
     pop     {r0-r1}                     @ restore operands in reverse order
-    LDR_PC_LR ".L__aeabi_cdcmple"       @ r0<- Z set if eq, C clear if <
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cdcmple       @ r0<- Z set if eq, C clear if <
     movcc   r0, #1                      @ (greater than) r1<- 1
     bxcc    r11
     $naninst                            @ r1<- 1 or -1 for NaN
diff --git a/vm/compiler/template/armv5te/TEMPLATE_CMPL_FLOAT.S b/vm/compiler/template/armv5te/TEMPLATE_CMPL_FLOAT.S
index eb1c7e1..d0f2bec 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_CMPL_FLOAT.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_CMPL_FLOAT.S
@@ -36,7 +36,8 @@
     mov     r9, r0                      @ Save copies - we may need to redo
     mov     r10, r1
     mov     r11, lr                     @ save return address
-    LDR_PC_LR ".L__aeabi_cfcmple"       @ cmp <=: C clear if <, Z set if eq
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cfcmple       @ cmp <=: C clear if <, Z set if eq
     bhi     .L${opcode}_gt_or_nan       @ C set and Z clear, disambiguate
     mvncc   r0, #0                      @ (less than) r0<- -1
     moveq   r0, #0                      @ (equal) r0<- 0, trumps less than
@@ -47,7 +48,8 @@
 .L${opcode}_gt_or_nan:
     mov     r0, r10                     @ restore in reverse order
     mov     r1, r9
-    LDR_PC_LR ".L__aeabi_cfcmple"       @ r0<- Z set if eq, C clear if <
+    mov     lr, pc
+    ldr     pc, .L__aeabi_cfcmple       @ r0<- Z set if eq, C clear if <
     movcc   r0, #1                      @ (greater than) r1<- 1
     bxcc    r11
     $naninst                            @ r1<- 1 or -1 for NaN
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
index 475f3cc..f1650d9 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
@@ -42,7 +42,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve clobbered live registers
     mov     r1, r6
     @ r0=methodToCall, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r2,lr}             @ restore registers
 #endif
 
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
index f4f4025..2a22a22 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
@@ -46,7 +46,8 @@
     mov     r0, r2
     mov     r1, r6
     @ r0=JNIMethod, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                @ restore r0-r3
 #endif
 
@@ -55,7 +56,8 @@
 #if defined(WITH_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}                @ restore r2 and r6
     @ r0=JNIMethod, r1=rGlue
-    LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
 #endif
     @ native return; r9=self, r10=newSaveArea
     @ equivalent to dvmPopJniLocals
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
index 044d0ee..405065f 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
@@ -50,7 +50,8 @@
     stmfd   sp!, {r0-r3}                    @ preserve r0-r3
     mov     r1, r6
     @ r0=methodToCall, r1=rGlue
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}                    @ restore r0-r3
 #endif
 
diff --git a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
index b2e71ee..e0adbc5 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
@@ -9,7 +9,8 @@
     stmfd   sp!, {r0-r2,lr}             @ preserve live registers
     mov     r0, r6
     @ r0=rGlue
-    LDR_PC_LR ".LdvmFastJavaMethodTraceExit"
+    mov     lr, pc
+    ldr     pc, .LdvmFastJavaMethodTraceExit
     ldmfd   sp!, {r0-r2,lr}             @ restore live registers
 #endif
     SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
diff --git a/vm/compiler/template/armv5te/footer.S b/vm/compiler/template/armv5te/footer.S
index a391dbe..ba0335b 100644
--- a/vm/compiler/template/armv5te/footer.S
+++ b/vm/compiler/template/armv5te/footer.S
@@ -28,15 +28,18 @@
     stmfd   sp!, {r0-r3}
     mov     r0, r2
     mov     r1, r6
-    LDR_PC_LR ".LdvmFastMethodTraceEnter"
+    mov     lr, pc
+    ldr     pc, .LdvmFastMethodTraceEnter
     ldmfd   sp!, {r0-r3}
 #endif
 
-    LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+    mov     lr, pc
+    ldr     pc, [r2, #offMethod_nativeFunc]
 
 #if defined(WITH_INLINE_PROFILING)
     ldmfd   sp!, {r0-r1}
-    LDR_PC_LR ".LdvmFastNativeMethodTraceExit"
+    mov     lr, pc
+    ldr     pc, .LdvmFastNativeMethodTraceExit
 #endif
     @ Refresh Jit's on/off status
     ldr     r3, [rGLUE, #offGlue_ppJitProfTable]
diff --git a/vm/compiler/template/armv5te/platform.S b/vm/compiler/template/armv5te/platform.S
index 880e875..e0666a5 100644
--- a/vm/compiler/template/armv5te/platform.S
+++ b/vm/compiler/template/armv5te/platform.S
@@ -3,14 +3,3 @@
  *  CPU-version-specific defines and utility
  * ===========================================================================
  */
-
-/*
- * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
- * Jump to subroutine.
- *
- * May modify IP and LR.
- */
-.macro  LDR_PC_LR source
-    mov     lr, pc
-    ldr     pc, \source
-.endm
diff --git a/vm/mterp/armv5te/OP_EXECUTE_INLINE.S b/vm/mterp/armv5te/OP_EXECUTE_INLINE.S
index f7ca704..d29523d 100644
--- a/vm/mterp/armv5te/OP_EXECUTE_INLINE.S
+++ b/vm/mterp/armv5te/OP_EXECUTE_INLINE.S
@@ -53,7 +53,7 @@
     ldr     r0, [rFP, ip, lsl #2]       @ r0<- vC
 0:
     ldr     r9, .L${opcode}_table       @ table of InlineOperation
-    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
+    ldr     pc, [r9, r10, lsl #4]       @ sizeof=16, "func" is first entry
     @ (not reached)
 
 .L${opcode}_table:
diff --git a/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S b/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S
index cf8b151..38ab35b 100644
--- a/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S
+++ b/vm/mterp/armv5te/OP_EXECUTE_INLINE_RANGE.S
@@ -48,7 +48,7 @@
     GET_VREG(r0, ip)                    @ r0<- vBase[0]
 0:
     ldr     r9, .L${opcode}_table       @ table of InlineOperation
-    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
+    ldr     pc, [r9, r10, lsl #4]       @ sizeof=16, "func" is first entry
     @ (not reached)
 
 .L${opcode}_table:
diff --git a/vm/mterp/armv5te/entry.S b/vm/mterp/armv5te/entry.S
index eddac53..445d580 100644
--- a/vm/mterp/armv5te/entry.S
+++ b/vm/mterp/armv5te/entry.S
@@ -153,7 +153,7 @@
     ldr     sp, [r0, #offGlue_bailPtr]      @ sp<- saved SP
     mov     r0, r1                          @ return the changeInterp value
     add     sp, sp, #4                      @ un-align 64
-    LDMFD_PC "r4-r10,fp"                    @ restore 9 regs and return
+    ldmfd   sp!, {r4-r10,fp,pc}             @ restore 9 regs and return
 
 
 /*
diff --git a/vm/mterp/armv5te/footer.S b/vm/mterp/armv5te/footer.S
index 6118a6c..7a2208d 100644
--- a/vm/mterp/armv5te/footer.S
+++ b/vm/mterp/armv5te/footer.S
@@ -720,9 +720,8 @@
     stmfd   sp!, {r2,r6}
 #endif
 
-    @mov     lr, pc                      @ set return addr
-    @ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
-    LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+    mov     lr, pc                      @ set return addr
+    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
 
 #if defined(WITH_INLINE_PROFILING)
     @ r0=JNIMethod, r1=rGLUE
diff --git a/vm/mterp/armv5te/platform.S b/vm/mterp/armv5te/platform.S
index 17bec6f..ff3150b 100644
--- a/vm/mterp/armv5te/platform.S
+++ b/vm/mterp/armv5te/platform.S
@@ -5,36 +5,6 @@
  */
 
 /*
- * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5.  Essentially a
- * one-way branch.
- *
- * May modify IP.  Does not modify LR.
- */
-.macro  LDR_PC source
-    ldr     pc, \source
-.endm
-
-/*
- * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
- * Jump to subroutine.
- *
- * May modify IP and LR.
- */
-.macro  LDR_PC_LR source
-    mov     lr, pc
-    ldr     pc, \source
-.endm
-
-/*
- * Macro for "LDMFD SP!, {...regs...,PC}".
- *
- * May modify IP and LR.
- */
-.macro  LDMFD_PC regs
-    ldmfd   sp!, {\regs,pc}
-.endm
-
-/*
  * Macro for data memory barrier; not meaningful pre-ARMv6K.
  */
 .macro  SMP_DMB
diff --git a/vm/mterp/armv7-a/platform.S b/vm/mterp/armv7-a/platform.S
index 5e6e837..96ff2c2 100644
--- a/vm/mterp/armv7-a/platform.S
+++ b/vm/mterp/armv7-a/platform.S
@@ -4,36 +4,6 @@
  * ===========================================================================
  */
 
-/*
- * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5.  Essentially a
- * one-way branch.
- *
- * May modify IP.  Does not modify LR.
- */
-.macro  LDR_PC source
-    ldr     pc, \source
-.endm
-
-/*
- * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
- * Jump to subroutine.
- *
- * May modify IP and LR.
- */
-.macro  LDR_PC_LR source
-    mov     lr, pc
-    ldr     pc, \source
-.endm
-
-/*
- * Macro for "LDMFD SP!, {...regs...,PC}".
- *
- * May modify IP and LR.
- */
-.macro  LDMFD_PC regs
-    ldmfd   sp!, {\regs,pc}
-.endm
-
 #if !defined(ANDROID_SMP)
 # error "Must define ANDROID_SMP"
 #endif
diff --git a/vm/mterp/out/InterpAsm-armv5te-vfp.S b/vm/mterp/out/InterpAsm-armv5te-vfp.S
index 32ba8bc..bd3ad4e 100644
--- a/vm/mterp/out/InterpAsm-armv5te-vfp.S
+++ b/vm/mterp/out/InterpAsm-armv5te-vfp.S
@@ -214,36 +214,6 @@
  */
 
 /*
- * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5.  Essentially a
- * one-way branch.
- *
- * May modify IP.  Does not modify LR.
- */
-.macro  LDR_PC source
-    ldr     pc, \source
-.endm
-
-/*
- * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
- * Jump to subroutine.
- *
- * May modify IP and LR.
- */
-.macro  LDR_PC_LR source
-    mov     lr, pc
-    ldr     pc, \source
-.endm
-
-/*
- * Macro for "LDMFD SP!, {...regs...,PC}".
- *
- * May modify IP and LR.
- */
-.macro  LDMFD_PC regs
-    ldmfd   sp!, {\regs,pc}
-.endm
-
-/*
  * Macro for data memory barrier; not meaningful pre-ARMv6K.
  */
 .macro  SMP_DMB
@@ -411,7 +381,7 @@
     ldr     sp, [r0, #offGlue_bailPtr]      @ sp<- saved SP
     mov     r0, r1                          @ return the changeInterp value
     add     sp, sp, #4                      @ un-align 64
-    LDMFD_PC "r4-r10,fp"                    @ restore 9 regs and return
+    ldmfd   sp!, {r4-r10,fp,pc}             @ restore 9 regs and return
 
 
 /*
@@ -2261,9 +2231,6 @@
 /* File: armv5te/OP_APUT_OBJECT.S */
     /*
      * Store an object into an array.  vBB[vCC] <- vAA.
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
      */
     /* op vAA, vBB, vCC */
     FETCH(r0, 1)                        @ r0<- CCBB
@@ -9295,7 +9262,7 @@
     ldr     r0, [rFP, ip, lsl #2]       @ r0<- vC
 0:
     ldr     r9, .LOP_EXECUTE_INLINE_table       @ table of InlineOperation
-    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
+    ldr     pc, [r9, r10, lsl #4]       @ sizeof=16, "func" is first entry
     @ (not reached)
 
 .LOP_EXECUTE_INLINE_table:
@@ -9324,7 +9291,7 @@
     GET_VREG(r0, ip)                    @ r0<- vBase[0]
 0:
     ldr     r9, .LOP_EXECUTE_INLINE_RANGE_table       @ table of InlineOperation
-    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
+    ldr     pc, [r9, r10, lsl #4]       @ sizeof=16, "func" is first entry
     @ (not reached)
 
 .LOP_EXECUTE_INLINE_RANGE_table:
@@ -10110,9 +10077,8 @@
     stmfd   sp!, {r2,r6}
 #endif
 
-    @mov     lr, pc                      @ set return addr
-    @ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
-    LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+    mov     lr, pc                      @ set return addr
+    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
 
 #if defined(WITH_INLINE_PROFILING)
     @ r0=JNIMethod, r1=rGLUE
@@ -10385,9 +10351,11 @@
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
 /*
- * Invalid array index.
- * r0: index
- * r1: size
+ * Invalid array index. Note that our calling convention is strange; we use r1
+ * and r3 because those just happen to be the registers all our callers are
+ * using. We shuffle them here before calling the C function.
+ * r1: index
+ * r3: size
  */
 common_errArrayIndex:
     EXPORT_PC()
diff --git a/vm/mterp/out/InterpAsm-armv5te.S b/vm/mterp/out/InterpAsm-armv5te.S
index f86a791..9a2d214 100644
--- a/vm/mterp/out/InterpAsm-armv5te.S
+++ b/vm/mterp/out/InterpAsm-armv5te.S
@@ -214,36 +214,6 @@
  */
 
 /*
- * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5.  Essentially a
- * one-way branch.
- *
- * May modify IP.  Does not modify LR.
- */
-.macro  LDR_PC source
-    ldr     pc, \source
-.endm
-
-/*
- * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
- * Jump to subroutine.
- *
- * May modify IP and LR.
- */
-.macro  LDR_PC_LR source
-    mov     lr, pc
-    ldr     pc, \source
-.endm
-
-/*
- * Macro for "LDMFD SP!, {...regs...,PC}".
- *
- * May modify IP and LR.
- */
-.macro  LDMFD_PC regs
-    ldmfd   sp!, {\regs,pc}
-.endm
-
-/*
  * Macro for data memory barrier; not meaningful pre-ARMv6K.
  */
 .macro  SMP_DMB
@@ -411,7 +381,7 @@
     ldr     sp, [r0, #offGlue_bailPtr]      @ sp<- saved SP
     mov     r0, r1                          @ return the changeInterp value
     add     sp, sp, #4                      @ un-align 64
-    LDMFD_PC "r4-r10,fp"                    @ restore 9 regs and return
+    ldmfd   sp!, {r4-r10,fp,pc}             @ restore 9 regs and return
 
 
 /*
@@ -2283,9 +2253,6 @@
 /* File: armv5te/OP_APUT_OBJECT.S */
     /*
      * Store an object into an array.  vBB[vCC] <- vAA.
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
      */
     /* op vAA, vBB, vCC */
     FETCH(r0, 1)                        @ r0<- CCBB
@@ -9753,7 +9720,7 @@
     ldr     r0, [rFP, ip, lsl #2]       @ r0<- vC
 0:
     ldr     r9, .LOP_EXECUTE_INLINE_table       @ table of InlineOperation
-    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
+    ldr     pc, [r9, r10, lsl #4]       @ sizeof=16, "func" is first entry
     @ (not reached)
 
 .LOP_EXECUTE_INLINE_table:
@@ -9782,7 +9749,7 @@
     GET_VREG(r0, ip)                    @ r0<- vBase[0]
 0:
     ldr     r9, .LOP_EXECUTE_INLINE_RANGE_table       @ table of InlineOperation
-    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
+    ldr     pc, [r9, r10, lsl #4]       @ sizeof=16, "func" is first entry
     @ (not reached)
 
 .LOP_EXECUTE_INLINE_RANGE_table:
@@ -10568,9 +10535,8 @@
     stmfd   sp!, {r2,r6}
 #endif
 
-    @mov     lr, pc                      @ set return addr
-    @ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
-    LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+    mov     lr, pc                      @ set return addr
+    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
 
 #if defined(WITH_INLINE_PROFILING)
     @ r0=JNIMethod, r1=rGLUE
@@ -10843,9 +10809,11 @@
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
 /*
- * Invalid array index.
- * r0: index
- * r1: size
+ * Invalid array index. Note that our calling convention is strange; we use r1
+ * and r3 because those just happen to be the registers all our callers are
+ * using. We shuffle them here before calling the C function.
+ * r1: index
+ * r3: size
  */
 common_errArrayIndex:
     EXPORT_PC()
diff --git a/vm/mterp/out/InterpAsm-armv7-a-neon.S b/vm/mterp/out/InterpAsm-armv7-a-neon.S
index 6e28216..7193c73 100644
--- a/vm/mterp/out/InterpAsm-armv7-a-neon.S
+++ b/vm/mterp/out/InterpAsm-armv7-a-neon.S
@@ -213,36 +213,6 @@
  * ===========================================================================
  */
 
-/*
- * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5.  Essentially a
- * one-way branch.
- *
- * May modify IP.  Does not modify LR.
- */
-.macro  LDR_PC source
-    ldr     pc, \source
-.endm
-
-/*
- * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
- * Jump to subroutine.
- *
- * May modify IP and LR.
- */
-.macro  LDR_PC_LR source
-    mov     lr, pc
-    ldr     pc, \source
-.endm
-
-/*
- * Macro for "LDMFD SP!, {...regs...,PC}".
- *
- * May modify IP and LR.
- */
-.macro  LDMFD_PC regs
-    ldmfd   sp!, {\regs,pc}
-.endm
-
 #if !defined(ANDROID_SMP)
 # error "Must define ANDROID_SMP"
 #endif
@@ -425,7 +395,7 @@
     ldr     sp, [r0, #offGlue_bailPtr]      @ sp<- saved SP
     mov     r0, r1                          @ return the changeInterp value
     add     sp, sp, #4                      @ un-align 64
-    LDMFD_PC "r4-r10,fp"                    @ restore 9 regs and return
+    ldmfd   sp!, {r4-r10,fp,pc}             @ restore 9 regs and return
 
 
 /*
@@ -2265,9 +2235,6 @@
 /* File: armv5te/OP_APUT_OBJECT.S */
     /*
      * Store an object into an array.  vBB[vCC] <- vAA.
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
      */
     /* op vAA, vBB, vCC */
     FETCH(r0, 1)                        @ r0<- CCBB
@@ -9233,7 +9200,7 @@
     ldr     r0, [rFP, ip, lsl #2]       @ r0<- vC
 0:
     ldr     r9, .LOP_EXECUTE_INLINE_table       @ table of InlineOperation
-    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
+    ldr     pc, [r9, r10, lsl #4]       @ sizeof=16, "func" is first entry
     @ (not reached)
 
 .LOP_EXECUTE_INLINE_table:
@@ -9262,7 +9229,7 @@
     GET_VREG(r0, ip)                    @ r0<- vBase[0]
 0:
     ldr     r9, .LOP_EXECUTE_INLINE_RANGE_table       @ table of InlineOperation
-    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
+    ldr     pc, [r9, r10, lsl #4]       @ sizeof=16, "func" is first entry
     @ (not reached)
 
 .LOP_EXECUTE_INLINE_RANGE_table:
@@ -10048,9 +10015,8 @@
     stmfd   sp!, {r2,r6}
 #endif
 
-    @mov     lr, pc                      @ set return addr
-    @ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
-    LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+    mov     lr, pc                      @ set return addr
+    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
 
 #if defined(WITH_INLINE_PROFILING)
     @ r0=JNIMethod, r1=rGLUE
@@ -10323,9 +10289,11 @@
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
 /*
- * Invalid array index.
- * r0: index
- * r1: size
+ * Invalid array index. Note that our calling convention is strange; we use r1
+ * and r3 because those just happen to be the registers all our callers are
+ * using. We shuffle them here before calling the C function.
+ * r1: index
+ * r3: size
  */
 common_errArrayIndex:
     EXPORT_PC()
diff --git a/vm/mterp/out/InterpAsm-armv7-a.S b/vm/mterp/out/InterpAsm-armv7-a.S
index 78ef2b4..b323fa8 100644
--- a/vm/mterp/out/InterpAsm-armv7-a.S
+++ b/vm/mterp/out/InterpAsm-armv7-a.S
@@ -213,36 +213,6 @@
  * ===========================================================================
  */
 
-/*
- * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5.  Essentially a
- * one-way branch.
- *
- * May modify IP.  Does not modify LR.
- */
-.macro  LDR_PC source
-    ldr     pc, \source
-.endm
-
-/*
- * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
- * Jump to subroutine.
- *
- * May modify IP and LR.
- */
-.macro  LDR_PC_LR source
-    mov     lr, pc
-    ldr     pc, \source
-.endm
-
-/*
- * Macro for "LDMFD SP!, {...regs...,PC}".
- *
- * May modify IP and LR.
- */
-.macro  LDMFD_PC regs
-    ldmfd   sp!, {\regs,pc}
-.endm
-
 #if !defined(ANDROID_SMP)
 # error "Must define ANDROID_SMP"
 #endif
@@ -425,7 +395,7 @@
     ldr     sp, [r0, #offGlue_bailPtr]      @ sp<- saved SP
     mov     r0, r1                          @ return the changeInterp value
     add     sp, sp, #4                      @ un-align 64
-    LDMFD_PC "r4-r10,fp"                    @ restore 9 regs and return
+    ldmfd   sp!, {r4-r10,fp,pc}             @ restore 9 regs and return
 
 
 /*
@@ -2265,9 +2235,6 @@
 /* File: armv5te/OP_APUT_OBJECT.S */
     /*
      * Store an object into an array.  vBB[vCC] <- vAA.
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
      */
     /* op vAA, vBB, vCC */
     FETCH(r0, 1)                        @ r0<- CCBB
@@ -9233,7 +9200,7 @@
     ldr     r0, [rFP, ip, lsl #2]       @ r0<- vC
 0:
     ldr     r9, .LOP_EXECUTE_INLINE_table       @ table of InlineOperation
-    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
+    ldr     pc, [r9, r10, lsl #4]       @ sizeof=16, "func" is first entry
     @ (not reached)
 
 .LOP_EXECUTE_INLINE_table:
@@ -9262,7 +9229,7 @@
     GET_VREG(r0, ip)                    @ r0<- vBase[0]
 0:
     ldr     r9, .LOP_EXECUTE_INLINE_RANGE_table       @ table of InlineOperation
-    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
+    ldr     pc, [r9, r10, lsl #4]       @ sizeof=16, "func" is first entry
     @ (not reached)
 
 .LOP_EXECUTE_INLINE_RANGE_table:
@@ -10048,9 +10015,8 @@
     stmfd   sp!, {r2,r6}
 #endif
 
-    @mov     lr, pc                      @ set return addr
-    @ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
-    LDR_PC_LR "[r2, #offMethod_nativeFunc]"
+    mov     lr, pc                      @ set return addr
+    ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
 
 #if defined(WITH_INLINE_PROFILING)
     @ r0=JNIMethod, r1=rGLUE
@@ -10323,9 +10289,11 @@
     GOTO_OPCODE(ip)                     @ jump to next instruction
 
 /*
- * Invalid array index.
- * r0: index
- * r1: size
+ * Invalid array index. Note that our calling convention is strange; we use r1
+ * and r3 because those just happen to be the registers all our callers are
+ * using. We shuffle them here before calling the C function.
+ * r1: index
+ * r3: size
  */
 common_errArrayIndex:
     EXPORT_PC()